path: root/drivers/gpu/drm/amd/display
author    Harry Wentland <harry.wentland@amd.com>    2017-09-12 15:58:20 -0400
committer Alex Deucher <alexander.deucher@amd.com>   2017-09-26 17:01:32 -0400
commit    4562236b3bc0a28aeb6ee93b2d8a849a4c4e1c7c (patch)
tree      84301c04dcaaa05c3318a8fe62cf62ab52ecc162 /drivers/gpu/drm/amd/display
parent    9c5b2b0d409304c2e3c1f4d1c9bb4958e1d46f8f (diff)
drm/amd/dc: Add dc display driver (v2)
Supported DCE versions: 8.0, 10.0, 11.0, 11.2

v2: rebase against 4.11

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/display/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1564
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h171
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c484
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c829
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h122
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c443
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c463
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c3150
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h101
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile28
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c223
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c691
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c221
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c457
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/register_logger.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/signal_types.c116
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c307
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile24
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c4220
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2609
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c364
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/bandwidth_calcs.c3108
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c299
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/gamma_calcs.c1382
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1846
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c270
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c1899
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c1098
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c2462
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c222
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c1934
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c113
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c213
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_target.c334
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h780
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h224
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h588
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h493
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c920
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c1264
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h109
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c195
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h250
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c2176
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c384
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h217
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c501
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c1302
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h564
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c1002
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h313
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c140
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c1085
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c859
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c1978
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.h76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_cursor.c253
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_gamma.c303
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.c535
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c1081
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h94
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.h149
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c738
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_formatter.c627
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma.c537
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c551
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1413
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c1953
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h273
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c743
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c704
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_types.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c859
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c166
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_opp_formatter.c215
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c1418
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c839
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp_gamma.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.h130
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_csc.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_formatter.c577
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_regamma.c543
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c1063
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c241
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h101
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h424
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h242
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile38
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h150
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c272
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c592
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c243
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c205
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c175
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/Makefile36
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.c1035
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.c964
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.h114
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.c934
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/display_clock.c217
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/display_clock.h89
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/divider_range.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpu/divider_range.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile58
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h117
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c112
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c456
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c577
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h214
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c171
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c323
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c140
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c885
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c184
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c295
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c108
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h113
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c286
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c246
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h80
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c610
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c459
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h122
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bandwidth_calcs.h503
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h178
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h93
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_dc.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h319
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h145
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/gamma_calcs.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/gamma_types.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h263
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h106
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h322
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h162
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h179
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h156
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h290
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h164
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile28
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c367
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c283
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h185
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c150
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h39
-rw-r--r--drivers/gpu/drm/amd/display/include/asic_capability_interface.h55
-rw-r--r--drivers/gpu/drm/amd/display/include/asic_capability_types.h116
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h106
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_interface.h44
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h338
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h125
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_register_logger.h42
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h44
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h189
-rw-r--r--drivers/gpu/drm/amd/display/include/display_clock_interface.h175
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h742
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h390
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed32_32.h83
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h105
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_types.h332
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h407
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h140
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h256
-rw-r--r--drivers/gpu/drm/amd/display/include/hw_sequencer_types.h105
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h89
-rw-r--r--drivers/gpu/drm/amd/display/include/irq_interface.h31
-rw-r--r--drivers/gpu/drm/amd/display/include/irq_service_interface.h51
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h232
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h140
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h95
-rw-r--r--drivers/gpu/drm/amd/display/include/set_mode_types.h127
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h59
-rw-r--r--drivers/gpu/drm/amd/display/include/vector.h150
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color.c2094
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c1158
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_color.h179
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h149
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_power.h112
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power.c784
302 files changed, 99243 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
new file mode 100644
index 000000000000..47c8e2940a91
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -0,0 +1,28 @@
1menu "Display Engine Configuration"
2 depends on DRM && DRM_AMDGPU
3
4config DRM_AMD_DC
5 bool "AMD DC - Enable new display engine"
6 default y
7 help
8 Choose this option if you want to use the new display engine
9 support for AMDGPU. This adds required support for Vega and
10 Raven ASICs.
11
12config DRM_AMD_DC_PRE_VEGA
13 bool "DC support for Polaris and older ASICs"
14 default n
15 help
16 Choose this option to enable the new DC support for older asics
17 by default. This includes Polaris, Carrizo, Tonga, Bonaire,
18 and Hawaii.
19
20config DEBUG_KERNEL_DC
21 bool "Enable kgdb break in DC"
22 depends on DRM_AMD_DC
23 help
24 Choose this option
25 if you want to hit
26 kdgb_break in assert.
27
28endmenu
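
As a brief aside: the two options above are ordinary Kconfig booleans, so driver code would normally branch on them with the standard IS_ENABLED() macro from <linux/kconfig.h>. A minimal sketch follows; example_dc_enabled() is a hypothetical helper used purely for illustration and is not part of this commit.

	#include <linux/kconfig.h>	/* IS_ENABLED() */

	/*
	 * Hypothetical helper, for illustration only: report whether the new
	 * display engine is compiled in, which is how a config option such as
	 * CONFIG_DRM_AMD_DC is typically consumed by driver code.
	 */
	static bool example_dc_enabled(void)
	{
		if (!IS_ENABLED(CONFIG_DRM_AMD_DC))
			return false;	/* legacy display path would be used */

		return true;
	}
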
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
new file mode 100644
index 000000000000..8ba37dd9cf7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -0,0 +1,22 @@
1#
2# Makefile for the DAL (Display Abstract Layer), which is a sub-component
3# of the AMDGPU drm driver.
4# It provides the HW control for display related functionalities.
5
6AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
7
8subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
9
10subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
11subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
12subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
13subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
14
15#TODO: remove when Timing Sync feature is complete
16subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
17
18DAL_LIBS = amdgpu_dm dc modules/freesync
19
20AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
21
22include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
new file mode 100644
index 000000000000..698b1d4f83f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -0,0 +1,17 @@
1#
2# Makefile for the 'dm' sub-component of DAL.
3# It provides the control and status of dm blocks.
4
5
6
7AMDGPUDM = amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
8
9ifneq ($(CONFIG_DRM_AMD_DC),)
10AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
11endif
12
13subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
14
15AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
16
17AMD_DISPLAY_FILES += $(AMDGPU_DM)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
new file mode 100644
index 000000000000..ae4ba7777839
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -0,0 +1,1564 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services_types.h"
27#include "dc.h"
28
29#include "vid.h"
30#include "amdgpu.h"
31#include "atom.h"
32#include "amdgpu_dm.h"
33#include "amdgpu_dm_types.h"
34
35#include "amd_shared.h"
36#include "amdgpu_dm_irq.h"
37#include "dm_helpers.h"
38
39#include "ivsrcid/ivsrcid_vislands30.h"
40
41#include <linux/module.h>
42#include <linux/moduleparam.h>
43#include <linux/version.h>
44
45#include <drm/drm_atomic.h>
46#include <drm/drm_atomic_helper.h>
47#include <drm/drm_dp_mst_helper.h>
48
49#include "modules/inc/mod_freesync.h"
50
51/* Debug facilities */
52#define AMDGPU_DM_NOT_IMPL(fmt, ...) \
53 DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__)
54
55/*
56 * dm_vblank_get_counter
57 *
58 * @brief
59 * Get counter for number of vertical blanks
60 *
61 * @param
62 * struct amdgpu_device *adev - [in] desired amdgpu device
63 * int disp_idx - [in] which CRTC to get the counter from
64 *
65 * @return
66 * Counter for vertical blanks
67 */
68static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
69{
70 if (crtc >= adev->mode_info.num_crtc)
71 return 0;
72 else {
73 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
74
75 if (NULL == acrtc->target) {
76 DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
77 return 0;
78 }
79
80 return dc_target_get_vblank_counter(acrtc->target);
81 }
82}
83
84static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
85 u32 *vbl, u32 *position)
86{
87 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
88 return -EINVAL;
89 else {
90 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
91
92 if (NULL == acrtc->target) {
93 DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
94 return 0;
95 }
96
97 return dc_target_get_scanoutpos(acrtc->target, vbl, position);
98 }
99
100 return 0;
101}
102
103static bool dm_is_idle(void *handle)
104{
105 /* XXX todo */
106 return true;
107}
108
109static int dm_wait_for_idle(void *handle)
110{
111 /* XXX todo */
112 return 0;
113}
114
115static bool dm_check_soft_reset(void *handle)
116{
117 return false;
118}
119
120static int dm_soft_reset(void *handle)
121{
122 /* XXX todo */
123 return 0;
124}
125
126static struct amdgpu_crtc *get_crtc_by_otg_inst(
127 struct amdgpu_device *adev,
128 int otg_inst)
129{
130 struct drm_device *dev = adev->ddev;
131 struct drm_crtc *crtc;
132 struct amdgpu_crtc *amdgpu_crtc;
133
134 /*
135 * following if is check inherited from both functions where this one is
136 * used now. Need to be checked why it could happen.
137 */
138 if (otg_inst == -1) {
139 WARN_ON(1);
140 return adev->mode_info.crtcs[0];
141 }
142
143 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
144 amdgpu_crtc = to_amdgpu_crtc(crtc);
145
146 if (amdgpu_crtc->otg_inst == otg_inst)
147 return amdgpu_crtc;
148 }
149
150 return NULL;
151}
152
153static void dm_pflip_high_irq(void *interrupt_params)
154{
155 struct amdgpu_flip_work *works;
156 struct amdgpu_crtc *amdgpu_crtc;
157 struct common_irq_params *irq_params = interrupt_params;
158 struct amdgpu_device *adev = irq_params->adev;
159 unsigned long flags;
160
161 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
162
163 /* IRQ could occur when in initial stage */
164 /*TODO work and BO cleanup */
165 if (amdgpu_crtc == NULL) {
166 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
167 return;
168 }
169
170 spin_lock_irqsave(&adev->ddev->event_lock, flags);
171 works = amdgpu_crtc->pflip_works;
172
173 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
174 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
175 amdgpu_crtc->pflip_status,
176 AMDGPU_FLIP_SUBMITTED,
177 amdgpu_crtc->crtc_id,
178 amdgpu_crtc);
179 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
180 return;
181 }
182
183 /* page flip completed. clean up */
184 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
185 amdgpu_crtc->pflip_works = NULL;
186
187 /* wakeup usersapce */
188 if (works->event)
189 drm_crtc_send_vblank_event(&amdgpu_crtc->base,
190 works->event);
191
192 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
193
194 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p,\n",
195 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);
196
197 drm_crtc_vblank_put(&amdgpu_crtc->base);
198 schedule_work(&works->unpin_work);
199}
200
201static void dm_crtc_high_irq(void *interrupt_params)
202{
203 struct common_irq_params *irq_params = interrupt_params;
204 struct amdgpu_device *adev = irq_params->adev;
205 uint8_t crtc_index = 0;
206 struct amdgpu_crtc *acrtc;
207
208 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
209
210 if (acrtc)
211 crtc_index = acrtc->crtc_id;
212
213 drm_handle_vblank(adev->ddev, crtc_index);
214}
215
216static int dm_set_clockgating_state(void *handle,
217 enum amd_clockgating_state state)
218{
219 return 0;
220}
221
222static int dm_set_powergating_state(void *handle,
223 enum amd_powergating_state state)
224{
225 return 0;
226}
227
228/* Prototypes of private functions */
229static int dm_early_init(void* handle);
230
231static void hotplug_notify_work_func(struct work_struct *work)
232{
233 struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
234 struct drm_device *dev = dm->ddev;
235
236 drm_kms_helper_hotplug_event(dev);
237}
238
239/* Init display KMS
240 *
241 * Returns 0 on success
242 */
243int amdgpu_dm_init(struct amdgpu_device *adev)
244{
245 struct dc_init_data init_data;
246 adev->dm.ddev = adev->ddev;
247 adev->dm.adev = adev;
248
249 DRM_INFO("DAL is enabled\n");
250 /* Zero all the fields */
251 memset(&init_data, 0, sizeof(init_data));
252
253 /* initialize DAL's lock (for SYNC context use) */
254 spin_lock_init(&adev->dm.dal_lock);
255
256 /* initialize DAL's mutex */
257 mutex_init(&adev->dm.dal_mutex);
258
259 if(amdgpu_dm_irq_init(adev)) {
260 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
261 goto error;
262 }
263
264 init_data.asic_id.chip_family = adev->family;
265
266 init_data.asic_id.pci_revision_id = adev->rev_id;
267 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
268
269 init_data.asic_id.vram_width = adev->mc.vram_width;
270 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
271 init_data.asic_id.atombios_base_address =
272 adev->mode_info.atom_context->bios;
273
274 init_data.driver = adev;
275
276 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
277
278 if (!adev->dm.cgs_device) {
279 DRM_ERROR("amdgpu: failed to create cgs device.\n");
280 goto error;
281 }
282
283 init_data.cgs_device = adev->dm.cgs_device;
284
285 adev->dm.dal = NULL;
286
287 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
288
289 /* Display Core create. */
290 adev->dm.dc = dc_create(&init_data);
291
292 if (!adev->dm.dc)
293 DRM_INFO("Display Core failed to initialize!\n");
294
295 INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
296
297 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
298 if (!adev->dm.freesync_module) {
299 DRM_ERROR(
300 "amdgpu: failed to initialize freesync_module.\n");
301 } else
302 DRM_INFO("amdgpu: freesync_module init done %p.\n",
303 adev->dm.freesync_module);
304
305 if (amdgpu_dm_initialize_drm_device(adev)) {
306 DRM_ERROR(
307 "amdgpu: failed to initialize sw for display support.\n");
308 goto error;
309 }
310
311 /* Update the actual used number of crtc */
312 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
313
314 /* TODO: Add_display_info? */
315
316 /* TODO use dynamic cursor width */
317 adev->ddev->mode_config.cursor_width = 128;
318 adev->ddev->mode_config.cursor_height = 128;
319
320 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
321 DRM_ERROR(
322 "amdgpu: failed to initialize sw for display support.\n");
323 goto error;
324 }
325
326 DRM_INFO("KMS initialized.\n");
327
328 return 0;
329error:
330 amdgpu_dm_fini(adev);
331
332 return -1;
333}
334
335void amdgpu_dm_fini(struct amdgpu_device *adev)
336{
337 amdgpu_dm_destroy_drm_device(&adev->dm);
338 /*
339 * TODO: pageflip, vlank interrupt
340 *
341 * amdgpu_dm_irq_fini(adev);
342 */
343
344 if (adev->dm.cgs_device) {
345 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
346 adev->dm.cgs_device = NULL;
347 }
348 if (adev->dm.freesync_module) {
349 mod_freesync_destroy(adev->dm.freesync_module);
350 adev->dm.freesync_module = NULL;
351 }
352 /* DC Destroy TODO: Replace destroy DAL */
353 {
354 dc_destroy(&adev->dm.dc);
355 }
356 return;
357}
358
359/* moved from amdgpu_dm_kms.c */
360void amdgpu_dm_destroy()
361{
362}
363
364static int dm_sw_init(void *handle)
365{
366 return 0;
367}
368
369static int dm_sw_fini(void *handle)
370{
371 return 0;
372}
373
374static void detect_link_for_all_connectors(struct drm_device *dev)
375{
376 struct amdgpu_connector *aconnector;
377 struct drm_connector *connector;
378
379 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
380
381 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
382 aconnector = to_amdgpu_connector(connector);
383 if (aconnector->dc_link->type == dc_connection_mst_branch) {
384 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
385 aconnector, aconnector->base.base.id);
386
387 if (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) < 0) {
388 DRM_ERROR("DM_MST: Failed to start MST\n");
389 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
390 }
391 }
392 }
393
394 drm_modeset_unlock(&dev->mode_config.connection_mutex);
395}
396
397static void s3_handle_mst(struct drm_device *dev, bool suspend)
398{
399 struct amdgpu_connector *aconnector;
400 struct drm_connector *connector;
401
402 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
403
404 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
405 aconnector = to_amdgpu_connector(connector);
406 if (aconnector->dc_link->type == dc_connection_mst_branch &&
407 !aconnector->mst_port) {
408
409 if (suspend)
410 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
411 else
412 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
413 }
414 }
415
416 drm_modeset_unlock(&dev->mode_config.connection_mutex);
417}
418
419static int dm_hw_init(void *handle)
420{
421 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
422 /* Create DAL display manager */
423 amdgpu_dm_init(adev);
424
425 amdgpu_dm_hpd_init(adev);
426
427 detect_link_for_all_connectors(adev->ddev);
428
429 return 0;
430}
431
432static int dm_hw_fini(void *handle)
433{
434 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
435
436 amdgpu_dm_hpd_fini(adev);
437
438 amdgpu_dm_irq_fini(adev);
439
440 return 0;
441}
442
443static int dm_suspend(void *handle)
444{
445 struct amdgpu_device *adev = handle;
446 struct amdgpu_display_manager *dm = &adev->dm;
447 int ret = 0;
448 struct drm_crtc *crtc;
449
450 s3_handle_mst(adev->ddev, true);
451
452 /* flash all pending vblank events and turn interrupt off
453 * before disabling CRTCs. They will be enabled back in
454 * dm_display_resume
455 */
456 drm_modeset_lock_all(adev->ddev);
457 list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) {
458 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
459 if (acrtc->target)
460 drm_crtc_vblank_off(crtc);
461 }
462 drm_modeset_unlock_all(adev->ddev);
463
464 amdgpu_dm_irq_suspend(adev);
465
466 dc_set_power_state(
467 dm->dc,
468 DC_ACPI_CM_POWER_STATE_D3,
469 DC_VIDEO_POWER_SUSPEND);
470
471 return ret;
472}
473
474struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
475 struct drm_atomic_state *state,
476 struct drm_crtc *crtc,
477 bool from_state_var)
478{
479 uint32_t i;
480 struct drm_connector_state *conn_state;
481 struct drm_connector *connector;
482 struct drm_crtc *crtc_from_state;
483
484 for_each_connector_in_state(
485 state,
486 connector,
487 conn_state,
488 i) {
489 crtc_from_state =
490 from_state_var ?
491 conn_state->crtc :
492 connector->state->crtc;
493
494 if (crtc_from_state == crtc)
495 return to_amdgpu_connector(connector);
496 }
497
498 return NULL;
499}
500
501static int dm_display_resume(struct drm_device *ddev)
502{
503 int ret = 0;
504 struct drm_connector *connector;
505
506 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
507 struct drm_plane *plane;
508 struct drm_crtc *crtc;
509 struct amdgpu_connector *aconnector;
510 struct drm_connector_state *conn_state;
511
512 if (!state)
513 return ENOMEM;
514
515 state->acquire_ctx = ddev->mode_config.acquire_ctx;
516
517 /* Construct an atomic state to restore previous display setting */
518
519 /*
520 * Attach connectors to drm_atomic_state
521 * Should be done in the first place in order to make connectors
522 * available in state during crtc state processing. It is used for
523 * making decision if crtc should be disabled in case sink got
524 * disconnected.
525 *
526 * Connectors state crtc with NULL dc_sink should be cleared, because it
527 * will fail validation during commit
528 */
529 list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
530 aconnector = to_amdgpu_connector(connector);
531 conn_state = drm_atomic_get_connector_state(state, connector);
532
533 ret = PTR_ERR_OR_ZERO(conn_state);
534 if (ret)
535 goto err;
536 }
537
538 /* Attach crtcs to drm_atomic_state*/
539 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
540 struct drm_crtc_state *crtc_state =
541 drm_atomic_get_crtc_state(state, crtc);
542
543 ret = PTR_ERR_OR_ZERO(crtc_state);
544 if (ret)
545 goto err;
546
547 /* force a restore */
548 crtc_state->mode_changed = true;
549 }
550
551
552 /* Attach planes to drm_atomic_state */
553 list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {
554
555 struct drm_crtc *crtc;
556 struct drm_gem_object *obj;
557 struct drm_framebuffer *fb;
558 struct amdgpu_framebuffer *afb;
559 struct amdgpu_bo *rbo;
560 int r;
561 struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane);
562
563 ret = PTR_ERR_OR_ZERO(plane_state);
564 if (ret)
565 goto err;
566
567 crtc = plane_state->crtc;
568 fb = plane_state->fb;
569
570 if (!crtc || !crtc->state || !crtc->state->active)
571 continue;
572
573 if (!fb) {
574 DRM_DEBUG_KMS("No FB bound\n");
575 return 0;
576 }
577
578 /*
579 * Pin back the front buffers, cursor buffer was already pinned
580 * back in amdgpu_resume_kms
581 */
582
583 afb = to_amdgpu_framebuffer(fb);
584
585 obj = afb->obj;
586 rbo = gem_to_amdgpu_bo(obj);
587 r = amdgpu_bo_reserve(rbo, false);
588 if (unlikely(r != 0))
589 return r;
590
591 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
592
593 amdgpu_bo_unreserve(rbo);
594
595 if (unlikely(r != 0)) {
596 DRM_ERROR("Failed to pin framebuffer\n");
597 return r;
598 }
599
600 }
601
602
603 /* Call commit internally with the state we just constructed */
604 ret = drm_atomic_commit(state);
605 if (!ret)
606 return 0;
607
608err:
609 DRM_ERROR("Restoring old state failed with %i\n", ret);
610 drm_atomic_state_put(state);
611
612 return ret;
613}
614
615static int dm_resume(void *handle)
616{
617 struct amdgpu_device *adev = handle;
618 struct amdgpu_display_manager *dm = &adev->dm;
619
620 /* power on hardware */
621 dc_set_power_state(
622 dm->dc,
623 DC_ACPI_CM_POWER_STATE_D0,
624 DC_VIDEO_POWER_ON);
625
626 return 0;
627}
628
629int amdgpu_dm_display_resume(struct amdgpu_device *adev )
630{
631 struct drm_device *ddev = adev->ddev;
632 struct amdgpu_display_manager *dm = &adev->dm;
633 struct amdgpu_connector *aconnector;
634 struct drm_connector *connector;
635 int ret = 0;
636 struct drm_crtc *crtc;
637
638 /* program HPD filter */
639 dc_resume(dm->dc);
640
641 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
642 s3_handle_mst(ddev, false);
643
644 /*
645 * early enable HPD Rx IRQ, should be done before set mode as short
646 * pulse interrupts are used for MST
647 */
648 amdgpu_dm_irq_resume_early(adev);
649
650 drm_modeset_lock_all(ddev);
651 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
652 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
653 if (acrtc->target)
654 drm_crtc_vblank_on(crtc);
655 }
656 drm_modeset_unlock_all(ddev);
657
658 /* Do detection*/
659 list_for_each_entry(connector,
660 &ddev->mode_config.connector_list, head) {
661 aconnector = to_amdgpu_connector(connector);
662
663 /*
664 * this is the case when traversing through already created
665 * MST connectors, should be skipped
666 */
667 if (aconnector->mst_port)
668 continue;
669
670 dc_link_detect(aconnector->dc_link, false);
671 aconnector->dc_sink = NULL;
672 amdgpu_dm_update_connector_after_detect(aconnector);
673 }
674
675 drm_modeset_lock_all(ddev);
676 ret = dm_display_resume(ddev);
677 drm_modeset_unlock_all(ddev);
678
679 amdgpu_dm_irq_resume(adev);
680
681 return ret;
682}
683
684static const struct amd_ip_funcs amdgpu_dm_funcs = {
685 .name = "dm",
686 .early_init = dm_early_init,
687 .late_init = NULL,
688 .sw_init = dm_sw_init,
689 .sw_fini = dm_sw_fini,
690 .hw_init = dm_hw_init,
691 .hw_fini = dm_hw_fini,
692 .suspend = dm_suspend,
693 .resume = dm_resume,
694 .is_idle = dm_is_idle,
695 .wait_for_idle = dm_wait_for_idle,
696 .check_soft_reset = dm_check_soft_reset,
697 .soft_reset = dm_soft_reset,
698 .set_clockgating_state = dm_set_clockgating_state,
699 .set_powergating_state = dm_set_powergating_state,
700};
701
702const struct amdgpu_ip_block_version dm_ip_block =
703{
704 .type = AMD_IP_BLOCK_TYPE_DCE,
705 .major = 1,
706 .minor = 0,
707 .rev = 0,
708 .funcs = &amdgpu_dm_funcs,
709};
710
711/* TODO: it is temporary non-const, should fixed later */
712static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
713 .atomic_check = amdgpu_dm_atomic_check,
714 .atomic_commit = amdgpu_dm_atomic_commit
715};
716
717void amdgpu_dm_update_connector_after_detect(
718 struct amdgpu_connector *aconnector)
719{
720 struct drm_connector *connector = &aconnector->base;
721 struct drm_device *dev = connector->dev;
722 const struct dc_sink *sink;
723
724 /* MST handled by drm_mst framework */
725 if (aconnector->mst_mgr.mst_state == true)
726 return;
727
728
729 sink = aconnector->dc_link->local_sink;
730
731 /* Edid mgmt connector gets first update only in mode_valid hook and then
732 * the connector sink is set to either fake or physical sink depends on link status.
733 * don't do it here if u are during boot
734 */
735 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
736 && aconnector->dc_em_sink) {
737
738 /* For S3 resume with headless use eml_sink to fake target
739 * because on resume connecotr->sink is set ti NULL
740 */
741 mutex_lock(&dev->mode_config.mutex);
742
743 if (sink) {
744 if (aconnector->dc_sink)
745 amdgpu_dm_remove_sink_from_freesync_module(
746 connector);
747 aconnector->dc_sink = sink;
748 amdgpu_dm_add_sink_to_freesync_module(
749 connector, aconnector->edid);
750 } else {
751 amdgpu_dm_remove_sink_from_freesync_module(connector);
752 if (!aconnector->dc_sink)
753 aconnector->dc_sink = aconnector->dc_em_sink;
754 }
755
756 mutex_unlock(&dev->mode_config.mutex);
757 return;
758 }
759
760 /*
761 * TODO: temporary guard to look for proper fix
762 * if this sink is MST sink, we should not do anything
763 */
764 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
765 return;
766
767 if (aconnector->dc_sink == sink) {
768 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
769 * Do nothing!! */
770 DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
771 aconnector->connector_id);
772 return;
773 }
774
775 DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
776 aconnector->connector_id, aconnector->dc_sink, sink);
777
778 mutex_lock(&dev->mode_config.mutex);
779
780 /* 1. Update status of the drm connector
781 * 2. Send an event and let userspace tell us what to do */
782 if (sink) {
783 /* TODO: check if we still need the S3 mode update workaround.
784 * If yes, put it here. */
785 if (aconnector->dc_sink)
786 amdgpu_dm_remove_sink_from_freesync_module(
787 connector);
788
789 aconnector->dc_sink = sink;
790 if (sink->dc_edid.length == 0)
791 aconnector->edid = NULL;
792 else {
793 aconnector->edid =
794 (struct edid *) sink->dc_edid.raw_edid;
795
796
797 drm_mode_connector_update_edid_property(connector,
798 aconnector->edid);
799 }
800 amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
801
802 } else {
803 amdgpu_dm_remove_sink_from_freesync_module(connector);
804 drm_mode_connector_update_edid_property(connector, NULL);
805 aconnector->num_modes = 0;
806 aconnector->dc_sink = NULL;
807 }
808
809 mutex_unlock(&dev->mode_config.mutex);
810}
811
812static void handle_hpd_irq(void *param)
813{
814 struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
815 struct drm_connector *connector = &aconnector->base;
816 struct drm_device *dev = connector->dev;
817
818 /* In case of failure or MST no need to update connector status or notify the OS
819 * since (for MST case) MST does this in it's own context.
820 */
821 mutex_lock(&aconnector->hpd_lock);
822 if (dc_link_detect(aconnector->dc_link, false)) {
823 amdgpu_dm_update_connector_after_detect(aconnector);
824
825
826 drm_modeset_lock_all(dev);
827 dm_restore_drm_connector_state(dev, connector);
828 drm_modeset_unlock_all(dev);
829
830 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
831 drm_kms_helper_hotplug_event(dev);
832 }
833 mutex_unlock(&aconnector->hpd_lock);
834
835}
836
837static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
838{
839 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
840 uint8_t dret;
841 bool new_irq_handled = false;
842 int dpcd_addr;
843 int dpcd_bytes_to_read;
844
845 const int max_process_count = 30;
846 int process_count = 0;
847
848 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
849
850 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
851 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
852 /* DPCD 0x200 - 0x201 for downstream IRQ */
853 dpcd_addr = DP_SINK_COUNT;
854 } else {
855 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
856 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
857 dpcd_addr = DP_SINK_COUNT_ESI;
858 }
859
860 dret = drm_dp_dpcd_read(
861 &aconnector->dm_dp_aux.aux,
862 dpcd_addr,
863 esi,
864 dpcd_bytes_to_read);
865
866 while (dret == dpcd_bytes_to_read &&
867 process_count < max_process_count) {
868 uint8_t retry;
869 dret = 0;
870
871 process_count++;
872
873 DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
874#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
875 /* handle HPD short pulse irq */
876 if (aconnector->mst_mgr.mst_state)
877 drm_dp_mst_hpd_irq(
878 &aconnector->mst_mgr,
879 esi,
880 &new_irq_handled);
881#endif
882
883 if (new_irq_handled) {
884 /* ACK at DPCD to notify down stream */
885 const int ack_dpcd_bytes_to_write =
886 dpcd_bytes_to_read - 1;
887
888 for (retry = 0; retry < 3; retry++) {
889 uint8_t wret;
890
891 wret = drm_dp_dpcd_write(
892 &aconnector->dm_dp_aux.aux,
893 dpcd_addr + 1,
894 &esi[1],
895 ack_dpcd_bytes_to_write);
896 if (wret == ack_dpcd_bytes_to_write)
897 break;
898 }
899
900 /* check if there is new irq to be handle */
901 dret = drm_dp_dpcd_read(
902 &aconnector->dm_dp_aux.aux,
903 dpcd_addr,
904 esi,
905 dpcd_bytes_to_read);
906
907 new_irq_handled = false;
908 } else
909 break;
910 }
911
912 if (process_count == max_process_count)
913 DRM_DEBUG_KMS("Loop exceeded max iterations\n");
914}
915
916static void handle_hpd_rx_irq(void *param)
917{
918 struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
919 struct drm_connector *connector = &aconnector->base;
920 struct drm_device *dev = connector->dev;
921 const struct dc_link *dc_link = aconnector->dc_link;
922 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
923
924 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
925 * conflict, after implement i2c helper, this mutex should be
926 * retired.
927 */
928 if (aconnector->dc_link->type != dc_connection_mst_branch)
929 mutex_lock(&aconnector->hpd_lock);
930
931 if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
932 !is_mst_root_connector) {
933 /* Downstream Port status changed. */
934 if (dc_link_detect(aconnector->dc_link, false)) {
935 amdgpu_dm_update_connector_after_detect(aconnector);
936
937
938 drm_modeset_lock_all(dev);
939 dm_restore_drm_connector_state(dev, connector);
940 drm_modeset_unlock_all(dev);
941
942 drm_kms_helper_hotplug_event(dev);
943 }
944 }
945 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
946 (dc_link->type == dc_connection_mst_branch))
947 dm_handle_hpd_rx_irq(aconnector);
948
949 if (aconnector->dc_link->type != dc_connection_mst_branch)
950 mutex_unlock(&aconnector->hpd_lock);
951}
952
953static void register_hpd_handlers(struct amdgpu_device *adev)
954{
955 struct drm_device *dev = adev->ddev;
956 struct drm_connector *connector;
957 struct amdgpu_connector *aconnector;
958 const struct dc_link *dc_link;
959 struct dc_interrupt_params int_params = {0};
960
961 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
962 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
963
964 list_for_each_entry(connector,
965 &dev->mode_config.connector_list, head) {
966
967 aconnector = to_amdgpu_connector(connector);
968 dc_link = aconnector->dc_link;
969
970 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
971 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
972 int_params.irq_source = dc_link->irq_source_hpd;
973
974 amdgpu_dm_irq_register_interrupt(adev, &int_params,
975 handle_hpd_irq,
976 (void *) aconnector);
977 }
978
979 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
980
981 /* Also register for DP short pulse (hpd_rx). */
982 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
983 int_params.irq_source = dc_link->irq_source_hpd_rx;
984
985 amdgpu_dm_irq_register_interrupt(adev, &int_params,
986 handle_hpd_rx_irq,
987 (void *) aconnector);
988 }
989 }
990}
991
992/* Register IRQ sources and initialize IRQ callbacks */
993static int dce110_register_irq_handlers(struct amdgpu_device *adev)
994{
995 struct dc *dc = adev->dm.dc;
996 struct common_irq_params *c_irq_params;
997 struct dc_interrupt_params int_params = {0};
998 int r;
999 int i;
1000
1001 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1002 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1003
1004 /* Actions of amdgpu_irq_add_id():
1005 * 1. Register a set() function with base driver.
1006 * Base driver will call set() function to enable/disable an
1007 * interrupt in DC hardware.
1008 * 2. Register amdgpu_dm_irq_handler().
1009 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1010 * coming from DC hardware.
1011 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1012 * for acknowledging and handling. */
1013
1014 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
1015 i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
1016 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq);
1017 if (r) {
1018 DRM_ERROR("Failed to add crtc irq id!\n");
1019 return r;
1020 }
1021
1022 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1023 int_params.irq_source =
1024 dc_interrupt_to_irq_source(dc, i, 0);
1025
1026 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
1027
1028 c_irq_params->adev = adev;
1029 c_irq_params->irq_src = int_params.irq_source;
1030
1031 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1032 dm_crtc_high_irq, c_irq_params);
1033 }
1034
1035 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1036 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1037 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
1038 if (r) {
1039 DRM_ERROR("Failed to add page flip irq id!\n");
1040 return r;
1041 }
1042
1043 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1044 int_params.irq_source =
1045 dc_interrupt_to_irq_source(dc, i, 0);
1046
1047 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1048
1049 c_irq_params->adev = adev;
1050 c_irq_params->irq_src = int_params.irq_source;
1051
1052 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1053 dm_pflip_high_irq, c_irq_params);
1054
1055 }
1056
1057 /* HPD */
1058 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
1059 &adev->hpd_irq);
1060 if (r) {
1061 DRM_ERROR("Failed to add hpd irq id!\n");
1062 return r;
1063 }
1064
1065 register_hpd_handlers(adev);
1066
1067 return 0;
1068}
1069
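
A brief aside on the pattern above (a hedged sketch, not code from this commit): every handler wired up in register_hpd_handlers() and dce110_register_irq_handlers() follows the same sequence — fill a struct dc_interrupt_params, choose the interrupt context, then pass a handler and its private data to amdgpu_dm_irq_register_interrupt(). The fragment below reuses only calls already visible in this file; src_id, my_handler and my_data are hypothetical placeholders.

	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
	/* src_id would be an IV source id, as in the loops above */
	int_params.irq_source = dc_interrupt_to_irq_source(adev->dm.dc, src_id, 0);

	amdgpu_dm_irq_register_interrupt(adev, &int_params, my_handler, my_data);
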
1070static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1071{
1072 int r;
1073
1074 adev->mode_info.mode_config_initialized = true;
1075
1076 amdgpu_dm_mode_funcs.fb_create =
1077 amdgpu_mode_funcs.fb_create;
1078 amdgpu_dm_mode_funcs.output_poll_changed =
1079 amdgpu_mode_funcs.output_poll_changed;
1080
1081 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1082
1083 adev->ddev->mode_config.max_width = 16384;
1084 adev->ddev->mode_config.max_height = 16384;
1085
1086 adev->ddev->mode_config.preferred_depth = 24;
1087 adev->ddev->mode_config.prefer_shadow = 1;
1088 /* indicate support of immediate flip */
1089 adev->ddev->mode_config.async_page_flip = true;
1090
1091 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1092
1093 r = amdgpu_modeset_create_props(adev);
1094 if (r)
1095 return r;
1096
1097 return 0;
1098}
1099
1100#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1101 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1102
1103static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1104{
1105 struct amdgpu_display_manager *dm = bl_get_data(bd);
1106
1107 if (dc_link_set_backlight_level(dm->backlight_link,
1108 bd->props.brightness, 0, 0))
1109 return 0;
1110 else
1111 return 1;
1112}
1113
1114static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1115{
1116 return bd->props.brightness;
1117}
1118
1119static const struct backlight_ops amdgpu_dm_backlight_ops = {
1120 .get_brightness = amdgpu_dm_backlight_get_brightness,
1121 .update_status = amdgpu_dm_backlight_update_status,
1122};
1123
1124void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1125{
1126 char bl_name[16];
1127 struct backlight_properties props = { 0 };
1128
1129 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1130 props.type = BACKLIGHT_RAW;
1131
1132 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1133 dm->adev->ddev->primary->index);
1134
1135 dm->backlight_dev = backlight_device_register(bl_name,
1136 dm->adev->ddev->dev,
1137 dm,
1138 &amdgpu_dm_backlight_ops,
1139 &props);
1140
1141 if (NULL == dm->backlight_dev)
1142 DRM_ERROR("DM: Backlight registration failed!\n");
1143 else
1144 DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
1145}
1146
1147#endif
1148
1149/* In this architecture, the association
1150 * connector -> encoder -> crtc
1151 * id not really requried. The crtc and connector will hold the
1152 * display_index as an abstraction to use with DAL component
1153 *
1154 * Returns 0 on success
1155 */
1156int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1157{
1158 struct amdgpu_display_manager *dm = &adev->dm;
1159 uint32_t i;
1160 struct amdgpu_connector *aconnector;
1161 struct amdgpu_encoder *aencoder;
1162 struct amdgpu_crtc *acrtc;
1163 uint32_t link_cnt;
1164
1165 link_cnt = dm->dc->caps.max_links;
1166
1167 if (amdgpu_dm_mode_config_init(dm->adev)) {
1168 DRM_ERROR("DM: Failed to initialize mode config\n");
1169 return -1;
1170 }
1171
1172 for (i = 0; i < dm->dc->caps.max_targets; i++) {
1173 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
1174 if (!acrtc)
1175 goto fail;
1176
1177 if (amdgpu_dm_crtc_init(
1178 dm,
1179 acrtc,
1180 i)) {
1181 DRM_ERROR("KMS: Failed to initialize crtc\n");
1182 kfree(acrtc);
1183 goto fail;
1184 }
1185 }
1186
1187 dm->display_indexes_num = dm->dc->caps.max_targets;
1188
1189 /* loops over all connectors on the board */
1190 for (i = 0; i < link_cnt; i++) {
1191
1192 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1193 DRM_ERROR(
1194 "KMS: Cannot support more than %d display indexes\n",
1195 AMDGPU_DM_MAX_DISPLAY_INDEX);
1196 continue;
1197 }
1198
1199 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1200 if (!aconnector)
1201 goto fail;
1202
1203 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1204 if (!aencoder) {
1205 goto fail_free_connector;
1206 }
1207
1208 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1209 DRM_ERROR("KMS: Failed to initialize encoder\n");
1210 goto fail_free_encoder;
1211 }
1212
1213 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1214 DRM_ERROR("KMS: Failed to initialize connector\n");
1215 goto fail_free_connector;
1216 }
1217
1218 if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
1219 amdgpu_dm_update_connector_after_detect(aconnector);
1220 }
1221
1222 /* Software is initialized. Now we can register interrupt handlers. */
1223 switch (adev->asic_type) {
1224 case CHIP_BONAIRE:
1225 case CHIP_HAWAII:
1226 case CHIP_TONGA:
1227 case CHIP_FIJI:
1228 case CHIP_CARRIZO:
1229 case CHIP_STONEY:
1230 case CHIP_POLARIS11:
1231 case CHIP_POLARIS10:
1232 if (dce110_register_irq_handlers(dm->adev)) {
1233 DRM_ERROR("DM: Failed to initialize IRQ\n");
1234 return -1;
1235 }
1236 break;
1237 default:
1238 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1239 return -1;
1240 }
1241
1242 drm_mode_config_reset(dm->ddev);
1243
1244 return 0;
1245fail_free_encoder:
1246 kfree(aencoder);
1247fail_free_connector:
1248 kfree(aconnector);
1249fail:
1250 return -1;
1251}
1252
1253void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1254{
1255 drm_mode_config_cleanup(dm->ddev);
1256 return;
1257}
1258
1259/******************************************************************************
1260 * amdgpu_display_funcs functions
1261 *****************************************************************************/
1262
1263/**
1264 * dm_bandwidth_update - program display watermarks
1265 *
1266 * @adev: amdgpu_device pointer
1267 *
1268 * Calculate and program the display watermarks and line buffer allocation.
1269 */
1270static void dm_bandwidth_update(struct amdgpu_device *adev)
1271{
1272 AMDGPU_DM_NOT_IMPL("%s\n", __func__);
1273}
1274
1275static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
1276 u8 level)
1277{
1278 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1279 AMDGPU_DM_NOT_IMPL("%s\n", __func__);
1280}
1281
1282static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
1283{
1284 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1285 AMDGPU_DM_NOT_IMPL("%s\n", __func__);
1286 return 0;
1287}
1288
1289/******************************************************************************
1290 * Page Flip functions
1291 ******************************************************************************/
1292
1293/**
1294 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
1295 * via DRM IOCTL, by user mode.
1296 *
1297 * @adev: amdgpu_device pointer
1298 * @crtc_id: crtc to flip on
1299 * @crtc_base: new address of the crtc (GPU MC address)
1300 *
1301 * Does the actual pageflip (surface address update).
1302 */
1303static void dm_page_flip(struct amdgpu_device *adev,
1304 int crtc_id, u64 crtc_base, bool async)
1305{
1306 struct amdgpu_crtc *acrtc;
1307 struct dc_target *target;
1308 struct dc_flip_addrs addr = { {0} };
1309
1310 /*
1311 * TODO risk of concurrency issues
1312 *
1313 * This should be guarded by the dal_mutex but we can't do this since the
1314 * caller uses a spin_lock on event_lock.
1315 *
1316 * If we wait on the dal_mutex a second page flip interrupt might come,
1317 * spin on the event_lock, disabling interrupts while it does so. At
1318 * this point the core can no longer be pre-empted and return to the
1319 * thread that waited on the dal_mutex and we're deadlocked.
1320 *
1321 * With multiple cores the same essentially happens but might just take
1322 * a little longer to lock up all cores.
1323 *
1324 * The reason we should lock on dal_mutex is so that we can be sure
1325 * nobody messes with acrtc->target after we read and check its value.
1326 *
1327 * We might be able to fix our concurrency issues with a work queue
1328 * where we schedule all work items (mode_set, page_flip, etc.) and
1329 * execute them one by one. Care needs to be taken to still deal with
1330 * any potential concurrency issues arising from interrupt calls.
1331 */
1332
1333 acrtc = adev->mode_info.crtcs[crtc_id];
1334 target = acrtc->target;
1335
1336 /*
1337 * Received a page flip call after the display has been reset.
1338 * Just return in this case. Everything should be cleaned up on reset.
1339 */
1340
1341 if (!target) {
1342 WARN_ON(1);
1343 return;
1344 }
1345
1346 addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
1347 addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
1348 addr.flip_immediate = async;
1349
1350 DRM_DEBUG_DRIVER("%s Flipping to high: 0x%x, low: 0x%x\n",
1351 __func__,
1352 addr.address.grph.addr.high_part,
1353 addr.address.grph.addr.low_part);
1354
1355 dc_flip_surface_addrs(
1356 adev->dm.dc,
1357 dc_target_get_status(target)->surfaces,
1358 &addr, 1);
1359}
1360
1361static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1362 struct drm_file *filp)
1363{
1364 struct mod_freesync_params freesync_params;
1365 uint8_t num_targets;
1366 uint8_t i;
1367 struct dc_target *target;
1368
1369 struct amdgpu_device *adev = dev->dev_private;
1370 int r = 0;
1371
1372 /* Get freesync enable flag from DRM */
1373
1374 num_targets = dc_get_current_target_count(adev->dm.dc);
1375
1376 for (i = 0; i < num_targets; i++) {
1377
1378 target = dc_get_target_at_index(adev->dm.dc, i);
1379
1380 mod_freesync_update_state(adev->dm.freesync_module,
1381 target->streams,
1382 target->stream_count,
1383 &freesync_params);
1384 }
1385
1386 return r;
1387}
1388
1389#ifdef CONFIG_DRM_AMDGPU_CIK
1390static const struct amdgpu_display_funcs dm_dce_v8_0_display_funcs = {
1391 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1392 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1393 .vblank_wait = NULL,
1394 .backlight_set_level =
1395 dm_set_backlight_level,/* called unconditionally */
1396 .backlight_get_level =
1397 dm_get_backlight_level,/* called unconditionally */
1398 .hpd_sense = NULL,/* called unconditionally */
1399 .hpd_set_polarity = NULL, /* called unconditionally */
1400 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1401 .page_flip = dm_page_flip, /* called unconditionally */
1402 .page_flip_get_scanoutpos =
1403 dm_crtc_get_scanoutpos,/* called unconditionally */
1404 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1405 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1406 .notify_freesync = amdgpu_notify_freesync,
1407};
1408#endif
1409
1410static const struct amdgpu_display_funcs dm_dce_v10_0_display_funcs = {
1411 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1412 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1413 .vblank_wait = NULL,
1414 .backlight_set_level =
1415 dm_set_backlight_level,/* called unconditionally */
1416 .backlight_get_level =
1417 dm_get_backlight_level,/* called unconditionally */
1418 .hpd_sense = NULL,/* called unconditionally */
1419 .hpd_set_polarity = NULL, /* called unconditionally */
1420 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1421 .page_flip = dm_page_flip, /* called unconditionally */
1422 .page_flip_get_scanoutpos =
1423 dm_crtc_get_scanoutpos,/* called unconditionally */
1424 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1425 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1426 .notify_freesync = amdgpu_notify_freesync,
1427
1428};
1429
1430static const struct amdgpu_display_funcs dm_dce_v11_0_display_funcs = {
1431 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1432 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1433 .vblank_wait = NULL,
1434 .backlight_set_level =
1435 dm_set_backlight_level,/* called unconditionally */
1436 .backlight_get_level =
1437 dm_get_backlight_level,/* called unconditionally */
1438 .hpd_sense = NULL,/* called unconditionally */
1439 .hpd_set_polarity = NULL, /* called unconditionally */
1440 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1441 .page_flip = dm_page_flip, /* called unconditionally */
1442 .page_flip_get_scanoutpos =
1443 dm_crtc_get_scanoutpos,/* called unconditionally */
1444 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1445 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1446 .notify_freesync = amdgpu_notify_freesync,
1447
1448};
1449
1450#if defined(CONFIG_DEBUG_KERNEL_DC)
1451
1452static ssize_t s3_debug_store(
1453 struct device *device,
1454 struct device_attribute *attr,
1455 const char *buf,
1456 size_t count)
1457{
1458 int ret;
1459 int s3_state;
1460 struct pci_dev *pdev = to_pci_dev(device);
1461 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1462 struct amdgpu_device *adev = drm_dev->dev_private;
1463
1464 ret = kstrtoint(buf, 0, &s3_state);
1465
1466 if (ret == 0) {
1467 if (s3_state) {
1468 dm_resume(adev);
1469 amdgpu_dm_display_resume(adev);
1470 drm_kms_helper_hotplug_event(adev->ddev);
1471 } else
1472 dm_suspend(adev);
1473 }
1474
1475 return ret == 0 ? count : 0;
1476}
1477
1478DEVICE_ATTR_WO(s3_debug);
1479
1480#endif
1481
1482static int dm_early_init(void *handle)
1483{
1484 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1485
1486 amdgpu_dm_set_irq_funcs(adev);
1487
1488 switch (adev->asic_type) {
1489 case CHIP_BONAIRE:
1490 case CHIP_HAWAII:
1491 adev->mode_info.num_crtc = 6;
1492 adev->mode_info.num_hpd = 6;
1493 adev->mode_info.num_dig = 6;
1494#ifdef CONFIG_DRM_AMDGPU_CIK
1495 if (adev->mode_info.funcs == NULL)
1496 adev->mode_info.funcs = &dm_dce_v8_0_display_funcs;
1497#endif
1498 break;
1499 case CHIP_FIJI:
1500 case CHIP_TONGA:
1501 adev->mode_info.num_crtc = 6;
1502 adev->mode_info.num_hpd = 6;
1503 adev->mode_info.num_dig = 7;
1504 if (adev->mode_info.funcs == NULL)
1505 adev->mode_info.funcs = &dm_dce_v10_0_display_funcs;
1506 break;
1507 case CHIP_CARRIZO:
1508 adev->mode_info.num_crtc = 3;
1509 adev->mode_info.num_hpd = 6;
1510 adev->mode_info.num_dig = 9;
1511 if (adev->mode_info.funcs == NULL)
1512 adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
1513 break;
1514 case CHIP_STONEY:
1515 adev->mode_info.num_crtc = 2;
1516 adev->mode_info.num_hpd = 6;
1517 adev->mode_info.num_dig = 9;
1518 if (adev->mode_info.funcs == NULL)
1519 adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
1520 break;
1521 case CHIP_POLARIS11:
1522 adev->mode_info.num_crtc = 5;
1523 adev->mode_info.num_hpd = 5;
1524 adev->mode_info.num_dig = 5;
1525 if (adev->mode_info.funcs == NULL)
1526 adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
1527 break;
1528 case CHIP_POLARIS10:
1529 adev->mode_info.num_crtc = 6;
1530 adev->mode_info.num_hpd = 6;
1531 adev->mode_info.num_dig = 6;
1532 if (adev->mode_info.funcs == NULL)
1533 adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
1534 break;
1535 default:
1536 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1537 return -EINVAL;
1538 }
1539
1540 /* Note: Do NOT change adev->audio_endpt_rreg and
1541 * adev->audio_endpt_wreg because they are initialised in
1542 * amdgpu_device_init() */
1543#if defined(CONFIG_DEBUG_KERNEL_DC)
1544 device_create_file(
1545 adev->ddev->dev,
1546 &dev_attr_s3_debug);
1547#endif
1548
1549 return 0;
1550}
1551
1552bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
1553{
1554 /* TODO */
1555 return true;
1556}
1557
1558bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
1559{
1560 /* TODO */
1561 return true;
1562}
1563
1564
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
new file mode 100644
index 000000000000..1b54566f5da1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __AMDGPU_DM_H__
27#define __AMDGPU_DM_H__
28
29/*
30#include "linux/switch.h"
31*/
32
33/*
34 * This file contains the definition for amdgpu_display_manager
35 * and its API for amdgpu driver's use.
36 * This component provides all the display-related functionality,
37 * and it is the only component that calls the DAL API.
38 * The API contained here is intended for amdgpu driver use.
39 * The API that is called directly from the KMS framework is located
40 * in the amdgpu_dm_kms.h file.
41 */
42
43#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
44/*
45#include "include/amdgpu_dal_power_if.h"
46#include "amdgpu_dm_irq.h"
47*/
48
49#include "irq_types.h"
50#include "signal_types.h"
51
52/* Forward declarations */
53struct amdgpu_device;
54struct drm_device;
55struct amdgpu_dm_irq_handler_data;
56
57struct amdgpu_dm_prev_state {
58 struct drm_framebuffer *fb;
59 int32_t x;
60 int32_t y;
61 struct drm_display_mode mode;
62};
63
64struct common_irq_params {
65 struct amdgpu_device *adev;
66 enum dc_irq_source irq_src;
67};
68
69struct irq_list_head {
70 struct list_head head;
71 /* In case this interrupt needs post-processing, 'work' will be queued */
72 struct work_struct work;
73};
74
75struct amdgpu_display_manager {
76 struct dal *dal;
77 struct dc *dc;
78 struct cgs_device *cgs_device;
79 /* lock to be used when DAL is called from SYNC IRQ context */
80 spinlock_t dal_lock;
81
82 struct amdgpu_device *adev; /*AMD base driver*/
83 struct drm_device *ddev; /*DRM base driver*/
84 u16 display_indexes_num;
85
86 struct amdgpu_dm_prev_state prev_state;
87
88 /*
89 * 'irq_source_handler_table' holds a list of handlers
90 * per (DAL) IRQ source.
91 *
92 * Each IRQ source may need to be handled at different contexts.
93 * By 'context' we mean, for example:
94 * - The ISR context, which is the direct interrupt handler.
95 * - The 'deferred' context - this is the post-processing of the
96 * interrupt, but at a lower priority.
97 *
98 * Note that handlers are called in the same order as they were
99 * registered (FIFO).
100 */
101 struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
102 struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
103
104 struct common_irq_params
105 pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
106
107 struct common_irq_params
108 vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
109
110 /* this spin lock synchronizes access to 'irq_handler_list_table' */
111 spinlock_t irq_handler_list_table_lock;
112
113 /* Timer-related data. */
114 struct list_head timer_handler_list;
115 struct workqueue_struct *timer_workqueue;
116
117 /* Use dal_mutex for any activity which is NOT synchronized by
118 * DRM mode setting locks.
119 * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
120 * DRM mode setting locks being acquired. This is where dal_mutex
121 * is acquired before calling into DAL. */
122 struct mutex dal_mutex;
123
124 struct backlight_device *backlight_dev;
125
126 const struct dc_link *backlight_link;
127
128 struct work_struct mst_hotplug_work;
129
130 struct mod_freesync *freesync_module;
131};
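As a minimal sketch of how these per-source parameter arrays are meant to be used
(mirroring the pageflip registration in dce110_register_irq_handlers() earlier in this
patch; the surrounding registration loop is assumed): a DC IRQ source number is turned
into an array index by subtracting the first source of its range, and the resulting
common_irq_params entry is handed to amdgpu_dm_irq_register_interrupt() as the handler
argument.

	struct common_irq_params *c_irq_params;

	/* index pflip_params by the offset from the first pageflip IRQ source */
	c_irq_params = &adev->dm.pflip_params[int_params.irq_source
					      - DC_IRQ_SOURCE_PFLIP_FIRST];
	c_irq_params->adev = adev;
	c_irq_params->irq_src = int_params.irq_source;

	amdgpu_dm_irq_register_interrupt(adev, &int_params,
					 dm_pflip_high_irq, c_irq_params);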
132
133/* basic init/fini API */
134int amdgpu_dm_init(struct amdgpu_device *adev);
135
136void amdgpu_dm_fini(struct amdgpu_device *adev);
137
138void amdgpu_dm_destroy(void);
139
140/* initializes drm_device display related structures, based on the information
141 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
142 * drm_encoder, drm_mode_config
143 *
144 * Returns 0 on success
145 */
146int amdgpu_dm_initialize_drm_device(
147 struct amdgpu_device *adev);
148
149/* removes and deallocates the drm structures, created by the above function */
150void amdgpu_dm_destroy_drm_device(
151 struct amdgpu_display_manager *dm);
152
153/* Locking/Mutex */
154bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);
155
156bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);
157
158/* Register "Backlight device" accessible by user-mode. */
159void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm);
160
161extern const struct amdgpu_ip_block_version dm_ip_block;
162
163void amdgpu_dm_update_connector_after_detect(
164 struct amdgpu_connector *aconnector);
165
166struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
167 struct drm_atomic_state *state,
168 struct drm_crtc *crtc,
169 bool from_state_var);
170
171#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
new file mode 100644
index 000000000000..d4e01b51f949
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -0,0 +1,484 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/string.h>
27#include <linux/acpi.h>
28#include <linux/version.h>
29#include <linux/i2c.h>
30
31#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h>
33#include <drm/amdgpu_drm.h>
34#include <drm/drm_edid.h>
35
36#include "dm_services.h"
37#include "amdgpu.h"
38#include "dc.h"
39#include "amdgpu_dm.h"
40#include "amdgpu_dm_irq.h"
41#include "amdgpu_dm_types.h"
42
43#include "dm_helpers.h"
44
45/* dm_helpers_parse_edid_caps
46 *
47 * Parse edid caps
48 *
49 * @edid:	[in] pointer to edid
50 * @edid_caps:	[out] pointer to edid caps to be filled in
51 * @return
52 *	enum dc_edid_status
53 */
54enum dc_edid_status dm_helpers_parse_edid_caps(
55 struct dc_context *ctx,
56 const struct dc_edid *edid,
57 struct dc_edid_caps *edid_caps)
58{
59 struct edid *edid_buf = (struct edid *) edid->raw_edid;
60 struct cea_sad *sads;
61 int sad_count = -1;
62 int sadb_count = -1;
63 int i = 0;
64 int j = 0;
65 uint8_t *sadb = NULL;
66
67 enum dc_edid_status result = EDID_OK;
68
69 if (!edid_caps || !edid)
70 return EDID_BAD_INPUT;
71
72 if (!drm_edid_is_valid(edid_buf))
73 result = EDID_BAD_CHECKSUM;
74
75 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
76 ((uint16_t) edid_buf->mfg_id[1])<<8;
77 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
78 ((uint16_t) edid_buf->prod_code[1])<<8;
79 edid_caps->serial_number = edid_buf->serial;
80 edid_caps->manufacture_week = edid_buf->mfg_week;
81 edid_caps->manufacture_year = edid_buf->mfg_year;
82
83 /* One of the four detailed_timings stores the monitor name. It's
84 * stored in an array of length 13. */
85 for (i = 0; i < 4; i++) {
86 if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
87 while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
88 if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
89 break;
90
91 edid_caps->display_name[j] =
92 edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
93 j++;
94 }
95 }
96 }
97
98 edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
99 (struct edid *) edid->raw_edid);
100
101 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
102 if (sad_count <= 0) {
103 DRM_INFO("SADs count is: %d, don't need to read it\n",
104 sad_count);
105 return result;
106 }
107
108 edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
109 for (i = 0; i < edid_caps->audio_mode_count; ++i) {
110 struct cea_sad *sad = &sads[i];
111
112 edid_caps->audio_modes[i].format_code = sad->format;
113 edid_caps->audio_modes[i].channel_count = sad->channels;
114 edid_caps->audio_modes[i].sample_rate = sad->freq;
115 edid_caps->audio_modes[i].sample_size = sad->byte2;
116 }
117
118 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
119
120 if (sadb_count < 0) {
121 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
122 sadb_count = 0;
123 }
124
125 if (sadb_count)
126 edid_caps->speaker_flags = sadb[0];
127 else
128 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
129
130 kfree(sads);
131 kfree(sadb);
132
133 return result;
134}
135
136static struct amdgpu_connector *get_connector_for_sink(
137 struct drm_device *dev,
138 const struct dc_sink *sink)
139{
140 struct drm_connector *connector;
141
142 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
143 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
144 if (aconnector->dc_sink == sink)
145 return aconnector;
146 }
147
148 return NULL;
149}
150
151static struct amdgpu_connector *get_connector_for_link(
152 struct drm_device *dev,
153 const struct dc_link *link)
154{
155 struct drm_connector *connector;
156
157 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
158 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
159 if (aconnector->dc_link == link)
160 return aconnector;
161 }
162
163 return NULL;
164}
165
166static void get_payload_table(
167 struct amdgpu_connector *aconnector,
168 struct dp_mst_stream_allocation_table *proposed_table)
169{
170 int i;
171 struct drm_dp_mst_topology_mgr *mst_mgr =
172 &aconnector->mst_port->mst_mgr;
173
174 mutex_lock(&mst_mgr->payload_lock);
175
176 proposed_table->stream_count = 0;
177
178 /* number of active streams */
179 for (i = 0; i < mst_mgr->max_payloads; i++) {
180 if (mst_mgr->payloads[i].num_slots == 0)
181 break; /* end of vcp_id table */
182
183 ASSERT(mst_mgr->payloads[i].payload_state !=
184 DP_PAYLOAD_DELETE_LOCAL);
185
186 if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
187 mst_mgr->payloads[i].payload_state ==
188 DP_PAYLOAD_REMOTE) {
189
190 struct dp_mst_stream_allocation *sa =
191 &proposed_table->stream_allocations[
192 proposed_table->stream_count];
193
194 sa->slot_count = mst_mgr->payloads[i].num_slots;
195 sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
196 proposed_table->stream_count++;
197 }
198 }
199
200 mutex_unlock(&mst_mgr->payload_lock);
201}
202
203/*
204 * Writes payload allocation table in immediate downstream device.
205 */
206bool dm_helpers_dp_mst_write_payload_allocation_table(
207 struct dc_context *ctx,
208 const struct dc_stream *stream,
209 struct dp_mst_stream_allocation_table *proposed_table,
210 bool enable)
211{
212 struct amdgpu_device *adev = ctx->driver_context;
213 struct drm_device *dev = adev->ddev;
214 struct amdgpu_connector *aconnector;
215 struct drm_dp_mst_topology_mgr *mst_mgr;
216 struct drm_dp_mst_port *mst_port;
217 int slots = 0;
218 bool ret;
219 int clock;
220 int bpp = 0;
221 int pbn = 0;
222
223 aconnector = get_connector_for_sink(dev, stream->sink);
224
225 if (!aconnector || !aconnector->mst_port)
226 return false;
227
228 mst_mgr = &aconnector->mst_port->mst_mgr;
229
230 if (!mst_mgr->mst_state)
231 return false;
232
233 mst_port = aconnector->port;
234
235 if (enable) {
236 clock = stream->timing.pix_clk_khz;
237
238 switch (stream->timing.display_color_depth) {
239
240 case COLOR_DEPTH_666:
241 bpp = 6;
242 break;
243 case COLOR_DEPTH_888:
244 bpp = 8;
245 break;
246 case COLOR_DEPTH_101010:
247 bpp = 10;
248 break;
249 case COLOR_DEPTH_121212:
250 bpp = 12;
251 break;
252 case COLOR_DEPTH_141414:
253 bpp = 14;
254 break;
255 case COLOR_DEPTH_161616:
256 bpp = 16;
257 break;
258 default:
259 ASSERT(bpp != 0);
260 break;
261 }
262
263 bpp = bpp * 3;
264
265 /* TODO need to know link rate */
266
267 pbn = drm_dp_calc_pbn_mode(clock, bpp);
268
269 slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
270 ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
271
272 if (!ret)
273 return false;
274
275 } else {
276 drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
277 }
278
279 ret = drm_dp_update_payload_part1(mst_mgr);
280
281 /* mst_mgr->payloads holds the VC payloads used to notify the MST branch
282 * via DPCD or AUX messages. Slots 1-63 are allocated in sequence for
283 * each stream, and the AMD ASIC stream slot allocation should follow
284 * the same sequence. Copy the DRM MST allocation to dc. */
285
286 get_payload_table(aconnector, proposed_table);
287
288 if (ret)
289 return false;
290
291 return true;
292}
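For a rough sense of the arithmetic in the enable path above (approximate only; the
exact rounding and the small overhead margin are internal to drm_dp_calc_pbn_mode()):

	1080p60: pix_clk_khz ~= 148500, COLOR_DEPTH_888 -> bpp = 8 * 3 = 24
	payload bandwidth ~= 148500 kHz * 24 bit ~= 3,564,000 kbit/s ~= 445,500 kByte/s
	1 PBN unit = 54/64 MByte/s               -> payload ~= 530 PBN

drm_dp_find_vcpi_slots() then converts that PBN figure into the MST time slots used for
the VCPI allocation.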
293
294/*
295 * Polls for ACT (allocation change trigger) handled and sends
296 * ALLOCATE_PAYLOAD message.
297 */
298bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
299 struct dc_context *ctx,
300 const struct dc_stream *stream)
301{
302 struct amdgpu_device *adev = ctx->driver_context;
303 struct drm_device *dev = adev->ddev;
304 struct amdgpu_connector *aconnector;
305 struct drm_dp_mst_topology_mgr *mst_mgr;
306 int ret;
307
308 aconnector = get_connector_for_sink(dev, stream->sink);
309
310 if (!aconnector || !aconnector->mst_port)
311 return false;
312
313 mst_mgr = &aconnector->mst_port->mst_mgr;
314
315 if (!mst_mgr->mst_state)
316 return false;
317
318 ret = drm_dp_check_act_status(mst_mgr);
319
320 if (ret)
321 return false;
322
323 return true;
324}
325
326bool dm_helpers_dp_mst_send_payload_allocation(
327 struct dc_context *ctx,
328 const struct dc_stream *stream,
329 bool enable)
330{
331 struct amdgpu_device *adev = ctx->driver_context;
332 struct drm_device *dev = adev->ddev;
333 struct amdgpu_connector *aconnector;
334 struct drm_dp_mst_topology_mgr *mst_mgr;
335 struct drm_dp_mst_port *mst_port;
336 int ret;
337
338 aconnector = get_connector_for_sink(dev, stream->sink);
339
340 if (!aconnector || !aconnector->mst_port)
341 return false;
342
343 mst_port = aconnector->port;
344
345 mst_mgr = &aconnector->mst_port->mst_mgr;
346
347 if (!mst_mgr->mst_state)
348 return false;
349
350 ret = drm_dp_update_payload_part2(mst_mgr);
351
352 if (ret)
353 return false;
354
355 if (!enable)
356 drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
357
358 return true;
359}
360
361bool dm_helpers_dp_mst_start_top_mgr(
362 struct dc_context *ctx,
363 const struct dc_link *link,
364 bool boot)
365{
366 struct amdgpu_device *adev = ctx->driver_context;
367 struct drm_device *dev = adev->ddev;
368 struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
369
370 if (!aconnector) {
371 DRM_ERROR("Failed to find connector for link!");
372 return false;
373 }
374
375 if (boot) {
376 DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
377 aconnector, aconnector->base.base.id);
378 return true;
379 }
380
381 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
382 aconnector, aconnector->base.base.id);
383
384 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
385}
386
387void dm_helpers_dp_mst_stop_top_mgr(
388 struct dc_context *ctx,
389 const struct dc_link *link)
390{
391 struct amdgpu_device *adev = ctx->driver_context;
392 struct drm_device *dev = adev->ddev;
393 struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
394
395 if (!aconnector) {
396 DRM_ERROR("Failed to find connector for link!");
397 return;
398 }
399
400 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
401 aconnector, aconnector->base.base.id);
402
403 if (aconnector->mst_mgr.mst_state == true)
404 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
405}
406
407bool dm_helpers_dp_read_dpcd(
408 struct dc_context *ctx,
409 const struct dc_link *link,
410 uint32_t address,
411 uint8_t *data,
412 uint32_t size)
413{
414
415 struct amdgpu_device *adev = ctx->driver_context;
416 struct drm_device *dev = adev->ddev;
417 struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
418
419 if (!aconnector) {
420 DRM_ERROR("Failed to find connector for link!");
421 return false;
422 }
423
424 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
425 data, size) > 0;
426}
427
428bool dm_helpers_dp_write_dpcd(
429 struct dc_context *ctx,
430 const struct dc_link *link,
431 uint32_t address,
432 const uint8_t *data,
433 uint32_t size)
434{
435
436 struct amdgpu_device *adev = ctx->driver_context;
437 struct drm_device *dev = adev->ddev;
438 struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
439
440 if (!aconnector) {
441 DRM_ERROR("Failed to find connector for link!");
442 return false;
443 }
444
445 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
446 address, (uint8_t *)data, size) > 0;
447}
448
449bool dm_helpers_submit_i2c(
450 struct dc_context *ctx,
451 const struct dc_link *link,
452 struct i2c_command *cmd)
453{
454 struct amdgpu_device *adev = ctx->driver_context;
455 struct drm_device *dev = adev->ddev;
456 struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
457 struct i2c_msg *msgs;
458 int i = 0;
459 int num = cmd->number_of_payloads;
460 bool result;
461
462 if (!aconnector) {
463 DRM_ERROR("Failed to find connector for link!");
464 return false;
465 }
466
467 msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);
468
469 if (!msgs)
470 return false;
471
472 for (i = 0; i < num; i++) {
473 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
474 msgs[i].addr = cmd->payloads[i].address;
475 msgs[i].len = cmd->payloads[i].length;
476 msgs[i].buf = cmd->payloads[i].data;
477 }
478
479 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
480
481 kfree(msgs);
482
483 return result;
484}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
new file mode 100644
index 000000000000..20e074971bff
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -0,0 +1,829 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <drm/drmP.h>
27
28#include "dm_services_types.h"
29#include "dc.h"
30
31#include "amdgpu.h"
32#include "amdgpu_dm.h"
33#include "amdgpu_dm_irq.h"
34
35/******************************************************************************
36 * Private declarations.
37 *****************************************************************************/
38
39struct handler_common_data {
40 struct list_head list;
41 interrupt_handler handler;
42 void *handler_arg;
43
44 /* DM which this handler belongs to */
45 struct amdgpu_display_manager *dm;
46};
47
48struct amdgpu_dm_irq_handler_data {
49 struct handler_common_data hcd;
50 /* DAL irq source which registered for this interrupt. */
51 enum dc_irq_source irq_source;
52};
53
54struct amdgpu_dm_timer_handler_data {
55 struct handler_common_data hcd;
56 struct delayed_work d_work;
57};
58
59#define DM_IRQ_TABLE_LOCK(adev, flags) \
60 spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
61
62#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
63 spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
64
65/******************************************************************************
66 * Private functions.
67 *****************************************************************************/
68
69static void init_handler_common_data(
70 struct handler_common_data *hcd,
71 void (*ih)(void *),
72 void *args,
73 struct amdgpu_display_manager *dm)
74{
75 hcd->handler = ih;
76 hcd->handler_arg = args;
77 hcd->dm = dm;
78}
79
80/**
81 * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
82 *
83 * @work: work struct
84 */
85static void dm_irq_work_func(struct work_struct *work)
86{
87 struct list_head *entry;
88 struct irq_list_head *irq_list_head =
89 container_of(work, struct irq_list_head, work);
90 struct list_head *handler_list = &irq_list_head->head;
91 struct amdgpu_dm_irq_handler_data *handler_data;
92
93 list_for_each(entry, handler_list) {
94 handler_data =
95 list_entry(
96 entry,
97 struct amdgpu_dm_irq_handler_data,
98 hcd.list);
99
100 DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
101 handler_data->irq_source);
102
103 DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
104 handler_data->irq_source);
105
106 handler_data->hcd.handler(handler_data->hcd.handler_arg);
107 }
108
109 /* Call a DAL subcomponent which registered for interrupt notification
110 * at INTERRUPT_LOW_IRQ_CONTEXT.
111 * (The most common use is HPD interrupt) */
112}
113
114/**
115 * Remove a handler and return a pointer to the handler list from which the
116 * handler was removed.
117 */
118static struct list_head *remove_irq_handler(
119 struct amdgpu_device *adev,
120 void *ih,
121 const struct dc_interrupt_params *int_params)
122{
123 struct list_head *hnd_list;
124 struct list_head *entry, *tmp;
125 struct amdgpu_dm_irq_handler_data *handler;
126 unsigned long irq_table_flags;
127 bool handler_removed = false;
128 enum dc_irq_source irq_source;
129
130 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
131
132 irq_source = int_params->irq_source;
133
134 switch (int_params->int_context) {
135 case INTERRUPT_HIGH_IRQ_CONTEXT:
136 hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
137 break;
138 case INTERRUPT_LOW_IRQ_CONTEXT:
139 default:
140 hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
141 break;
142 }
143
144 list_for_each_safe(entry, tmp, hnd_list) {
145
146 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
147 hcd.list);
148
149 if (ih == handler) {
150 /* Found our handler. Remove it from the list. */
151 list_del(&handler->hcd.list);
152 handler_removed = true;
153 break;
154 }
155 }
156
157 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
158
159 if (handler_removed == false) {
160 /* Not necessarily an error - caller may not
161 * know the context. */
162 return NULL;
163 }
164
165 kfree(handler);
166
167 DRM_DEBUG_KMS(
168 "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
169 ih, int_params->irq_source, int_params->int_context);
170
171 return hnd_list;
172}
173
174/* If 'handler_in == NULL' then remove ALL handlers. */
175static void remove_timer_handler(
176 struct amdgpu_device *adev,
177 struct amdgpu_dm_timer_handler_data *handler_in)
178{
179 struct amdgpu_dm_timer_handler_data *handler_temp;
180 struct list_head *handler_list;
181 struct list_head *entry, *tmp;
182 unsigned long irq_table_flags;
183 bool handler_removed = false;
184
185 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
186
187 handler_list = &adev->dm.timer_handler_list;
188
189 list_for_each_safe(entry, tmp, handler_list) {
190 /* Note that list_for_each_safe() guarantees that
191 * handler_temp is NOT null. */
192 handler_temp = list_entry(entry,
193 struct amdgpu_dm_timer_handler_data, hcd.list);
194
195 if (handler_in == NULL || handler_in == handler_temp) {
196 list_del(&handler_temp->hcd.list);
197 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
198
199 DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
200 handler_temp);
201
202 if (handler_in == NULL) {
203 /* Since it is still in the queue, it must
204 * be cancelled. */
205 cancel_delayed_work_sync(&handler_temp->d_work);
206 }
207
208 kfree(handler_temp);
209 handler_removed = true;
210
211 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
212 }
213
214 if (handler_in == NULL) {
215 /* Remove ALL handlers. */
216 continue;
217 }
218
219 if (handler_in == handler_temp) {
220 /* Remove a SPECIFIC handler.
221 * Found our handler - we can stop here. */
222 break;
223 }
224 }
225
226 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
227
228 if (handler_in != NULL && handler_removed == false) {
229 DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
230 handler_in);
231 }
232}
233
234/**
235 * dm_timer_work_func - Handle a timer.
236 *
237 * @work: work struct
238 */
239static void dm_timer_work_func(
240 struct work_struct *work)
241{
242 struct amdgpu_dm_timer_handler_data *handler_data =
243 container_of(work, struct amdgpu_dm_timer_handler_data,
244 d_work.work);
245
246 DRM_DEBUG_KMS("DM_IRQ: work_func: handler_data=%p\n", handler_data);
247
248 /* Call a DAL subcomponent which registered for timer notification. */
249 handler_data->hcd.handler(handler_data->hcd.handler_arg);
250
251 /* We support only "single shot" timers. That means we must delete
252 * the handler after it was called. */
253 remove_timer_handler(handler_data->hcd.dm->adev, handler_data);
254}
255
256static bool validate_irq_registration_params(
257 struct dc_interrupt_params *int_params,
258 void (*ih)(void *))
259{
260 if (NULL == int_params || NULL == ih) {
261 DRM_ERROR("DM_IRQ: invalid input!\n");
262 return false;
263 }
264
265 if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
266 DRM_ERROR("DM_IRQ: invalid context: %d!\n",
267 int_params->int_context);
268 return false;
269 }
270
271 if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
272 DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
273 int_params->irq_source);
274 return false;
275 }
276
277 return true;
278}
279
280static bool validate_irq_unregistration_params(
281 enum dc_irq_source irq_source,
282 irq_handler_idx handler_idx)
283{
284 if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
285 DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
286 return false;
287 }
288
289 if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
290 DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
291 return false;
292 }
293
294 return true;
295}
296/******************************************************************************
297 * Public functions.
298 *
299 * Note: caller is responsible for input validation.
300 *****************************************************************************/
301
302void *amdgpu_dm_irq_register_interrupt(
303 struct amdgpu_device *adev,
304 struct dc_interrupt_params *int_params,
305 void (*ih)(void *),
306 void *handler_args)
307{
308 struct list_head *hnd_list;
309 struct amdgpu_dm_irq_handler_data *handler_data;
310 unsigned long irq_table_flags;
311 enum dc_irq_source irq_source;
312
313 if (false == validate_irq_registration_params(int_params, ih))
314 return DAL_INVALID_IRQ_HANDLER_IDX;
315
316 handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
317 if (!handler_data) {
318 DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
319 return DAL_INVALID_IRQ_HANDLER_IDX;
320 }
321
322 memset(handler_data, 0, sizeof(*handler_data));
323
324 init_handler_common_data(&handler_data->hcd, ih, handler_args,
325 &adev->dm);
326
327 irq_source = int_params->irq_source;
328
329 handler_data->irq_source = irq_source;
330
331 /* Lock the list, add the handler. */
332 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
333
334 switch (int_params->int_context) {
335 case INTERRUPT_HIGH_IRQ_CONTEXT:
336 hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
337 break;
338 case INTERRUPT_LOW_IRQ_CONTEXT:
339 default:
340 hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
341 break;
342 }
343
344 list_add_tail(&handler_data->hcd.list, hnd_list);
345
346 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
347
348 /* This pointer will be stored by code which requested interrupt
349 * registration.
350 * The same pointer will be needed in order to unregister the
351 * interrupt. */
352
353 DRM_DEBUG_KMS(
354 "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
355 handler_data,
356 irq_source,
357 int_params->int_context);
358
359 return handler_data;
360}
361
362void amdgpu_dm_irq_unregister_interrupt(
363 struct amdgpu_device *adev,
364 enum dc_irq_source irq_source,
365 void *ih)
366{
367 struct list_head *handler_list;
368 struct dc_interrupt_params int_params;
369 int i;
370
371 if (false == validate_irq_unregistration_params(irq_source, ih))
372 return;
373
374 memset(&int_params, 0, sizeof(int_params));
375
376 int_params.irq_source = irq_source;
377
378 for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
379
380 int_params.int_context = i;
381
382 handler_list = remove_irq_handler(adev, ih, &int_params);
383
384 if (handler_list != NULL)
385 break;
386 }
387
388 if (handler_list == NULL) {
389 /* If we got here, it means we searched all irq contexts
390 * for this irq source, but the handler was not found. */
391 DRM_ERROR(
392 "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
393 ih, irq_source);
394 }
395}
396
397int amdgpu_dm_irq_init(
398 struct amdgpu_device *adev)
399{
400 int src;
401 struct irq_list_head *lh;
402
403 DRM_DEBUG_KMS("DM_IRQ\n");
404
405 spin_lock_init(&adev->dm.irq_handler_list_table_lock);
406
407 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
408 /* low context handler list init */
409 lh = &adev->dm.irq_handler_list_low_tab[src];
410 INIT_LIST_HEAD(&lh->head);
411 INIT_WORK(&lh->work, dm_irq_work_func);
412
413 /* high context handler init */
414 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
415 }
416
417 INIT_LIST_HEAD(&adev->dm.timer_handler_list);
418
419 /* allocate and initialize the workqueue for DM timer */
420 adev->dm.timer_workqueue = create_singlethread_workqueue(
421 "dm_timer_queue");
422 if (adev->dm.timer_workqueue == NULL) {
423 DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
424 return -1;
425 }
426
427 return 0;
428}
429
430void amdgpu_dm_irq_register_timer(
431 struct amdgpu_device *adev,
432 struct dc_timer_interrupt_params *int_params,
433 interrupt_handler ih,
434 void *args)
435{
436 unsigned long jf_delay;
437 struct list_head *handler_list;
438 struct amdgpu_dm_timer_handler_data *handler_data;
439 unsigned long irq_table_flags;
440
441 handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
442 if (!handler_data) {
443 DRM_ERROR("DM_IRQ: failed to allocate timer handler!\n");
444 return;
445 }
446
447 memset(handler_data, 0, sizeof(*handler_data));
448
449 init_handler_common_data(&handler_data->hcd, ih, args, &adev->dm);
450
451 INIT_DELAYED_WORK(&handler_data->d_work, dm_timer_work_func);
452
453 /* Lock the list, add the handler. */
454 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
455
456 handler_list = &adev->dm.timer_handler_list;
457
458 list_add_tail(&handler_data->hcd.list, handler_list);
459
460 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
461
462 jf_delay = usecs_to_jiffies(int_params->micro_sec_interval);
463
464 queue_delayed_work(adev->dm.timer_workqueue, &handler_data->d_work,
465 jf_delay);
466
467 DRM_DEBUG_KMS("DM_IRQ: added handler:%p with micro_sec_interval=%u\n",
468 handler_data, int_params->micro_sec_interval);
469 return;
470}
471
472/* DM IRQ and timer resource release */
473void amdgpu_dm_irq_fini(
474 struct amdgpu_device *adev)
475{
476 int src;
477 struct irq_list_head *lh;
478 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
479
480 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
481
482 /* The handler was removed from the table,
483 * it means it is safe to flush all the 'work'
484 * (because no code can schedule a new one). */
485 lh = &adev->dm.irq_handler_list_low_tab[src];
486 flush_work(&lh->work);
487 }
488
489 /* Cancel ALL timers and release handlers (if any). */
490 remove_timer_handler(adev, NULL);
491 /* Release the queue itself. */
492 destroy_workqueue(adev->dm.timer_workqueue);
493}
494
495int amdgpu_dm_irq_suspend(
496 struct amdgpu_device *adev)
497{
498 int src;
499 struct list_head *hnd_list_h;
500 struct list_head *hnd_list_l;
501 unsigned long irq_table_flags;
502
503 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
504
505 DRM_DEBUG_KMS("DM_IRQ: suspend\n");
506
507 /* disable HW interrupt */
508 for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
509 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
510 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
511 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
512 dc_interrupt_set(adev->dm.dc, src, false);
513
514 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
515 flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
516
517 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
518 }
519
520 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
521 return 0;
522}
523
524int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
525{
526 int src;
527 struct list_head *hnd_list_h, *hnd_list_l;
528 unsigned long irq_table_flags;
529
530 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
531
532 DRM_DEBUG_KMS("DM_IRQ: early resume\n");
533
534 /* re-enable the short-pulse (HPD RX) HW interrupts */
535 for (src = DC_IRQ_SOURCE_HPD1RX; src < DC_IRQ_SOURCE_HPD6RX + 1; src++) {
536 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
537 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
538 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
539 dc_interrupt_set(adev->dm.dc, src, true);
540 }
541
542 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
543
544 return 0;
545}
546
547int amdgpu_dm_irq_resume(struct amdgpu_device *adev)
548{
549 int src;
550 struct list_head *hnd_list_h, *hnd_list_l;
551 unsigned long irq_table_flags;
552
553 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
554
555 DRM_DEBUG_KMS("DM_IRQ: resume\n");
556
557 /* re-enable HW interrupt */
558 for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
559 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
560 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
561 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
562 dc_interrupt_set(adev->dm.dc, src, true);
563 }
564
565 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
566 return 0;
567}
568
569/**
570 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
571 * "irq_source".
572 */
573static void amdgpu_dm_irq_schedule_work(
574 struct amdgpu_device *adev,
575 enum dc_irq_source irq_source)
576{
577 unsigned long irq_table_flags;
578 struct work_struct *work = NULL;
579
580 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
581
582 if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
583 work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
584
585 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
586
587 if (work) {
588 if (!schedule_work(work))
589 DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
590 irq_source);
591 }
592
593}
594
595/** amdgpu_dm_irq_immediate_work
596 * Call high irq handlers immediately, don't send them to the work queue
597 */
598static void amdgpu_dm_irq_immediate_work(
599 struct amdgpu_device *adev,
600 enum dc_irq_source irq_source)
601{
602 struct amdgpu_dm_irq_handler_data *handler_data;
603 struct list_head *entry;
604 unsigned long irq_table_flags;
605
606 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
607
608 list_for_each(
609 entry,
610 &adev->dm.irq_handler_list_high_tab[irq_source]) {
611
612 handler_data =
613 list_entry(
614 entry,
615 struct amdgpu_dm_irq_handler_data,
616 hcd.list);
617
618 /* Call a subcomponent which registered for immediate
619 * interrupt notification */
620 handler_data->hcd.handler(handler_data->hcd.handler_arg);
621 }
622
623 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
624}
625
626/*
627 * amdgpu_dm_irq_handler
628 *
629 * Generic IRQ handler, calls all registered high irq work immediately, and
630 * schedules work for low irq
631 */
632int amdgpu_dm_irq_handler(
633 struct amdgpu_device *adev,
634 struct amdgpu_irq_src *source,
635 struct amdgpu_iv_entry *entry)
636{
637
638 enum dc_irq_source src =
639 dc_interrupt_to_irq_source(
640 adev->dm.dc,
641 entry->src_id,
642 entry->src_data[0]);
643
644 dc_interrupt_ack(adev->dm.dc, src);
645
646 /* Call high irq work immediately */
647 amdgpu_dm_irq_immediate_work(adev, src);
648 /* Schedule low_irq work */
649 amdgpu_dm_irq_schedule_work(adev, src);
650
651 return 0;
652}
653
654static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
655{
656 switch (type) {
657 case AMDGPU_HPD_1:
658 return DC_IRQ_SOURCE_HPD1;
659 case AMDGPU_HPD_2:
660 return DC_IRQ_SOURCE_HPD2;
661 case AMDGPU_HPD_3:
662 return DC_IRQ_SOURCE_HPD3;
663 case AMDGPU_HPD_4:
664 return DC_IRQ_SOURCE_HPD4;
665 case AMDGPU_HPD_5:
666 return DC_IRQ_SOURCE_HPD5;
667 case AMDGPU_HPD_6:
668 return DC_IRQ_SOURCE_HPD6;
669 default:
670 return DC_IRQ_SOURCE_INVALID;
671 }
672}
673
674static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
675 struct amdgpu_irq_src *source,
676 unsigned type,
677 enum amdgpu_interrupt_state state)
678{
679 enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
680 bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
681
682 dc_interrupt_set(adev->dm.dc, src, st);
683 return 0;
684}
685
686static inline int dm_irq_state(
687 struct amdgpu_device *adev,
688 struct amdgpu_irq_src *source,
689 unsigned crtc_id,
690 enum amdgpu_interrupt_state state,
691 const enum irq_type dal_irq_type,
692 const char *func)
693{
694 bool st;
695 enum dc_irq_source irq_source;
696
697 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
698
699 if (!acrtc) {
700 DRM_ERROR(
701 "%s: crtc is NULL at id :%d\n",
702 func,
703 crtc_id);
704 return 0;
705 }
706
707 irq_source = dal_irq_type + acrtc->otg_inst;
708
709 st = (state == AMDGPU_IRQ_STATE_ENABLE);
710
711 dc_interrupt_set(adev->dm.dc, irq_source, st);
712 return 0;
713}
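The irq_source = dal_irq_type + acrtc->otg_inst arithmetic above relies on the
per-instance DC IRQ sources being laid out contiguously, as the vupdate_params sizing
(DC_IRQ_SOURCE_VUPDATE1 .. DC_IRQ_SOURCE_VUPDATE6) in amdgpu_dm.h suggests. As a
hypothetical illustration, if dal_irq_type aliases the first VUPDATE source, then
otg_inst 0 selects DC_IRQ_SOURCE_VUPDATE1 and otg_inst 2 selects DC_IRQ_SOURCE_VUPDATE3.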
714
715static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
716 struct amdgpu_irq_src *source,
717 unsigned crtc_id,
718 enum amdgpu_interrupt_state state)
719{
720 return dm_irq_state(
721 adev,
722 source,
723 crtc_id,
724 state,
725 IRQ_TYPE_PFLIP,
726 __func__);
727}
728
729static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
730 struct amdgpu_irq_src *source,
731 unsigned crtc_id,
732 enum amdgpu_interrupt_state state)
733{
734 return dm_irq_state(
735 adev,
736 source,
737 crtc_id,
738 state,
739 IRQ_TYPE_VUPDATE,
740 __func__);
741}
742
743static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
744 .set = amdgpu_dm_set_crtc_irq_state,
745 .process = amdgpu_dm_irq_handler,
746};
747
748static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
749 .set = amdgpu_dm_set_pflip_irq_state,
750 .process = amdgpu_dm_irq_handler,
751};
752
753static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
754 .set = amdgpu_dm_set_hpd_irq_state,
755 .process = amdgpu_dm_irq_handler,
756};
757
758void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
759{
760 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
761 adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
762
763 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
764 adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
765
766 adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
767 adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
768}
769
770/*
771 * amdgpu_dm_hpd_init - hpd setup callback.
772 *
773 * @adev: amdgpu_device pointer
774 *
775 * Setup the hpd pins used by the card (evergreen+).
776 * Enable the pin, set the polarity, and enable the hpd interrupts.
777 */
778void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
779{
780 struct drm_device *dev = adev->ddev;
781 struct drm_connector *connector;
782
783 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
784 struct amdgpu_connector *amdgpu_connector =
785 to_amdgpu_connector(connector);
786
787 const struct dc_link *dc_link = amdgpu_connector->dc_link;
788
789 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
790 dc_interrupt_set(adev->dm.dc,
791 dc_link->irq_source_hpd,
792 true);
793 }
794
795 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
796 dc_interrupt_set(adev->dm.dc,
797 dc_link->irq_source_hpd_rx,
798 true);
799 }
800 }
801}
802
803/**
804 * amdgpu_dm_hpd_fini - hpd tear down callback.
805 *
806 * @adev: amdgpu_device pointer
807 *
808 * Tear down the hpd pins used by the card (evergreen+).
809 * Disable the hpd interrupts.
810 */
811void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
812{
813 struct drm_device *dev = adev->ddev;
814 struct drm_connector *connector;
815
816 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
817 struct amdgpu_connector *amdgpu_connector =
818 to_amdgpu_connector(connector);
819 const struct dc_link *dc_link = amdgpu_connector->dc_link;
820
821 dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
822
823 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
824 dc_interrupt_set(adev->dm.dc,
825 dc_link->irq_source_hpd_rx,
826 false);
827 }
828 }
829}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
new file mode 100644
index 000000000000..9339861c8897
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef __AMDGPU_DM_IRQ_H__
25#define __AMDGPU_DM_IRQ_H__
26
27#include "irq_types.h" /* DAL irq definitions */
28
29/*
30 * Display Manager IRQ-related interfaces (for use by DAL).
31 */
32
33/**
34 * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
35 *
36 * This function should be called exactly once - during DM initialization.
37 *
38 * Returns:
39 * 0 - success
40 * non-zero - error
41 */
42int amdgpu_dm_irq_init(
43 struct amdgpu_device *adev);
44
45/**
46 * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
47 *
48 * This function should be called exactly once - during DM destruction.
49 *
50 */
51void amdgpu_dm_irq_fini(
52 struct amdgpu_device *adev);
53
54/**
55 * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
56 *
57 * @adev: AMD DRM device
58 * @int_params: parameters for the irq
59 * @ih: pointer to the irq handler function
60 * @handler_args: arguments which will be passed to ih
61 *
62 * Returns:
63 * IRQ Handler Index on success.
64 * NULL on failure.
65 *
66 * Cannot be called from an interrupt handler.
67 */
68void *amdgpu_dm_irq_register_interrupt(
69 struct amdgpu_device *adev,
70 struct dc_interrupt_params *int_params,
71 void (*ih)(void *),
72 void *handler_args);
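
For illustration only (not part of this patch), a minimal sketch of how a DM component might register a deferred handler through this interface; the dc_interrupt_params field names below are assumptions:

static void example_handle_hpd(void *param)
{
	struct amdgpu_connector *aconnector = param;

	/* Deferred (low-irq-context) work runs here. */
	DRM_DEBUG_KMS("HPD fired on connector %p\n", aconnector);
}

static void example_register_hpd(struct amdgpu_device *adev,
				 struct amdgpu_connector *aconnector)
{
	struct dc_interrupt_params int_params = { 0 };
	void *handler_index;

	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;	/* assumed field name */
	int_params.irq_source = aconnector->dc_link->irq_source_hpd;	/* assumed field name */

	handler_index = amdgpu_dm_irq_register_interrupt(
		adev, &int_params, example_handle_hpd, aconnector);
	if (!handler_index)
		DRM_ERROR("Failed to register example HPD handler\n");
}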
73
74/**
75 * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
76 * by amdgpu_dm_irq_register_interrupt().
77 *
78 * @adev: AMD DRM device.
79 * @ih_index: irq handler index which was returned by
80 * amdgpu_dm_irq_register_interrupt
81 */
82void amdgpu_dm_irq_unregister_interrupt(
83 struct amdgpu_device *adev,
84 enum dc_irq_source irq_source,
85 void *ih_index);
86
87void amdgpu_dm_irq_register_timer(
88 struct amdgpu_device *adev,
89 struct dc_timer_interrupt_params *int_params,
90 interrupt_handler ih,
91 void *args);
92
93/**
94 * amdgpu_dm_irq_handler
95 * Generic IRQ handler: calls all registered high-irq handlers immediately and
96 * schedules the registered low-irq work for deferred execution
97 */
98int amdgpu_dm_irq_handler(
99 struct amdgpu_device *adev,
100 struct amdgpu_irq_src *source,
101 struct amdgpu_iv_entry *entry);
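
A minimal, hypothetical sketch of the two-level dispatch described above, using simplified types (the real implementation keeps per-source handler lists and uses a work queue for the low-irq path):

#include <linux/list.h>
#include <linux/workqueue.h>

struct example_irq_handler {
	struct list_head list;
	void (*handler)(void *arg);
	void *arg;
	struct work_struct work;	/* used only by low-irq handlers */
};

static void example_irq_dispatch(struct list_head *high_handlers,
				 struct list_head *low_handlers)
{
	struct example_irq_handler *h;

	/* High-irq handlers run immediately, in interrupt context. */
	list_for_each_entry(h, high_handlers, list)
		h->handler(h->arg);

	/* Low-irq handlers are only scheduled; they run later in a worker. */
	list_for_each_entry(h, low_handlers, list)
		schedule_work(&h->work);
}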
102
103void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
104
105void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
106void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
107
108/**
109 * amdgpu_dm_irq_suspend - disable ASIC interrupts during suspend.
110 *
111 */
112int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
113
114/**
115 * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
116 * amdgpu_dm_irq_resume - enable ASIC interrupts during resume.
117 *
118 */
119int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
120int amdgpu_dm_irq_resume(struct amdgpu_device *adev);
121
122#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
new file mode 100644
index 000000000000..e4d94d4c215d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -0,0 +1,443 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/version.h>
27#include <drm/drm_atomic_helper.h>
28#include "dm_services.h"
29#include "amdgpu.h"
30#include "amdgpu_dm_types.h"
31#include "amdgpu_dm_mst_types.h"
32
33#include "dc.h"
34#include "dm_helpers.h"
35
36/* #define TRACE_DPCD */
37
38#ifdef TRACE_DPCD
39#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
40
41static inline char *side_band_msg_type_to_str(uint32_t address)
42{
43 static char str[10] = {0};
44
45 if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
46 strcpy(str, "DOWN_REQ");
47 else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
48 strcpy(str, "UP_REP");
49 else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
50 strcpy(str, "DOWN_REP");
51 else
52 strcpy(str, "UP_REQ");
53
54 return str;
55}
56
57void log_dpcd(uint8_t type,
58 uint32_t address,
59 uint8_t *data,
60 uint32_t size,
61 bool res)
62{
63 DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
64 (type == DP_AUX_NATIVE_READ) ||
65 (type == DP_AUX_I2C_READ) ?
66 "Read" : "Write",
67 address,
68 SIDE_BAND_MSG(address) ?
69 side_band_msg_type_to_str(address) : "Nop",
70 res ? "OK" : "Fail");
71
72 if (res) {
73 print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
74 }
75}
76#endif
77
78static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
79{
80 struct pci_dev *pdev = to_pci_dev(aux->dev);
81 struct drm_device *drm_dev = pci_get_drvdata(pdev);
82 struct amdgpu_device *adev = drm_dev->dev_private;
83 struct dc *dc = adev->dm.dc;
84 bool res;
85
86 switch (msg->request) {
87 case DP_AUX_NATIVE_READ:
88 res = dc_read_dpcd(
89 dc,
90 TO_DM_AUX(aux)->link_index,
91 msg->address,
92 msg->buffer,
93 msg->size);
94 break;
95 case DP_AUX_NATIVE_WRITE:
96 res = dc_write_dpcd(
97 dc,
98 TO_DM_AUX(aux)->link_index,
99 msg->address,
100 msg->buffer,
101 msg->size);
102 break;
103 default:
104 return 0;
105 }
106
107#ifdef TRACE_DPCD
108 log_dpcd(msg->request,
109 msg->address,
110 msg->buffer,
111 msg->size,
112 res);
113#endif
114
115 return msg->size;
116}
117
118static enum drm_connector_status
119dm_dp_mst_detect(struct drm_connector *connector, bool force)
120{
121 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
122 struct amdgpu_connector *master = aconnector->mst_port;
123
124 enum drm_connector_status status =
125 drm_dp_mst_detect_port(
126 connector,
127 &master->mst_mgr,
128 aconnector->port);
129
130	/*
131	 * We do not want to report this connector as connected until we
132	 * have an EDID for it.
133	 */
134 if (status == connector_status_connected &&
135 !aconnector->port->cached_edid)
136 status = connector_status_disconnected;
137
138 return status;
139}
140
141static void
142dm_dp_mst_connector_destroy(struct drm_connector *connector)
143{
144 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
145 struct amdgpu_encoder *amdgpu_encoder = amdgpu_connector->mst_encoder;
146
147 drm_encoder_cleanup(&amdgpu_encoder->base);
148 kfree(amdgpu_encoder);
149 drm_connector_cleanup(connector);
150 kfree(amdgpu_connector);
151}
152
153static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
154 .detect = dm_dp_mst_detect,
155 .fill_modes = drm_helper_probe_single_connector_modes,
156 .destroy = dm_dp_mst_connector_destroy,
157 .reset = amdgpu_dm_connector_funcs_reset,
158 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
159 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
160 .atomic_set_property = amdgpu_dm_connector_atomic_set_property
161};
162
163static int dm_dp_mst_get_modes(struct drm_connector *connector)
164{
165 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
166 int ret = 0;
167
168 ret = drm_add_edid_modes(&aconnector->base, aconnector->edid);
169
170 drm_edid_to_eld(&aconnector->base, aconnector->edid);
171
172 return ret;
173}
174
175static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
176{
177 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
178
179 return &amdgpu_connector->mst_encoder->base;
180}
181
182static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
183 .get_modes = dm_dp_mst_get_modes,
184 .mode_valid = amdgpu_dm_connector_mode_valid,
185 .best_encoder = dm_mst_best_encoder,
186};
187
188static struct amdgpu_encoder *
189dm_dp_create_fake_mst_encoder(struct amdgpu_connector *connector)
190{
191 struct drm_device *dev = connector->base.dev;
192 struct amdgpu_device *adev = dev->dev_private;
193 struct amdgpu_encoder *amdgpu_encoder;
194 struct drm_encoder *encoder;
195 const struct drm_connector_helper_funcs *connector_funcs =
196 connector->base.helper_private;
197 struct drm_encoder *enc_master =
198 connector_funcs->best_encoder(&connector->base);
199
200 DRM_DEBUG_KMS("enc master is %p\n", enc_master);
201 amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
202 if (!amdgpu_encoder)
203 return NULL;
204
205 encoder = &amdgpu_encoder->base;
206 encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
207
208 drm_encoder_init(
209 dev,
210 &amdgpu_encoder->base,
211 NULL,
212 DRM_MODE_ENCODER_DPMST,
213 NULL);
214
215 drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
216
217 return amdgpu_encoder;
218}
219
220static struct drm_connector *dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
221 struct drm_dp_mst_port *port,
222 const char *pathprop)
223{
224 struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
225 struct drm_device *dev = master->base.dev;
226 struct amdgpu_device *adev = dev->dev_private;
227 struct amdgpu_connector *aconnector;
228 struct drm_connector *connector;
229
230 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
231 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
232 aconnector = to_amdgpu_connector(connector);
233 if (aconnector->mst_port == master
234 && !aconnector->port) {
235 DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
236 aconnector, connector->base.id, aconnector->mst_port);
237
238 aconnector->port = port;
239 drm_mode_connector_set_path_property(connector, pathprop);
240
241 drm_modeset_unlock(&dev->mode_config.connection_mutex);
242 return &aconnector->base;
243 }
244 }
245 drm_modeset_unlock(&dev->mode_config.connection_mutex);
246
247 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
248 if (!aconnector)
249 return NULL;
250
251 connector = &aconnector->base;
252 aconnector->port = port;
253 aconnector->mst_port = master;
254
255 if (drm_connector_init(
256 dev,
257 connector,
258 &dm_dp_mst_connector_funcs,
259 DRM_MODE_CONNECTOR_DisplayPort)) {
260 kfree(aconnector);
261 return NULL;
262 }
263 drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
264
265 amdgpu_dm_connector_init_helper(
266 &adev->dm,
267 aconnector,
268 DRM_MODE_CONNECTOR_DisplayPort,
269 master->dc_link,
270 master->connector_id);
271
272 aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
273
274 /*
275 * TODO: understand why this one is needed
276 */
277 drm_object_attach_property(
278 &connector->base,
279 dev->mode_config.path_property,
280 0);
281 drm_object_attach_property(
282 &connector->base,
283 dev->mode_config.tile_property,
284 0);
285
286 drm_mode_connector_set_path_property(connector, pathprop);
287
288 /*
289	 * Initialize the connector state before adding the connector to the
290	 * drm and framebuffer lists.
291 */
292 amdgpu_dm_connector_funcs_reset(connector);
293
294 DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
295 aconnector, connector->base.id, aconnector->mst_port);
296
297 DRM_DEBUG_KMS(":%d\n", connector->base.id);
298
299 return connector;
300}
301
302static void dm_dp_destroy_mst_connector(
303 struct drm_dp_mst_topology_mgr *mgr,
304 struct drm_connector *connector)
305{
306 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
307
308 DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
309 aconnector, connector->base.id, aconnector->mst_port);
310
311 aconnector->port = NULL;
312 if (aconnector->dc_sink) {
313 amdgpu_dm_remove_sink_from_freesync_module(connector);
314 dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
315 dc_sink_release(aconnector->dc_sink);
316 aconnector->dc_sink = NULL;
317 }
318 if (aconnector->edid) {
319 kfree(aconnector->edid);
320 aconnector->edid = NULL;
321 }
322
323 drm_mode_connector_update_edid_property(
324 &aconnector->base,
325 NULL);
326}
327
328static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
329{
330 struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
331 struct drm_device *dev = master->base.dev;
332 struct amdgpu_device *adev = dev->dev_private;
333 struct drm_connector *connector;
334 struct amdgpu_connector *aconnector;
335 struct edid *edid;
336
337 drm_modeset_lock_all(dev);
338 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
339 aconnector = to_amdgpu_connector(connector);
340 if (aconnector->port &&
341 aconnector->port->pdt != DP_PEER_DEVICE_NONE &&
342 aconnector->port->pdt != DP_PEER_DEVICE_MST_BRANCHING &&
343 !aconnector->dc_sink) {
344			/*
345			 * This is the plug-in case, where the port has been
346			 * created but the sink has not been created yet.
347			 */
348 if (!aconnector->edid) {
349 struct dc_sink_init_data init_params = {
350 .link = aconnector->dc_link,
351 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST};
352 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
353
354 if (!edid) {
355 drm_mode_connector_update_edid_property(
356 &aconnector->base,
357 NULL);
358 continue;
359 }
360
361 aconnector->edid = edid;
362
363 aconnector->dc_sink = dc_link_add_remote_sink(
364 aconnector->dc_link,
365 (uint8_t *)edid,
366 (edid->extensions + 1) * EDID_LENGTH,
367 &init_params);
368 if (aconnector->dc_sink)
369 amdgpu_dm_add_sink_to_freesync_module(
370 connector,
371 edid);
372
373 dm_restore_drm_connector_state(connector->dev, connector);
374 } else
375 edid = aconnector->edid;
376
377 DRM_DEBUG_KMS("edid retrieved %p\n", edid);
378
379 drm_mode_connector_update_edid_property(
380 &aconnector->base,
381 aconnector->edid);
382 }
383 }
384 drm_modeset_unlock_all(dev);
385
386 schedule_work(&adev->dm.mst_hotplug_work);
387}
388
389static void dm_dp_mst_register_connector(struct drm_connector *connector)
390{
391 struct drm_device *dev = connector->dev;
392 struct amdgpu_device *adev = dev->dev_private;
393 int i;
394
395 drm_modeset_lock_all(dev);
396 if (adev->mode_info.rfbdev) {
397		/* Do not add if it was already registered in the past */
398 for (i = 0; i < adev->mode_info.rfbdev->helper.connector_count; i++) {
399 if (adev->mode_info.rfbdev->helper.connector_info[i]->connector
400 == connector) {
401 drm_modeset_unlock_all(dev);
402 return;
403 }
404 }
405
406 drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
407 }
408 else
409 DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
410
411 drm_modeset_unlock_all(dev);
412
413 drm_connector_register(connector);
414
415}
416
417static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
418 .add_connector = dm_dp_add_mst_connector,
419 .destroy_connector = dm_dp_destroy_mst_connector,
420 .hotplug = dm_dp_mst_hotplug,
421 .register_connector = dm_dp_mst_register_connector
422};
423
424void amdgpu_dm_initialize_mst_connector(
425 struct amdgpu_display_manager *dm,
426 struct amdgpu_connector *aconnector)
427{
428 aconnector->dm_dp_aux.aux.name = "dmdc";
429 aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
430 aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
431 aconnector->dm_dp_aux.link_index = aconnector->connector_id;
432
433 drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
434 aconnector->mst_mgr.cbs = &dm_mst_cbs;
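	/*
	 * Note (assumption based on the drm_dp_mst_topology_mgr_init()
	 * signature of this kernel generation): the literal arguments below
	 * are max_dpcd_transaction_bytes = 16 and max_payloads = 4, with the
	 * connector id used as conn_base_id.
	 */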
435 drm_dp_mst_topology_mgr_init(
436 &aconnector->mst_mgr,
437 dm->adev->ddev,
438 &aconnector->dm_dp_aux.aux,
439 16,
440 4,
441 aconnector->connector_id);
442}
443
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
new file mode 100644
index 000000000000..6130d62ac65c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
27#define __DAL_AMDGPU_DM_MST_TYPES_H__
28
29struct amdgpu_display_manager;
30struct amdgpu_connector;
31
32void amdgpu_dm_initialize_mst_connector(
33 struct amdgpu_display_manager *dm,
34 struct amdgpu_connector *aconnector);
35
36#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
new file mode 100644
index 000000000000..9d5125951acd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -0,0 +1,463 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/string.h>
27#include <linux/acpi.h>
28
29#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h>
31#include <drm/amdgpu_drm.h>
32#include "dm_services.h"
33#include "amdgpu.h"
34#include "amdgpu_dm.h"
35#include "amdgpu_dm_irq.h"
36#include "amdgpu_dm_types.h"
37#include "amdgpu_pm.h"
38
39#define dm_alloc(size) kzalloc(size, GFP_KERNEL)
40#define dm_realloc(ptr, size) krealloc(ptr, size, GFP_KERNEL)
41#define dm_free(ptr) kfree(ptr)
42
43/******************************************************************************
44 * IRQ Interfaces.
45 *****************************************************************************/
46
47void dal_register_timer_interrupt(
48 struct dc_context *ctx,
49 struct dc_timer_interrupt_params *int_params,
50 interrupt_handler ih,
51 void *args)
52{
53 struct amdgpu_device *adev = ctx->driver_context;
54
55 if (!adev || !int_params) {
56 DRM_ERROR("DM_IRQ: invalid input!\n");
57 return;
58 }
59
60 if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) {
61 /* only low irq ctx is supported. */
62 DRM_ERROR("DM_IRQ: invalid context: %d!\n",
63 int_params->int_context);
64 return;
65 }
66
67 amdgpu_dm_irq_register_timer(adev, int_params, ih, args);
68}
69
70void dal_isr_acquire_lock(struct dc_context *ctx)
71{
72 /*TODO*/
73}
74
75void dal_isr_release_lock(struct dc_context *ctx)
76{
77 /*TODO*/
78}
79
80/******************************************************************************
81 * End-of-IRQ Interfaces.
82 *****************************************************************************/
83
84bool dm_get_platform_info(struct dc_context *ctx,
85 struct platform_info_params *params)
86{
87 /*TODO*/
88 return false;
89}
90
91bool dm_write_persistent_data(struct dc_context *ctx,
92 const struct dc_sink *sink,
93 const char *module_name,
94 const char *key_name,
95 void *params,
96 unsigned int size,
97 struct persistent_data_flag *flag)
98{
99 /*TODO implement*/
100 return false;
101}
102
103bool dm_read_persistent_data(struct dc_context *ctx,
104 const struct dc_sink *sink,
105 const char *module_name,
106 const char *key_name,
107 void *params,
108 unsigned int size,
109 struct persistent_data_flag *flag)
110{
111 /*TODO implement*/
112 return false;
113}
114
115void dm_delay_in_microseconds(struct dc_context *ctx,
116 unsigned int microSeconds)
117{
118 /*TODO implement*/
119 return;
120}
121
122/**** power component interfaces ****/
123
124bool dm_pp_pre_dce_clock_change(
125 struct dc_context *ctx,
126 struct dm_pp_gpu_clock_range *requested_state,
127 struct dm_pp_gpu_clock_range *actual_state)
128{
129 /*TODO*/
130 return false;
131}
132
133bool dm_pp_apply_safe_state(
134 const struct dc_context *ctx)
135{
136 struct amdgpu_device *adev = ctx->driver_context;
137
138 if (adev->pm.dpm_enabled) {
139 /* TODO: Does this require PreModeChange event to PPLIB? */
140 }
141
142 return true;
143}
144
145bool dm_pp_apply_display_requirements(
146 const struct dc_context *ctx,
147 const struct dm_pp_display_configuration *pp_display_cfg)
148{
149 struct amdgpu_device *adev = ctx->driver_context;
150
151 if (adev->pm.dpm_enabled) {
152
153 memset(&adev->pm.pm_display_cfg, 0,
154 sizeof(adev->pm.pm_display_cfg));
155
156 adev->pm.pm_display_cfg.cpu_cc6_disable =
157 pp_display_cfg->cpu_cc6_disable;
158
159 adev->pm.pm_display_cfg.cpu_pstate_disable =
160 pp_display_cfg->cpu_pstate_disable;
161
162 adev->pm.pm_display_cfg.cpu_pstate_separation_time =
163 pp_display_cfg->cpu_pstate_separation_time;
164
165 adev->pm.pm_display_cfg.nb_pstate_switch_disable =
166 pp_display_cfg->nb_pstate_switch_disable;
167
168 adev->pm.pm_display_cfg.num_display =
169 pp_display_cfg->display_count;
170 adev->pm.pm_display_cfg.num_path_including_non_display =
171 pp_display_cfg->display_count;
172
173 adev->pm.pm_display_cfg.min_core_set_clock =
174 pp_display_cfg->min_engine_clock_khz/10;
175 adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
176 pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
177 adev->pm.pm_display_cfg.min_mem_set_clock =
178 pp_display_cfg->min_memory_clock_khz/10;
179
180 adev->pm.pm_display_cfg.multi_monitor_in_sync =
181 pp_display_cfg->all_displays_in_sync;
182 adev->pm.pm_display_cfg.min_vblank_time =
183 pp_display_cfg->avail_mclk_switch_time_us;
184
185 adev->pm.pm_display_cfg.display_clk =
186 pp_display_cfg->disp_clk_khz/10;
187
188 adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
189 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
190
191 adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
192 adev->pm.pm_display_cfg.line_time_in_us =
193 pp_display_cfg->line_time_in_us;
194
195 adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
196 adev->pm.pm_display_cfg.crossfire_display_index = -1;
197 adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
198
199 /* TODO: complete implementation of
200 * amd_powerplay_display_configuration_change().
201 * Follow example of:
202 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
203 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
204 amd_powerplay_display_configuration_change(
205 adev->powerplay.pp_handle,
206 &adev->pm.pm_display_cfg);
207
208 /* TODO: replace by a separate call to 'apply display cfg'? */
209 amdgpu_pm_compute_clocks(adev);
210 }
211
212 return true;
213}
214
215bool dc_service_get_system_clocks_range(
216 const struct dc_context *ctx,
217 struct dm_pp_gpu_clock_range *sys_clks)
218{
219 struct amdgpu_device *adev = ctx->driver_context;
220
221 /* Default values, in case PPLib is not compiled-in. */
222 sys_clks->mclk.max_khz = 800000;
223 sys_clks->mclk.min_khz = 800000;
224
225 sys_clks->sclk.max_khz = 600000;
226 sys_clks->sclk.min_khz = 300000;
227
228 if (adev->pm.dpm_enabled) {
229 sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
230 sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
231
232 sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
233 sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
234 }
235
236 return true;
237}
238
239static void get_default_clock_levels(
240 enum dm_pp_clock_type clk_type,
241 struct dm_pp_clock_levels *clks)
242{
243 uint32_t disp_clks_in_khz[6] = {
244 300000, 400000, 496560, 626090, 685720, 757900 };
245 uint32_t sclks_in_khz[6] = {
246 300000, 360000, 423530, 514290, 626090, 720000 };
247 uint32_t mclks_in_khz[2] = { 333000, 800000 };
248
249 switch (clk_type) {
250 case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
251 clks->num_levels = 6;
252 memmove(clks->clocks_in_khz, disp_clks_in_khz,
253 sizeof(disp_clks_in_khz));
254 break;
255 case DM_PP_CLOCK_TYPE_ENGINE_CLK:
256 clks->num_levels = 6;
257 memmove(clks->clocks_in_khz, sclks_in_khz,
258 sizeof(sclks_in_khz));
259 break;
260 case DM_PP_CLOCK_TYPE_MEMORY_CLK:
261 clks->num_levels = 2;
262 memmove(clks->clocks_in_khz, mclks_in_khz,
263 sizeof(mclks_in_khz));
264 break;
265 default:
266 clks->num_levels = 0;
267 break;
268 }
269}
270
271static enum amd_pp_clock_type dc_to_pp_clock_type(
272 enum dm_pp_clock_type dm_pp_clk_type)
273{
274 enum amd_pp_clock_type amd_pp_clk_type = 0;
275
276 switch (dm_pp_clk_type) {
277 case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
278 amd_pp_clk_type = amd_pp_disp_clock;
279 break;
280 case DM_PP_CLOCK_TYPE_ENGINE_CLK:
281 amd_pp_clk_type = amd_pp_sys_clock;
282 break;
283 case DM_PP_CLOCK_TYPE_MEMORY_CLK:
284 amd_pp_clk_type = amd_pp_mem_clock;
285 break;
286 default:
287 DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
288 dm_pp_clk_type);
289 break;
290 }
291
292 return amd_pp_clk_type;
293}
294
295static void pp_to_dc_clock_levels(
296 const struct amd_pp_clocks *pp_clks,
297 struct dm_pp_clock_levels *dc_clks,
298 enum dm_pp_clock_type dc_clk_type)
299{
300 uint32_t i;
301
302 if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
303 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
304 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
305 pp_clks->count,
306 DM_PP_MAX_CLOCK_LEVELS);
307
308 dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
309 } else
310 dc_clks->num_levels = pp_clks->count;
311
312 DRM_INFO("DM_PPLIB: values for %s clock\n",
313 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
314
315 for (i = 0; i < dc_clks->num_levels; i++) {
316 DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
317 /* translate 10kHz to kHz */
318 dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
319 }
320}
321
322bool dm_pp_get_clock_levels_by_type(
323 const struct dc_context *ctx,
324 enum dm_pp_clock_type clk_type,
325 struct dm_pp_clock_levels *dc_clks)
326{
327 struct amdgpu_device *adev = ctx->driver_context;
328 void *pp_handle = adev->powerplay.pp_handle;
329 struct amd_pp_clocks pp_clks = { 0 };
330 struct amd_pp_simple_clock_info validation_clks = { 0 };
331 uint32_t i;
332
333 if (amd_powerplay_get_clock_by_type(pp_handle,
334 dc_to_pp_clock_type(clk_type), &pp_clks)) {
335 /* Error in pplib. Provide default values. */
336 get_default_clock_levels(clk_type, dc_clks);
337 return true;
338 }
339
340 pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
341
342 if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
343 &validation_clks)) {
344 /* Error in pplib. Provide default values. */
345 DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
346 validation_clks.engine_max_clock = 72000;
347 validation_clks.memory_max_clock = 80000;
348 validation_clks.level = 0;
349 }
350
351 DRM_INFO("DM_PPLIB: Validation clocks:\n");
352 DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
353 validation_clks.engine_max_clock);
354 DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
355 validation_clks.memory_max_clock);
356 DRM_INFO("DM_PPLIB: level : %d\n",
357 validation_clks.level);
358
359 /* Translate 10 kHz to kHz. */
360 validation_clks.engine_max_clock *= 10;
361 validation_clks.memory_max_clock *= 10;
362
363 /* Determine the highest non-boosted level from the Validation Clocks */
364 if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
365 for (i = 0; i < dc_clks->num_levels; i++) {
366 if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
367				/* This clock is higher than the validation clock,
368				 * which means the previous one is the highest
369				 * non-boosted one. */
370 DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
371 dc_clks->num_levels, i + 1);
372 dc_clks->num_levels = i;
373 break;
374 }
375 }
376 } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
377 for (i = 0; i < dc_clks->num_levels; i++) {
378 if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
379 DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
380 dc_clks->num_levels, i + 1);
381 dc_clks->num_levels = i;
382 break;
383 }
384 }
385 }
386
387 return true;
388}
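
As a hypothetical worked example of the engine-clock trimming above:

/*
 *   reported engine levels after the *10 translation (kHz): 300000, 600000, 1100000
 *   validation engine_max_clock after *10 (kHz)           : 720000
 *   1100000 > 720000  =>  num_levels drops from 3 to 2, and the 600000 kHz
 *   entry becomes the highest non-boosted level.
 */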
389
390bool dm_pp_get_clock_levels_by_type_with_latency(
391 const struct dc_context *ctx,
392 enum dm_pp_clock_type clk_type,
393 struct dm_pp_clock_levels_with_latency *clk_level_info)
394{
395 /* TODO: to be implemented */
396 return false;
397}
398
399bool dm_pp_get_clock_levels_by_type_with_voltage(
400 const struct dc_context *ctx,
401 enum dm_pp_clock_type clk_type,
402 struct dm_pp_clock_levels_with_voltage *clk_level_info)
403{
404 /* TODO: to be implemented */
405 return false;
406}
407
408bool dm_pp_notify_wm_clock_changes(
409 const struct dc_context *ctx,
410 struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
411{
412 /* TODO: to be implemented */
413 return false;
414}
415
416bool dm_pp_apply_power_level_change_request(
417 const struct dc_context *ctx,
418 struct dm_pp_power_level_change_request *level_change_req)
419{
420 /* TODO: to be implemented */
421 return false;
422}
423
424bool dm_pp_apply_clock_for_voltage_request(
425 const struct dc_context *ctx,
426 struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
427{
428 /* TODO: to be implemented */
429 return false;
430}
431
432bool dm_pp_get_static_clocks(
433 const struct dc_context *ctx,
434 struct dm_pp_static_clock_info *static_clk_info)
435{
436 /* TODO: to be implemented */
437 return false;
438}
439
440/**** end of power component interfaces ****/
441
442/* Calls to notification */
443
444void dal_notify_setmode_complete(struct dc_context *ctx,
445 uint32_t h_total,
446 uint32_t v_total,
447 uint32_t h_active,
448 uint32_t v_active,
449 uint32_t pix_clk_in_khz)
450{
451 /*TODO*/
452}
453/* End of calls to notification */
454
455long dm_get_pid(void)
456{
457 return current->pid;
458}
459
460long dm_get_tgid(void)
461{
462 return current->tgid;
463}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
new file mode 100644
index 000000000000..c073f4558cf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
@@ -0,0 +1,3150 @@
1/*
2 * Copyright 2012-13 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/types.h>
27#include <linux/version.h>
28
29#include <drm/drmP.h>
30#include <drm/drm_atomic_helper.h>
31#include <drm/drm_fb_helper.h>
32#include <drm/drm_atomic.h>
33#include <drm/drm_edid.h>
34
35#include "amdgpu.h"
36#include "amdgpu_pm.h"
37#include "dm_services_types.h"
38
39/* We need to #undef FRAME_SIZE and DEPRECATED because they conflict
40 * with ptrace-abi.h's #defines of them. */
41#undef FRAME_SIZE
42#undef DEPRECATED
43
44#include "dc.h"
45
46#include "amdgpu_dm_types.h"
47#include "amdgpu_dm_mst_types.h"
48
49#include "modules/inc/mod_freesync.h"
50
51struct dm_connector_state {
52 struct drm_connector_state base;
53
54 enum amdgpu_rmx_type scaling;
55 uint8_t underscan_vborder;
56 uint8_t underscan_hborder;
57 bool underscan_enable;
58};
59
60#define to_dm_connector_state(x)\
61 container_of((x), struct dm_connector_state, base)
62
63
64void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
65{
66 drm_encoder_cleanup(encoder);
67 kfree(encoder);
68}
69
70static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
71 .destroy = amdgpu_dm_encoder_destroy,
72};
73
74static void dm_set_cursor(
75 struct amdgpu_crtc *amdgpu_crtc,
76 uint64_t gpu_addr,
77 uint32_t width,
78 uint32_t height)
79{
80 struct dc_cursor_attributes attributes;
81 amdgpu_crtc->cursor_width = width;
82 amdgpu_crtc->cursor_height = height;
83
84 attributes.address.high_part = upper_32_bits(gpu_addr);
85 attributes.address.low_part = lower_32_bits(gpu_addr);
86 attributes.width = width;
87 attributes.height = height;
88 attributes.x_hot = 0;
89 attributes.y_hot = 0;
90 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
91 attributes.rotation_angle = 0;
92 attributes.attribute_flags.value = 0;
93
94 if (!dc_target_set_cursor_attributes(
95 amdgpu_crtc->target,
96 &attributes)) {
97 DRM_ERROR("DC failed to set cursor attributes\n");
98 }
99}
100
101static int dm_crtc_unpin_cursor_bo_old(
102 struct amdgpu_crtc *amdgpu_crtc)
103{
104 struct amdgpu_bo *robj;
105 int ret = 0;
106
107 if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
108 robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
109
110 ret = amdgpu_bo_reserve(robj, false);
111
112 if (likely(ret == 0)) {
113 ret = amdgpu_bo_unpin(robj);
114
115 if (unlikely(ret != 0)) {
116 DRM_ERROR(
117 "%s: unpin failed (ret=%d), bo %p\n",
118 __func__,
119 ret,
120 amdgpu_crtc->cursor_bo);
121 }
122
123 amdgpu_bo_unreserve(robj);
124 } else {
125 DRM_ERROR(
126 "%s: reserve failed (ret=%d), bo %p\n",
127 __func__,
128 ret,
129 amdgpu_crtc->cursor_bo);
130 }
131
132 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
133 amdgpu_crtc->cursor_bo = NULL;
134 }
135
136 return ret;
137}
138
139static int dm_crtc_pin_cursor_bo_new(
140 struct drm_crtc *crtc,
141 struct drm_file *file_priv,
142 uint32_t handle,
143 struct amdgpu_bo **ret_obj)
144{
145 struct amdgpu_crtc *amdgpu_crtc;
146 struct amdgpu_bo *robj;
147 struct drm_gem_object *obj;
148 int ret = -EINVAL;
149
150 if (NULL != crtc) {
151 struct drm_device *dev = crtc->dev;
152 struct amdgpu_device *adev = dev->dev_private;
153 uint64_t gpu_addr;
154
155 amdgpu_crtc = to_amdgpu_crtc(crtc);
156
157 obj = drm_gem_object_lookup(file_priv, handle);
158
159 if (!obj) {
160 DRM_ERROR(
161 "Cannot find cursor object %x for crtc %d\n",
162 handle,
163 amdgpu_crtc->crtc_id);
164 goto release;
165 }
166 robj = gem_to_amdgpu_bo(obj);
167
168 ret = amdgpu_bo_reserve(robj, false);
169
170 if (unlikely(ret != 0)) {
171 drm_gem_object_unreference_unlocked(obj);
172 DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
173 ret, handle);
174 goto release;
175 }
176
177 ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
178 adev->mc.visible_vram_size,
179 &gpu_addr);
180
181 if (ret == 0) {
182 amdgpu_crtc->cursor_addr = gpu_addr;
183 *ret_obj = robj;
184 }
185 amdgpu_bo_unreserve(robj);
186 if (ret)
187 drm_gem_object_unreference_unlocked(obj);
188
189 }
190release:
191
192 return ret;
193}
194
195static int dm_crtc_cursor_set(
196 struct drm_crtc *crtc,
197 struct drm_file *file_priv,
198 uint32_t handle,
199 uint32_t width,
200 uint32_t height)
201{
202 struct amdgpu_bo *new_cursor_bo;
203 struct dc_cursor_position position;
204
205 int ret;
206
207 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
208
209	ret = -EINVAL;
210 new_cursor_bo = NULL;
211
212 DRM_DEBUG_KMS(
213 "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
214 __func__,
215 amdgpu_crtc->crtc_id,
216 handle,
217 width,
218 height,
219 amdgpu_crtc->cursor_bo);
220
221 if (!handle) {
222 /* turn off cursor */
223 position.enable = false;
224 position.x = 0;
225 position.y = 0;
226 position.hot_spot_enable = false;
227
228 if (amdgpu_crtc->target) {
229 /*set cursor visible false*/
230 dc_target_set_cursor_position(
231 amdgpu_crtc->target,
232 &position);
233 }
234 /*unpin old cursor buffer and update cache*/
235 ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
236 goto release;
237
238 }
239
240 if ((width > amdgpu_crtc->max_cursor_width) ||
241 (height > amdgpu_crtc->max_cursor_height)) {
242 DRM_ERROR(
243 "%s: bad cursor width or height %d x %d\n",
244 __func__,
245 width,
246 height);
247 goto release;
248 }
249 /*try to pin new cursor bo*/
250 ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
251 /*if map not successful then return an error*/
252 if (ret)
253 goto release;
254
255 /*program new cursor bo to hardware*/
256 dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);
257
258	/* Unmap the old, no longer used cursor bo and
259	 * return its memory and mapping. */
260 dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
261
262 /*assign new cursor bo to our internal cache*/
263 amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;
264
265release:
266 return ret;
267
268}
269
270static int dm_crtc_cursor_move(struct drm_crtc *crtc,
271 int x, int y)
272{
273 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
274 int xorigin = 0, yorigin = 0;
275 struct dc_cursor_position position;
276
277	/* avivo cursors are offset into the total surface */
278 x += crtc->primary->state->src_x >> 16;
279 y += crtc->primary->state->src_y >> 16;
280
281	/*
282	 * TODO: unguard the following for cursor debugging
283	 */
284#if 0
285 DRM_DEBUG_KMS(
286 "%s: x %d y %d c->x %d c->y %d\n",
287 __func__,
288 x,
289 y,
290 crtc->x,
291 crtc->y);
292#endif
293
294 if (x < 0) {
295 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
296 x = 0;
297 }
298 if (y < 0) {
299 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
300 y = 0;
301 }
302
303 position.enable = true;
304 position.x = x;
305 position.y = y;
306
307 position.hot_spot_enable = true;
308 position.x_hotspot = xorigin;
309 position.y_hotspot = yorigin;
310
311 if (amdgpu_crtc->target) {
312 if (!dc_target_set_cursor_position(
313 amdgpu_crtc->target,
314 &position)) {
315 DRM_ERROR("DC failed to set cursor position\n");
316 return -EINVAL;
317 }
318 }
319
320 return 0;
321}
322
323static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
324{
325 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
326
327 DRM_DEBUG_KMS(
328 "%s: with cursor_bo %p\n",
329 __func__,
330 amdgpu_crtc->cursor_bo);
331
332 if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) {
333 dm_set_cursor(
334 amdgpu_crtc,
335 amdgpu_crtc->cursor_addr,
336 amdgpu_crtc->cursor_width,
337 amdgpu_crtc->cursor_height);
338 }
339}
340static bool fill_rects_from_plane_state(
341 const struct drm_plane_state *state,
342 struct dc_surface *surface)
343{
344 surface->src_rect.x = state->src_x >> 16;
345 surface->src_rect.y = state->src_y >> 16;
346	/* For now we ignore the 16.16 fractional part and do not deal with sub-pixel positions. */
347 surface->src_rect.width = state->src_w >> 16;
348
349 if (surface->src_rect.width == 0)
350 return false;
351
352 surface->src_rect.height = state->src_h >> 16;
353 if (surface->src_rect.height == 0)
354 return false;
355
356 surface->dst_rect.x = state->crtc_x;
357 surface->dst_rect.y = state->crtc_y;
358
359 if (state->crtc_w == 0)
360 return false;
361
362 surface->dst_rect.width = state->crtc_w;
363
364 if (state->crtc_h == 0)
365 return false;
366
367 surface->dst_rect.height = state->crtc_h;
368
369 surface->clip_rect = surface->dst_rect;
370
371 switch (state->rotation & DRM_MODE_ROTATE_MASK) {
372 case DRM_MODE_ROTATE_0:
373 surface->rotation = ROTATION_ANGLE_0;
374 break;
375 case DRM_MODE_ROTATE_90:
376 surface->rotation = ROTATION_ANGLE_90;
377 break;
378 case DRM_MODE_ROTATE_180:
379 surface->rotation = ROTATION_ANGLE_180;
380 break;
381 case DRM_MODE_ROTATE_270:
382 surface->rotation = ROTATION_ANGLE_270;
383 break;
384 default:
385 surface->rotation = ROTATION_ANGLE_0;
386 break;
387 }
388
389 return true;
390}
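
The DRM plane source rectangle is expressed in 16.16 fixed point, which is why the helper above shifts right by 16; a hypothetical worked example:

/* Hypothetical example: a 1920-pixel-wide source with no fractional part. */
uint32_t src_w = 1920 << 16;		/* 125829120 in 16.16 fixed point */
uint32_t width_px = src_w >> 16;	/* back to 1920 integer pixels */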
391static bool get_fb_info(
392 const struct amdgpu_framebuffer *amdgpu_fb,
393 uint64_t *tiling_flags,
394 uint64_t *fb_location)
395{
396 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
397 int r = amdgpu_bo_reserve(rbo, false);
398	if (unlikely(r != 0)) {
399 DRM_ERROR("Unable to reserve buffer\n");
400 return false;
401 }
402
403 if (fb_location)
404 *fb_location = amdgpu_bo_gpu_offset(rbo);
405
406 if (tiling_flags)
407 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
408
409 amdgpu_bo_unreserve(rbo);
410
411 return true;
412}
413static void fill_plane_attributes_from_fb(
414 struct dc_surface *surface,
415 const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
416{
417 uint64_t tiling_flags;
418 uint64_t fb_location = 0;
419 const struct drm_framebuffer *fb = &amdgpu_fb->base;
420 struct drm_format_name_buf format_name;
421
422 get_fb_info(
423 amdgpu_fb,
424 &tiling_flags,
425		addReq ? &fb_location : NULL);
426
427 surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
428 surface->address.grph.addr.low_part = lower_32_bits(fb_location);
429 surface->address.grph.addr.high_part = upper_32_bits(fb_location);
430
431 switch (fb->format->format) {
432 case DRM_FORMAT_C8:
433 surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
434 break;
435 case DRM_FORMAT_RGB565:
436 surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
437 break;
438 case DRM_FORMAT_XRGB8888:
439 case DRM_FORMAT_ARGB8888:
440 surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
441 break;
442 case DRM_FORMAT_XRGB2101010:
443 case DRM_FORMAT_ARGB2101010:
444 surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
445 break;
446 case DRM_FORMAT_XBGR2101010:
447 case DRM_FORMAT_ABGR2101010:
448 surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
449 break;
450 default:
451 DRM_ERROR("Unsupported screen format %s\n",
452 drm_get_format_name(fb->format->format, &format_name));
453 return;
454 }
455
456 memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));
457
458 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1)
459 {
460 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
461
462 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
463 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
464 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
465 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
466 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
467
468 /* XXX fix me for VI */
469 surface->tiling_info.gfx8.num_banks = num_banks;
470 surface->tiling_info.gfx8.array_mode =
471 DC_ARRAY_2D_TILED_THIN1;
472 surface->tiling_info.gfx8.tile_split = tile_split;
473 surface->tiling_info.gfx8.bank_width = bankw;
474 surface->tiling_info.gfx8.bank_height = bankh;
475 surface->tiling_info.gfx8.tile_aspect = mtaspect;
476 surface->tiling_info.gfx8.tile_mode =
477 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
478 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
479 == DC_ARRAY_1D_TILED_THIN1) {
480 surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
481 }
482
483 surface->tiling_info.gfx8.pipe_config =
484 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
485
486 surface->plane_size.grph.surface_size.x = 0;
487 surface->plane_size.grph.surface_size.y = 0;
488 surface->plane_size.grph.surface_size.width = fb->width;
489 surface->plane_size.grph.surface_size.height = fb->height;
490 surface->plane_size.grph.surface_pitch =
491 fb->pitches[0] / fb->format->cpp[0];
492
493 surface->visible = true;
494 surface->scaling_quality.h_taps_c = 0;
495 surface->scaling_quality.v_taps_c = 0;
496
497 /* TODO: unhardcode */
498 surface->color_space = COLOR_SPACE_SRGB;
499 /* is this needed? is surface zeroed at allocation? */
500 surface->scaling_quality.h_taps = 0;
501 surface->scaling_quality.v_taps = 0;
502 surface->stereo_format = PLANE_STEREO_FORMAT_NONE;
503
504}
505
506#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
507
508static void fill_gamma_from_crtc(
509 const struct drm_crtc *crtc,
510 struct dc_surface *dc_surface)
511{
512 int i;
513 struct dc_gamma *gamma;
514 struct drm_crtc_state *state = crtc->state;
515 struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;
516
517 gamma = dc_create_gamma();
518
519 if (gamma == NULL)
520 return;
521
522 for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
523 gamma->gamma_ramp_rgb256x3x16.red[i] = lut[i].red;
524 gamma->gamma_ramp_rgb256x3x16.green[i] = lut[i].green;
525 gamma->gamma_ramp_rgb256x3x16.blue[i] = lut[i].blue;
526 }
527
528 gamma->type = GAMMA_RAMP_RBG256X3X16;
529 gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16);
530
531 dc_surface->gamma_correction = gamma;
532}
533
534static void fill_plane_attributes(
535 struct dc_surface *surface,
536 struct drm_plane_state *state, bool addrReq)
537{
538 const struct amdgpu_framebuffer *amdgpu_fb =
539 to_amdgpu_framebuffer(state->fb);
540 const struct drm_crtc *crtc = state->crtc;
541
542 fill_rects_from_plane_state(state, surface);
543 fill_plane_attributes_from_fb(
544 surface,
545 amdgpu_fb,
546 addrReq);
547
548	/* If a gamma LUT is set, update the gamma values */
549 if (state->crtc->state->gamma_lut) {
550 fill_gamma_from_crtc(crtc, surface);
551 }
552}
553
554/*****************************************************************************/
555
556struct amdgpu_connector *aconnector_from_drm_crtc_id(
557 const struct drm_crtc *crtc)
558{
559 struct drm_device *dev = crtc->dev;
560 struct drm_connector *connector;
561 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
562 struct amdgpu_connector *aconnector;
563
564 list_for_each_entry(connector,
565 &dev->mode_config.connector_list, head) {
566
567 aconnector = to_amdgpu_connector(connector);
568
569 if (aconnector->base.state->crtc != &acrtc->base)
570 continue;
571
572 /* Found the connector */
573 return aconnector;
574 }
575
576 /* If we get here, not found. */
577 return NULL;
578}
579
580static void update_stream_scaling_settings(
581 const struct drm_display_mode *mode,
582 const struct dm_connector_state *dm_state,
583 const struct dc_stream *stream)
584{
585 struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
586 enum amdgpu_rmx_type rmx_type;
587
588 struct rect src = { 0 }; /* viewport in target space*/
589 struct rect dst = { 0 }; /* stream addressable area */
590
591 /* Full screen scaling by default */
592 src.width = mode->hdisplay;
593 src.height = mode->vdisplay;
594 dst.width = stream->timing.h_addressable;
595 dst.height = stream->timing.v_addressable;
596
597 rmx_type = dm_state->scaling;
598 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
599 if (src.width * dst.height <
600 src.height * dst.width) {
601 /* height needs less upscaling/more downscaling */
602 dst.width = src.width *
603 dst.height / src.height;
604 } else {
605 /* width needs less upscaling/more downscaling */
606 dst.height = src.height *
607 dst.width / src.width;
608 }
609 } else if (rmx_type == RMX_CENTER) {
610 dst = src;
611 }
612
613 dst.x = (stream->timing.h_addressable - dst.width) / 2;
614 dst.y = (stream->timing.v_addressable - dst.height) / 2;
615
616 if (dm_state->underscan_enable) {
617 dst.x += dm_state->underscan_hborder / 2;
618 dst.y += dm_state->underscan_vborder / 2;
619 dst.width -= dm_state->underscan_hborder;
620 dst.height -= dm_state->underscan_vborder;
621 }
622
623 adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);
624
625 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
626 dst.x, dst.y, dst.width, dst.height);
627
628}
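
A hypothetical worked example of the aspect-preserving (RMX_ASPECT) path above:

/*
 *   src = 1280x720, initial dst = 1920x1200
 *   1280 * 1200 (1536000) >= 720 * 1920 (1382400)
 *     => dst.height = 720 * 1920 / 1280 = 1080
 *   result: dst = 1920x1080, centered at dst.x = 0, dst.y = 60
 */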
629
630static void dm_dc_surface_commit(
631 struct dc *dc,
632 struct drm_crtc *crtc)
633{
634 struct dc_surface *dc_surface;
635 const struct dc_surface *dc_surfaces[1];
636 const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
637 struct dc_target *dc_target = acrtc->target;
638
639 if (!dc_target) {
640 dm_error(
641 "%s: Failed to obtain target on crtc (%d)!\n",
642 __func__,
643 acrtc->crtc_id);
644 goto fail;
645 }
646
647 dc_surface = dc_create_surface(dc);
648
649 if (!dc_surface) {
650 dm_error(
651 "%s: Failed to create a surface!\n",
652 __func__);
653 goto fail;
654 }
655
656 /* Surface programming */
657 fill_plane_attributes(dc_surface, crtc->primary->state, true);
658
659 dc_surfaces[0] = dc_surface;
660
661 if (false == dc_commit_surfaces_to_target(
662 dc,
663 dc_surfaces,
664 1,
665 dc_target)) {
666 dm_error(
667 "%s: Failed to attach surface!\n",
668 __func__);
669 }
670
671 dc_surface_release(dc_surface);
672fail:
673 return;
674}
675
676static enum dc_color_depth convert_color_depth_from_display_info(
677 const struct drm_connector *connector)
678{
679 uint32_t bpc = connector->display_info.bpc;
680
681	/* Limit color depth to 8 bpc for now.
682	 * TODO: still need to handle deep color */
683 if (bpc > 8)
684 bpc = 8;
685
686 switch (bpc) {
687 case 0:
688		/* Temporary workaround: DRM doesn't parse color depth for
689		 * EDID revisions before 1.4.
690		 * TODO: fix EDID parsing.
691		 */
692 return COLOR_DEPTH_888;
693 case 6:
694 return COLOR_DEPTH_666;
695 case 8:
696 return COLOR_DEPTH_888;
697 case 10:
698 return COLOR_DEPTH_101010;
699 case 12:
700 return COLOR_DEPTH_121212;
701 case 14:
702 return COLOR_DEPTH_141414;
703 case 16:
704 return COLOR_DEPTH_161616;
705 default:
706 return COLOR_DEPTH_UNDEFINED;
707 }
708}
709
710static enum dc_aspect_ratio get_aspect_ratio(
711 const struct drm_display_mode *mode_in)
712{
713 int32_t width = mode_in->crtc_hdisplay * 9;
714 int32_t height = mode_in->crtc_vdisplay * 16;
715 if ((width - height) < 10 && (width - height) > -10)
716 return ASPECT_RATIO_16_9;
717 else
718 return ASPECT_RATIO_4_3;
719}
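
The helper above cross-multiplies (hdisplay * 9 versus vdisplay * 16) and treats any pair within a small tolerance as 16:9; hypothetical worked examples:

/*
 *   1920x1080: 1920 * 9 = 17280, 1080 * 16 = 17280, diff = 0     -> 16:9
 *   1366x768 : 1366 * 9 = 12294,  768 * 16 = 12288, diff = 6     -> 16:9
 *   1024x768 : 1024 * 9 =  9216,  768 * 16 = 12288, diff = -3072 -> 4:3
 */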
720
721static enum dc_color_space get_output_color_space(
722 const struct dc_crtc_timing *dc_crtc_timing)
723{
724 enum dc_color_space color_space = COLOR_SPACE_SRGB;
725
726 switch (dc_crtc_timing->pixel_encoding) {
727 case PIXEL_ENCODING_YCBCR422:
728 case PIXEL_ENCODING_YCBCR444:
729 case PIXEL_ENCODING_YCBCR420:
730 {
731		/*
732		 * A pixel clock of 27030 kHz is the separation point between
733		 * HDTV and SDTV according to the HDMI spec: we use YCbCr709
734		 * above it and YCbCr601 at or below it.
735		 */
736 if (dc_crtc_timing->pix_clk_khz > 27030) {
737 if (dc_crtc_timing->flags.Y_ONLY)
738 color_space =
739 COLOR_SPACE_YCBCR709_LIMITED;
740 else
741 color_space = COLOR_SPACE_YCBCR709;
742 } else {
743 if (dc_crtc_timing->flags.Y_ONLY)
744 color_space =
745 COLOR_SPACE_YCBCR601_LIMITED;
746 else
747 color_space = COLOR_SPACE_YCBCR601;
748 }
749
750 }
751 break;
752 case PIXEL_ENCODING_RGB:
753 color_space = COLOR_SPACE_SRGB;
754 break;
755
756 default:
757 WARN_ON(1);
758 break;
759 }
760
761 return color_space;
762}
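
Hypothetical examples for the pixel-clock threshold above (YCbCr encodings only):

/*
 *   720x480 @ 60 Hz : pix_clk ~27000 kHz, <= 27030 -> COLOR_SPACE_YCBCR601
 *   1280x720 @ 60 Hz: pix_clk ~74250 kHz, >  27030 -> COLOR_SPACE_YCBCR709
 */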
763
764/*****************************************************************************/
765
766static void fill_stream_properties_from_drm_display_mode(
767 struct dc_stream *stream,
768 const struct drm_display_mode *mode_in,
769 const struct drm_connector *connector)
770{
771 struct dc_crtc_timing *timing_out = &stream->timing;
772 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
773
774 timing_out->h_border_left = 0;
775 timing_out->h_border_right = 0;
776 timing_out->v_border_top = 0;
777 timing_out->v_border_bottom = 0;
778 /* TODO: un-hardcode */
779
780 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
781 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
782 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
783 else
784 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
785
786 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
787 timing_out->display_color_depth = convert_color_depth_from_display_info(
788 connector);
789 timing_out->scan_type = SCANNING_TYPE_NODATA;
790 timing_out->hdmi_vic = 0;
791 timing_out->vic = drm_match_cea_mode(mode_in);
792
793 timing_out->h_addressable = mode_in->crtc_hdisplay;
794 timing_out->h_total = mode_in->crtc_htotal;
795 timing_out->h_sync_width =
796 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
797 timing_out->h_front_porch =
798 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
799 timing_out->v_total = mode_in->crtc_vtotal;
800 timing_out->v_addressable = mode_in->crtc_vdisplay;
801 timing_out->v_front_porch =
802 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
803 timing_out->v_sync_width =
804 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
805 timing_out->pix_clk_khz = mode_in->crtc_clock;
806 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
807 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
808 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
809 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
810 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
811
812 stream->output_color_space = get_output_color_space(timing_out);
813
814}
815
816static void fill_audio_info(
817 struct audio_info *audio_info,
818 const struct drm_connector *drm_connector,
819 const struct dc_sink *dc_sink)
820{
821 int i = 0;
822 int cea_revision = 0;
823 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
824
825 audio_info->manufacture_id = edid_caps->manufacturer_id;
826 audio_info->product_id = edid_caps->product_id;
827
828 cea_revision = drm_connector->display_info.cea_rev;
829
830 while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
831 edid_caps->display_name[i]) {
832 audio_info->display_name[i] = edid_caps->display_name[i];
833 i++;
834 }
835
836	if (cea_revision >= 3) {
837 audio_info->mode_count = edid_caps->audio_mode_count;
838
839 for (i = 0; i < audio_info->mode_count; ++i) {
840 audio_info->modes[i].format_code =
841 (enum audio_format_code)
842 (edid_caps->audio_modes[i].format_code);
843 audio_info->modes[i].channel_count =
844 edid_caps->audio_modes[i].channel_count;
845 audio_info->modes[i].sample_rates.all =
846 edid_caps->audio_modes[i].sample_rate;
847 audio_info->modes[i].sample_size =
848 edid_caps->audio_modes[i].sample_size;
849 }
850 }
851
852 audio_info->flags.all = edid_caps->speaker_flags;
853
854	/* TODO: we only check the progressive mode; check the interlaced mode too */
855	if (drm_connector->latency_present[0]) {
856 audio_info->video_latency = drm_connector->video_latency[0];
857 audio_info->audio_latency = drm_connector->audio_latency[0];
858 }
859
860 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
861
862}
863
864static void copy_crtc_timing_for_drm_display_mode(
865 const struct drm_display_mode *src_mode,
866 struct drm_display_mode *dst_mode)
867{
868 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
869 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
870 dst_mode->crtc_clock = src_mode->crtc_clock;
871 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
872 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
 873	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
874 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
875 dst_mode->crtc_htotal = src_mode->crtc_htotal;
876 dst_mode->crtc_hskew = src_mode->crtc_hskew;
 877	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
 878	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
 879	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
 880	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
 881	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
882}
883
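/*
 * Use the native mode's CRTC timing when scaling is enabled or when the
 * requested mode already matches the native clock and h/v totals; otherwise
 * leave the timing of drm_mode untouched.
 */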
884static void decide_crtc_timing_for_drm_display_mode(
885 struct drm_display_mode *drm_mode,
886 const struct drm_display_mode *native_mode,
887 bool scale_enabled)
888{
889 if (scale_enabled) {
890 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
891 } else if (native_mode->clock == drm_mode->clock &&
892 native_mode->htotal == drm_mode->htotal &&
893 native_mode->vtotal == drm_mode->vtotal) {
894 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
895 } else {
 896		/* no scaling and no amdgpu-inserted mode, no need to patch */
897 }
898}
899
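/*
 * Create a single-stream dc_target for the connector's current sink: the
 * stream timing is derived from drm_mode (optionally patched against the
 * preferred/native mode), then scaling and audio info are filled in before
 * the stream is wrapped into a dc_target.
 */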
900static struct dc_target *create_target_for_sink(
901 const struct amdgpu_connector *aconnector,
902 const struct drm_display_mode *drm_mode,
903 const struct dm_connector_state *dm_state)
904{
905 struct drm_display_mode *preferred_mode = NULL;
906 const struct drm_connector *drm_connector;
907 struct dc_target *target = NULL;
908 struct dc_stream *stream;
909 struct drm_display_mode mode = *drm_mode;
910 bool native_mode_found = false;
911
912 if (NULL == aconnector) {
913 DRM_ERROR("aconnector is NULL!\n");
914 goto drm_connector_null;
915 }
916
917 if (NULL == dm_state) {
918 DRM_ERROR("dm_state is NULL!\n");
919 goto dm_state_null;
920 }
921
922 drm_connector = &aconnector->base;
923 stream = dc_create_stream_for_sink(aconnector->dc_sink);
924
925 if (NULL == stream) {
926 DRM_ERROR("Failed to create stream for sink!\n");
927 goto stream_create_fail;
928 }
929
930 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
931 /* Search for preferred mode */
932 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
933 native_mode_found = true;
934 break;
935 }
936 }
937 if (!native_mode_found)
938 preferred_mode = list_first_entry_or_null(
939 &aconnector->base.modes,
940 struct drm_display_mode,
941 head);
942
943 if (NULL == preferred_mode) {
 944		/* This may not be an error. The use case is when we have no
945 * usermode calls to reset and set mode upon hotplug. In this
946 * case, we call set mode ourselves to restore the previous mode
 947		 * and the modelist may not yet be filled in.
948 */
949 DRM_INFO("No preferred mode found\n");
950 } else {
951 decide_crtc_timing_for_drm_display_mode(
952 &mode, preferred_mode,
953 dm_state->scaling != RMX_OFF);
954 }
955
956 fill_stream_properties_from_drm_display_mode(stream,
957 &mode, &aconnector->base);
958 update_stream_scaling_settings(&mode, dm_state, stream);
959
960 fill_audio_info(
961 &stream->audio_info,
962 drm_connector,
963 aconnector->dc_sink);
964
965 target = dc_create_target_for_streams(&stream, 1);
966 dc_stream_release(stream);
967
968 if (NULL == target) {
969 DRM_ERROR("Failed to create target with streams!\n");
970 goto target_create_fail;
971 }
972
973dm_state_null:
974drm_connector_null:
975target_create_fail:
976stream_create_fail:
977 return target;
978}
979
980void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
981{
982 drm_crtc_cleanup(crtc);
983 kfree(crtc);
984}
985
 986/* Implement only the options currently available for the driver */
987static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
988 .reset = drm_atomic_helper_crtc_reset,
989 .cursor_set = dm_crtc_cursor_set,
990 .cursor_move = dm_crtc_cursor_move,
991 .destroy = amdgpu_dm_crtc_destroy,
992 .gamma_set = drm_atomic_helper_legacy_gamma_set,
993 .set_config = drm_atomic_helper_set_config,
994 .page_flip = drm_atomic_helper_page_flip,
995 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
996 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
997};
998
999static enum drm_connector_status
1000amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
1001{
1002 bool connected;
1003 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
1004
1005 /* Notes:
1006 * 1. This interface is NOT called in context of HPD irq.
1007	 * 2. This interface *is called* in the context of a user-mode ioctl,
1008	 *    which makes it a bad place for *any* MST-related activity. */
1009
1010 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1011 connected = (aconnector->dc_sink != NULL);
1012 else
1013 connected = (aconnector->base.force == DRM_FORCE_ON);
1014
1015 return (connected ? connector_status_connected :
1016 connector_status_disconnected);
1017}
1018
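/*
 * Handle the scaling-mode and underscan connector properties. The primary
 * plane of the bound CRTC is added to the atomic state so that the plane
 * gets updated together with the new connector state.
 */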
1019int amdgpu_dm_connector_atomic_set_property(
1020 struct drm_connector *connector,
1021 struct drm_connector_state *connector_state,
1022 struct drm_property *property,
1023 uint64_t val)
1024{
1025 struct drm_device *dev = connector->dev;
1026 struct amdgpu_device *adev = dev->dev_private;
1027 struct dm_connector_state *dm_old_state =
1028 to_dm_connector_state(connector->state);
1029 struct dm_connector_state *dm_new_state =
1030 to_dm_connector_state(connector_state);
1031
1032 struct drm_crtc_state *new_crtc_state;
1033 struct drm_crtc *crtc;
1034 int i;
1035 int ret = -EINVAL;
1036
1037 if (property == dev->mode_config.scaling_mode_property) {
1038 enum amdgpu_rmx_type rmx_type;
1039
1040 switch (val) {
1041 case DRM_MODE_SCALE_CENTER:
1042 rmx_type = RMX_CENTER;
1043 break;
1044 case DRM_MODE_SCALE_ASPECT:
1045 rmx_type = RMX_ASPECT;
1046 break;
1047 case DRM_MODE_SCALE_FULLSCREEN:
1048 rmx_type = RMX_FULL;
1049 break;
1050 case DRM_MODE_SCALE_NONE:
1051 default:
1052 rmx_type = RMX_OFF;
1053 break;
1054 }
1055
1056 if (dm_old_state->scaling == rmx_type)
1057 return 0;
1058
1059 dm_new_state->scaling = rmx_type;
1060 ret = 0;
1061 } else if (property == adev->mode_info.underscan_hborder_property) {
1062 dm_new_state->underscan_hborder = val;
1063 ret = 0;
1064 } else if (property == adev->mode_info.underscan_vborder_property) {
1065 dm_new_state->underscan_vborder = val;
1066 ret = 0;
1067 } else if (property == adev->mode_info.underscan_property) {
1068 dm_new_state->underscan_enable = val;
1069 ret = 0;
1070 }
1071
1072 for_each_crtc_in_state(
1073 connector_state->state,
1074 crtc,
1075 new_crtc_state,
1076 i) {
1077
1078 if (crtc == connector_state->crtc) {
1079 struct drm_plane_state *plane_state;
1080
1081 /*
1082			 * Bit of magic done here. We need to ensure
1083			 * that planes get updated after the mode is set.
1084			 * So, we add the primary plane to the state;
1085			 * this way atomic_update will be called
1086			 * for it.
1087 */
1088 plane_state =
1089 drm_atomic_get_plane_state(
1090 connector_state->state,
1091 crtc->primary);
1092
1093 if (!plane_state)
1094 return -EINVAL;
1095 }
1096 }
1097
1098 return ret;
1099}
1100
1101void amdgpu_dm_connector_destroy(struct drm_connector *connector)
1102{
1103 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
1104 const struct dc_link *link = aconnector->dc_link;
1105 struct amdgpu_device *adev = connector->dev->dev_private;
1106 struct amdgpu_display_manager *dm = &adev->dm;
1107#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1108 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1109
1110 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
1111 amdgpu_dm_register_backlight_device(dm);
1112
1113 if (dm->backlight_dev) {
1114 backlight_device_unregister(dm->backlight_dev);
1115 dm->backlight_dev = NULL;
1116 }
1117
1118 }
1119#endif
1120 drm_connector_unregister(connector);
1121 drm_connector_cleanup(connector);
1122 kfree(connector);
1123}
1124
1125void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
1126{
1127 struct dm_connector_state *state =
1128 to_dm_connector_state(connector->state);
1129
1130 kfree(state);
1131
1132 state = kzalloc(sizeof(*state), GFP_KERNEL);
1133
1134 if (state) {
1135 state->scaling = RMX_OFF;
1136 state->underscan_enable = false;
1137 state->underscan_hborder = 0;
1138 state->underscan_vborder = 0;
1139
1140 connector->state = &state->base;
1141 connector->state->connector = connector;
1142 }
1143}
1144
1145struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
1146 struct drm_connector *connector)
1147{
1148 struct dm_connector_state *state =
1149 to_dm_connector_state(connector->state);
1150
1151 struct dm_connector_state *new_state =
1152 kmemdup(state, sizeof(*state), GFP_KERNEL);
1153
1154 if (new_state) {
1155 __drm_atomic_helper_connector_duplicate_state(connector,
1156 &new_state->base);
1157 return &new_state->base;
1158 }
1159
1160 return NULL;
1161}
1162
1163static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
1164 .reset = amdgpu_dm_connector_funcs_reset,
1165 .detect = amdgpu_dm_connector_detect,
1166 .fill_modes = drm_helper_probe_single_connector_modes,
1167 .destroy = amdgpu_dm_connector_destroy,
1168 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
1169 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1170 .atomic_set_property = amdgpu_dm_connector_atomic_set_property
1171};
1172
1173static struct drm_encoder *best_encoder(struct drm_connector *connector)
1174{
1175 int enc_id = connector->encoder_ids[0];
1176 struct drm_mode_object *obj;
1177 struct drm_encoder *encoder;
1178
1179 DRM_DEBUG_KMS("Finding the best encoder\n");
1180
1181 /* pick the encoder ids */
1182 if (enc_id) {
1183 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
1184 if (!obj) {
1185 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
1186 return NULL;
1187 }
1188 encoder = obj_to_encoder(obj);
1189 return encoder;
1190 }
1191 DRM_ERROR("No encoder id\n");
1192 return NULL;
1193}
1194
1195static int get_modes(struct drm_connector *connector)
1196{
1197 return amdgpu_dm_connector_get_modes(connector);
1198}
1199
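/*
 * Create an emulated (remote) sink from the EDID blob attached to the
 * connector. Used for forced connectors; if no EDID data is present the
 * connector is forced OFF instead.
 */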
1200static void create_eml_sink(struct amdgpu_connector *aconnector)
1201{
1202 struct dc_sink_init_data init_params = {
1203 .link = aconnector->dc_link,
1204 .sink_signal = SIGNAL_TYPE_VIRTUAL
1205 };
1206	struct edid *edid;
1207
1208 if (!aconnector->base.edid_blob_ptr ||
1209 !aconnector->base.edid_blob_ptr->data) {
1210		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
1211 aconnector->base.name);
1212
1213 aconnector->base.force = DRM_FORCE_OFF;
1214 aconnector->base.override_edid = false;
1215 return;
1216 }
1217
	edid = (struct edid *)aconnector->base.edid_blob_ptr->data;
1218	aconnector->edid = edid;
1219
1220 aconnector->dc_em_sink = dc_link_add_remote_sink(
1221 aconnector->dc_link,
1222 (uint8_t *)edid,
1223 (edid->extensions + 1) * EDID_LENGTH,
1224 &init_params);
1225
1226	if (aconnector->base.force == DRM_FORCE_ON)
1228 aconnector->dc_sink = aconnector->dc_link->local_sink ?
1229 aconnector->dc_link->local_sink :
1230 aconnector->dc_em_sink;
1231}
1232
1233static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
1234{
1235 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
1236
1237 /* In case of headless boot with force on for DP managed connector
1238 * Those settings have to be != 0 to get initial modeset
1239 */
1240 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
1241 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
1242 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
1243 }
1244
1245
1246 aconnector->base.override_edid = true;
1247 create_eml_sink(aconnector);
1248}
1249
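/*
 * Validate a mode by building a temporary stream/target for the connector's
 * sink and asking DC whether the required resources can be allocated.
 * Interlaced and doublescan modes are rejected up front.
 */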
1250int amdgpu_dm_connector_mode_valid(
1251 struct drm_connector *connector,
1252 struct drm_display_mode *mode)
1253{
1254 int result = MODE_ERROR;
1255 const struct dc_sink *dc_sink;
1256 struct amdgpu_device *adev = connector->dev->dev_private;
1257 struct dc_validation_set val_set = { 0 };
1258 /* TODO: Unhardcode stream count */
1259 struct dc_stream *streams[1];
1260 struct dc_target *target;
1261 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
1262
1263 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1264 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
1265 return result;
1266
1267	/* Only run this the first time mode_valid is called, to initialize
1268 * EDID mgmt
1269 */
1270 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
1271 !aconnector->dc_em_sink)
1272 handle_edid_mgmt(aconnector);
1273
1274 dc_sink = to_amdgpu_connector(connector)->dc_sink;
1275
1276 if (NULL == dc_sink) {
1277 DRM_ERROR("dc_sink is NULL!\n");
1278 goto stream_create_fail;
1279 }
1280
1281 streams[0] = dc_create_stream_for_sink(dc_sink);
1282
1283 if (NULL == streams[0]) {
1284 DRM_ERROR("Failed to create stream for sink!\n");
1285 goto stream_create_fail;
1286 }
1287
1288 drm_mode_set_crtcinfo(mode, 0);
1289 fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);
1290
1291 target = dc_create_target_for_streams(streams, 1);
1292 val_set.target = target;
1293
1294 if (NULL == val_set.target) {
1295 DRM_ERROR("Failed to create target with stream!\n");
1296 goto target_create_fail;
1297 }
1298
1299 val_set.surface_count = 0;
1300 streams[0]->src.width = mode->hdisplay;
1301 streams[0]->src.height = mode->vdisplay;
1302 streams[0]->dst = streams[0]->src;
1303
1304 if (dc_validate_resources(adev->dm.dc, &val_set, 1))
1305 result = MODE_OK;
1306
1307 dc_target_release(target);
1308target_create_fail:
1309 dc_stream_release(streams[0]);
1310stream_create_fail:
1311 /* TODO: error handling*/
1312 return result;
1313}
1314
1315static const struct drm_connector_helper_funcs
1316amdgpu_dm_connector_helper_funcs = {
1317 /*
1318	 * If a second, bigger display is hotplugged in FB console mode, its
1319	 * higher-resolution modes will be filtered out by drm_mode_validate_size()
1320	 * and will be missing after the user starts lightdm. So we need to renew
1321	 * the mode list in the get_modes callback, not just return the mode count.
1322 */
1323 .get_modes = get_modes,
1324 .mode_valid = amdgpu_dm_connector_mode_valid,
1325 .best_encoder = best_encoder
1326};
1327
1328static void dm_crtc_helper_disable(struct drm_crtc *crtc)
1329{
1330}
1331
1332static int dm_crtc_helper_atomic_check(
1333 struct drm_crtc *crtc,
1334 struct drm_crtc_state *state)
1335{
1336 return 0;
1337}
1338
1339static bool dm_crtc_helper_mode_fixup(
1340 struct drm_crtc *crtc,
1341 const struct drm_display_mode *mode,
1342 struct drm_display_mode *adjusted_mode)
1343{
1344 return true;
1345}
1346
1347static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
1348 .disable = dm_crtc_helper_disable,
1349 .atomic_check = dm_crtc_helper_atomic_check,
1350 .mode_fixup = dm_crtc_helper_mode_fixup
1351};
1352
1353static void dm_encoder_helper_disable(struct drm_encoder *encoder)
1354{
1355
1356}
1357
1358static int dm_encoder_helper_atomic_check(
1359 struct drm_encoder *encoder,
1360 struct drm_crtc_state *crtc_state,
1361 struct drm_connector_state *conn_state)
1362{
1363 return 0;
1364}
1365
1366const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
1367 .disable = dm_encoder_helper_disable,
1368 .atomic_check = dm_encoder_helper_atomic_check
1369};
1370
1371static const struct drm_plane_funcs dm_plane_funcs = {
1372 .reset = drm_atomic_helper_plane_reset,
1373 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
1374 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state
1375};
1376
1377static void clear_unrelated_fields(struct drm_plane_state *state)
1378{
1379 state->crtc = NULL;
1380 state->fb = NULL;
1381 state->state = NULL;
1382 state->fence = NULL;
1383}
1384
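/*
 * Decide whether a plane update can be handled as a page flip. Both the old
 * and new state must carry a framebuffer and an event, the tiling flags must
 * match when a surface commit would otherwise be required, and the two plane
 * states must be identical once crtc/fb/state/fence are ignored. An async
 * flip request on the CRTC also forces a flip.
 */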
1385static bool page_flip_needed(
1386 const struct drm_plane_state *new_state,
1387 const struct drm_plane_state *old_state,
1388 struct drm_pending_vblank_event *event,
1389 bool commit_surface_required)
1390{
1391 struct drm_plane_state old_state_tmp;
1392 struct drm_plane_state new_state_tmp;
1393
1394 struct amdgpu_framebuffer *amdgpu_fb_old;
1395 struct amdgpu_framebuffer *amdgpu_fb_new;
1396 struct amdgpu_crtc *acrtc_new;
1397
1398 uint64_t old_tiling_flags;
1399 uint64_t new_tiling_flags;
1400
1401 bool page_flip_required;
1402
1403 if (!old_state)
1404 return false;
1405
1406 if (!old_state->fb)
1407 return false;
1408
1409 if (!new_state)
1410 return false;
1411
1412 if (!new_state->fb)
1413 return false;
1414
1415 old_state_tmp = *old_state;
1416 new_state_tmp = *new_state;
1417
1418 if (!event)
1419 return false;
1420
1421 amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb);
1422 amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb);
1423
1424 if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL))
1425 return false;
1426
1427 if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL))
1428 return false;
1429
1430	if (commit_surface_required &&
1431	    old_tiling_flags != new_tiling_flags)
1432 return false;
1433
1434 clear_unrelated_fields(&old_state_tmp);
1435 clear_unrelated_fields(&new_state_tmp);
1436
1437	page_flip_required = memcmp(&old_state_tmp,
1438				    &new_state_tmp,
1439				    sizeof(old_state_tmp)) == 0;
1440	if (new_state->crtc && !page_flip_required) {
1441 acrtc_new = to_amdgpu_crtc(new_state->crtc);
1442 if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
1443 page_flip_required = true;
1444 }
1445 return page_flip_required;
1446}
1447
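/* Pin the buffer object backing the new framebuffer into VRAM. */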
1448static int dm_plane_helper_prepare_fb(
1449 struct drm_plane *plane,
1450 struct drm_plane_state *new_state)
1451{
1452 struct amdgpu_framebuffer *afb;
1453 struct drm_gem_object *obj;
1454 struct amdgpu_bo *rbo;
1455 int r;
1456
1457 if (!new_state->fb) {
1458 DRM_DEBUG_KMS("No FB bound\n");
1459 return 0;
1460 }
1461
1462 afb = to_amdgpu_framebuffer(new_state->fb);
1463
1464 obj = afb->obj;
1465 rbo = gem_to_amdgpu_bo(obj);
1466 r = amdgpu_bo_reserve(rbo, false);
1467 if (unlikely(r != 0))
1468 return r;
1469
1470 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
1471
1472 amdgpu_bo_unreserve(rbo);
1473
1474 if (unlikely(r != 0)) {
1475 DRM_ERROR("Failed to pin framebuffer\n");
1476 return r;
1477 }
1478
1479 return 0;
1480}
1481
1482static void dm_plane_helper_cleanup_fb(
1483 struct drm_plane *plane,
1484 struct drm_plane_state *old_state)
1485{
1486 struct amdgpu_bo *rbo;
1487 struct amdgpu_framebuffer *afb;
1488 int r;
1489
1490 if (!old_state->fb)
1491 return;
1492
1493 afb = to_amdgpu_framebuffer(old_state->fb);
1494 rbo = gem_to_amdgpu_bo(afb->obj);
1495 r = amdgpu_bo_reserve(rbo, false);
1496 if (unlikely(r)) {
1497 DRM_ERROR("failed to reserve rbo before unpin\n");
1498 return;
1499 } else {
1500 amdgpu_bo_unpin(rbo);
1501 amdgpu_bo_unreserve(rbo);
1502 }
1503}
1504
1505int dm_create_validation_set_for_target(struct drm_connector *connector,
1506 struct drm_display_mode *mode, struct dc_validation_set *val_set)
1507{
1508 int result = MODE_ERROR;
1509 const struct dc_sink *dc_sink =
1510 to_amdgpu_connector(connector)->dc_sink;
1511 /* TODO: Unhardcode stream count */
1512 struct dc_stream *streams[1];
1513 struct dc_target *target;
1514
1515 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1516 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
1517 return result;
1518
1519 if (NULL == dc_sink) {
1520 DRM_ERROR("dc_sink is NULL!\n");
1521 return result;
1522 }
1523
1524 streams[0] = dc_create_stream_for_sink(dc_sink);
1525
1526 if (NULL == streams[0]) {
1527 DRM_ERROR("Failed to create stream for sink!\n");
1528 return result;
1529 }
1530
1531 drm_mode_set_crtcinfo(mode, 0);
1532
1533 fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);
1534
1535 target = dc_create_target_for_streams(streams, 1);
1536 val_set->target = target;
1537
1538 if (NULL == val_set->target) {
1539 DRM_ERROR("Failed to create target with stream!\n");
1540 goto fail;
1541 }
1542
1543 streams[0]->src.width = mode->hdisplay;
1544 streams[0]->src.height = mode->vdisplay;
1545 streams[0]->dst = streams[0]->src;
1546
1547 return MODE_OK;
1548
1549fail:
1550 dc_stream_release(streams[0]);
1551 return result;
1552
1553}
1554
1555static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1556 .prepare_fb = dm_plane_helper_prepare_fb,
1557 .cleanup_fb = dm_plane_helper_cleanup_fb,
1558};
1559
1560/*
1561 * TODO: these are currently initialized to rgb formats only.
1562 * For future use cases we should either initialize them dynamically based on
1563 * plane capabilities, or initialize this array to all formats, so the internal
1564 * drm check will succeed, and let DC implement the proper check
1565 */
1566static uint32_t rgb_formats[] = {
1567 DRM_FORMAT_XRGB4444,
1568 DRM_FORMAT_ARGB4444,
1569 DRM_FORMAT_RGBA4444,
1570 DRM_FORMAT_ARGB1555,
1571 DRM_FORMAT_RGB565,
1572 DRM_FORMAT_RGB888,
1573 DRM_FORMAT_XRGB8888,
1574 DRM_FORMAT_ARGB8888,
1575 DRM_FORMAT_RGBA8888,
1576 DRM_FORMAT_XRGB2101010,
1577 DRM_FORMAT_XBGR2101010,
1578 DRM_FORMAT_ARGB2101010,
1579 DRM_FORMAT_ABGR2101010,
1580};
1581
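/*
 * Allocate the primary plane for a CRTC, initialize both with their DM
 * funcs/helpers, and set the cursor limits and gamma size. On failure the
 * plane is freed and crtc_id is set to -1.
 */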
1582int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
1583 struct amdgpu_crtc *acrtc,
1584 uint32_t crtc_index)
1585{
1586 int res = -ENOMEM;
1587
1588 struct drm_plane *primary_plane =
1589 kzalloc(sizeof(*primary_plane), GFP_KERNEL);
1590
1591 if (!primary_plane)
1592 goto fail_plane;
1593
1594 primary_plane->format_default = true;
1595
1596 res = drm_universal_plane_init(
1597 dm->adev->ddev,
1598 primary_plane,
1599 0,
1600 &dm_plane_funcs,
1601 rgb_formats,
1602 ARRAY_SIZE(rgb_formats),
1603 NULL,
1604			DRM_PLANE_TYPE_PRIMARY, NULL);
1605
	if (res)
		goto fail;

1606	primary_plane->crtc = &acrtc->base;
1607
1608 drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs);
1609
1610 res = drm_crtc_init_with_planes(
1611 dm->ddev,
1612 &acrtc->base,
1613 primary_plane,
1614 NULL,
1615 &amdgpu_dm_crtc_funcs, NULL);
1616
1617 if (res)
1618 goto fail;
1619
1620 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
1621
1622 acrtc->max_cursor_width = 128;
1623 acrtc->max_cursor_height = 128;
1624
1625 acrtc->crtc_id = crtc_index;
1626 acrtc->base.enabled = false;
1627
1628 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
1629 drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
1630
1631 return 0;
1632fail:
1633 kfree(primary_plane);
1634fail_plane:
1635 acrtc->crtc_id = -1;
1636 return res;
1637}
1638
1639static int to_drm_connector_type(enum signal_type st)
1640{
1641 switch (st) {
1642 case SIGNAL_TYPE_HDMI_TYPE_A:
1643 return DRM_MODE_CONNECTOR_HDMIA;
1644 case SIGNAL_TYPE_EDP:
1645 return DRM_MODE_CONNECTOR_eDP;
1646 case SIGNAL_TYPE_RGB:
1647 return DRM_MODE_CONNECTOR_VGA;
1648 case SIGNAL_TYPE_DISPLAY_PORT:
1649 case SIGNAL_TYPE_DISPLAY_PORT_MST:
1650 return DRM_MODE_CONNECTOR_DisplayPort;
1651 case SIGNAL_TYPE_DVI_DUAL_LINK:
1652 case SIGNAL_TYPE_DVI_SINGLE_LINK:
1653 return DRM_MODE_CONNECTOR_DVID;
1654 case SIGNAL_TYPE_VIRTUAL:
1655 return DRM_MODE_CONNECTOR_VIRTUAL;
1656
1657 default:
1658 return DRM_MODE_CONNECTOR_Unknown;
1659 }
1660}
1661
1662static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
1663{
1664 const struct drm_connector_helper_funcs *helper =
1665 connector->helper_private;
1666 struct drm_encoder *encoder;
1667 struct amdgpu_encoder *amdgpu_encoder;
1668
1669 encoder = helper->best_encoder(connector);
1670
1671 if (encoder == NULL)
1672 return;
1673
1674 amdgpu_encoder = to_amdgpu_encoder(encoder);
1675
1676 amdgpu_encoder->native_mode.clock = 0;
1677
1678 if (!list_empty(&connector->probed_modes)) {
1679 struct drm_display_mode *preferred_mode = NULL;
1680 list_for_each_entry(preferred_mode,
1681 &connector->probed_modes,
1682 head) {
1683 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
1684 amdgpu_encoder->native_mode = *preferred_mode;
1685 }
1686 break;
1687 }
1688
1689 }
1690}
1691
1692static struct drm_display_mode *amdgpu_dm_create_common_mode(
1693 struct drm_encoder *encoder, char *name,
1694 int hdisplay, int vdisplay)
1695{
1696 struct drm_device *dev = encoder->dev;
1697 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1698 struct drm_display_mode *mode = NULL;
1699 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
1700
1701 mode = drm_mode_duplicate(dev, native_mode);
1702
1703	if (mode == NULL)
1704 return NULL;
1705
1706 mode->hdisplay = hdisplay;
1707 mode->vdisplay = vdisplay;
1708 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
1709 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
1710
1711 return mode;
1712
1713}
1714
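/*
 * Add a set of common modes derived from the native mode: every entry that
 * does not exceed the native resolution, is not the native mode itself and
 * is not already in the probed list is duplicated from the native timing and
 * added to the connector.
 */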
1715static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
1716 struct drm_connector *connector)
1717{
1718 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1719 struct drm_display_mode *mode = NULL;
1720 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
1721 struct amdgpu_connector *amdgpu_connector =
1722 to_amdgpu_connector(connector);
1723 int i;
1724 int n;
1725 struct mode_size {
1726 char name[DRM_DISPLAY_MODE_LEN];
1727 int w;
1728 int h;
1729	} common_modes[] = {
1730 { "640x480", 640, 480},
1731 { "800x600", 800, 600},
1732 { "1024x768", 1024, 768},
1733 { "1280x720", 1280, 720},
1734 { "1280x800", 1280, 800},
1735 {"1280x1024", 1280, 1024},
1736 { "1440x900", 1440, 900},
1737 {"1680x1050", 1680, 1050},
1738 {"1600x1200", 1600, 1200},
1739 {"1920x1080", 1920, 1080},
1740 {"1920x1200", 1920, 1200}
1741 };
1742
1743	n = ARRAY_SIZE(common_modes);
1744
1745 for (i = 0; i < n; i++) {
1746 struct drm_display_mode *curmode = NULL;
1747 bool mode_existed = false;
1748
1749 if (common_modes[i].w > native_mode->hdisplay ||
1750 common_modes[i].h > native_mode->vdisplay ||
1751 (common_modes[i].w == native_mode->hdisplay &&
1752 common_modes[i].h == native_mode->vdisplay))
1753 continue;
1754
1755 list_for_each_entry(curmode, &connector->probed_modes, head) {
1756 if (common_modes[i].w == curmode->hdisplay &&
1757 common_modes[i].h == curmode->vdisplay) {
1758 mode_existed = true;
1759 break;
1760 }
1761 }
1762
1763 if (mode_existed)
1764 continue;
1765
1766 mode = amdgpu_dm_create_common_mode(encoder,
1767 common_modes[i].name, common_modes[i].w,
1768 common_modes[i].h);
1769 drm_mode_probed_add(connector, mode);
1770 amdgpu_connector->num_modes++;
1771 }
1772}
1773
1774static void amdgpu_dm_connector_ddc_get_modes(
1775 struct drm_connector *connector,
1776 struct edid *edid)
1777{
1778 struct amdgpu_connector *amdgpu_connector =
1779 to_amdgpu_connector(connector);
1780
1781 if (edid) {
1782 /* empty probed_modes */
1783 INIT_LIST_HEAD(&connector->probed_modes);
1784 amdgpu_connector->num_modes =
1785 drm_add_edid_modes(connector, edid);
1786
1787 drm_edid_to_eld(connector, edid);
1788
1789 amdgpu_dm_get_native_mode(connector);
1790 } else
1791 amdgpu_connector->num_modes = 0;
1792}
1793
1794int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
1795{
1796 const struct drm_connector_helper_funcs *helper =
1797 connector->helper_private;
1798 struct amdgpu_connector *amdgpu_connector =
1799 to_amdgpu_connector(connector);
1800 struct drm_encoder *encoder;
1801 struct edid *edid = amdgpu_connector->edid;
1802
1803 encoder = helper->best_encoder(connector);
1804
1805 amdgpu_dm_connector_ddc_get_modes(connector, edid);
1806 amdgpu_dm_connector_add_common_modes(encoder, connector);
1807 return amdgpu_connector->num_modes;
1808}
1809
1810void amdgpu_dm_connector_init_helper(
1811 struct amdgpu_display_manager *dm,
1812 struct amdgpu_connector *aconnector,
1813 int connector_type,
1814 const struct dc_link *link,
1815 int link_index)
1816{
1817 struct amdgpu_device *adev = dm->ddev->dev_private;
1818
1819 aconnector->connector_id = link_index;
1820 aconnector->dc_link = link;
1821 aconnector->base.interlace_allowed = true;
1822 aconnector->base.doublescan_allowed = true;
1823 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
1824 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
1825
1826 mutex_init(&aconnector->hpd_lock);
1827
1828	/* Configure HPD hot plug support: connector->polled defaults to 0,
1829	 * which means HPD hot plug is not supported. */
1830 switch (connector_type) {
1831 case DRM_MODE_CONNECTOR_HDMIA:
1832 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
1833 break;
1834 case DRM_MODE_CONNECTOR_DisplayPort:
1835 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
1836 break;
1837 case DRM_MODE_CONNECTOR_DVID:
1838 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
1839 break;
1840 default:
1841 break;
1842 }
1843
1844 drm_object_attach_property(&aconnector->base.base,
1845 dm->ddev->mode_config.scaling_mode_property,
1846 DRM_MODE_SCALE_NONE);
1847
1848 drm_object_attach_property(&aconnector->base.base,
1849 adev->mode_info.underscan_property,
1850 UNDERSCAN_OFF);
1851 drm_object_attach_property(&aconnector->base.base,
1852 adev->mode_info.underscan_hborder_property,
1853 0);
1854 drm_object_attach_property(&aconnector->base.base,
1855 adev->mode_info.underscan_vborder_property,
1856 0);
1857
1858}
1859
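/*
 * i2c algorithm master_xfer: translate the i2c_msg array into a DC
 * i2c_command and submit it on the link's hardware i2c engine.
 */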
1860int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
1861 struct i2c_msg *msgs, int num)
1862{
1863 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
1864 struct i2c_command cmd;
1865 int i;
1866 int result = -EIO;
1867
1868 cmd.payloads = kzalloc(num * sizeof(struct i2c_payload), GFP_KERNEL);
1869
1870 if (!cmd.payloads)
1871 return result;
1872
1873 cmd.number_of_payloads = num;
1874 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
1875 cmd.speed = 100;
1876
1877 for (i = 0; i < num; i++) {
1878		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
1879 cmd.payloads[i].address = msgs[i].addr;
1880 cmd.payloads[i].length = msgs[i].len;
1881 cmd.payloads[i].data = msgs[i].buf;
1882 }
1883
1884 if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd))
1885 result = num;
1886
1887 kfree(cmd.payloads);
1888
1889 return result;
1890}
1891
1892u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
1893{
1894 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1895}
1896
1897static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
1898 .master_xfer = amdgpu_dm_i2c_xfer,
1899 .functionality = amdgpu_dm_i2c_func,
1900};
1901
1902struct amdgpu_i2c_adapter *create_i2c(unsigned int link_index, struct amdgpu_display_manager *dm, int *res)
1903{
1904 struct amdgpu_i2c_adapter *i2c;
1905
1906	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
1907 i2c->dm = dm;
1908 i2c->base.owner = THIS_MODULE;
1909 i2c->base.class = I2C_CLASS_DDC;
1910 i2c->base.dev.parent = &dm->adev->pdev->dev;
1911 i2c->base.algo = &amdgpu_dm_i2c_algo;
1912 snprintf(i2c->base.name, sizeof (i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
1913 i2c->link_index = link_index;
1914 i2c_set_adapdata(&i2c->base, i2c);
1915
1916 return i2c;
1917}
1918
1919/* Note: this function assumes that dc_link_detect() was called for the
1920 * dc_link which will be represented by this aconnector. */
1921int amdgpu_dm_connector_init(
1922 struct amdgpu_display_manager *dm,
1923 struct amdgpu_connector *aconnector,
1924 uint32_t link_index,
1925 struct amdgpu_encoder *aencoder)
1926{
1927 int res = 0;
1928 int connector_type;
1929 struct dc *dc = dm->dc;
1930 const struct dc_link *link = dc_get_link_at_index(dc, link_index);
1931 struct amdgpu_i2c_adapter *i2c;
1932
1933 DRM_DEBUG_KMS("%s()\n", __func__);
1934
1935	i2c = create_i2c(link->link_index, dm, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}
1936	aconnector->i2c = i2c;
1937	res = i2c_add_adapter(&i2c->base);
1938
1939 if (res) {
1940 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
1941 goto out_free;
1942 }
1943
1944 connector_type = to_drm_connector_type(link->connector_signal);
1945
1946 res = drm_connector_init(
1947 dm->ddev,
1948 &aconnector->base,
1949 &amdgpu_dm_connector_funcs,
1950 connector_type);
1951
1952 if (res) {
1953 DRM_ERROR("connector_init failed\n");
1954 aconnector->connector_id = -1;
1955 goto out_free;
1956 }
1957
1958 drm_connector_helper_add(
1959 &aconnector->base,
1960 &amdgpu_dm_connector_helper_funcs);
1961
1962 amdgpu_dm_connector_init_helper(
1963 dm,
1964 aconnector,
1965 connector_type,
1966 link,
1967 link_index);
1968
1969 drm_mode_connector_attach_encoder(
1970 &aconnector->base, &aencoder->base);
1971
1972 drm_connector_register(&aconnector->base);
1973
1974 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
1975 || connector_type == DRM_MODE_CONNECTOR_eDP)
1976 amdgpu_dm_initialize_mst_connector(dm, aconnector);
1977
1978#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1979 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1980
1981 /* NOTE: this currently will create backlight device even if a panel
1982 * is not connected to the eDP/LVDS connector.
1983 *
1984 * This is less than ideal but we don't have sink information at this
1985 * stage since detection happens after. We can't do detection earlier
1986 * since MST detection needs connectors to be created first.
1987 */
1988 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
1989		/* Even if registration fails, we should continue with
1990		 * DM initialization, because not having a backlight control
1991		 * is better than a black screen. */
1992 amdgpu_dm_register_backlight_device(dm);
1993
1994 if (dm->backlight_dev)
1995 dm->backlight_link = link;
1996 }
1997#endif
1998
1999out_free:
2000 if (res) {
2001 kfree(i2c);
2002 aconnector->i2c = NULL;
2003 }
2004 return res;
2005}
2006
2007int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
2008{
2009 switch (adev->mode_info.num_crtc) {
2010 case 1:
2011 return 0x1;
2012 case 2:
2013 return 0x3;
2014 case 3:
2015 return 0x7;
2016 case 4:
2017 return 0xf;
2018 case 5:
2019 return 0x1f;
2020 case 6:
2021 default:
2022 return 0x3f;
2023 }
2024}
2025
2026int amdgpu_dm_encoder_init(
2027 struct drm_device *dev,
2028 struct amdgpu_encoder *aencoder,
2029 uint32_t link_index)
2030{
2031 struct amdgpu_device *adev = dev->dev_private;
2032
2033 int res = drm_encoder_init(dev,
2034 &aencoder->base,
2035 &amdgpu_dm_encoder_funcs,
2036 DRM_MODE_ENCODER_TMDS,
2037 NULL);
2038
2039 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
2040
2041 if (!res)
2042 aencoder->encoder_id = link_index;
2043 else
2044 aencoder->encoder_id = -1;
2045
2046 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
2047
2048 return res;
2049}
2050
2051enum dm_commit_action {
2052 DM_COMMIT_ACTION_NOTHING,
2053 DM_COMMIT_ACTION_RESET,
2054 DM_COMMIT_ACTION_DPMS_ON,
2055 DM_COMMIT_ACTION_DPMS_OFF,
2056 DM_COMMIT_ACTION_SET
2057};
2058
2059static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
2060{
2061	/* mode_changed means either the mode actually changed or enable changed */
2062	/* active_changed means DPMS changed */
2063
2064 DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
2065 state->enable,
2066 state->active,
2067 state->planes_changed,
2068 state->mode_changed,
2069 state->active_changed,
2070 state->connectors_changed);
2071
2072 if (state->mode_changed) {
2073		/* if it got disabled - call reset mode */
2074 if (!state->enable)
2075 return DM_COMMIT_ACTION_RESET;
2076
2077 if (state->active)
2078 return DM_COMMIT_ACTION_SET;
2079 else
2080 return DM_COMMIT_ACTION_RESET;
2081 } else {
2082 /* ! mode_changed */
2083
2084		/* if it remains disabled - skip it */
2085 if (!state->enable)
2086 return DM_COMMIT_ACTION_NOTHING;
2087
2088 if (state->active && state->connectors_changed)
2089 return DM_COMMIT_ACTION_SET;
2090
2091 if (state->active_changed) {
2092 if (state->active) {
2093 return DM_COMMIT_ACTION_DPMS_ON;
2094 } else {
2095 return DM_COMMIT_ACTION_DPMS_OFF;
2096 }
2097 } else {
2098 /* ! active_changed */
2099 return DM_COMMIT_ACTION_NOTHING;
2100 }
2101 }
2102}
2103
2104
2105typedef bool (*predicate)(struct amdgpu_crtc *acrtc);
2106
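/*
 * Poll (msleep(1) per iteration) while the predicate holds for the CRTC,
 * warning and giving up after roughly one second.
 */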
2107static void wait_while_pflip_status(struct amdgpu_device *adev,
2108 struct amdgpu_crtc *acrtc, predicate f) {
2109 int count = 0;
2110 while (f(acrtc)) {
2111 /* Spin Wait*/
2112 msleep(1);
2113 count++;
2114 if (count == 1000) {
2115 DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
2116 __func__, acrtc->crtc_id,
2117 acrtc,
2118 acrtc->pflip_status);
2119
2120 /* we do not expect to hit this case except on Polaris with PHY PLL
2121 * 1. DP to HDMI passive dongle connected
2122 * 2. unplug (headless)
2123 * 3. plug in DP
2124			 * 3a. on plug in, DP will try to verify the link by training, and
2125			 * training disables the PHY PLL which HDMI relies on to drive the TG
2126			 * 3b. this prevents the flip interrupt from being generated, so we
2127			 * exit when the timeout expires. However, we do not have code to clean
2128			 * up the flip; flip cleanup will happen when the address is written
2129			 * with the restore mode change
2130 */
2131 WARN_ON(1);
2132 break;
2133 }
2134 }
2135
2136 DRM_DEBUG_DRIVER("%s - Finished waiting for:%d msec, crtc:%d[%p], pflip_stat:%d \n",
2137 __func__,
2138 count,
2139 acrtc->crtc_id,
2140 acrtc,
2141 acrtc->pflip_status);
2142}
2143
2144static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc)
2145{
2146 return acrtc->pflip_status != AMDGPU_FLIP_NONE;
2147}
2148
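/*
 * Enable or disable vblank/pageflip interrupts for a CRTC. On disable, any
 * page flip still in progress is waited for before the interrupt is released.
 */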
2149static void manage_dm_interrupts(
2150 struct amdgpu_device *adev,
2151 struct amdgpu_crtc *acrtc,
2152 bool enable)
2153{
2154 /*
2155	 * this is not a correct translation, but it works as long as the
2156	 * VBLANK constant is the same as the PFLIP one
2157 */
2158 int irq_type =
2159 amdgpu_crtc_idx_to_irq_type(
2160 adev,
2161 acrtc->crtc_id);
2162
2163 if (enable) {
2164 drm_crtc_vblank_on(&acrtc->base);
2165 amdgpu_irq_get(
2166 adev,
2167 &adev->pageflip_irq,
2168 irq_type);
2169 } else {
2170 wait_while_pflip_status(adev, acrtc,
2171 pflip_in_progress_predicate);
2172
2173 amdgpu_irq_put(
2174 adev,
2175 &adev->pageflip_irq,
2176 irq_type);
2177 drm_crtc_vblank_off(&acrtc->base);
2178 }
2179}
2180
2181
2182static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc)
2183{
2184 return acrtc->pflip_status == AMDGPU_FLIP_PENDING;
2185}
2186
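/*
 * Return true when the scaling mode or the underscan enable/border settings
 * differ between the new and old connector state.
 */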
2187static bool is_scaling_state_different(
2188 const struct dm_connector_state *dm_state,
2189 const struct dm_connector_state *old_dm_state)
2190{
2191 if (dm_state->scaling != old_dm_state->scaling)
2192 return true;
2193 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
2194 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
2195 return true;
2196 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
2197 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
2198 return true;
2199 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder
2200 || dm_state->underscan_vborder != old_dm_state->underscan_vborder)
2201 return true;
2202 return false;
2203}
2204
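/*
 * Tear down the dc_target currently bound to a CRTC: disable its interrupts,
 * remove its streams from the freesync module and release the target.
 */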
2205static void remove_target(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
2206{
2207 int i;
2208
2209 /*
2210	 * we disable vblanks and pflips on the crtc that
2211	 * is about to be changed
2212 */
2213 manage_dm_interrupts(adev, acrtc, false);
2214 /* this is the update mode case */
2215 if (adev->dm.freesync_module)
2216 for (i = 0; i < acrtc->target->stream_count; i++)
2217 mod_freesync_remove_stream(
2218 adev->dm.freesync_module,
2219 acrtc->target->streams[i]);
2220 dc_target_release(acrtc->target);
2221 acrtc->target = NULL;
2222 acrtc->otg_inst = -1;
2223 acrtc->enabled = false;
2224}
2225
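/*
 * Atomic commit implementation: pin the new framebuffers, prepare any page
 * flips, swap in the new state, create/remove dc_targets according to the
 * per-CRTC commit action, commit the resulting target list to DC, commit
 * surfaces where needed, re-enable interrupts on newly set CRTCs and finally
 * submit the prepared flips.
 */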
2226int amdgpu_dm_atomic_commit(
2227 struct drm_device *dev,
2228 struct drm_atomic_state *state,
2229 bool async)
2230{
2231 struct amdgpu_device *adev = dev->dev_private;
2232 struct amdgpu_display_manager *dm = &adev->dm;
2233 struct drm_plane *plane;
2234 struct drm_plane_state *new_plane_state;
2235 struct drm_plane_state *old_plane_state;
2236 uint32_t i, j;
2237 int32_t ret = 0;
2238 uint32_t commit_targets_count = 0;
2239 uint32_t new_crtcs_count = 0;
2240 uint32_t flip_crtcs_count = 0;
2241 struct drm_crtc *crtc;
2242 struct drm_crtc_state *old_crtc_state;
2243
2244 struct dc_target *commit_targets[MAX_TARGETS];
2245 struct amdgpu_crtc *new_crtcs[MAX_TARGETS];
2246 struct dc_target *new_target;
2247 struct drm_crtc *flip_crtcs[MAX_TARGETS];
2248 struct amdgpu_flip_work *work[MAX_TARGETS] = {0};
2249 struct amdgpu_bo *new_abo[MAX_TARGETS] = {0};
2250
2251 /* In this step all new fb would be pinned */
2252
2253 /*
2254 * TODO: Revisit when we support true asynchronous commit.
2255 * Right now we receive async commit only from pageflip, in which case
2256 * we should not pin/unpin the fb here, it should be done in
2257 * amdgpu_crtc_flip and from the vblank irq handler.
2258 */
2259 if (!async) {
2260 ret = drm_atomic_helper_prepare_planes(dev, state);
2261 if (ret)
2262 return ret;
2263 }
2264
2265 /* Page flip if needed */
2266 for_each_plane_in_state(state, plane, new_plane_state, i) {
2267 struct drm_plane_state *old_plane_state = plane->state;
2268 struct drm_crtc *crtc = new_plane_state->crtc;
2269 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2270 struct drm_framebuffer *fb = new_plane_state->fb;
2271 struct drm_crtc_state *crtc_state;
2272
2273 if (!fb || !crtc)
2274 continue;
2275
2276 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2277
2278 if (!crtc_state->planes_changed || !crtc_state->active)
2279 continue;
2280
2281 if (page_flip_needed(
2282 new_plane_state,
2283 old_plane_state,
2284 crtc_state->event,
2285 false)) {
2286 ret = amdgpu_crtc_prepare_flip(crtc,
2287 fb,
2288 crtc_state->event,
2289 acrtc->flip_flags,
2290 drm_crtc_vblank_count(crtc),
2291 &work[flip_crtcs_count],
2292 &new_abo[flip_crtcs_count]);
2293
2294 if (ret) {
2295 /* According to atomic_commit hook API, EINVAL is not allowed */
2296 if (unlikely(ret == -EINVAL))
2297 ret = -ENOMEM;
2298
2299 DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], "
2300 "failed, errno = %d\n",
2301 acrtc->crtc_id,
2302 acrtc,
2303 ret);
2304 /* cleanup all flip configurations which
2305 * succeeded in this commit
2306 */
2307 for (i = 0; i < flip_crtcs_count; i++)
2308 amdgpu_crtc_cleanup_flip_ctx(
2309 work[i],
2310 new_abo[i]);
2311
2312 return ret;
2313 }
2314
2315 flip_crtcs[flip_crtcs_count] = crtc;
2316 flip_crtcs_count++;
2317 }
2318 }
2319
2320 /*
2321 * This is the point of no return - everything below never fails except
2322 * when the hw goes bonghits. Which means we can commit the new state on
2323 * the software side now.
2324 */
2325
2326 drm_atomic_helper_swap_state(state, true);
2327
2328 /*
2329	 * From this point on, 'state' really holds the old state. The new
2330	 * state has been swapped into the relevant objects and can be accessed from there
2331 */
2332
2333 /*
2334	 * there is no fence usage in the state yet, so we can skip the following:
2335 * wait_for_fences(dev, state);
2336 */
2337
2338 drm_atomic_helper_update_legacy_modeset_state(dev, state);
2339
2340 /* update changed items */
2341 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2342 struct amdgpu_crtc *acrtc;
2343 struct amdgpu_connector *aconnector = NULL;
2344 enum dm_commit_action action;
2345 struct drm_crtc_state *new_state = crtc->state;
2346
2347 acrtc = to_amdgpu_crtc(crtc);
2348
2349 aconnector =
2350 amdgpu_dm_find_first_crct_matching_connector(
2351 state,
2352 crtc,
2353 false);
2354
2355 /* handles headless hotplug case, updating new_state and
2356 * aconnector as needed
2357 */
2358
2359 action = get_dm_commit_action(new_state);
2360
2361 switch (action) {
2362 case DM_COMMIT_ACTION_DPMS_ON:
2363 case DM_COMMIT_ACTION_SET: {
2364 struct dm_connector_state *dm_state = NULL;
2365 new_target = NULL;
2366
2367 if (aconnector)
2368 dm_state = to_dm_connector_state(aconnector->base.state);
2369
2370 new_target = create_target_for_sink(
2371 aconnector,
2372 &crtc->state->mode,
2373 dm_state);
2374
2375 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
2376
2377 if (!new_target) {
2378 /*
2379 * this could happen because of issues with
2380				 * userspace notification delivery.
2381				 * In this case userspace tries to set a mode on
2382				 * a display which is in fact disconnected.
2383				 * dc_sink is NULL on the aconnector in this case.
2384				 * We expect a reset mode call to come soon.
2385				 *
2386				 * This can also happen when an unplug is done
2387				 * during the resume sequence
2388 *
2389 * In this case, we want to pretend we still
2390 * have a sink to keep the pipe running so that
2391 * hw state is consistent with the sw state
2392 */
2393 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2394 __func__, acrtc->base.base.id);
2395 break;
2396 }
2397
2398 if (acrtc->target)
2399 remove_target(adev, acrtc);
2400
2401 /*
2402			 * save the set-mode crtcs; we need to enable
2403			 * vblanks once all resources are acquired in dc,
2404			 * after dc_commit_targets
2405 */
2406 new_crtcs[new_crtcs_count] = acrtc;
2407 new_crtcs_count++;
2408
2409 acrtc->target = new_target;
2410 acrtc->enabled = true;
2411 acrtc->hw_mode = crtc->state->mode;
2412 crtc->hwmode = crtc->state->mode;
2413
2414 break;
2415 }
2416
2417 case DM_COMMIT_ACTION_NOTHING: {
2418 struct dm_connector_state *dm_state = NULL;
2419
2420 if (!aconnector)
2421 break;
2422
2423 dm_state = to_dm_connector_state(aconnector->base.state);
2424
2425 /* Scaling update */
2426 update_stream_scaling_settings(
2427 &crtc->state->mode,
2428 dm_state,
2429 acrtc->target->streams[0]);
2430
2431 break;
2432 }
2433 case DM_COMMIT_ACTION_DPMS_OFF:
2434 case DM_COMMIT_ACTION_RESET:
2435 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
2436 /* i.e. reset mode */
2437 if (acrtc->target)
2438 remove_target(adev, acrtc);
2439 break;
2440 } /* switch() */
2441 } /* for_each_crtc_in_state() */
2442
2443 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2444
2445 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2446
2447 if (acrtc->target) {
2448 commit_targets[commit_targets_count] = acrtc->target;
2449 ++commit_targets_count;
2450 }
2451 }
2452
2453 /*
2454 * Add streams after required streams from new and replaced targets
2455 * are removed from freesync module
2456 */
2457 if (adev->dm.freesync_module) {
2458 for (i = 0; i < new_crtcs_count; i++) {
2459 struct amdgpu_connector *aconnector = NULL;
2460 new_target = new_crtcs[i]->target;
2461 aconnector =
2462 amdgpu_dm_find_first_crct_matching_connector(
2463 state,
2464 &new_crtcs[i]->base,
2465 false);
2466 if (!aconnector) {
2467 DRM_INFO(
2468 "Atomic commit: Failed to find connector for acrtc id:%d "
2469 "skipping freesync init\n",
2470 new_crtcs[i]->crtc_id);
2471 continue;
2472 }
2473
2474 for (j = 0; j < new_target->stream_count; j++)
2475 mod_freesync_add_stream(
2476 adev->dm.freesync_module,
2477 new_target->streams[j], &aconnector->caps);
2478 }
2479 }
2480
2481 /* DC is optimized not to do anything if 'targets' didn't change. */
2482 dc_commit_targets(dm->dc, commit_targets, commit_targets_count);
2483
2484 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2485 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2486
2487 if (acrtc->target != NULL)
2488 acrtc->otg_inst =
2489 dc_target_get_status(acrtc->target)->primary_otg_inst;
2490 }
2491
2492 /* update planes when needed */
2493 for_each_plane_in_state(state, plane, old_plane_state, i) {
2494 struct drm_plane_state *plane_state = plane->state;
2495 struct drm_crtc *crtc = plane_state->crtc;
2496 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2497 struct drm_framebuffer *fb = plane_state->fb;
2498 struct drm_connector *connector;
2499 struct dm_connector_state *dm_state = NULL;
2500 enum dm_commit_action action;
2501
2502 if (!fb || !crtc || !crtc->state->active)
2503 continue;
2504
2505 action = get_dm_commit_action(crtc->state);
2506
2507 /* Surfaces are created under two scenarios:
2508 * 1. This commit is not a page flip.
2509 * 2. This commit is a page flip, and targets are created.
2510 */
2511 if (!page_flip_needed(
2512 plane_state,
2513 old_plane_state,
2514 crtc->state->event, true) ||
2515 action == DM_COMMIT_ACTION_DPMS_ON ||
2516 action == DM_COMMIT_ACTION_SET) {
2517 list_for_each_entry(connector,
2518 &dev->mode_config.connector_list, head) {
2519 if (connector->state->crtc == crtc) {
2520 dm_state = to_dm_connector_state(
2521 connector->state);
2522 break;
2523 }
2524 }
2525
2526 /*
2527 * This situation happens in the following case:
2528				 * we are about to set a mode for a connector whose only
2529				 * possible crtc (in the encoder crtc mask) is used by
2530				 * another connector, which is why the core will try to
2531				 * re-assign crtcs in order to make the configuration
2532				 * supported. For our implementation we need to make all
2533				 * encoders support all crtcs, then this issue will
2534				 * never arise again. But the check is left to guard the
2535				 * code against it.
2536				 *
2537				 * It will also be needed when the actual
2538				 * drm_atomic_commit ioctl is used in the future
2539 */
2540 if (!dm_state)
2541 continue;
2542
2543 /*
2544			 * if a flip is pending here (i.e. still waiting for the fence to
2545			 * return before the address is submitted), we cannot
2546			 * commit_surface, as it would prematurely write out the future
2547			 * address. Wait until the flip is submitted before proceeding.
2548 */
2549 wait_while_pflip_status(adev, acrtc, pflip_pending_predicate);
2550
2551 dm_dc_surface_commit(dm->dc, crtc);
2552 }
2553 }
2554
2555 for (i = 0; i < new_crtcs_count; i++) {
2556 /*
2557 * loop to enable interrupts on newly arrived crtc
2558 */
2559 struct amdgpu_crtc *acrtc = new_crtcs[i];
2560
2561 if (adev->dm.freesync_module) {
2562 for (j = 0; j < acrtc->target->stream_count; j++)
2563 mod_freesync_notify_mode_change(
2564 adev->dm.freesync_module,
2565 acrtc->target->streams,
2566 acrtc->target->stream_count);
2567 }
2568
2569 manage_dm_interrupts(adev, acrtc, true);
2570 dm_crtc_cursor_reset(&acrtc->base);
2571
2572 }
2573
2574 /* Do actual flip */
2575 flip_crtcs_count = 0;
2576 for_each_plane_in_state(state, plane, old_plane_state, i) {
2577 struct drm_plane_state *plane_state = plane->state;
2578 struct drm_crtc *crtc = plane_state->crtc;
2579 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2580 struct drm_framebuffer *fb = plane_state->fb;
2581
2582 if (!fb || !crtc || !crtc->state->planes_changed ||
2583 !crtc->state->active)
2584 continue;
2585
2586 if (page_flip_needed(
2587 plane_state,
2588 old_plane_state,
2589 crtc->state->event,
2590 false)) {
2591 amdgpu_crtc_submit_flip(
2592 crtc,
2593 fb,
2594 work[flip_crtcs_count],
2595				new_abo[flip_crtcs_count]);
2596 flip_crtcs_count++;
2597 /*clean up the flags for next usage*/
2598 acrtc->flip_flags = 0;
2599 }
2600 }
2601
2602 /* In this state all old framebuffers would be unpinned */
2603
2604 /* TODO: Revisit when we support true asynchronous commit.*/
2605 if (!async)
2606 drm_atomic_helper_cleanup_planes(dev, state);
2607
2608 drm_atomic_state_put(state);
2609
2610 return ret;
2611}
2612/*
2613 * This function handles all cases when a set mode does not come upon hotplug.
2614 * This includes when the same display is unplugged and then plugged back into
2615 * the same port, and when we are running without usermode desktop manager support
2616 */
2617void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
2618{
2619 struct drm_crtc *crtc;
2620 struct amdgpu_device *adev = dev->dev_private;
2621 struct dc *dc = adev->dm.dc;
2622 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
2623 struct amdgpu_crtc *disconnected_acrtc;
2624 const struct dc_sink *sink;
2625 struct dc_target *commit_targets[6];
2626 struct dc_target *current_target;
2627 uint32_t commit_targets_count = 0;
2628 int i;
2629
2630 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
2631 return;
2632
2633 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2634
2635 if (!disconnected_acrtc || !disconnected_acrtc->target)
2636 return;
2637
2638 sink = disconnected_acrtc->target->streams[0]->sink;
2639
2640 /*
2641 * If the previous sink is not released and different from the current,
2642	 * we deduce we are in a state where we cannot rely on a usermode call
2643 * to turn on the display, so we do it here
2644 */
2645 if (sink != aconnector->dc_sink) {
2646 struct dm_connector_state *dm_state =
2647 to_dm_connector_state(aconnector->base.state);
2648
2649 struct dc_target *new_target =
2650 create_target_for_sink(
2651 aconnector,
2652 &disconnected_acrtc->base.state->mode,
2653 dm_state);
2654
2655 DRM_INFO("Headless hotplug, restoring connector state\n");
2656 /*
2657		 * we disable vblanks and pflips on the crtc that
2658		 * is about to be changed
2659 */
2660 manage_dm_interrupts(adev, disconnected_acrtc, false);
2661 /* this is the update mode case */
2662
2663 current_target = disconnected_acrtc->target;
2664
2665 disconnected_acrtc->target = new_target;
2666 disconnected_acrtc->enabled = true;
2667 disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode;
2668
2669 commit_targets_count = 0;
2670
2671 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2672 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2673
2674 if (acrtc->target) {
2675 commit_targets[commit_targets_count] = acrtc->target;
2676 ++commit_targets_count;
2677 }
2678 }
2679
2680 /* DC is optimized not to do anything if 'targets' didn't change. */
2681 if (!dc_commit_targets(dc, commit_targets,
2682 commit_targets_count)) {
2683 DRM_INFO("Failed to restore connector state!\n");
2684 dc_target_release(disconnected_acrtc->target);
2685 disconnected_acrtc->target = current_target;
2686 manage_dm_interrupts(adev, disconnected_acrtc, true);
2687 return;
2688 }
2689
2690 if (adev->dm.freesync_module) {
2691
2692 for (i = 0; i < current_target->stream_count; i++)
2693 mod_freesync_remove_stream(
2694 adev->dm.freesync_module,
2695 current_target->streams[i]);
2696
2697 for (i = 0; i < new_target->stream_count; i++)
2698 mod_freesync_add_stream(
2699 adev->dm.freesync_module,
2700 new_target->streams[i],
2701 &aconnector->caps);
2702 }
2703 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2704 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2705
2706 if (acrtc->target != NULL) {
2707 acrtc->otg_inst =
2708 dc_target_get_status(acrtc->target)->primary_otg_inst;
2709 }
2710 }
2711
2712 dc_target_release(current_target);
2713
2714 dm_dc_surface_commit(dc, &disconnected_acrtc->base);
2715
2716 manage_dm_interrupts(adev, disconnected_acrtc, true);
2717 dm_crtc_cursor_reset(&disconnected_acrtc->base);
2718
2719 }
2720}
2721
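/*
 * Append 'surface' to the validation set entry that matches 'target' and
 * return the updated surface count for that entry.
 */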
2722static uint32_t add_val_sets_surface(
2723 struct dc_validation_set *val_sets,
2724 uint32_t set_count,
2725 const struct dc_target *target,
2726 const struct dc_surface *surface)
2727{
2728 uint32_t i = 0;
2729
2730 while (i < set_count) {
2731 if (val_sets[i].target == target)
2732 break;
2733 ++i;
2734 }
2735
2736 val_sets[i].surfaces[val_sets[i].surface_count] = surface;
2737 val_sets[i].surface_count++;
2738
2739 return val_sets[i].surface_count;
2740}
2741
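/*
 * Replace old_target with new_target in the validation set array (recording
 * the CRTC), appending a new entry if old_target was not found. Returns the
 * possibly increased set count.
 */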
2742static uint32_t update_in_val_sets_target(
2743 struct dc_validation_set *val_sets,
2744 struct drm_crtc **crtcs,
2745 uint32_t set_count,
2746 const struct dc_target *old_target,
2747 const struct dc_target *new_target,
2748 struct drm_crtc *crtc)
2749{
2750 uint32_t i = 0;
2751
2752 while (i < set_count) {
2753 if (val_sets[i].target == old_target)
2754 break;
2755 ++i;
2756 }
2757
2758 val_sets[i].target = new_target;
2759 crtcs[i] = crtc;
2760
2761 if (i == set_count) {
2762 /* nothing found. add new one to the end */
2763 return set_count + 1;
2764 }
2765
2766 return set_count;
2767}
2768
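/*
 * Remove the entry for 'target' from the validation set array, compacting
 * the remaining entries. Returns the new set count.
 */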
2769static uint32_t remove_from_val_sets(
2770 struct dc_validation_set *val_sets,
2771 uint32_t set_count,
2772 const struct dc_target *target)
2773{
2774 int i;
2775
2776 for (i = 0; i < set_count; i++)
2777 if (val_sets[i].target == target)
2778 break;
2779
2780 if (i == set_count) {
2781 /* nothing found */
2782 return set_count;
2783 }
2784
2785 set_count--;
2786
2787 for (; i < set_count; i++) {
2788 val_sets[i] = val_sets[i + 1];
2789 }
2790
2791 return set_count;
2792}
2793
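/*
 * Atomic check implementation: run the generic DRM helper check, then mirror
 * the requested configuration into a dc_validation_set (creating temporary
 * targets for changed CRTCs) so the configuration can be validated by DC.
 */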
2794int amdgpu_dm_atomic_check(struct drm_device *dev,
2795 struct drm_atomic_state *state)
2796{
2797 struct drm_crtc *crtc;
2798 struct drm_crtc_state *crtc_state;
2799 struct drm_plane *plane;
2800 struct drm_plane_state *plane_state;
2801 int i, j;
2802 int ret;
2803 int set_count;
2804 int new_target_count;
2805 struct dc_validation_set set[MAX_TARGETS] = {{ 0 }};
2806 struct dc_target *new_targets[MAX_TARGETS] = { 0 };
2807 struct drm_crtc *crtc_set[MAX_TARGETS] = { 0 };
2808 struct amdgpu_device *adev = dev->dev_private;
2809 struct dc *dc = adev->dm.dc;
2810 bool need_to_validate = false;
2811
2812 ret = drm_atomic_helper_check(dev, state);
2813
2814 if (ret) {
2815 DRM_ERROR("Atomic state validation failed with error :%d !\n",
2816 ret);
2817 return ret;
2818 }
2819
2820 ret = -EINVAL;
2821
2822 /* copy existing configuration */
2823 new_target_count = 0;
2824 set_count = 0;
2825 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2826
2827 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2828
2829 if (acrtc->target) {
2830 set[set_count].target = acrtc->target;
2831 crtc_set[set_count] = crtc;
2832 ++set_count;
2833 }
2834 }
2835
2836 /* update changed items */
2837 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2838 struct amdgpu_crtc *acrtc = NULL;
2839 struct amdgpu_connector *aconnector = NULL;
2840 enum dm_commit_action action;
2841
2842 acrtc = to_amdgpu_crtc(crtc);
2843
2844 aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
2845
2846 action = get_dm_commit_action(crtc_state);
2847
2848 switch (action) {
2849 case DM_COMMIT_ACTION_DPMS_ON:
2850 case DM_COMMIT_ACTION_SET: {
2851 struct dc_target *new_target = NULL;
2852 struct drm_connector_state *conn_state = NULL;
2853 struct dm_connector_state *dm_state = NULL;
2854
2855 if (aconnector) {
2856 conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
2857 if (IS_ERR(conn_state))
2858 return ret;
2859 dm_state = to_dm_connector_state(conn_state);
2860 }
2861
2862 new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
2863
2864 /*
2865 * we can have no target on ACTION_SET if a display
2866			 * was disconnected during S3; in this case it is not an
2867			 * error, the OS will be updated after detection and will
2868			 * do the right thing on the next atomic commit
2869 */
2870 if (!new_target) {
2871 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2872 __func__, acrtc->base.base.id);
2873 break;
2874 }
2875
2876 new_targets[new_target_count] = new_target;
2877 set_count = update_in_val_sets_target(
2878 set,
2879 crtc_set,
2880 set_count,
2881 acrtc->target,
2882 new_target,
2883 crtc);
2884
2885 new_target_count++;
2886 need_to_validate = true;
2887 break;
2888 }
2889
2890 case DM_COMMIT_ACTION_NOTHING: {
2891 const struct drm_connector *drm_connector = NULL;
2892 struct drm_connector_state *conn_state = NULL;
2893 struct dm_connector_state *dm_state = NULL;
2894 struct dm_connector_state *old_dm_state = NULL;
2895 struct dc_target *new_target;
2896
2897 if (!aconnector)
2898 break;
2899
2900 for_each_connector_in_state(
2901 state, drm_connector, conn_state, j) {
2902 if (&aconnector->base == drm_connector)
2903 break;
2904 }
2905
2906 old_dm_state = to_dm_connector_state(drm_connector->state);
2907 dm_state = to_dm_connector_state(conn_state);
2908
2909			/* Support underscan adjustment */
2910 if (!is_scaling_state_different(dm_state, old_dm_state))
2911 break;
2912
2913 new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
2914
2915 if (!new_target) {
2916 DRM_ERROR("%s: Failed to create new target for crtc %d\n",
2917 __func__, acrtc->base.base.id);
2918 break;
2919 }
2920
2921 new_targets[new_target_count] = new_target;
2922 set_count = update_in_val_sets_target(
2923 set,
2924 crtc_set,
2925 set_count,
2926 acrtc->target,
2927 new_target,
2928 crtc);
2929
2930 new_target_count++;
2931 need_to_validate = true;
2932
2933 break;
2934 }
2935 case DM_COMMIT_ACTION_DPMS_OFF:
2936 case DM_COMMIT_ACTION_RESET:
2937 /* i.e. reset mode */
2938 if (acrtc->target) {
2939 set_count = remove_from_val_sets(
2940 set,
2941 set_count,
2942 acrtc->target);
2943 }
2944 break;
2945 }
2946
2947 /*
2948 * TODO revisit when removing commit action
2949 * and looking at atomic flags directly
2950 */
2951
2952 /* commit needs planes right now (for gamma, eg.) */
2953		/* TODO rework commit to check crtc for gamma change */
2954 ret = drm_atomic_add_affected_planes(state, crtc);
2955 if (ret)
2956 return ret;
2957 }
2958
2959 for (i = 0; i < set_count; i++) {
2960 for_each_plane_in_state(state, plane, plane_state, j) {
2961 struct drm_plane_state *old_plane_state = plane->state;
2962 struct drm_crtc *crtc = plane_state->crtc;
2963 struct drm_framebuffer *fb = plane_state->fb;
2964 struct drm_connector *connector;
2965 struct dm_connector_state *dm_state = NULL;
2966 enum dm_commit_action action;
2967 struct drm_crtc_state *crtc_state;
2968
2969
2970 if (!fb || !crtc || crtc_set[i] != crtc ||
2971 !crtc->state->planes_changed || !crtc->state->active)
2972 continue;
2973
2974 action = get_dm_commit_action(crtc->state);
2975
2976 /* Surfaces are created under two scenarios:
2977 * 1. This commit is not a page flip.
2978 * 2. This commit is a page flip, and targets are created.
2979 */
2980 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2981 if (!page_flip_needed(plane_state, old_plane_state,
2982 crtc_state->event, true) ||
2983 action == DM_COMMIT_ACTION_DPMS_ON ||
2984 action == DM_COMMIT_ACTION_SET) {
2985 struct dc_surface *surface;
2986
2987 list_for_each_entry(connector,
2988 &dev->mode_config.connector_list, head) {
2989 if (connector->state->crtc == crtc) {
2990 dm_state = to_dm_connector_state(
2991 connector->state);
2992 break;
2993 }
2994 }
2995
2996 /*
2997 * This situation happens in the following case:
2998				 * we are about to set a mode for a connector whose only
2999				 * possible crtc (in the encoder crtc mask) is used by
3000				 * another connector, so the helper will try to
3001				 * re-assign crtcs in order to make the configuration
3002				 * supported. For our implementation we need to make all
3003				 * encoders support all crtcs, then this issue will
3004				 * never arise again. The check is kept to guard the
3005				 * code against this case.
3006				 *
3007				 * It will also be needed when the actual
3008				 * drm_atomic_commit ioctl is used in the future.
3009 */
3010 if (!dm_state)
3011 continue;
3012
3013 surface = dc_create_surface(dc);
3014 fill_plane_attributes(
3015 surface,
3016 plane_state,
3017 false);
3018
3019 add_val_sets_surface(
3020 set,
3021 set_count,
3022 set[i].target,
3023 surface);
3024
3025 need_to_validate = true;
3026 }
3027 }
3028 }
3029
3030 if (need_to_validate == false || set_count == 0 ||
3031 dc_validate_resources(dc, set, set_count))
3032 ret = 0;
3033
3034 for (i = 0; i < set_count; i++) {
3035 for (j = 0; j < set[i].surface_count; j++) {
3036 dc_surface_release(set[i].surfaces[j]);
3037 }
3038 }
3039 for (i = 0; i < new_target_count; i++)
3040 dc_target_release(new_targets[i]);
3041
3042 if (ret != 0)
3043 DRM_ERROR("Atomic check failed.\n");
3044
3045 return ret;
3046}
3047
3048static bool is_dp_capable_without_timing_msa(
3049 struct dc *dc,
3050 struct amdgpu_connector *amdgpu_connector)
3051{
3052 uint8_t dpcd_data;
3053 bool capable = false;
3054 if (amdgpu_connector->dc_link &&
3055 dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index,
3056 DP_DOWN_STREAM_PORT_COUNT,
3057 &dpcd_data, sizeof(dpcd_data)) )
3058		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
3059
3060 return capable;
3061}
3062void amdgpu_dm_add_sink_to_freesync_module(
3063 struct drm_connector *connector,
3064 struct edid *edid)
3065{
3066 int i;
3067 uint64_t val_capable;
3068 bool edid_check_required;
3069 struct detailed_timing *timing;
3070 struct detailed_non_pixel *data;
3071 struct detailed_data_monitor_range *range;
3072 struct amdgpu_connector *amdgpu_connector =
3073 to_amdgpu_connector(connector);
3074
3075 struct drm_device *dev = connector->dev;
3076 struct amdgpu_device *adev = dev->dev_private;
3077 edid_check_required = false;
3078 if (!amdgpu_connector->dc_sink) {
3079		DRM_ERROR("dc_sink is NULL, could not add sink to the FreeSync module.\n");
3080 return;
3081 }
3082 if (!adev->dm.freesync_module)
3083 return;
3084 /*
3085	 * if the EDID is non-NULL, restrict FreeSync to DP and eDP only
3086 */
3087 if (edid) {
3088 if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
3089 || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
3090 edid_check_required = is_dp_capable_without_timing_msa(
3091 adev->dm.dc,
3092 amdgpu_connector);
3093 }
3094 }
3095 val_capable = 0;
3096 if (edid_check_required == true && (edid->version > 1 ||
3097 (edid->version == 1 && edid->revision > 1))) {
3098 for (i = 0; i < 4; i++) {
3099
3100 timing = &edid->detailed_timings[i];
3101 data = &timing->data.other_data;
3102 range = &data->data.range;
3103 /*
3104 * Check if monitor has continuous frequency mode
3105 */
3106 if (data->type != EDID_DETAIL_MONITOR_RANGE)
3107 continue;
3108 /*
3109			 * Check for the range-limits-only flag. If flags == 1,
3110			 * no additional timing information is provided.
3111			 * Default GTF, GTF secondary curve and CVT are not
3112			 * supported.
3113 */
3114 if (range->flags != 1)
3115 continue;
3116
3117 amdgpu_connector->min_vfreq = range->min_vfreq;
3118 amdgpu_connector->max_vfreq = range->max_vfreq;
3119 amdgpu_connector->pixel_clock_mhz =
3120 range->pixel_clock_mhz * 10;
3121 break;
3122 }
3123
3124 if (amdgpu_connector->max_vfreq -
3125 amdgpu_connector->min_vfreq > 10) {
3126 amdgpu_connector->caps.supported = true;
3127 amdgpu_connector->caps.min_refresh_in_micro_hz =
3128 amdgpu_connector->min_vfreq * 1000000;
3129 amdgpu_connector->caps.max_refresh_in_micro_hz =
3130 amdgpu_connector->max_vfreq * 1000000;
3131 val_capable = 1;
3132 }
3133 }
3134
3135 /*
3136 * TODO figure out how to notify user-mode or DRM of freesync caps
3137 * once we figure out how to deal with freesync in an upstreamable
3138 * fashion
3139 */
3140
3141}
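
A hand-worked illustration of the capability math above (hedged, not part of this patch): a monitor range descriptor advertising 40-60 Hz passes the (max_vfreq - min_vfreq) > 10 check, so the connector caps would be filled as min_refresh_in_micro_hz = 40 * 1000000 = 40,000,000 and max_refresh_in_micro_hz = 60 * 1000000 = 60,000,000. The EDID maximum-pixel-clock byte is stored in units of 10 MHz, which is why pixel_clock_mhz is multiplied by 10.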
3142
3143void amdgpu_dm_remove_sink_from_freesync_module(
3144 struct drm_connector *connector)
3145{
3146 /*
3147 * TODO fill in once we figure out how to deal with freesync in
3148 * an upstreamable fashion
3149 */
3150}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
new file mode 100644
index 000000000000..4f7bd3bae44e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright 2012-13 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __AMDGPU_DM_TYPES_H__
27#define __AMDGPU_DM_TYPES_H__
28
29#include <drm/drmP.h>
30
31struct amdgpu_framebuffer;
32struct amdgpu_display_manager;
33struct dc_validation_set;
34struct dc_surface;
35
36/*TODO Jodan Hersen use the one in amdgpu_dm*/
37int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
38 struct amdgpu_crtc *amdgpu_crtc,
39 uint32_t link_index);
40int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
41 struct amdgpu_connector *amdgpu_connector,
42 uint32_t link_index,
43 struct amdgpu_encoder *amdgpu_encoder);
44int amdgpu_dm_encoder_init(
45 struct drm_device *dev,
46 struct amdgpu_encoder *aencoder,
47 uint32_t link_index);
48
49void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
50void amdgpu_dm_connector_destroy(struct drm_connector *connector);
51void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
52
53int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
54
55int amdgpu_dm_atomic_commit(
56 struct drm_device *dev,
57 struct drm_atomic_state *state,
58 bool async);
59int amdgpu_dm_atomic_check(struct drm_device *dev,
60 struct drm_atomic_state *state);
61
62int dm_create_validation_set_for_target(
63 struct drm_connector *connector,
64 struct drm_display_mode *mode,
65 struct dc_validation_set *val_set);
66
67void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
68struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
69 struct drm_connector *connector);
70
71int amdgpu_dm_connector_atomic_set_property(
72 struct drm_connector *connector,
73 struct drm_connector_state *state,
74 struct drm_property *property,
75 uint64_t val);
76
77int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
78
79void amdgpu_dm_connector_init_helper(
80 struct amdgpu_display_manager *dm,
81 struct amdgpu_connector *aconnector,
82 int connector_type,
83 const struct dc_link *link,
84 int link_index);
85
86int amdgpu_dm_connector_mode_valid(
87 struct drm_connector *connector,
88 struct drm_display_mode *mode);
89
90void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector);
91
92void amdgpu_dm_add_sink_to_freesync_module(
93 struct drm_connector *connector,
94 struct edid *edid);
95
96void amdgpu_dm_remove_sink_from_freesync_module(
97 struct drm_connector *connector);
98
99extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
100
101#endif /* __AMDGPU_DM_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
new file mode 100644
index 000000000000..5fac034093e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -0,0 +1,28 @@
1#
2# Makefile for Display Core (dc) component.
3#
4
5DC_LIBS = basics bios calcs dce \
6gpio gpu i2caux irq virtual
7
8DC_LIBS += dce112
9DC_LIBS += dce110
10DC_LIBS += dce100
11DC_LIBS += dce80
12
13AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
14
15include $(AMD_DC)
16
17DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_target.o dc_sink.o dc_stream.o \
18dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o
19
20AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
21
22AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
23
24AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
25AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
26
27
28
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
new file mode 100644
index 000000000000..a263cadcc0df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for the 'basics' sub-component of DAL.
3# It provides the general basic services required by other DAL
4# subcomponents.
5
6BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
7 logger.o log_helpers.o register_logger.o signal_types.o vector.o
8
9AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
10
11AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
new file mode 100644
index 000000000000..ebe14e17cc2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#define DIVIDER 10000
29
30/* S2D13 value in [-3.00...0.9999] */
31#define S2D13_MIN (-3 * DIVIDER)
32#define S2D13_MAX (3 * DIVIDER)
33
34uint16_t fixed_point_to_int_frac(
35 struct fixed31_32 arg,
36 uint8_t integer_bits,
37 uint8_t fractional_bits)
38{
39 int32_t numerator;
40 int32_t divisor = 1 << fractional_bits;
41
42 uint16_t result;
43
44 uint16_t d = (uint16_t)dal_fixed31_32_floor(
45 dal_fixed31_32_abs(
46 arg));
47
48 if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
49 numerator = (uint16_t)dal_fixed31_32_floor(
50 dal_fixed31_32_mul_int(
51 arg,
52 divisor));
53 else {
54 numerator = dal_fixed31_32_floor(
55 dal_fixed31_32_sub(
56 dal_fixed31_32_from_int(
57 1LL << integer_bits),
58 dal_fixed31_32_recip(
59 dal_fixed31_32_from_int(
60 divisor))));
61 }
62
63 if (numerator >= 0)
64 result = (uint16_t)numerator;
65 else
66 result = (uint16_t)(
67 (1 << (integer_bits + fractional_bits + 1)) + numerator);
68
69 if ((result != 0) && dal_fixed31_32_lt(
70 arg, dal_fixed31_32_zero))
71 result |= 1 << (integer_bits + fractional_bits);
72
73 return result;
74}
75/**
76* convert_float_matrix
77* Converts a matrix of fixed31_32 values into the HW register spec defined
78* format S2D13 (sign bit, 2 integer bits, 13 fractional bits).
79* @return None
80*/
81void convert_float_matrix(
82 uint16_t *matrix,
83 struct fixed31_32 *flt,
84 uint32_t buffer_size)
85{
86 const struct fixed31_32 min_2_13 =
87 dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
88 const struct fixed31_32 max_2_13 =
89 dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
90 uint32_t i;
91
92 for (i = 0; i < buffer_size; ++i) {
93 uint32_t reg_value =
94 fixed_point_to_int_frac(
95 dal_fixed31_32_clamp(
96 flt[i],
97 min_2_13,
98 max_2_13),
99 2,
100 13);
101
102 matrix[i] = (uint16_t)reg_value;
103 }
104}
105
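A hedged, hand-worked sketch of the S2D13 conversion above; the values are traced through fixed_point_to_int_frac() by hand and the snippet is illustrative only, not part of this patch:

	/* Illustration only: encode +/-1.5 with 2 integer and 13 fractional bits. */
	struct fixed31_32 pos = dal_fixed31_32_from_fraction(3, 2);   /* +1.5 */
	struct fixed31_32 neg = dal_fixed31_32_from_fraction(-3, 2);  /* -1.5 */

	uint16_t a = fixed_point_to_int_frac(pos, 2, 13);
	/* a == 0x3000: 12288 / 2^13 = 1.5 */
	uint16_t b = fixed_point_to_int_frac(neg, 2, 13);
	/* b == 0xD000: 16-bit two's complement of -12288, i.e. -12288 / 2^13 = -1.5 */
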
106static void calculate_adjustments_common(
107 const struct fixed31_32 *ideal_matrix,
108 const struct dc_csc_adjustments *adjustments,
109 struct fixed31_32 *matrix)
110{
111 const struct fixed31_32 sin_hue =
112 dal_fixed31_32_sin(adjustments->hue);
113 const struct fixed31_32 cos_hue =
114 dal_fixed31_32_cos(adjustments->hue);
115
116 const struct fixed31_32 multiplier =
117 dal_fixed31_32_mul(
118 adjustments->contrast,
119 adjustments->saturation);
120
121 matrix[0] = dal_fixed31_32_mul(
122 ideal_matrix[0],
123 adjustments->contrast);
124
125 matrix[1] = dal_fixed31_32_mul(
126 ideal_matrix[1],
127 adjustments->contrast);
128
129 matrix[2] = dal_fixed31_32_mul(
130 ideal_matrix[2],
131 adjustments->contrast);
132
133 matrix[4] = dal_fixed31_32_mul(
134 multiplier,
135 dal_fixed31_32_add(
136 dal_fixed31_32_mul(
137 ideal_matrix[8],
138 sin_hue),
139 dal_fixed31_32_mul(
140 ideal_matrix[4],
141 cos_hue)));
142
143 matrix[5] = dal_fixed31_32_mul(
144 multiplier,
145 dal_fixed31_32_add(
146 dal_fixed31_32_mul(
147 ideal_matrix[9],
148 sin_hue),
149 dal_fixed31_32_mul(
150 ideal_matrix[5],
151 cos_hue)));
152
153 matrix[6] = dal_fixed31_32_mul(
154 multiplier,
155 dal_fixed31_32_add(
156 dal_fixed31_32_mul(
157 ideal_matrix[10],
158 sin_hue),
159 dal_fixed31_32_mul(
160 ideal_matrix[6],
161 cos_hue)));
162
163 matrix[7] = ideal_matrix[7];
164
165 matrix[8] = dal_fixed31_32_mul(
166 multiplier,
167 dal_fixed31_32_sub(
168 dal_fixed31_32_mul(
169 ideal_matrix[8],
170 cos_hue),
171 dal_fixed31_32_mul(
172 ideal_matrix[4],
173 sin_hue)));
174
175 matrix[9] = dal_fixed31_32_mul(
176 multiplier,
177 dal_fixed31_32_sub(
178 dal_fixed31_32_mul(
179 ideal_matrix[9],
180 cos_hue),
181 dal_fixed31_32_mul(
182 ideal_matrix[5],
183 sin_hue)));
184
185 matrix[10] = dal_fixed31_32_mul(
186 multiplier,
187 dal_fixed31_32_sub(
188 dal_fixed31_32_mul(
189 ideal_matrix[10],
190 cos_hue),
191 dal_fixed31_32_mul(
192 ideal_matrix[6],
193 sin_hue)));
194
195 matrix[11] = ideal_matrix[11];
196}
197
198void calculate_adjustments(
199 const struct fixed31_32 *ideal_matrix,
200 const struct dc_csc_adjustments *adjustments,
201 struct fixed31_32 *matrix)
202{
203 calculate_adjustments_common(ideal_matrix, adjustments, matrix);
204
205 matrix[3] = dal_fixed31_32_add(
206 ideal_matrix[3],
207 dal_fixed31_32_mul(
208 adjustments->brightness,
209 dal_fixed31_32_from_fraction(86, 100)));
210}
211
212void calculate_adjustments_y_only(
213 const struct fixed31_32 *ideal_matrix,
214 const struct dc_csc_adjustments *adjustments,
215 struct fixed31_32 *matrix)
216{
217 calculate_adjustments_common(ideal_matrix, adjustments, matrix);
218
219 matrix[3] = dal_fixed31_32_add(
220 ideal_matrix[3],
221 adjustments->brightness);
222}
223
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
new file mode 100644
index 000000000000..18cbe41e80ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_CONVERSION_H__
27#define __DAL_CONVERSION_H__
28
29#include "include/fixed31_32.h"
30
31uint16_t fixed_point_to_int_frac(
32 struct fixed31_32 arg,
33 uint8_t integer_bits,
34 uint8_t fractional_bits);
35
36void convert_float_matrix(
37 uint16_t *matrix,
38 struct fixed31_32 *flt,
39 uint32_t buffer_size);
40
41void calculate_adjustments(
42 const struct fixed31_32 *ideal_matrix,
43 const struct dc_csc_adjustments *adjustments,
44 struct fixed31_32 *matrix);
45
46void calculate_adjustments_y_only(
47 const struct fixed31_32 *ideal_matrix,
48 const struct dc_csc_adjustments *adjustments,
49 struct fixed31_32 *matrix);
50
51#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
new file mode 100644
index 000000000000..5a6e46843502
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -0,0 +1,691 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/fixed31_32.h"
28
29static inline uint64_t abs_i64(
30 int64_t arg)
31{
32 if (arg > 0)
33 return (uint64_t)arg;
34 else
35 return (uint64_t)(-arg);
36}
37
38/*
39 * @brief
40 * result = dividend / divisor
41 * *remainder = dividend % divisor
42 */
43static inline uint64_t complete_integer_division_u64(
44 uint64_t dividend,
45 uint64_t divisor,
46 uint64_t *remainder)
47{
48 uint64_t result;
49
50 ASSERT(divisor);
51
52 result = div64_u64_rem(dividend, divisor, remainder);
53
54 return result;
55}
56
57#define BITS_PER_FRACTIONAL_PART \
58 32
59
60#define FRACTIONAL_PART_MASK \
61 ((1ULL << BITS_PER_FRACTIONAL_PART) - 1)
62
63#define GET_INTEGER_PART(x) \
64 ((x) >> BITS_PER_FRACTIONAL_PART)
65
66#define GET_FRACTIONAL_PART(x) \
67 (FRACTIONAL_PART_MASK & (x))
68
69struct fixed31_32 dal_fixed31_32_from_fraction(
70 int64_t numerator,
71 int64_t denominator)
72{
73 struct fixed31_32 res;
74
75 bool arg1_negative = numerator < 0;
76 bool arg2_negative = denominator < 0;
77
78 uint64_t arg1_value = arg1_negative ? -numerator : numerator;
79 uint64_t arg2_value = arg2_negative ? -denominator : denominator;
80
81 uint64_t remainder;
82
83 /* determine integer part */
84
85 uint64_t res_value = complete_integer_division_u64(
86 arg1_value, arg2_value, &remainder);
87
88 ASSERT(res_value <= LONG_MAX);
89
90 /* determine fractional part */
91 {
92 uint32_t i = BITS_PER_FRACTIONAL_PART;
93
94 do {
95 remainder <<= 1;
96
97 res_value <<= 1;
98
99 if (remainder >= arg2_value) {
100 res_value |= 1;
101 remainder -= arg2_value;
102 }
103 } while (--i != 0);
104 }
105
106 /* round up LSB */
107 {
108 uint64_t summand = (remainder << 1) >= arg2_value;
109
110 ASSERT(res_value <= LLONG_MAX - summand);
111
112 res_value += summand;
113 }
114
115 res.value = (int64_t)res_value;
116
117 if (arg1_negative ^ arg2_negative)
118 res.value = -res.value;
119
120 return res;
121}
122
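A brief, hedged illustration of the 31.32 encoding produced above (not part of this patch): the raw 64-bit value is the real number scaled by 2^32, with the last fractional bit rounded.

	/* Illustration only. */
	struct fixed31_32 third = dal_fixed31_32_from_fraction(1, 3);
	/* third.value == 0x55555555: floor(2^32 / 3); the rounding summand is 0 here */
	struct fixed31_32 one = dal_fixed31_32_from_int(1);
	/* one.value == 0x100000000, i.e. 1 << BITS_PER_FRACTIONAL_PART */
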
123struct fixed31_32 dal_fixed31_32_from_int(
124 int64_t arg)
125{
126 struct fixed31_32 res;
127
128 ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
129
130 res.value = arg << BITS_PER_FRACTIONAL_PART;
131
132 return res;
133}
134
135struct fixed31_32 dal_fixed31_32_neg(
136 struct fixed31_32 arg)
137{
138 struct fixed31_32 res;
139
140 res.value = -arg.value;
141
142 return res;
143}
144
145struct fixed31_32 dal_fixed31_32_abs(
146 struct fixed31_32 arg)
147{
148 if (arg.value < 0)
149 return dal_fixed31_32_neg(arg);
150 else
151 return arg;
152}
153
154bool dal_fixed31_32_lt(
155 struct fixed31_32 arg1,
156 struct fixed31_32 arg2)
157{
158 return arg1.value < arg2.value;
159}
160
161bool dal_fixed31_32_le(
162 struct fixed31_32 arg1,
163 struct fixed31_32 arg2)
164{
165 return arg1.value <= arg2.value;
166}
167
168bool dal_fixed31_32_eq(
169 struct fixed31_32 arg1,
170 struct fixed31_32 arg2)
171{
172 return arg1.value == arg2.value;
173}
174
175struct fixed31_32 dal_fixed31_32_min(
176 struct fixed31_32 arg1,
177 struct fixed31_32 arg2)
178{
179 if (arg1.value <= arg2.value)
180 return arg1;
181 else
182 return arg2;
183}
184
185struct fixed31_32 dal_fixed31_32_max(
186 struct fixed31_32 arg1,
187 struct fixed31_32 arg2)
188{
189 if (arg1.value <= arg2.value)
190 return arg2;
191 else
192 return arg1;
193}
194
195struct fixed31_32 dal_fixed31_32_clamp(
196 struct fixed31_32 arg,
197 struct fixed31_32 min_value,
198 struct fixed31_32 max_value)
199{
200 if (dal_fixed31_32_le(arg, min_value))
201 return min_value;
202 else if (dal_fixed31_32_le(max_value, arg))
203 return max_value;
204 else
205 return arg;
206}
207
208struct fixed31_32 dal_fixed31_32_shl(
209 struct fixed31_32 arg,
210 uint8_t shift)
211{
212 struct fixed31_32 res;
213
214 ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
215 ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
216
217 res.value = arg.value << shift;
218
219 return res;
220}
221
222struct fixed31_32 dal_fixed31_32_shr(
223 struct fixed31_32 arg,
224 uint8_t shift)
225{
226 struct fixed31_32 res;
227
228 ASSERT(shift < 64);
229
230 res.value = arg.value >> shift;
231
232 return res;
233}
234
235struct fixed31_32 dal_fixed31_32_add(
236 struct fixed31_32 arg1,
237 struct fixed31_32 arg2)
238{
239 struct fixed31_32 res;
240
241 ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
242 ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
243
244 res.value = arg1.value + arg2.value;
245
246 return res;
247}
248
249struct fixed31_32 dal_fixed31_32_sub_int(
250 struct fixed31_32 arg1,
251 int32_t arg2)
252{
253 return dal_fixed31_32_sub(
254 arg1,
255 dal_fixed31_32_from_int(arg2));
256}
257
258struct fixed31_32 dal_fixed31_32_sub(
259 struct fixed31_32 arg1,
260 struct fixed31_32 arg2)
261{
262 struct fixed31_32 res;
263
264 ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
265 ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
266
267 res.value = arg1.value - arg2.value;
268
269 return res;
270}
271
272struct fixed31_32 dal_fixed31_32_mul_int(
273 struct fixed31_32 arg1,
274 int32_t arg2)
275{
276 return dal_fixed31_32_mul(
277 arg1,
278 dal_fixed31_32_from_int(arg2));
279}
280
281struct fixed31_32 dal_fixed31_32_mul(
282 struct fixed31_32 arg1,
283 struct fixed31_32 arg2)
284{
285 struct fixed31_32 res;
286
287 bool arg1_negative = arg1.value < 0;
288 bool arg2_negative = arg2.value < 0;
289
290 uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
291 uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
292
293 uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
294 uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
295
296 uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
297 uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
298
299 uint64_t tmp;
300
301 res.value = arg1_int * arg2_int;
302
303 ASSERT(res.value <= LONG_MAX);
304
305 res.value <<= BITS_PER_FRACTIONAL_PART;
306
307 tmp = arg1_int * arg2_fra;
308
309 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
310
311 res.value += tmp;
312
313 tmp = arg2_int * arg1_fra;
314
315 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
316
317 res.value += tmp;
318
319 tmp = arg1_fra * arg2_fra;
320
321 tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
322 (tmp >= (uint64_t)dal_fixed31_32_half.value);
323
324 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
325
326 res.value += tmp;
327
328 if (arg1_negative ^ arg2_negative)
329 res.value = -res.value;
330
331 return res;
332}
333
334struct fixed31_32 dal_fixed31_32_sqr(
335 struct fixed31_32 arg)
336{
337 struct fixed31_32 res;
338
339 uint64_t arg_value = abs_i64(arg.value);
340
341 uint64_t arg_int = GET_INTEGER_PART(arg_value);
342
343 uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
344
345 uint64_t tmp;
346
347 res.value = arg_int * arg_int;
348
349 ASSERT(res.value <= LONG_MAX);
350
351 res.value <<= BITS_PER_FRACTIONAL_PART;
352
353 tmp = arg_int * arg_fra;
354
355 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
356
357 res.value += tmp;
358
359 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
360
361 res.value += tmp;
362
363 tmp = arg_fra * arg_fra;
364
365 tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
366 (tmp >= (uint64_t)dal_fixed31_32_half.value);
367
368 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
369
370 res.value += tmp;
371
372 return res;
373}
374
375struct fixed31_32 dal_fixed31_32_div_int(
376 struct fixed31_32 arg1,
377 int64_t arg2)
378{
379 return dal_fixed31_32_from_fraction(
380 arg1.value,
381 dal_fixed31_32_from_int(arg2).value);
382}
383
384struct fixed31_32 dal_fixed31_32_div(
385 struct fixed31_32 arg1,
386 struct fixed31_32 arg2)
387{
388 return dal_fixed31_32_from_fraction(
389 arg1.value,
390 arg2.value);
391}
392
393struct fixed31_32 dal_fixed31_32_recip(
394 struct fixed31_32 arg)
395{
396 /*
397 * @note
398	 * It would be a good idea to use Newton's method here
399 */
400
401 ASSERT(arg.value);
402
403 return dal_fixed31_32_from_fraction(
404 dal_fixed31_32_one.value,
405 arg.value);
406}
407
408struct fixed31_32 dal_fixed31_32_sinc(
409 struct fixed31_32 arg)
410{
411 struct fixed31_32 square;
412
413 struct fixed31_32 res = dal_fixed31_32_one;
414
415 int32_t n = 27;
416
417 struct fixed31_32 arg_norm = arg;
418
419 if (dal_fixed31_32_le(
420 dal_fixed31_32_two_pi,
421 dal_fixed31_32_abs(arg))) {
422 arg_norm = dal_fixed31_32_sub(
423 arg_norm,
424 dal_fixed31_32_mul_int(
425 dal_fixed31_32_two_pi,
426 (int32_t)div64_s64(
427 arg_norm.value,
428 dal_fixed31_32_two_pi.value)));
429 }
430
431 square = dal_fixed31_32_sqr(arg_norm);
432
433 do {
434 res = dal_fixed31_32_sub(
435 dal_fixed31_32_one,
436 dal_fixed31_32_div_int(
437 dal_fixed31_32_mul(
438 square,
439 res),
440 n * (n - 1)));
441
442 n -= 2;
443 } while (n > 2);
444
445 if (arg.value != arg_norm.value)
446 res = dal_fixed31_32_div(
447 dal_fixed31_32_mul(res, arg_norm),
448 arg);
449
450 return res;
451}
452
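A hedged restatement of the series evaluated above: starting from res = 1 and iterating res = 1 - x^2 * res / (n * (n - 1)) for n = 27, 25, ..., 3 evaluates the Taylor series of sinc(x) = sin(x)/x in Horner form on the 2*pi-reduced argument,

    sinc(x) ~ 1 - (x^2/(2*3)) * (1 - (x^2/(4*5)) * ( ... * (1 - x^2/(26*27)) ... ))
            = \sum_{k=0}^{13} (-1)^k x^{2k} / (2k+1)!,

with a final rescale by arg_norm/arg so that sin(x) = x * sinc(x) still holds for the original x; dal_fixed31_32_sin() then multiplies the result by x.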
453struct fixed31_32 dal_fixed31_32_sin(
454 struct fixed31_32 arg)
455{
456 return dal_fixed31_32_mul(
457 arg,
458 dal_fixed31_32_sinc(arg));
459}
460
461struct fixed31_32 dal_fixed31_32_cos(
462 struct fixed31_32 arg)
463{
464 /* TODO implement argument normalization */
465
466 const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
467
468 struct fixed31_32 res = dal_fixed31_32_one;
469
470 int32_t n = 26;
471
472 do {
473 res = dal_fixed31_32_sub(
474 dal_fixed31_32_one,
475 dal_fixed31_32_div_int(
476 dal_fixed31_32_mul(
477 square,
478 res),
479 n * (n - 1)));
480
481 n -= 2;
482 } while (n != 0);
483
484 return res;
485}
486
487/*
488 * @brief
489 * result = exp(arg),
490 * where abs(arg) < 1
491 *
492 * Calculated as Taylor series.
493 */
494static struct fixed31_32 fixed31_32_exp_from_taylor_series(
495 struct fixed31_32 arg)
496{
497 uint32_t n = 9;
498
499 struct fixed31_32 res = dal_fixed31_32_from_fraction(
500 n + 2,
501 n + 1);
502 /* TODO find correct res */
503
504 ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
505
506 do
507 res = dal_fixed31_32_add(
508 dal_fixed31_32_one,
509 dal_fixed31_32_div_int(
510 dal_fixed31_32_mul(
511 arg,
512 res),
513 n));
514 while (--n != 1);
515
516 return dal_fixed31_32_add(
517 dal_fixed31_32_one,
518 dal_fixed31_32_mul(
519 arg,
520 res));
521}
522
523struct fixed31_32 dal_fixed31_32_exp(
524 struct fixed31_32 arg)
525{
526 /*
527 * @brief
528 * Main equation is:
529 * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
530 * where m = round(x / ln(2)), r = x - m * ln(2)
531 */
532
533 if (dal_fixed31_32_le(
534 dal_fixed31_32_ln2_div_2,
535 dal_fixed31_32_abs(arg))) {
536 int32_t m = dal_fixed31_32_round(
537 dal_fixed31_32_div(
538 arg,
539 dal_fixed31_32_ln2));
540
541 struct fixed31_32 r = dal_fixed31_32_sub(
542 arg,
543 dal_fixed31_32_mul_int(
544 dal_fixed31_32_ln2,
545 m));
546
547 ASSERT(m != 0);
548
549 ASSERT(dal_fixed31_32_lt(
550 dal_fixed31_32_abs(r),
551 dal_fixed31_32_one));
552
553 if (m > 0)
554 return dal_fixed31_32_shl(
555 fixed31_32_exp_from_taylor_series(r),
556 (uint8_t)m);
557 else
558 return dal_fixed31_32_div_int(
559 fixed31_32_exp_from_taylor_series(r),
560 1LL << -m);
561 } else if (arg.value != 0)
562 return fixed31_32_exp_from_taylor_series(arg);
563 else
564 return dal_fixed31_32_one;
565}
566
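A hand-worked example of the range reduction above (illustrative, not part of this patch): for arg = 1, m = round(1 / ln 2) = 1 and r = 1 - ln 2 ~ 0.3069, so exp(1) is evaluated as 2^1 * exp(0.3069) ~ 2 * 1.3592 ~ 2.718, with exp(r) computed by fixed31_32_exp_from_taylor_series() since |r| < 1.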
567struct fixed31_32 dal_fixed31_32_log(
568 struct fixed31_32 arg)
569{
570 struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
571 /* TODO improve 1st estimation */
572
573 struct fixed31_32 error;
574
575 ASSERT(arg.value > 0);
576 /* TODO if arg is negative, return NaN */
577 /* TODO if arg is zero, return -INF */
578
579 do {
580 struct fixed31_32 res1 = dal_fixed31_32_add(
581 dal_fixed31_32_sub(
582 res,
583 dal_fixed31_32_one),
584 dal_fixed31_32_div(
585 arg,
586 dal_fixed31_32_exp(res)));
587
588 error = dal_fixed31_32_sub(
589 res,
590 res1);
591
592 res = res1;
593 /* TODO determine max_allowed_error based on quality of exp() */
594 } while (abs_i64(error.value) > 100ULL);
595
596 return res;
597}
598
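A hedged restatement of the iteration above: it is Newton's method applied to f(y) = exp(y) - arg,

    y_{n+1} = y_n - (exp(y_n) - arg) / exp(y_n) = y_n - 1 + arg / exp(y_n),

which is exactly res1; the loop stops once the step size drops below 100 raw units, i.e. roughly 100 / 2^32 ~ 2.3e-8.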
599struct fixed31_32 dal_fixed31_32_pow(
600 struct fixed31_32 arg1,
601 struct fixed31_32 arg2)
602{
603 return dal_fixed31_32_exp(
604 dal_fixed31_32_mul(
605 dal_fixed31_32_log(arg1),
606 arg2));
607}
608
609int32_t dal_fixed31_32_floor(
610 struct fixed31_32 arg)
611{
612 uint64_t arg_value = abs_i64(arg.value);
613
614 if (arg.value >= 0)
615 return (int32_t)GET_INTEGER_PART(arg_value);
616 else
617 return -(int32_t)GET_INTEGER_PART(arg_value);
618}
619
620int32_t dal_fixed31_32_round(
621 struct fixed31_32 arg)
622{
623 uint64_t arg_value = abs_i64(arg.value);
624
625 const int64_t summand = dal_fixed31_32_half.value;
626
627 ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
628
629 arg_value += summand;
630
631 if (arg.value >= 0)
632 return (int32_t)GET_INTEGER_PART(arg_value);
633 else
634 return -(int32_t)GET_INTEGER_PART(arg_value);
635}
636
637int32_t dal_fixed31_32_ceil(
638 struct fixed31_32 arg)
639{
640 uint64_t arg_value = abs_i64(arg.value);
641
642 const int64_t summand = dal_fixed31_32_one.value -
643 dal_fixed31_32_epsilon.value;
644
645 ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
646
647 arg_value += summand;
648
649 if (arg.value >= 0)
650 return (int32_t)GET_INTEGER_PART(arg_value);
651 else
652 return -(int32_t)GET_INTEGER_PART(arg_value);
653}
654
655/* This function is a generic helper that translates a fixed point value to a
656 * specified integer format consisting of integer_bits of integer part and
657 * fractional_bits of fractional part. For example, it is used in
658 * dal_fixed31_32_u2d19 to produce a 2-bit integer part and a 19-bit fractional
659 * part in 32 bits. It is used in hw programming (scaler).
660 */
661
662static inline uint32_t ux_dy(
663 int64_t value,
664 uint32_t integer_bits,
665 uint32_t fractional_bits)
666{
667 /* 1. create mask of integer part */
668 uint32_t result = (1 << integer_bits) - 1;
669 /* 2. mask out fractional part */
670 uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
671 /* 3. shrink fixed point integer part to be of integer_bits width*/
672 result &= GET_INTEGER_PART(value);
673 /* 4. make space for fractional part to be filled in after integer */
674 result <<= fractional_bits;
675 /* 5. shrink fixed point fractional part to of fractional_bits width*/
676 fractional_part >>= BITS_PER_FRACTIONAL_PART - fractional_bits;
677 /* 6. merge the result */
678 return result | fractional_part;
679}
680
681uint32_t dal_fixed31_32_u2d19(
682 struct fixed31_32 arg)
683{
684 return ux_dy(arg.value, 2, 19);
685}
686
687uint32_t dal_fixed31_32_u0d19(
688 struct fixed31_32 arg)
689{
690 return ux_dy(arg.value, 0, 19);
691}
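A hand-worked example of ux_dy() above (illustrative, not part of this patch): for the 31.32 value 1.5, whose raw representation is 0x180000000, dal_fixed31_32_u2d19() masks the integer part to 2 bits (1), shifts it left by 19 (0x80000), takes the top 19 fraction bits (0x80000000 >> 13 = 0x40000), and ORs them together: 0x80000 | 0x40000 = 0xC0000 = 1.5 * 2^19.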
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
new file mode 100644
index 000000000000..911e90bb1b5c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/fixed32_32.h"
28
29static uint64_t u64_div(uint64_t n, uint64_t d)
30{
31 uint32_t i = 0;
32 uint64_t r;
33 uint64_t q = div64_u64_rem(n, d, &r);
34
35 for (i = 0; i < 32; ++i) {
36 uint64_t sbit = q & (1ULL<<63);
37
38 r <<= 1;
39 r |= sbit ? 1 : 0;
40 q <<= 1;
41 if (r >= d) {
42 r -= d;
43 q |= 1;
44 }
45 }
46
47 if (2*r >= d)
48 q += 1;
49 return q;
50}
51
52struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
53{
54 struct fixed32_32 fx;
55
56 fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
57 return fx;
58}
59
60struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
61{
62 struct fixed32_32 fx;
63
64 fx.value = (uint64_t)value<<32;
65 return fx;
66}
67
68struct fixed32_32 dal_fixed32_32_add(
69 struct fixed32_32 lhs,
70 struct fixed32_32 rhs)
71{
72 struct fixed32_32 fx = {lhs.value + rhs.value};
73
74 ASSERT(fx.value >= rhs.value);
75 return fx;
76}
77
78struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
79{
80 struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
81
82 ASSERT(fx.value >= (uint64_t)rhs << 32);
83 return fx;
84
85}
86struct fixed32_32 dal_fixed32_32_sub(
87 struct fixed32_32 lhs,
88 struct fixed32_32 rhs)
89{
90 struct fixed32_32 fx;
91
92 ASSERT(lhs.value >= rhs.value);
93 fx.value = lhs.value - rhs.value;
94 return fx;
95}
96
97struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
98{
99 struct fixed32_32 fx;
100
101 ASSERT(lhs.value >= ((uint64_t)rhs<<32));
102 fx.value = lhs.value - ((uint64_t)rhs<<32);
103 return fx;
104}
105
106struct fixed32_32 dal_fixed32_32_mul(
107 struct fixed32_32 lhs,
108 struct fixed32_32 rhs)
109{
110 struct fixed32_32 fx;
111 uint64_t lhs_int = lhs.value>>32;
112 uint64_t lhs_frac = (uint32_t)lhs.value;
113 uint64_t rhs_int = rhs.value>>32;
114 uint64_t rhs_frac = (uint32_t)rhs.value;
115 uint64_t ahbh = lhs_int * rhs_int;
116 uint64_t ahbl = lhs_int * rhs_frac;
117 uint64_t albh = lhs_frac * rhs_int;
118 uint64_t albl = lhs_frac * rhs_frac;
119
120 ASSERT((ahbh>>32) == 0);
121
122 fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
123 return fx;
124
125}
126
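A hedged note on the decomposition in dal_fixed32_32_mul() above: writing each raw 64-bit operand as h * 2^32 + l (h = integer part, l = fractional part), the product rescaled back to 32.32 raw units is

    (h_a*2^32 + l_a) * (h_b*2^32 + l_b) / 2^32
        = h_a*h_b*2^32 + h_a*l_b + l_a*h_b + (l_a*l_b) / 2^32,

which is exactly (ahbh<<32) + ahbl + albh + (albl>>32); the ASSERT checks that the integer-by-integer term fits in 32 bits, i.e. that the product does not overflow the 32.32 range.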
127struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
128{
129 struct fixed32_32 fx;
130 uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
131 uint64_t lhsf;
132
133 ASSERT((lhsi>>32) == 0);
134 lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
135 ASSERT((lhsi<<32) + lhsf >= lhsf);
136 fx.value = (lhsi<<32) + lhsf;
137 return fx;
138}
139
140struct fixed32_32 dal_fixed32_32_div(
141 struct fixed32_32 lhs,
142 struct fixed32_32 rhs)
143{
144 struct fixed32_32 fx;
145
146 fx.value = u64_div(lhs.value, rhs.value);
147 return fx;
148}
149
150struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
151{
152 struct fixed32_32 fx;
153
154 fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
155 return fx;
156}
157
158struct fixed32_32 dal_fixed32_32_min(
159 struct fixed32_32 lhs,
160 struct fixed32_32 rhs)
161{
162 return (lhs.value < rhs.value) ? lhs : rhs;
163}
164
165struct fixed32_32 dal_fixed32_32_max(
166 struct fixed32_32 lhs,
167 struct fixed32_32 rhs)
168{
169 return (lhs.value > rhs.value) ? lhs : rhs;
170}
171
172bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
173{
174 return lhs.value > rhs.value;
175}
176bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
177{
178 return lhs.value > ((uint64_t)rhs<<32);
179}
180
181bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
182{
183 return lhs.value < rhs.value;
184}
185
186bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
187{
188 return lhs.value <= rhs.value;
189}
190
191bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
192{
193 return lhs.value < ((uint64_t)rhs<<32);
194}
195
196bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
197{
198 return lhs.value <= ((uint64_t)rhs<<32);
199}
200
201uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
202{
203 ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
204 return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
205}
206
207uint32_t dal_fixed32_32_floor(struct fixed32_32 v)
208{
209 return v.value>>32;
210}
211
212uint32_t dal_fixed32_32_round(struct fixed32_32 v)
213{
214 ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
215 return (v.value + (1ULL<<31))>>32;
216}
217
218bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
219{
220 return lhs.value == rhs.value;
221}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
new file mode 100644
index 000000000000..9c80847d03a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
@@ -0,0 +1,134 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/grph_object_id.h"
28
29bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
30{
31 bool rc = true;
32
33 switch (id.type) {
34 case OBJECT_TYPE_UNKNOWN:
35 rc = false;
36 break;
37 case OBJECT_TYPE_GPU:
38 case OBJECT_TYPE_ENGINE:
39 /* do NOT check for id.id == 0 */
40 if (id.enum_id == ENUM_ID_UNKNOWN)
41 rc = false;
42 break;
43 default:
44 if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
45 rc = false;
46 break;
47 }
48
49 return rc;
50}
51
52bool dal_graphics_object_id_is_equal(
53 struct graphics_object_id id1,
54 struct graphics_object_id id2)
55{
56 if (false == dal_graphics_object_id_is_valid(id1)) {
57 dm_output_to_console(
58 "%s: Warning: comparing invalid object 'id1'!\n", __func__);
59 return false;
60 }
61
62 if (false == dal_graphics_object_id_is_valid(id2)) {
63 dm_output_to_console(
64 "%s: Warning: comparing invalid object 'id2'!\n", __func__);
65 return false;
66 }
67
68 if (id1.id == id2.id && id1.enum_id == id2.enum_id
69 && id1.type == id2.type)
70 return true;
71
72 return false;
73}
74
75/* Based on internal data members memory layout */
76uint32_t dal_graphics_object_id_to_uint(struct graphics_object_id id)
77{
78 uint32_t object_id = 0;
79
80 object_id = id.id + (id.enum_id << 0x8) + (id.type << 0xc);
81 return object_id;
82}
83
84/*
85 * ******* get specific ID - internal safe cast into specific type *******
86 */
87
88enum controller_id dal_graphics_object_id_get_controller_id(
89 struct graphics_object_id id)
90{
91 if (id.type == OBJECT_TYPE_CONTROLLER)
92 return id.id;
93 return CONTROLLER_ID_UNDEFINED;
94}
95
96enum clock_source_id dal_graphics_object_id_get_clock_source_id(
97 struct graphics_object_id id)
98{
99 if (id.type == OBJECT_TYPE_CLOCK_SOURCE)
100 return id.id;
101 return CLOCK_SOURCE_ID_UNDEFINED;
102}
103
104enum encoder_id dal_graphics_object_id_get_encoder_id(
105 struct graphics_object_id id)
106{
107 if (id.type == OBJECT_TYPE_ENCODER)
108 return id.id;
109 return ENCODER_ID_UNKNOWN;
110}
111
112enum connector_id dal_graphics_object_id_get_connector_id(
113 struct graphics_object_id id)
114{
115 if (id.type == OBJECT_TYPE_CONNECTOR)
116 return id.id;
117 return CONNECTOR_ID_UNKNOWN;
118}
119
120enum audio_id dal_graphics_object_id_get_audio_id(struct graphics_object_id id)
121{
122 if (id.type == OBJECT_TYPE_AUDIO)
123 return id.id;
124 return AUDIO_ID_UNKNOWN;
125}
126
127enum engine_id dal_graphics_object_id_get_engine_id(
128 struct graphics_object_id id)
129{
130 if (id.type == OBJECT_TYPE_ENGINE)
131 return id.id;
132 return ENGINE_ID_UNKNOWN;
133}
134
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
new file mode 100644
index 000000000000..61f36a7f322b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "core_types.h"
27#include "logger.h"
28#include "include/logger_interface.h"
29
30#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
31
32struct dc_signal_type_info {
33 enum signal_type type;
34 char name[MAX_NAME_LEN];
35};
36
37static const struct dc_signal_type_info signal_type_info_tbl[] = {
38 {SIGNAL_TYPE_NONE, "NC"},
39 {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
40 {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
41 {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
42 {SIGNAL_TYPE_LVDS, "LVDS"},
43 {SIGNAL_TYPE_RGB, "VGA"},
44 {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
45 {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
46 {SIGNAL_TYPE_EDP, "eDP"},
47 {SIGNAL_TYPE_WIRELESS, "Wireless"},
48 {SIGNAL_TYPE_VIRTUAL, "Virtual"}
49};
50
51void dc_conn_log(struct dc_context *ctx,
52 const struct dc_link *link,
53 uint8_t *hex_data,
54 int hex_data_count,
55 enum dc_log_type event,
56 const char *msg,
57 ...)
58{
59 int i;
60 va_list args;
61 struct log_entry entry = { 0 };
62 enum signal_type signal;
63
64 if (link->local_sink)
65 signal = link->local_sink->sink_signal;
66 else
67 signal = link->connector_signal;
68
69 if (link->type == dc_connection_mst_branch)
70 signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
71
72 dm_logger_open(ctx->logger, &entry, event);
73
74 for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
75 if (signal == signal_type_info_tbl[i].type)
76 break;
77
78 dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
79 signal_type_info_tbl[i].name,
80 link->link_index);
81
82 va_start(args, msg);
83 entry.buf_offset += dm_log_to_buffer(
84 &entry.buf[entry.buf_offset],
85 LOG_MAX_LINE_SIZE - entry.buf_offset,
86 msg, args);
87
88 if (entry.buf[strlen(entry.buf) - 1] == '\n') {
89 entry.buf[strlen(entry.buf) - 1] = '\0';
90 entry.buf_offset--;
91 }
92
93 if (hex_data)
94 for (i = 0; i < hex_data_count; i++)
95 dm_logger_append(&entry, "%2.2X ", hex_data[i]);
96
97 dm_logger_append(&entry, "^\n");
98 dm_logger_close(&entry);
99 va_end(args);
100}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
new file mode 100644
index 000000000000..a5625a3badab
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -0,0 +1,457 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26#include "include/logger_interface.h"
27#include "logger.h"
28
29
30#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
31
32static const struct dc_log_type_info log_type_info_tbl[] = {
33 {LOG_ERROR, "Error"},
34 {LOG_WARNING, "Warning"},
35 {LOG_DC, "DC_Interface"},
36 {LOG_SURFACE, "Surface"},
37 {LOG_HW_HOTPLUG, "HW_Hotplug"},
38 {LOG_HW_LINK_TRAINING, "HW_LKTN"},
39 {LOG_HW_SET_MODE, "HW_Mode"},
40 {LOG_HW_RESUME_S3, "HW_Resume"},
41 {LOG_HW_AUDIO, "HW_Audio"},
42 {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
43 {LOG_MST, "MST"},
44 {LOG_SCALER, "Scaler"},
45 {LOG_BIOS, "BIOS"},
46 {LOG_BANDWIDTH_CALCS, "BWCalcs"},
47 {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
48 {LOG_I2C_AUX, "I2C_AUX"},
49 {LOG_SYNC, "Sync"},
50 {LOG_BACKLIGHT, "Backlight"},
51 {LOG_FEATURE_OVERRIDE, "Override"},
52 {LOG_DETECTION_EDID_PARSER, "Edid"},
53 {LOG_DETECTION_DP_CAPS, "DP_Caps"},
54 {LOG_RESOURCE, "Resource"},
55 {LOG_DML, "DML"},
56 {LOG_EVENT_MODE_SET, "Mode"},
57 {LOG_EVENT_DETECTION, "Detect"},
58 {LOG_EVENT_LINK_TRAINING, "LKTN"},
59 {LOG_EVENT_LINK_LOSS, "LinkLoss"},
60 {LOG_EVENT_UNDERFLOW, "Underflow"},
61 {LOG_IF_TRACE, "InterfaceTrace"}
62};
63
64
65#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
66 (1 << LOG_WARNING) | \
67 (1 << LOG_EVENT_MODE_SET) | \
68 (1 << LOG_EVENT_DETECTION) | \
69 (1 << LOG_EVENT_LINK_TRAINING) | \
70 (1 << LOG_EVENT_LINK_LOSS) | \
71 (1 << LOG_EVENT_UNDERFLOW) | \
72 (1 << LOG_RESOURCE) | \
73 (1 << LOG_FEATURE_OVERRIDE) | \
74 (1 << LOG_DETECTION_EDID_PARSER) | \
75 (1 << LOG_DC) | \
76 (1 << LOG_HW_HOTPLUG) | \
77 (1 << LOG_HW_SET_MODE) | \
78 (1 << LOG_HW_RESUME_S3) | \
79 (1 << LOG_HW_HPD_IRQ) | \
80 (1 << LOG_SYNC) | \
81 (1 << LOG_BANDWIDTH_VALIDATION) | \
82 (1 << LOG_MST) | \
83 (1 << LOG_BIOS) | \
84 (1 << LOG_DETECTION_EDID_PARSER) | \
85 (1 << LOG_DETECTION_DP_CAPS) | \
86 (1 << LOG_BACKLIGHT)) | \
87 (1 << LOG_I2C_AUX) | \
88 (1 << LOG_IF_TRACE) /* | \
89 (1 << LOG_SURFACE) | \
90 (1 << LOG_SCALER) | \
91 (1 << LOG_DML) | \
92 (1 << LOG_HW_LINK_TRAINING) | \
93 (1 << LOG_HW_AUDIO)| \
94 (1 << LOG_BANDWIDTH_CALCS)*/
95
96/* ----------- Object init and destruction ----------- */
97static bool construct(struct dc_context *ctx, struct dal_logger *logger)
98{
99 /* malloc buffer and init offsets */
100 logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
101 logger->log_buffer = (char *)dm_alloc(logger->log_buffer_size *
102 sizeof(char));
103
104 if (!logger->log_buffer)
105 return false;
106
107 /* Initialize both offsets to start of buffer (empty) */
108 logger->buffer_read_offset = 0;
109 logger->buffer_write_offset = 0;
110
111 logger->write_wrap_count = 0;
112 logger->read_wrap_count = 0;
113 logger->open_count = 0;
114
115 logger->flags.bits.ENABLE_CONSOLE = 1;
116 logger->flags.bits.ENABLE_BUFFER = 0;
117
118 logger->ctx = ctx;
119
120 logger->mask = DC_DEFAULT_LOG_MASK;
121
122 return true;
123}
124
125static void destruct(struct dal_logger *logger)
126{
127 if (logger->log_buffer) {
128 dm_free(logger->log_buffer);
129 logger->log_buffer = NULL;
130 }
131}
132
133struct dal_logger *dal_logger_create(struct dc_context *ctx)
134{
135 /* malloc struct */
136 struct dal_logger *logger = dm_alloc(sizeof(struct dal_logger));
137
138 if (!logger)
139 return NULL;
140 if (!construct(ctx, logger)) {
141 dm_free(logger);
142 return NULL;
143 }
144
145 return logger;
146}
147
148uint32_t dal_logger_destroy(struct dal_logger **logger)
149{
150 if (logger == NULL || *logger == NULL)
151 return 1;
152 destruct(*logger);
153 dm_free(*logger);
154 *logger = NULL;
155
156 return 0;
157}
158
159/* ------------------------------------------------------------------------ */
160
161
162static bool dal_logger_should_log(
163 struct dal_logger *logger,
164 enum dc_log_type log_type)
165{
166 if (logger->mask & (1 << log_type))
167 return true;
168
169 return false;
170}
171
172static void log_to_debug_console(struct log_entry *entry)
173{
174 struct dal_logger *logger = entry->logger;
175
176 if (logger->flags.bits.ENABLE_CONSOLE == 0)
177 return;
178
179 if (entry->buf_offset) {
180 switch (entry->type) {
181 case LOG_ERROR:
182 dm_error("%s", entry->buf);
183 break;
184 default:
185 dm_output_to_console("%s", entry->buf);
186 break;
187 }
188 }
189}
190
 191/* Print everything unread in log_buffer to the debug console */
192static void flush_to_debug_console(struct dal_logger *logger)
193{
194 int i = logger->buffer_read_offset;
195 char *string_start = &logger->log_buffer[i];
196
197 dm_output_to_console(
198 "---------------- FLUSHING LOG BUFFER ----------------\n");
199 while (i < logger->buffer_write_offset) {
200
201 if (logger->log_buffer[i] == '\0') {
202 dm_output_to_console("%s", string_start);
203 string_start = (char *)logger->log_buffer + i + 1;
204 }
205 i++;
206 }
207 dm_output_to_console(
208 "-------------- END FLUSHING LOG BUFFER --------------\n\n");
209}
210
211static void log_to_internal_buffer(struct log_entry *entry)
212{
213
214 uint32_t size = entry->buf_offset;
215 struct dal_logger *logger = entry->logger;
216
217 if (logger->flags.bits.ENABLE_BUFFER == 0)
218 return;
219
220 if (logger->log_buffer == NULL)
221 return;
222
223 if (size > 0 && size < logger->log_buffer_size) {
224
225 int total_free_space = 0;
226 int space_before_wrap = 0;
227
228 if (logger->buffer_write_offset > logger->buffer_read_offset) {
229 total_free_space = logger->log_buffer_size -
230 logger->buffer_write_offset +
231 logger->buffer_read_offset;
232 space_before_wrap = logger->log_buffer_size -
233 logger->buffer_write_offset;
234 } else if (logger->buffer_write_offset <
235 logger->buffer_read_offset) {
236 total_free_space = logger->log_buffer_size -
237 logger->buffer_read_offset +
238 logger->buffer_write_offset;
239 space_before_wrap = total_free_space;
240 } else if (logger->write_wrap_count !=
241 logger->read_wrap_count) {
242 /* Buffer is completely full already */
243 total_free_space = 0;
244 space_before_wrap = 0;
245 } else {
246 /* Buffer is empty, start writing at beginning */
247 total_free_space = logger->log_buffer_size;
248 space_before_wrap = logger->log_buffer_size;
249 logger->buffer_write_offset = 0;
250 logger->buffer_read_offset = 0;
251 }
252
253 if (space_before_wrap > size) {
254 /* No wrap around, copy 'size' bytes
255 * from 'entry->buf' to 'log_buffer'
256 */
257 memmove(logger->log_buffer +
258 logger->buffer_write_offset,
259 entry->buf, size);
260 logger->buffer_write_offset += size;
261
262 } else if (total_free_space > size) {
263 /* We have enough room without flushing,
264 * but need to wrap around */
265
266 int space_after_wrap = total_free_space -
267 space_before_wrap;
268
269 memmove(logger->log_buffer +
270 logger->buffer_write_offset,
271 entry->buf, space_before_wrap);
272 memmove(logger->log_buffer, entry->buf +
273 space_before_wrap, space_after_wrap);
274
275 logger->buffer_write_offset = space_after_wrap;
276 logger->write_wrap_count++;
277
278 } else {
279 /* Not enough room remaining, we should flush
280 * existing logs */
281
282 /* Flush existing unread logs to console */
283 flush_to_debug_console(logger);
284
285 /* Start writing to beginning of buffer */
286 memmove(logger->log_buffer, entry->buf, size);
287 logger->buffer_write_offset = size;
288 logger->buffer_read_offset = 0;
289 }
290
291 }
292}
293
294static void log_heading(struct log_entry *entry)
295{
296 int j;
297
298 for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
299
300 const struct dc_log_type_info *info = &log_type_info_tbl[j];
301
302 if (info->type == entry->type)
303 dm_logger_append(entry, "[%s]\t", info->name);
304 }
305}
306
307static void append_entry(
308 struct log_entry *entry,
309 char *buffer,
310 uint32_t buf_size)
311{
312 if (!entry->buf ||
313 entry->buf_offset + buf_size > entry->max_buf_bytes
314 ) {
315 BREAK_TO_DEBUGGER();
316 return;
317 }
318
319 /* Todo: check if off by 1 byte due to \0 anywhere */
320 memmove(entry->buf + entry->buf_offset, buffer, buf_size);
321 entry->buf_offset += buf_size;
322}
323
324/* ------------------------------------------------------------------------ */
325
 326/* Warning: make sure 'msg' is null terminated and that the formatted line,
 327 * including the '\0', fits within LOG_MAX_LINE_SIZE (currently 896)
 328 */
329void dm_logger_write(
330 struct dal_logger *logger,
331 enum dc_log_type log_type,
332 const char *msg,
333 ...)
334{
335 if (logger && dal_logger_should_log(logger, log_type)) {
336 uint32_t size;
337 va_list args;
338 char buffer[LOG_MAX_LINE_SIZE];
339 struct log_entry entry;
340
341 va_start(args, msg);
342
343 entry.logger = logger;
344
345 entry.buf = buffer;
346
347 entry.buf_offset = 0;
 348		entry.max_buf_bytes = LOG_MAX_LINE_SIZE * sizeof(char); /* size of 'buffer' above */
349
350 entry.type = log_type;
351
352 log_heading(&entry);
353
354 size = dm_log_to_buffer(
355 buffer, LOG_MAX_LINE_SIZE, msg, args);
356
357 entry.buf_offset += size;
358
359 /* --Flush log_entry buffer-- */
360 /* print to kernel console */
361 log_to_debug_console(&entry);
362 /* log internally for dsat */
363 log_to_internal_buffer(&entry);
364
365 va_end(args);
366 }
367}
368
369/* Same as dm_logger_write, except without open() and close(), which must
370 * be done separately.
371 */
372void dm_logger_append(
373 struct log_entry *entry,
374 const char *msg,
375 ...)
376{
377 struct dal_logger *logger;
378
379 if (!entry) {
380 BREAK_TO_DEBUGGER();
381 return;
382 }
383
384 logger = entry->logger;
385
386 if (logger && logger->open_count > 0 &&
387 dal_logger_should_log(logger, entry->type)) {
388
389 uint32_t size;
390 va_list args;
391 char buffer[LOG_MAX_LINE_SIZE];
392
393 va_start(args, msg);
394
395 size = dm_log_to_buffer(
396 buffer, LOG_MAX_LINE_SIZE, msg, args);
397
398 if (size < LOG_MAX_LINE_SIZE - 1) {
399 append_entry(entry, buffer, size);
400 } else {
 401			append_entry(entry, "LOG_ERROR, line too long\n", 26); /* 25 chars + '\0' */
402 }
403
404 va_end(args);
405 }
406}
407
408void dm_logger_open(
409 struct dal_logger *logger,
410 struct log_entry *entry, /* out */
411 enum dc_log_type log_type)
412{
413 if (!entry) {
414 BREAK_TO_DEBUGGER();
415 return;
416 }
417
418 entry->type = log_type;
419 entry->logger = logger;
420
421 entry->buf = dm_alloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char));
422
423 entry->buf_offset = 0;
424 entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
425
426 logger->open_count++;
427
428 log_heading(entry);
429}
430
431void dm_logger_close(struct log_entry *entry)
432{
433 struct dal_logger *logger = entry->logger;
434
435 if (logger && logger->open_count > 0) {
436 logger->open_count--;
437 } else {
438 BREAK_TO_DEBUGGER();
439 goto cleanup;
440 }
441
442 /* --Flush log_entry buffer-- */
443 /* print to kernel console */
444 log_to_debug_console(entry);
445 /* log internally for dsat */
446 log_to_internal_buffer(entry);
447
448 /* TODO: Write end heading */
449
450cleanup:
451 if (entry->buf) {
452 dm_free(entry->buf);
453 entry->buf = NULL;
454 entry->buf_offset = 0;
455 entry->max_buf_bytes = 0;
456 }
457}
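For reference, a minimal usage sketch of the logger interface added above (not part of the diff): dm_logger_write() emits a single formatted, category-gated line, while the open/append/close triple builds one multi-part entry. It assumes a valid struct dc_context *ctx and that these prototypes are exposed through include/logger_interface.h.

#include "dm_services.h"
#include "include/logger_interface.h"

/* Sketch only: exercise both logging paths on a freshly created logger. */
static void example_logger_usage(struct dc_context *ctx)
{
	struct dal_logger *logger = dal_logger_create(ctx);
	struct log_entry entry;

	if (!logger)
		return;

	/* One-shot line, printed only if LOG_DC is set in logger->mask. */
	dm_logger_write(logger, LOG_DC, "surface count: %d\n", 2);

	/* Multi-part entry: open once, append any number of times, close. */
	dm_logger_open(logger, &entry, LOG_HW_SET_MODE);
	dm_logger_append(&entry, "mode %dx%d", 1920, 1080);
	dm_logger_append(&entry, " @ %d kHz\n", 148500);
	dm_logger_close(&entry);

	dal_logger_destroy(&logger);
}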
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.h b/drivers/gpu/drm/amd/display/dc/basics/logger.h
new file mode 100644
index 000000000000..2f7a5df4c811
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_LOGGER_H__
27#define __DAL_LOGGER_H__
28
29/* Structure for keeping track of offsets, buffer, etc */
30
31#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
32
 33/* The connectivity log needs to output EDID data, which requires at least
 34 * 256x3 bytes, so the log line size is set to 896 to accommodate it.
 35 */
36#define LOG_MAX_LINE_SIZE 896
37
38#include "include/logger_types.h"
39
40struct dal_logger {
41
 42	/* How far into the circular buffer has been read by DSAT.
 43	 * The read offset should never cross the write offset.
 44	 * TODO: consider overwriting consumed data with '\0' as a safeguard.
 45	 */
46 uint32_t buffer_read_offset;
47
48 /* How far into the circular buffer we have written
49 * Write offset should never cross read offset
50 */
51 uint32_t buffer_write_offset;
52
53 uint32_t write_wrap_count;
54 uint32_t read_wrap_count;
55
56 uint32_t open_count;
57
58 char *log_buffer; /* Pointer to malloc'ed buffer */
59 uint32_t log_buffer_size; /* Size of circular buffer */
60
61 uint32_t mask; /*array of masks for major elements*/
62
63 union logger_flags flags;
64 struct dc_context *ctx;
65};
66
67#endif /* __DAL_LOGGER_H__ */
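As a quick illustration of how these fields are meant to be driven (a sketch, not part of the diff): mask is a bitmask over enum dc_log_type and flags selects the output paths, so enabling an extra category and routing entries into the internal buffer looks like this.

/* Sketch only: turn on scaler logging and buffered output at runtime. */
static void example_enable_scaler_logging(struct dal_logger *logger)
{
	logger->mask |= (1 << LOG_SCALER);	/* add a category to the mask */
	logger->flags.bits.ENABLE_BUFFER = 1;	/* keep entries in log_buffer */
	logger->flags.bits.ENABLE_CONSOLE = 1;	/* and still print to the console */
}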
diff --git a/drivers/gpu/drm/amd/display/dc/basics/register_logger.c b/drivers/gpu/drm/amd/display/dc/basics/register_logger.c
new file mode 100644
index 000000000000..b8d57d919fe4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/register_logger.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/dal_types.h"
28#include "include/logger_interface.h"
29#include "logger.h"
30
31/******************************************************************************
32 * Register Logger.
33 * A facility to create register R/W logs.
34 * Currently used for DAL Test.
35 *****************************************************************************/
36
37/******************************************************************************
38 * Private structures
39 *****************************************************************************/
40struct dal_reg_dump_stack_location {
41 const char *current_caller_func;
42 long current_pid;
43 long current_tgid;
44 uint32_t rw_count;/* register access counter for current function. */
45};
46
 47/* This is the maximum number of nested calls to the 'reg_dump' facility. */
48#define DAL_REG_DUMP_STACK_MAX_SIZE 32
49
50struct dal_reg_dump_stack {
51 int32_t stack_pointer;
52 struct dal_reg_dump_stack_location
53 stack_locations[DAL_REG_DUMP_STACK_MAX_SIZE];
54 uint32_t total_rw_count; /* Total count for *all* functions. */
55};
56
57static struct dal_reg_dump_stack reg_dump_stack = {0};
58
59/******************************************************************************
60 * Private functions
61 *****************************************************************************/
62
 63/* Check if the current process is the one which requested the register dump.
 64 * The reason for the check:
 65 * mmCRTC_STATUS_FRAME_COUNT is accessed by dal_controller_get_vblank_counter(),
 66 * which runs all the time when at least one display is connected
 67 * (triggered by drm_mode_page_flip_ioctl()). */
68static bool is_reg_dump_process(void)
69{
70 uint32_t i;
71
72 /* walk the list of our processes */
73 for (i = 0; i < reg_dump_stack.stack_pointer; i++) {
74 struct dal_reg_dump_stack_location *stack_location
75 = &reg_dump_stack.stack_locations[i];
76
77 if (stack_location->current_pid == dm_get_pid()
78 && stack_location->current_tgid == dm_get_tgid())
79 return true;
80 }
81
82 return false;
83}
84
85static bool dal_reg_dump_stack_is_empty(void)
86{
87 if (reg_dump_stack.stack_pointer <= 0)
88 return true;
89 else
90 return false;
91}
92
93static struct dal_reg_dump_stack_location *dal_reg_dump_stack_push(void)
94{
95 struct dal_reg_dump_stack_location *current_location = NULL;
96
97 if (reg_dump_stack.stack_pointer >= DAL_REG_DUMP_STACK_MAX_SIZE) {
98 /* stack is full */
99 dm_output_to_console("[REG_DUMP]: %s: stack is full!\n",
100 __func__);
101 } else {
102 current_location =
103 &reg_dump_stack.stack_locations[reg_dump_stack.stack_pointer];
104 ++reg_dump_stack.stack_pointer;
105 }
106
107 return current_location;
108}
109
110static struct dal_reg_dump_stack_location *dal_reg_dump_stack_pop(void)
111{
112 struct dal_reg_dump_stack_location *current_location = NULL;
113
114 if (dal_reg_dump_stack_is_empty()) {
115 /* stack is empty */
116 dm_output_to_console("[REG_DUMP]: %s: stack is empty!\n",
117 __func__);
118 } else {
119 --reg_dump_stack.stack_pointer;
120 current_location =
121 &reg_dump_stack.stack_locations[reg_dump_stack.stack_pointer];
122 }
123
124 return current_location;
125}
126
127/******************************************************************************
128 * Public functions
129 *****************************************************************************/
130
131void dal_reg_logger_push(const char *caller_func)
132{
133 struct dal_reg_dump_stack_location *free_stack_location;
134
135 free_stack_location = dal_reg_dump_stack_push();
136
137 if (NULL == free_stack_location)
138 return;
139
140 memset(free_stack_location, 0, sizeof(*free_stack_location));
141
142 free_stack_location->current_caller_func = caller_func;
143 free_stack_location->current_pid = dm_get_pid();
144 free_stack_location->current_tgid = dm_get_tgid();
145
146 dm_output_to_console("[REG_DUMP]:%s - start (pid:%ld, tgid:%ld)\n",
147 caller_func,
148 free_stack_location->current_pid,
149 free_stack_location->current_tgid);
150}
151
152void dal_reg_logger_pop(void)
153{
154 struct dal_reg_dump_stack_location *top_stack_location;
155
156 top_stack_location = dal_reg_dump_stack_pop();
157
158 if (NULL == top_stack_location) {
159 dm_output_to_console("[REG_DUMP]:%s - Stack is Empty!\n",
160 __func__);
161 return;
162 }
163
164 dm_output_to_console(
165 "[REG_DUMP]:%s - end."\
166 " Reg R/W Count: Total=%d Function=%d. (pid:%ld, tgid:%ld)\n",
167 top_stack_location->current_caller_func,
168 reg_dump_stack.total_rw_count,
169 top_stack_location->rw_count,
170 dm_get_pid(),
171 dm_get_tgid());
172
173 memset(top_stack_location, 0, sizeof(*top_stack_location));
174}
175
176void dal_reg_logger_rw_count_increment(void)
177{
178 ++reg_dump_stack.total_rw_count;
179
180 ++reg_dump_stack.stack_locations
181 [reg_dump_stack.stack_pointer - 1].rw_count;
182}
183
184bool dal_reg_logger_should_dump_register(void)
185{
 186	if (dal_reg_dump_stack_is_empty())
187 return false;
188
 189	if (!is_reg_dump_process())
190 return false;
191
192 return true;
193}
194
195/******************************************************************************
196 * End of File.
197 *****************************************************************************/
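A minimal sketch of how the register logger above is intended to be used (not part of the diff): a caller brackets a block of register work with push/pop so the accesses are attributed to it, while the low-level register accessors are expected to consult dal_reg_logger_should_dump_register() and bump the counter per access.

/* Sketch only: scope a block of register accesses for REG_DUMP output. */
static void example_reg_dump_scope(void)
{
	dal_reg_logger_push(__func__);	/* start of the dump scope */

	/* ... register reads/writes happen here; each access is expected to
	 * call dal_reg_logger_rw_count_increment() and, before logging,
	 * dal_reg_logger_should_dump_register() ...
	 */

	dal_reg_logger_pop();		/* end of scope, prints the R/W counts */
}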
diff --git a/drivers/gpu/drm/amd/display/dc/basics/signal_types.c b/drivers/gpu/drm/amd/display/dc/basics/signal_types.c
new file mode 100644
index 000000000000..44447e07803a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/signal_types.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/signal_types.h"
28
29bool dc_is_hdmi_signal(enum signal_type signal)
30{
31 return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
32}
33
34bool dc_is_dp_sst_signal(enum signal_type signal)
35{
36 return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
37 signal == SIGNAL_TYPE_EDP);
38}
39
40bool dc_is_dp_signal(enum signal_type signal)
41{
42 return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
43 signal == SIGNAL_TYPE_EDP ||
44 signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
45}
46
47bool dc_is_dp_external_signal(enum signal_type signal)
48{
49 return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
50 signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
51}
52
53bool dc_is_analog_signal(enum signal_type signal)
54{
55 switch (signal) {
56 case SIGNAL_TYPE_RGB:
57 return true;
59 default:
60 return false;
61 }
62}
63
64bool dc_is_embedded_signal(enum signal_type signal)
65{
66 return (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS);
67}
68
69bool dc_is_dvi_signal(enum signal_type signal)
70{
71 switch (signal) {
72 case SIGNAL_TYPE_DVI_SINGLE_LINK:
73 case SIGNAL_TYPE_DVI_DUAL_LINK:
74 return true;
76 default:
77 return false;
78 }
79}
80
81bool dc_is_dvi_single_link_signal(enum signal_type signal)
82{
83 return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
84}
85
86bool dc_is_dual_link_signal(enum signal_type signal)
87{
88 return (signal == SIGNAL_TYPE_DVI_DUAL_LINK);
89}
90
91bool dc_is_audio_capable_signal(enum signal_type signal)
92{
93 return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
94 signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
95 dc_is_hdmi_signal(signal) ||
96 signal == SIGNAL_TYPE_WIRELESS);
97}
98
99/*
100 * @brief
101 * Returns whether the signal is compatible
102 * with other digital encoder signal types.
103 * This is true for DVI, LVDS, and HDMI signal types.
104 */
105bool dc_is_digital_encoder_compatible_signal(enum signal_type signal)
106{
107 switch (signal) {
108 case SIGNAL_TYPE_DVI_SINGLE_LINK:
109 case SIGNAL_TYPE_DVI_DUAL_LINK:
110 case SIGNAL_TYPE_HDMI_TYPE_A:
111 case SIGNAL_TYPE_LVDS:
112 return true;
113 default:
114 return false;
115 }
116}
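A small sketch of how these predicates combine in practice (illustrative only, not part of the diff): callers can branch on the signal class instead of matching individual enum values.

#include "include/signal_types.h"

/* Sketch only: classify a sink by its signal type. */
static const char *example_signal_class(enum signal_type signal)
{
	if (dc_is_dp_signal(signal))
		return dc_is_embedded_signal(signal) ? "eDP" : "DP";
	if (dc_is_hdmi_signal(signal))
		return "HDMI";
	if (dc_is_dvi_signal(signal))
		return "DVI";
	return "other";
}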
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
new file mode 100644
index 000000000000..bb72a1857160
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -0,0 +1,307 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/vector.h"
28
29bool dal_vector_construct(
30 struct vector *vector,
31 struct dc_context *ctx,
32 uint32_t capacity,
33 uint32_t struct_size)
34{
35 vector->container = NULL;
36
37 if (!struct_size || !capacity) {
38 /* Container must be non-zero size*/
39 BREAK_TO_DEBUGGER();
40 return false;
41 }
42
43 vector->container = dm_alloc(struct_size * capacity);
44 if (vector->container == NULL)
45 return false;
46 vector->capacity = capacity;
47 vector->struct_size = struct_size;
48 vector->count = 0;
49 vector->ctx = ctx;
50 return true;
51}
52
53bool dal_vector_presized_costruct(
54 struct vector *vector,
55 struct dc_context *ctx,
56 uint32_t count,
57 void *initial_value,
58 uint32_t struct_size)
59{
60 uint32_t i;
61
62 vector->container = NULL;
63
64 if (!struct_size || !count) {
65 /* Container must be non-zero size*/
66 BREAK_TO_DEBUGGER();
67 return false;
68 }
69
70 vector->container = dm_alloc(struct_size * count);
71
72 if (vector->container == NULL)
73 return false;
74
 75	/* If the caller didn't supply an initial value then the default
 76	 * of all zeros is expected, which is exactly what dm_alloc()
 77	 * initialises the memory to. */
78 if (NULL != initial_value) {
79 for (i = 0; i < count; ++i)
80 memmove(
81 vector->container + i * struct_size,
82 initial_value,
83 struct_size);
84 }
85
86 vector->capacity = count;
87 vector->struct_size = struct_size;
88 vector->count = count;
89 return true;
90}
91
92struct vector *dal_vector_presized_create(
93 struct dc_context *ctx,
94 uint32_t size,
95 void *initial_value,
96 uint32_t struct_size)
97{
98 struct vector *vector = dm_alloc(sizeof(struct vector));
99
100 if (vector == NULL)
101 return NULL;
102
103 if (dal_vector_presized_costruct(
104 vector, ctx, size, initial_value, struct_size))
105 return vector;
106
107 BREAK_TO_DEBUGGER();
108 dm_free(vector);
109 return NULL;
110}
111
112struct vector *dal_vector_create(
113 struct dc_context *ctx,
114 uint32_t capacity,
115 uint32_t struct_size)
116{
117 struct vector *vector = dm_alloc(sizeof(struct vector));
118
119 if (vector == NULL)
120 return NULL;
121
122 if (dal_vector_construct(vector, ctx, capacity, struct_size))
123 return vector;
124
125 BREAK_TO_DEBUGGER();
126 dm_free(vector);
127 return NULL;
128}
129
130void dal_vector_destruct(
131 struct vector *vector)
132{
133 if (vector->container != NULL)
134 dm_free(vector->container);
135 vector->count = 0;
136 vector->capacity = 0;
137}
138
139void dal_vector_destroy(
140 struct vector **vector)
141{
142 if (vector == NULL || *vector == NULL)
143 return;
144 dal_vector_destruct(*vector);
145 dm_free(*vector);
146 *vector = NULL;
147}
148
149uint32_t dal_vector_get_count(
150 const struct vector *vector)
151{
152 return vector->count;
153}
154
155void *dal_vector_at_index(
156 const struct vector *vector,
157 uint32_t index)
158{
159 if (vector->container == NULL || index >= vector->count)
160 return NULL;
161 return vector->container + (index * vector->struct_size);
162}
163
164bool dal_vector_remove_at_index(
165 struct vector *vector,
166 uint32_t index)
167{
168 if (index >= vector->count)
169 return false;
170
171 if (index != vector->count - 1)
172 memmove(
173 vector->container + (index * vector->struct_size),
174 vector->container + ((index + 1) * vector->struct_size),
175 (vector->count - index - 1) * vector->struct_size);
176 vector->count -= 1;
177
178 return true;
179}
180
181void dal_vector_set_at_index(
182 const struct vector *vector,
183 const void *what,
184 uint32_t index)
185{
186 void *where = dal_vector_at_index(vector, index);
187
188 if (!where) {
189 BREAK_TO_DEBUGGER();
190 return;
191 }
192 memmove(
193 where,
194 what,
195 vector->struct_size);
196}
197
198static inline uint32_t calc_increased_capacity(
199 uint32_t old_capacity)
200{
201 return old_capacity * 2;
202}
203
204bool dal_vector_insert_at(
205 struct vector *vector,
206 const void *what,
207 uint32_t position)
208{
209 uint8_t *insert_address;
210
211 if (vector->count == vector->capacity) {
212 if (!dal_vector_reserve(
213 vector,
214 calc_increased_capacity(vector->capacity)))
215 return false;
216 }
217
218 insert_address = vector->container + (vector->struct_size * position);
219
220 if (vector->count && position < vector->count)
221 memmove(
222 insert_address + vector->struct_size,
223 insert_address,
224 vector->struct_size * (vector->count - position));
225
226 memmove(
227 insert_address,
228 what,
229 vector->struct_size);
230
231 vector->count++;
232
233 return true;
234}
235
236bool dal_vector_append(
237 struct vector *vector,
238 const void *item)
239{
240 return dal_vector_insert_at(vector, item, vector->count);
241}
242
243struct vector *dal_vector_clone(
244 const struct vector *vector)
245{
246 struct vector *vec_cloned;
247 uint32_t count;
248
249 /* create new vector */
250 count = dal_vector_get_count(vector);
251
252 if (count == 0)
 253		/* when count is 0 we still want to create a clone of the vector
254 */
255 vec_cloned = dal_vector_create(
256 vector->ctx,
257 vector->capacity,
258 vector->struct_size);
259 else
260 /* Call "presized create" version, independently of how the
261 * original vector was created.
262 * The owner of original vector must know how to treat the new
263 * vector - as "presized" or as "regular".
264 * But from vector point of view it doesn't matter. */
265 vec_cloned = dal_vector_presized_create(vector->ctx, count,
266 NULL,/* no initial value */
267 vector->struct_size);
268
269 if (NULL == vec_cloned) {
270 BREAK_TO_DEBUGGER();
271 return NULL;
272 }
273
274 /* copy vector's data */
275 memmove(vec_cloned->container, vector->container,
276 vec_cloned->struct_size * vec_cloned->capacity);
277
278 return vec_cloned;
279}
280
281uint32_t dal_vector_capacity(const struct vector *vector)
282{
283 return vector->capacity;
284}
285
286bool dal_vector_reserve(struct vector *vector, uint32_t capacity)
287{
288 void *new_container;
289
290 if (capacity <= vector->capacity)
291 return true;
292
293 new_container = dm_realloc(vector->container, capacity * vector->struct_size);
294
295 if (new_container) {
296 vector->container = new_container;
297 vector->capacity = capacity;
298 return true;
299 }
300
301 return false;
302}
303
304void dal_vector_clear(struct vector *vector)
305{
306 vector->count = 0;
307}
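A minimal usage sketch of the by-value vector above (illustrative only, not part of the diff; assumes a valid struct dc_context *ctx): each element is copied in as struct_size bytes, dal_vector_at_index() returns a pointer into the container, and the container doubles in capacity when an insert would overflow it.

#include "include/vector.h"

struct example_item {
	uint32_t id;
	uint32_t value;
};

/* Sketch only: build, read back and destroy a by-value vector. */
static void example_vector_usage(struct dc_context *ctx)
{
	struct example_item item = { .id = 1, .value = 100 };
	struct example_item *stored;
	struct vector *vec = dal_vector_create(ctx, 4, sizeof(struct example_item));

	if (!vec)
		return;

	dal_vector_append(vec, &item);		/* copies struct_size bytes in */

	stored = dal_vector_at_index(vec, 0);	/* pointer into the container */
	if (stored)
		stored->value = 200;		/* edits the stored copy in place */

	dal_vector_destroy(&vec);		/* frees container and wrapper */
}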
diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
new file mode 100644
index 000000000000..9ba677f0ff01
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
@@ -0,0 +1,24 @@
1#
2# Makefile for the 'bios' sub-component of DAL.
 3# It provides the parsing and execution controls for the ATOM BIOS image.
4
5BIOS = bios_parser.o bios_parser_interface.o bios_parser_helper.o command_table.o command_table_helper.o
6
7AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
8
9AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
10
11###############################################################################
12# DCE 8x
13###############################################################################
14# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
15# DCE8.x is compiled.
16AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce80/command_table_helper_dce80.o
17
18###############################################################################
19# DCE 11x
20###############################################################################
21AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce110/command_table_helper_dce110.o
22
23ccflags-y += -DLATEST_ATOM_BIOS_SUPPORT
24AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper_dce112.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
new file mode 100644
index 000000000000..ebd2e419f8f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -0,0 +1,4220 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "atom.h"
29
30#include "dc_bios_types.h"
31#include "include/gpio_service_interface.h"
32#include "include/grph_object_ctrl_defs.h"
33#include "include/bios_parser_interface.h"
34#include "include/i2caux_interface.h"
35#include "include/logger_interface.h"
36
37#include "command_table.h"
38#include "bios_parser_helper.h"
39#include "command_table_helper.h"
40#include "bios_parser.h"
41#include "bios_parser_types_internal.h"
42#include "bios_parser_interface.h"
43
44/* TODO remove - only needed for default i2c speed */
45#include "dc.h"
46
47#define THREE_PERCENT_OF_10000 300
48
49#define LAST_RECORD_TYPE 0xff
50
51/* GUID to validate external display connection info table (aka OPM module) */
52static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
53 0x91, 0x6E, 0x57, 0x09,
54 0x3F, 0x6D, 0xD2, 0x11,
55 0x39, 0x8E, 0x00, 0xA0,
56 0xC9, 0x69, 0x72, 0x3B};
57
58#define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
59
60static enum object_type object_type_from_bios_object_id(
61 uint32_t bios_object_id);
62static struct graphics_object_id object_id_from_bios_object_id(
63 uint32_t bios_object_id);
64static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id);
65static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id);
66static enum connector_id connector_id_from_bios_object_id(
67 uint32_t bios_object_id);
68static uint32_t id_from_bios_object_id(enum object_type type,
69 uint32_t bios_object_id);
70static uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id);
71static enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id);
72static void get_atom_data_table_revision(
73 ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
74 struct atom_data_revision *tbl_revision);
75static uint32_t get_dst_number_from_object(struct bios_parser *bp,
76 ATOM_OBJECT *object);
77static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
78 uint16_t **id_list);
79static uint32_t get_dest_obj_list(struct bios_parser *bp,
80 ATOM_OBJECT *object, uint16_t **id_list);
81static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
82 struct graphics_object_id id);
83static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
84 ATOM_I2C_RECORD *record,
85 struct graphics_object_i2c_info *info);
86static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
87 ATOM_OBJECT *object);
88static struct device_id device_type_from_device_id(uint16_t device_id);
89static uint32_t signal_to_ss_id(enum as_signal_type signal);
90static uint32_t get_support_mask_for_device_id(struct device_id device_id);
91static ATOM_ENCODER_CAP_RECORD *get_encoder_cap_record(
92 struct bios_parser *bp,
93 ATOM_OBJECT *object);
94
95#define BIOS_IMAGE_SIZE_OFFSET 2
96#define BIOS_IMAGE_SIZE_UNIT 512
97
98/*****************************************************************************/
99static bool bios_parser_construct(
100 struct bios_parser *bp,
101 struct bp_init_data *init,
102 enum dce_version dce_version);
103
104static uint8_t bios_parser_get_connectors_number(
105 struct dc_bios *dcb);
106
107static enum bp_result bios_parser_get_embedded_panel_info(
108 struct dc_bios *dcb,
109 struct embedded_panel_info *info);
110
111/*****************************************************************************/
112
113struct dc_bios *bios_parser_create(
114 struct bp_init_data *init,
115 enum dce_version dce_version)
116{
117 struct bios_parser *bp = NULL;
118
119 bp = dm_alloc(sizeof(struct bios_parser));
120 if (!bp)
121 return NULL;
122
123 if (bios_parser_construct(bp, init, dce_version))
124 return &bp->base;
125
126 dm_free(bp);
127 BREAK_TO_DEBUGGER();
128 return NULL;
129}
130
131static void destruct(struct bios_parser *bp)
132{
133 if (bp->base.bios_local_image)
134 dm_free(bp->base.bios_local_image);
135
136 if (bp->base.integrated_info)
137 dm_free(bp->base.integrated_info);
138}
139
140static void bios_parser_destroy(struct dc_bios **dcb)
141{
142 struct bios_parser *bp = BP_FROM_DCB(*dcb);
143
144 if (!bp) {
145 BREAK_TO_DEBUGGER();
146 return;
147 }
148
149 destruct(bp);
150
151 dm_free(bp);
152 *dcb = NULL;
153}
154
155static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
156{
157 ATOM_OBJECT_TABLE *table;
158
159 uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
160
161 table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
162
163 if (!table)
164 return 0;
165 else
166 return table->ucNumberOfObjects;
167}
168
169static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
170{
171 struct bios_parser *bp = BP_FROM_DCB(dcb);
172
173 return get_number_of_objects(bp,
174 le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
175}
176
177static struct graphics_object_id bios_parser_get_encoder_id(
178 struct dc_bios *dcb,
179 uint32_t i)
180{
181 struct bios_parser *bp = BP_FROM_DCB(dcb);
182 struct graphics_object_id object_id = dal_graphics_object_id_init(
183 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
184
185 uint32_t encoder_table_offset = bp->object_info_tbl_offset
186 + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
187
188 ATOM_OBJECT_TABLE *tbl =
189 GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
190
191 if (tbl && tbl->ucNumberOfObjects > i) {
192 const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
193
194 object_id = object_id_from_bios_object_id(id);
195 }
196
197 return object_id;
198}
199
200static struct graphics_object_id bios_parser_get_connector_id(
201 struct dc_bios *dcb,
202 uint8_t i)
203{
204 struct bios_parser *bp = BP_FROM_DCB(dcb);
205 struct graphics_object_id object_id = dal_graphics_object_id_init(
206 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
207
208 uint32_t connector_table_offset = bp->object_info_tbl_offset
209 + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
210
211 ATOM_OBJECT_TABLE *tbl =
212 GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
213
214 if (tbl && tbl->ucNumberOfObjects > i) {
215 const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
216
217 object_id = object_id_from_bios_object_id(id);
218 }
219
220 return object_id;
221}
222
223static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
224 struct graphics_object_id id)
225{
226 struct bios_parser *bp = BP_FROM_DCB(dcb);
227 ATOM_OBJECT *object = get_bios_object(bp, id);
228
229 return get_dst_number_from_object(bp, object);
230}
231
232static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
233 struct graphics_object_id object_id, uint32_t index,
234 struct graphics_object_id *src_object_id)
235{
236 uint32_t number;
237 uint16_t *id;
238 ATOM_OBJECT *object;
239 struct bios_parser *bp = BP_FROM_DCB(dcb);
240
241 if (!src_object_id)
242 return BP_RESULT_BADINPUT;
243
244 object = get_bios_object(bp, object_id);
245
246 if (!object) {
247 BREAK_TO_DEBUGGER(); /* Invalid object id */
248 return BP_RESULT_BADINPUT;
249 }
250
251 number = get_src_obj_list(bp, object, &id);
252
253 if (number <= index)
254 return BP_RESULT_BADINPUT;
255
256 *src_object_id = object_id_from_bios_object_id(id[index]);
257
258 return BP_RESULT_OK;
259}
260
261static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
262 struct graphics_object_id object_id, uint32_t index,
263 struct graphics_object_id *dest_object_id)
264{
265 uint32_t number;
266 uint16_t *id;
267 ATOM_OBJECT *object;
268 struct bios_parser *bp = BP_FROM_DCB(dcb);
269
270 if (!dest_object_id)
271 return BP_RESULT_BADINPUT;
272
273 object = get_bios_object(bp, object_id);
274
275 number = get_dest_obj_list(bp, object, &id);
276
277 if (number <= index)
278 return BP_RESULT_BADINPUT;
279
280 *dest_object_id = object_id_from_bios_object_id(id[index]);
281
282 return BP_RESULT_OK;
283}
284
285static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
286 struct graphics_object_id id,
287 struct graphics_object_i2c_info *info)
288{
289 uint32_t offset;
290 ATOM_OBJECT *object;
291 ATOM_COMMON_RECORD_HEADER *header;
292 ATOM_I2C_RECORD *record;
293 struct bios_parser *bp = BP_FROM_DCB(dcb);
294
295 if (!info)
296 return BP_RESULT_BADINPUT;
297
298 object = get_bios_object(bp, id);
299
300 if (!object)
301 return BP_RESULT_BADINPUT;
302
303 offset = le16_to_cpu(object->usRecordOffset)
304 + bp->object_info_tbl_offset;
305
306 for (;;) {
307 header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
308
309 if (!header)
310 return BP_RESULT_BADBIOSTABLE;
311
312 if (LAST_RECORD_TYPE == header->ucRecordType ||
313 !header->ucRecordSize)
314 break;
315
316 if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
317 && sizeof(ATOM_I2C_RECORD) <= header->ucRecordSize) {
318 /* get the I2C info */
319 record = (ATOM_I2C_RECORD *) header;
320
321 if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK)
322 return BP_RESULT_OK;
323 }
324
325 offset += header->ucRecordSize;
326 }
327
328 return BP_RESULT_NORECORD;
329}
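The loop above is the general ATOM record walk used throughout this file: start at the object's usRecordOffset, advance by ucRecordSize, and stop at LAST_RECORD_TYPE or a zero-sized record. A condensed sketch of just that iteration, with a hypothetical match() predicate (illustrative only, not part of the diff):

/* Sketch only: the generic record iteration behind the lookups in this file. */
static ATOM_COMMON_RECORD_HEADER *example_find_record(
	struct bios_parser *bp,
	ATOM_OBJECT *object,
	bool (*match)(ATOM_COMMON_RECORD_HEADER *header))
{
	uint32_t offset = le16_to_cpu(object->usRecordOffset)
		+ bp->object_info_tbl_offset;

	for (;;) {
		ATOM_COMMON_RECORD_HEADER *header =
			GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);

		if (!header)
			return NULL;	/* ran past the BIOS image */

		if (header->ucRecordType == LAST_RECORD_TYPE ||
				!header->ucRecordSize)
			return NULL;	/* end of the record list */

		if (match(header))
			return header;	/* caller casts to the concrete record */

		offset += header->ucRecordSize;
	}
}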
330
331static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line,
332 ATOM_COMMON_TABLE_HEADER *header,
333 uint8_t *address)
334{
335 enum bp_result result = BP_RESULT_NORECORD;
336 ATOM_VOLTAGE_OBJECT_INFO *info =
337 (ATOM_VOLTAGE_OBJECT_INFO *) address;
338
339 uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0];
340
341 while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
342 ATOM_VOLTAGE_OBJECT *object =
343 (ATOM_VOLTAGE_OBJECT *) voltage_current_object;
344
345 if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) &&
346 (object->ucVoltageType &
347 VOLTAGE_CONTROLLED_BY_I2C_MASK)) {
348
349 *i2c_line = object->asControl.ucVoltageControlI2cLine
350 ^ 0x90;
351 result = BP_RESULT_OK;
352 break;
353 }
354
355 voltage_current_object += object->ucSize;
356 }
357 return result;
358}
359
360static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line,
361 uint32_t index,
362 ATOM_COMMON_TABLE_HEADER *header,
363 uint8_t *address)
364{
365 enum bp_result result = BP_RESULT_NORECORD;
366 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info =
367 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address;
368
369 uint8_t *voltage_current_object =
370 (uint8_t *) (&(info->asVoltageObj[0]));
371
372 while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
373 ATOM_I2C_VOLTAGE_OBJECT_V3 *object =
374 (ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object;
375
376 if (object->sHeader.ucVoltageMode ==
377 ATOM_INIT_VOLTAGE_REGULATOR) {
378 if (object->sHeader.ucVoltageType == index) {
379 *i2c_line = object->ucVoltageControlI2cLine
380 ^ 0x90;
381 result = BP_RESULT_OK;
382 break;
383 }
384 }
385
386 voltage_current_object += le16_to_cpu(object->sHeader.usSize);
387 }
388 return result;
389}
390
391static enum bp_result bios_parser_get_thermal_ddc_info(
392 struct dc_bios *dcb,
393 uint32_t i2c_channel_id,
394 struct graphics_object_i2c_info *info)
395{
396 struct bios_parser *bp = BP_FROM_DCB(dcb);
397 ATOM_I2C_ID_CONFIG_ACCESS *config;
398 ATOM_I2C_RECORD record;
399
400 if (!info)
401 return BP_RESULT_BADINPUT;
402
403 config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id;
404
405 record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable;
406 record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux;
407 record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID;
408
409 return get_gpio_i2c_info(bp, &record, info);
410}
411
412static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
413 uint32_t index,
414 struct graphics_object_i2c_info *info)
415{
416 uint8_t i2c_line = 0;
417 enum bp_result result = BP_RESULT_NORECORD;
418 uint8_t *voltage_info_address;
419 ATOM_COMMON_TABLE_HEADER *header;
420 struct atom_data_revision revision = {0};
421 struct bios_parser *bp = BP_FROM_DCB(dcb);
422
423 if (!DATA_TABLES(VoltageObjectInfo))
424 return result;
425
426 voltage_info_address = get_image(&bp->base, DATA_TABLES(VoltageObjectInfo), sizeof(ATOM_COMMON_TABLE_HEADER));
427
428 header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address;
429
430 get_atom_data_table_revision(header, &revision);
431
432 switch (revision.major) {
433 case 1:
434 case 2:
435 result = get_voltage_ddc_info_v1(&i2c_line, header,
436 voltage_info_address);
437 break;
438 case 3:
439 if (revision.minor != 1)
440 break;
441 result = get_voltage_ddc_info_v3(&i2c_line, index, header,
442 voltage_info_address);
443 break;
444 }
445
446 if (result == BP_RESULT_OK)
447 result = bios_parser_get_thermal_ddc_info(dcb,
448 i2c_line, info);
449
450 return result;
451}
452
453/* TODO: temporary commented out to suppress 'defined but not used' warning */
454#if 0
455static enum bp_result bios_parser_get_ddc_info_for_i2c_line(
456 struct bios_parser *bp,
457 uint8_t i2c_line, struct graphics_object_i2c_info *info)
458{
459 uint32_t offset;
460 ATOM_OBJECT *object;
461 ATOM_OBJECT_TABLE *table;
462 uint32_t i;
463
464 if (!info)
465 return BP_RESULT_BADINPUT;
466
467 offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
468
469 offset += bp->object_info_tbl_offset;
470
471 table = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
472
473 if (!table)
474 return BP_RESULT_BADBIOSTABLE;
475
476 for (i = 0; i < table->ucNumberOfObjects; i++) {
477 object = &table->asObjects[i];
478
479 if (!object) {
480 BREAK_TO_DEBUGGER(); /* Invalid object id */
481 return BP_RESULT_BADINPUT;
482 }
483
484 offset = le16_to_cpu(object->usRecordOffset)
485 + bp->object_info_tbl_offset;
486
487 for (;;) {
488 ATOM_COMMON_RECORD_HEADER *header =
489 GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
490
491 if (!header)
492 return BP_RESULT_BADBIOSTABLE;
493
494 offset += header->ucRecordSize;
495
496 if (LAST_RECORD_TYPE == header->ucRecordType ||
497 !header->ucRecordSize)
498 break;
499
500 if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
501 && sizeof(ATOM_I2C_RECORD) <=
502 header->ucRecordSize) {
503 ATOM_I2C_RECORD *record =
504 (ATOM_I2C_RECORD *) header;
505
506 if (i2c_line != record->sucI2cId.bfI2C_LineMux)
507 continue;
508
509 /* get the I2C info */
510 if (get_gpio_i2c_info(bp, record, info) ==
511 BP_RESULT_OK)
512 return BP_RESULT_OK;
513 }
514 }
515 }
516
517 return BP_RESULT_NORECORD;
518}
519#endif
520
521static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
522 struct graphics_object_id id,
523 struct graphics_object_hpd_info *info)
524{
525 struct bios_parser *bp = BP_FROM_DCB(dcb);
526 ATOM_OBJECT *object;
527 ATOM_HPD_INT_RECORD *record = NULL;
528
529 if (!info)
530 return BP_RESULT_BADINPUT;
531
532 object = get_bios_object(bp, id);
533
534 if (!object)
535 return BP_RESULT_BADINPUT;
536
537 record = get_hpd_record(bp, object);
538
539 if (record != NULL) {
540 info->hpd_int_gpio_uid = record->ucHPDIntGPIOID;
541 info->hpd_active = record->ucPlugged_PinState;
542 return BP_RESULT_OK;
543 }
544
545 return BP_RESULT_NORECORD;
546}
547
548static uint32_t bios_parser_get_gpio_record(
549 struct dc_bios *dcb,
550 struct graphics_object_id id,
551 struct bp_gpio_cntl_info *gpio_record,
552 uint32_t record_size)
553{
554 struct bios_parser *bp = BP_FROM_DCB(dcb);
555 ATOM_COMMON_RECORD_HEADER *header = NULL;
556 ATOM_OBJECT_GPIO_CNTL_RECORD *record = NULL;
557 ATOM_OBJECT *object = get_bios_object(bp, id);
558 uint32_t offset;
559 uint32_t pins_number;
560 uint32_t i;
561
562 if (!object)
563 return 0;
564
565 /* Initialise offset */
566 offset = le16_to_cpu(object->usRecordOffset)
567 + bp->object_info_tbl_offset;
568
569 for (;;) {
570 /* Get record header */
571 header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
572 if (!header || header->ucRecordType == LAST_RECORD_TYPE ||
573 !header->ucRecordSize)
574 break;
575
576 /* If this is gpio control record - stop. We found the record */
577 if (header->ucRecordType == ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE
578 && header->ucRecordSize
579 >= sizeof(ATOM_OBJECT_GPIO_CNTL_RECORD)) {
580 record = (ATOM_OBJECT_GPIO_CNTL_RECORD *) header;
581 break;
582 }
583
584 /* Advance to next record */
585 offset += header->ucRecordSize;
586 }
587
588 /* If we did not find a record - return */
589 if (!record)
590 return 0;
591
592 /* Extract gpio IDs from bios record (make sure we do not exceed passed
593 * array size) */
594 pins_number = (record->ucNumberOfPins < record_size ?
595 record->ucNumberOfPins : record_size);
596 for (i = 0; i < pins_number; i++) {
597 uint8_t output_state = ((record->asGpio[i].ucGPIO_PinState
598 & GPIO_PIN_OUTPUT_STATE_MASK)
599 >> GPIO_PIN_OUTPUT_STATE_SHIFT);
600 gpio_record[i].id = record->asGpio[i].ucGPIOID;
601
602 switch (output_state) {
603 case GPIO_PIN_STATE_ACTIVE_LOW:
604 gpio_record[i].state =
605 GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW;
606 break;
607
608 case GPIO_PIN_STATE_ACTIVE_HIGH:
609 gpio_record[i].state =
610 GPIO_PIN_OUTPUT_STATE_ACTIVE_HIGH;
611 break;
612
613 default:
614 BREAK_TO_DEBUGGER(); /* Invalid Pin Output State */
615 break;
616 }
617 }
618
619 return pins_number;
620}
621
622enum bp_result bios_parser_get_device_tag_record(
623 struct bios_parser *bp,
624 ATOM_OBJECT *object,
625 ATOM_CONNECTOR_DEVICE_TAG_RECORD **record)
626{
627 ATOM_COMMON_RECORD_HEADER *header;
628 uint32_t offset;
629
630 offset = le16_to_cpu(object->usRecordOffset)
631 + bp->object_info_tbl_offset;
632
633 for (;;) {
634 header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
635
636 if (!header)
637 return BP_RESULT_BADBIOSTABLE;
638
639 offset += header->ucRecordSize;
640
641 if (LAST_RECORD_TYPE == header->ucRecordType ||
642 !header->ucRecordSize)
643 break;
644
645 if (ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE !=
646 header->ucRecordType)
647 continue;
648
649 if (sizeof(ATOM_CONNECTOR_DEVICE_TAG) > header->ucRecordSize)
650 continue;
651
652 *record = (ATOM_CONNECTOR_DEVICE_TAG_RECORD *) header;
653 return BP_RESULT_OK;
654 }
655
656 return BP_RESULT_NORECORD;
657}
658
659static enum bp_result bios_parser_get_device_tag(
660 struct dc_bios *dcb,
661 struct graphics_object_id connector_object_id,
662 uint32_t device_tag_index,
663 struct connector_device_tag_info *info)
664{
665 struct bios_parser *bp = BP_FROM_DCB(dcb);
666 ATOM_OBJECT *object;
667 ATOM_CONNECTOR_DEVICE_TAG_RECORD *record = NULL;
668 ATOM_CONNECTOR_DEVICE_TAG *device_tag;
669
670 if (!info)
671 return BP_RESULT_BADINPUT;
672
 673	/* get_bios_object() will return the MXM object */
674 object = get_bios_object(bp, connector_object_id);
675
676 if (!object) {
677 BREAK_TO_DEBUGGER(); /* Invalid object id */
678 return BP_RESULT_BADINPUT;
679 }
680
681 if (bios_parser_get_device_tag_record(bp, object, &record)
682 != BP_RESULT_OK)
683 return BP_RESULT_NORECORD;
684
685 if (device_tag_index >= record->ucNumberOfDevice)
686 return BP_RESULT_NORECORD;
687
688 device_tag = &record->asDeviceTag[device_tag_index];
689
690 info->acpi_device = le32_to_cpu(device_tag->ulACPIDeviceEnum);
691 info->dev_id =
692 device_type_from_device_id(le16_to_cpu(device_tag->usDeviceID));
693
694 return BP_RESULT_OK;
695}
696
697static enum bp_result get_firmware_info_v1_4(
698 struct bios_parser *bp,
699 struct firmware_info *info);
700static enum bp_result get_firmware_info_v2_1(
701 struct bios_parser *bp,
702 struct firmware_info *info);
703static enum bp_result get_firmware_info_v2_2(
704 struct bios_parser *bp,
705 struct firmware_info *info);
706
707static enum bp_result bios_parser_get_firmware_info(
708 struct dc_bios *dcb,
709 struct firmware_info *info)
710{
711 struct bios_parser *bp = BP_FROM_DCB(dcb);
712 enum bp_result result = BP_RESULT_BADBIOSTABLE;
713 ATOM_COMMON_TABLE_HEADER *header;
714 struct atom_data_revision revision;
715
716 if (info && DATA_TABLES(FirmwareInfo)) {
717 header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
718 DATA_TABLES(FirmwareInfo));
719 get_atom_data_table_revision(header, &revision);
720 switch (revision.major) {
721 case 1:
722 switch (revision.minor) {
723 case 4:
724 result = get_firmware_info_v1_4(bp, info);
725 break;
726 default:
727 break;
728 }
729 break;
730
731 case 2:
732 switch (revision.minor) {
733 case 1:
734 result = get_firmware_info_v2_1(bp, info);
735 break;
736 case 2:
737 result = get_firmware_info_v2_2(bp, info);
738 break;
739 default:
740 break;
741 }
742 break;
743 default:
744 break;
745 }
746 }
747
748 return result;
749}
750
751static enum bp_result get_firmware_info_v1_4(
752 struct bios_parser *bp,
753 struct firmware_info *info)
754{
755 ATOM_FIRMWARE_INFO_V1_4 *firmware_info =
756 GET_IMAGE(ATOM_FIRMWARE_INFO_V1_4,
757 DATA_TABLES(FirmwareInfo));
758
759 if (!info)
760 return BP_RESULT_BADINPUT;
761
762 if (!firmware_info)
763 return BP_RESULT_BADBIOSTABLE;
764
765 memset(info, 0, sizeof(*info));
766
767 /* Pixel clock pll information. We need to convert from 10KHz units into
768 * KHz units */
769 info->pll_info.crystal_frequency =
770 le16_to_cpu(firmware_info->usReferenceClock) * 10;
771 info->pll_info.min_input_pxl_clk_pll_frequency =
772 le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
773 info->pll_info.max_input_pxl_clk_pll_frequency =
774 le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
775 info->pll_info.min_output_pxl_clk_pll_frequency =
776 le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
777 info->pll_info.max_output_pxl_clk_pll_frequency =
778 le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
779
780 if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
781 /* Since there is no information on the SS, report conservative
782 * value 3% for bandwidth calculation */
783 /* unit of 0.01% */
784 info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
785
786 if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
 787		/* Since there is no information on the SS, report conservative
788 * value 3% for bandwidth calculation */
789 /* unit of 0.01% */
790 info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
791
792 return BP_RESULT_OK;
793}
794
795static enum bp_result get_ss_info_v3_1(
796 struct bios_parser *bp,
797 uint32_t id,
798 uint32_t index,
799 struct spread_spectrum_info *ss_info);
800
801static enum bp_result get_firmware_info_v2_1(
802 struct bios_parser *bp,
803 struct firmware_info *info)
804{
805 ATOM_FIRMWARE_INFO_V2_1 *firmwareInfo =
806 GET_IMAGE(ATOM_FIRMWARE_INFO_V2_1, DATA_TABLES(FirmwareInfo));
807 struct spread_spectrum_info internalSS;
808 uint32_t index;
809
810 if (!info)
811 return BP_RESULT_BADINPUT;
812
813 if (!firmwareInfo)
814 return BP_RESULT_BADBIOSTABLE;
815
816 memset(info, 0, sizeof(*info));
817
818 /* Pixel clock pll information. We need to convert from 10KHz units into
819 * KHz units */
820 info->pll_info.crystal_frequency =
821 le16_to_cpu(firmwareInfo->usCoreReferenceClock) * 10;
822 info->pll_info.min_input_pxl_clk_pll_frequency =
823 le16_to_cpu(firmwareInfo->usMinPixelClockPLL_Input) * 10;
824 info->pll_info.max_input_pxl_clk_pll_frequency =
825 le16_to_cpu(firmwareInfo->usMaxPixelClockPLL_Input) * 10;
826 info->pll_info.min_output_pxl_clk_pll_frequency =
827 le32_to_cpu(firmwareInfo->ulMinPixelClockPLL_Output) * 10;
828 info->pll_info.max_output_pxl_clk_pll_frequency =
829 le32_to_cpu(firmwareInfo->ulMaxPixelClockPLL_Output) * 10;
830 info->default_display_engine_pll_frequency =
831 le32_to_cpu(firmwareInfo->ulDefaultDispEngineClkFreq) * 10;
832 info->external_clock_source_frequency_for_dp =
833 le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
834 info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
835
836 /* There should be only one entry in the SS info table for Memory Clock
837 */
838 index = 0;
839 if (firmwareInfo->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
840 /* Since there is no information for external SS, report
841 * conservative value 3% for bandwidth calculation */
842 /* unit of 0.01% */
843 info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
844 else if (get_ss_info_v3_1(bp,
845 ASIC_INTERNAL_MEMORY_SS, index, &internalSS) == BP_RESULT_OK) {
846 if (internalSS.spread_spectrum_percentage) {
847 info->feature.memory_clk_ss_percentage =
848 internalSS.spread_spectrum_percentage;
849 if (internalSS.type.CENTER_MODE) {
850 /* if it is centermode, the exact SS Percentage
851 * will be round up of half of the percentage
852 * reported in the SS table */
853 ++info->feature.memory_clk_ss_percentage;
854 info->feature.memory_clk_ss_percentage /= 2;
855 }
856 }
857 }
858
859 /* There should be only one entry in the SS info table for Engine Clock
860 */
861 index = 1;
862 if (firmwareInfo->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
863 /* Since there is no information for external SS, report
864 * conservative value 3% for bandwidth calculation */
865 /* unit of 0.01% */
866 info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
867 else if (get_ss_info_v3_1(bp,
868 ASIC_INTERNAL_ENGINE_SS, index, &internalSS) == BP_RESULT_OK) {
869 if (internalSS.spread_spectrum_percentage) {
870 info->feature.engine_clk_ss_percentage =
871 internalSS.spread_spectrum_percentage;
872 if (internalSS.type.CENTER_MODE) {
873 /* if it is centermode, the exact SS Percentage
874 * will be round up of half of the percentage
875 * reported in the SS table */
876 ++info->feature.engine_clk_ss_percentage;
877 info->feature.engine_clk_ss_percentage /= 2;
878 }
879 }
880 }
881
882 return BP_RESULT_OK;
883}
884
885static enum bp_result get_firmware_info_v2_2(
886 struct bios_parser *bp,
887 struct firmware_info *info)
888{
889 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
890 struct spread_spectrum_info internal_ss;
891 uint32_t index;
892
893 if (!info)
894 return BP_RESULT_BADINPUT;
895
896 firmware_info = GET_IMAGE(ATOM_FIRMWARE_INFO_V2_2,
897 DATA_TABLES(FirmwareInfo));
898
899 if (!firmware_info)
900 return BP_RESULT_BADBIOSTABLE;
901
902 memset(info, 0, sizeof(*info));
903
904 /* Pixel clock pll information. We need to convert from 10KHz units into
905 * KHz units */
906 info->pll_info.crystal_frequency =
907 le16_to_cpu(firmware_info->usCoreReferenceClock) * 10;
908 info->pll_info.min_input_pxl_clk_pll_frequency =
909 le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
910 info->pll_info.max_input_pxl_clk_pll_frequency =
911 le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
912 info->pll_info.min_output_pxl_clk_pll_frequency =
913 le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
914 info->pll_info.max_output_pxl_clk_pll_frequency =
915 le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
916 info->default_display_engine_pll_frequency =
917 le32_to_cpu(firmware_info->ulDefaultDispEngineClkFreq) * 10;
918 info->external_clock_source_frequency_for_dp =
919 le16_to_cpu(firmware_info->usUniphyDPModeExtClkFreq) * 10;
920
921 /* There should be only one entry in the SS info table for Memory Clock
922 */
923 index = 0;
924 if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
925 /* Since there is no information for external SS, report
926 * a conservative value of 3% for bandwidth calculation */
927 /* unit of 0.01% */
928 info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
929 else if (get_ss_info_v3_1(bp,
930 ASIC_INTERNAL_MEMORY_SS, index, &internal_ss) == BP_RESULT_OK) {
931 if (internal_ss.spread_spectrum_percentage) {
932 info->feature.memory_clk_ss_percentage =
933 internal_ss.spread_spectrum_percentage;
934 if (internal_ss.type.CENTER_MODE) {
935 /* if it is center mode, the exact SS percentage
936 * is half of the percentage reported in the SS
937 * table, rounded up */
938 ++info->feature.memory_clk_ss_percentage;
939 info->feature.memory_clk_ss_percentage /= 2;
940 }
941 }
942 }
943
944 /* There should be only one entry in the SS info table for Engine Clock
945 */
946 index = 1;
947 if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
948 /* Since there is no information for external SS, report
949 * a conservative value of 3% for bandwidth calculation */
950 /* unit of 0.01% */
951 info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
952 else if (get_ss_info_v3_1(bp,
953 ASIC_INTERNAL_ENGINE_SS, index, &internal_ss) == BP_RESULT_OK) {
954 if (internal_ss.spread_spectrum_percentage) {
955 info->feature.engine_clk_ss_percentage =
956 internal_ss.spread_spectrum_percentage;
957 if (internal_ss.type.CENTER_MODE) {
958 /* if it is center mode, the exact SS percentage
959 * is half of the percentage reported in the SS
960 * table, rounded up */
961 ++info->feature.engine_clk_ss_percentage;
962 info->feature.engine_clk_ss_percentage /= 2;
963 }
964 }
965 }
966
967 /* Remote Display */
968 info->remote_display_config = firmware_info->ucRemoteDisplayConfig;
969
970 /* Minimum allowed backlight (BL) level */
971 info->min_allowed_bl_level = firmware_info->ucMinAllowedBL_Level;
972 /* Used starting from CI */
973 info->smu_gpu_pll_output_freq =
974 (uint32_t) (le32_to_cpu(firmware_info->ulGPUPLL_OutputFreq) * 10);
975
976 return BP_RESULT_OK;
977}
978
979static enum bp_result get_ss_info_v3_1(
980 struct bios_parser *bp,
981 uint32_t id,
982 uint32_t index,
983 struct spread_spectrum_info *ss_info)
984{
985 ATOM_ASIC_INTERNAL_SS_INFO_V3 *ss_table_header_include;
986 ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
987 uint32_t table_size;
988 uint32_t i;
989 uint32_t table_index = 0;
990
991 if (!ss_info)
992 return BP_RESULT_BADINPUT;
993
994 if (!DATA_TABLES(ASIC_InternalSS_Info))
995 return BP_RESULT_UNSUPPORTED;
996
997 ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
998 DATA_TABLES(ASIC_InternalSS_Info));
999 table_size =
1000 (le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
1001 - sizeof(ATOM_COMMON_TABLE_HEADER))
1002 / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1003
1004 tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
1005 &ss_table_header_include->asSpreadSpectrum[0];
1006
1007 memset(ss_info, 0, sizeof(struct spread_spectrum_info));
1008
1009 for (i = 0; i < table_size; i++) {
1010 if (tbl[i].ucClockIndication != (uint8_t) id)
1011 continue;
1012
1013 if (table_index != index) {
1014 table_index++;
1015 continue;
1016 }
1017 /* VBIOS introduced new defines for Version 3 with the same values
1018 * as before, so use the new ones for Version 3 here.
1019 * This shouldn't affect V3 VBIOSes in the field, as the define
1020 * values are still the same.
1021 * #define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
1022 * #define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
1023 *
1024 * Old VBIOS defines:
1025 * #define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
1026 * #define ATOM_EXTERNAL_SS_MASK 0x00000002
1027 */
1028
1029 if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
1030 ss_info->type.EXTERNAL = true;
1031
1032 if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
1033 ss_info->type.CENTER_MODE = true;
1034
1035 /* Older VBIOSes (in the field) always provide the SS percentage
1036 * in 0.01% units, so set the divider to 100 */
1037 ss_info->spread_percentage_divider = 100;
1038
1039 /* #define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 */
1040 if (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK
1041 & tbl[i].ucSpreadSpectrumMode)
1042 ss_info->spread_percentage_divider = 1000;
1043
1044 ss_info->type.STEP_AND_DELAY_INFO = false;
1045 /* convert [10KHz] into [KHz] */
1046 ss_info->target_clock_range =
1047 le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
1048 ss_info->spread_spectrum_percentage =
1049 (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
1050 ss_info->spread_spectrum_range =
1051 (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
1052
1053 return BP_RESULT_OK;
1054 }
1055 return BP_RESULT_NORECORD;
1056}
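
/*
 * Illustrative decode of ucSpreadSpectrumMode using the V3 masks quoted
 * in the comments above (example value only): a mode byte of 0x11 has
 * SS_MODE_V3_CENTRE_SPREAD_MASK (0x01) and
 * SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK (0x10) set, so the entry is
 * reported as center spread with spread_percentage_divider = 1000.
 */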
1057
1058static enum bp_result bios_parser_transmitter_control(
1059 struct dc_bios *dcb,
1060 struct bp_transmitter_control *cntl)
1061{
1062 struct bios_parser *bp = BP_FROM_DCB(dcb);
1063
1064 if (!bp->cmd_tbl.transmitter_control)
1065 return BP_RESULT_FAILURE;
1066
1067 return bp->cmd_tbl.transmitter_control(bp, cntl);
1068}
1069
1070static enum bp_result bios_parser_encoder_control(
1071 struct dc_bios *dcb,
1072 struct bp_encoder_control *cntl)
1073{
1074 struct bios_parser *bp = BP_FROM_DCB(dcb);
1075
1076 if (!bp->cmd_tbl.dig_encoder_control)
1077 return BP_RESULT_FAILURE;
1078
1079 return bp->cmd_tbl.dig_encoder_control(bp, cntl);
1080}
1081
1082static enum bp_result bios_parser_adjust_pixel_clock(
1083 struct dc_bios *dcb,
1084 struct bp_adjust_pixel_clock_parameters *bp_params)
1085{
1086 struct bios_parser *bp = BP_FROM_DCB(dcb);
1087
1088 if (!bp->cmd_tbl.adjust_display_pll)
1089 return BP_RESULT_FAILURE;
1090
1091 return bp->cmd_tbl.adjust_display_pll(bp, bp_params);
1092}
1093
1094static enum bp_result bios_parser_set_pixel_clock(
1095 struct dc_bios *dcb,
1096 struct bp_pixel_clock_parameters *bp_params)
1097{
1098 struct bios_parser *bp = BP_FROM_DCB(dcb);
1099
1100 if (!bp->cmd_tbl.set_pixel_clock)
1101 return BP_RESULT_FAILURE;
1102
1103 return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
1104}
1105
1106static enum bp_result bios_parser_set_dce_clock(
1107 struct dc_bios *dcb,
1108 struct bp_set_dce_clock_parameters *bp_params)
1109{
1110 struct bios_parser *bp = BP_FROM_DCB(dcb);
1111
1112 if (!bp->cmd_tbl.set_dce_clock)
1113 return BP_RESULT_FAILURE;
1114
1115 return bp->cmd_tbl.set_dce_clock(bp, bp_params);
1116}
1117
1118static enum bp_result bios_parser_enable_spread_spectrum_on_ppll(
1119 struct dc_bios *dcb,
1120 struct bp_spread_spectrum_parameters *bp_params,
1121 bool enable)
1122{
1123 struct bios_parser *bp = BP_FROM_DCB(dcb);
1124
1125 if (!bp->cmd_tbl.enable_spread_spectrum_on_ppll)
1126 return BP_RESULT_FAILURE;
1127
1128 return bp->cmd_tbl.enable_spread_spectrum_on_ppll(
1129 bp, bp_params, enable);
1130
1131}
1132
1133static enum bp_result bios_parser_program_crtc_timing(
1134 struct dc_bios *dcb,
1135 struct bp_hw_crtc_timing_parameters *bp_params)
1136{
1137 struct bios_parser *bp = BP_FROM_DCB(dcb);
1138
1139 if (!bp->cmd_tbl.set_crtc_timing)
1140 return BP_RESULT_FAILURE;
1141
1142 return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
1143}
1144
1145static enum bp_result bios_parser_program_display_engine_pll(
1146 struct dc_bios *dcb,
1147 struct bp_pixel_clock_parameters *bp_params)
1148{
1149 struct bios_parser *bp = BP_FROM_DCB(dcb);
1150
1151 if (!bp->cmd_tbl.program_clock)
1152 return BP_RESULT_FAILURE;
1153
1154 return bp->cmd_tbl.program_clock(bp, bp_params);
1155
1156}
1157
1158
1159static enum bp_result bios_parser_enable_crtc(
1160 struct dc_bios *dcb,
1161 enum controller_id id,
1162 bool enable)
1163{
1164 struct bios_parser *bp = BP_FROM_DCB(dcb);
1165
1166 if (!bp->cmd_tbl.enable_crtc)
1167 return BP_RESULT_FAILURE;
1168
1169 return bp->cmd_tbl.enable_crtc(bp, id, enable);
1170}
1171
1172static enum bp_result bios_parser_crtc_source_select(
1173 struct dc_bios *dcb,
1174 struct bp_crtc_source_select *bp_params)
1175{
1176 struct bios_parser *bp = BP_FROM_DCB(dcb);
1177
1178 if (!bp->cmd_tbl.select_crtc_source)
1179 return BP_RESULT_FAILURE;
1180
1181 return bp->cmd_tbl.select_crtc_source(bp, bp_params);
1182}
1183
1184static enum bp_result bios_parser_enable_disp_power_gating(
1185 struct dc_bios *dcb,
1186 enum controller_id controller_id,
1187 enum bp_pipe_control_action action)
1188{
1189 struct bios_parser *bp = BP_FROM_DCB(dcb);
1190
1191 if (!bp->cmd_tbl.enable_disp_power_gating)
1192 return BP_RESULT_FAILURE;
1193
1194 return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
1195 action);
1196}
1197
1198static bool bios_parser_is_device_id_supported(
1199 struct dc_bios *dcb,
1200 struct device_id id)
1201{
1202 struct bios_parser *bp = BP_FROM_DCB(dcb);
1203
1204 uint32_t mask = get_support_mask_for_device_id(id);
1205
1206 return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
1207}
1208
1209static enum bp_result bios_parser_crt_control(
1210 struct dc_bios *dcb,
1211 enum engine_id engine_id,
1212 bool enable,
1213 uint32_t pixel_clock)
1214{
1215 struct bios_parser *bp = BP_FROM_DCB(dcb);
1216 uint8_t standard;
1217
1218 if (!bp->cmd_tbl.dac1_encoder_control &&
1219 engine_id == ENGINE_ID_DACA)
1220 return BP_RESULT_FAILURE;
1221 if (!bp->cmd_tbl.dac2_encoder_control &&
1222 engine_id == ENGINE_ID_DACB)
1223 return BP_RESULT_FAILURE;
1224 /* validate params */
1225 switch (engine_id) {
1226 case ENGINE_ID_DACA:
1227 case ENGINE_ID_DACB:
1228 break;
1229 default:
1230 /* unsupported engine */
1231 return BP_RESULT_FAILURE;
1232 }
1233
1234 standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */
1235
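/*
 * Ordering note (added for clarity, matching the code below): on enable
 * the DAC encoder is programmed before its output is turned on; on
 * disable the output is turned off first and the encoder is programmed
 * last.
 */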
1236 if (enable) {
1237 if (engine_id == ENGINE_ID_DACA) {
1238 bp->cmd_tbl.dac1_encoder_control(bp, enable,
1239 pixel_clock, standard);
1240 if (bp->cmd_tbl.dac1_output_control != NULL)
1241 bp->cmd_tbl.dac1_output_control(bp, enable);
1242 } else {
1243 bp->cmd_tbl.dac2_encoder_control(bp, enable,
1244 pixel_clock, standard);
1245 if (bp->cmd_tbl.dac2_output_control != NULL)
1246 bp->cmd_tbl.dac2_output_control(bp, enable);
1247 }
1248 } else {
1249 if (engine_id == ENGINE_ID_DACA) {
1250 if (bp->cmd_tbl.dac1_output_control != NULL)
1251 bp->cmd_tbl.dac1_output_control(bp, enable);
1252 bp->cmd_tbl.dac1_encoder_control(bp, enable,
1253 pixel_clock, standard);
1254 } else {
1255 if (bp->cmd_tbl.dac2_output_control != NULL)
1256 bp->cmd_tbl.dac2_output_control(bp, enable);
1257 bp->cmd_tbl.dac2_encoder_control(bp, enable,
1258 pixel_clock, standard);
1259 }
1260 }
1261
1262 return BP_RESULT_OK;
1263}
1264
1265static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
1266 ATOM_OBJECT *object)
1267{
1268 ATOM_COMMON_RECORD_HEADER *header;
1269 uint32_t offset;
1270
1271 if (!object) {
1272 BREAK_TO_DEBUGGER(); /* Invalid object */
1273 return NULL;
1274 }
1275
1276 offset = le16_to_cpu(object->usRecordOffset)
1277 + bp->object_info_tbl_offset;
1278
1279 for (;;) {
1280 header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
1281
1282 if (!header)
1283 return NULL;
1284
1285 if (LAST_RECORD_TYPE == header->ucRecordType ||
1286 !header->ucRecordSize)
1287 break;
1288
1289 if (ATOM_HPD_INT_RECORD_TYPE == header->ucRecordType
1290 && sizeof(ATOM_HPD_INT_RECORD) <= header->ucRecordSize)
1291 return (ATOM_HPD_INT_RECORD *) header;
1292
1293 offset += header->ucRecordSize;
1294 }
1295
1296 return NULL;
1297}
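
/*
 * Record-walking note (derived from the loops in this file, added for
 * clarity): an object's records start at usRecordOffset relative to the
 * object info table. Each record begins with an ATOM_COMMON_RECORD_HEADER
 * whose ucRecordSize gives the distance to the next record; the list ends
 * at a LAST_RECORD_TYPE header or a zero-sized record.
 */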
1298
1299/**
1300 * Get I2C information for the given object id
1301 *
1302 * Search all records to find the ATOM_I2C_RECORD_TYPE record
1303 */
1304static ATOM_I2C_RECORD *get_i2c_record(
1305 struct bios_parser *bp,
1306 ATOM_OBJECT *object)
1307{
1308 uint32_t offset;
1309 ATOM_COMMON_RECORD_HEADER *record_header;
1310
1311 if (!object) {
1312 BREAK_TO_DEBUGGER();
1313 /* Invalid object */
1314 return NULL;
1315 }
1316
1317 offset = le16_to_cpu(object->usRecordOffset)
1318 + bp->object_info_tbl_offset;
1319
1320 for (;;) {
1321 record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
1322
1323 if (!record_header)
1324 return NULL;
1325
1326 if (LAST_RECORD_TYPE == record_header->ucRecordType ||
1327 0 == record_header->ucRecordSize)
1328 break;
1329
1330 if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType &&
1331 sizeof(ATOM_I2C_RECORD) <=
1332 record_header->ucRecordSize) {
1333 return (ATOM_I2C_RECORD *)record_header;
1334 }
1335
1336 offset += record_header->ucRecordSize;
1337 }
1338
1339 return NULL;
1340}
1341
1342static enum bp_result get_ss_info_from_ss_info_table(
1343 struct bios_parser *bp,
1344 uint32_t id,
1345 struct spread_spectrum_info *ss_info);
1346static enum bp_result get_ss_info_from_tbl(
1347 struct bios_parser *bp,
1348 uint32_t id,
1349 struct spread_spectrum_info *ss_info);
1350/**
1351 * bios_parser_get_spread_spectrum_info
1352 * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
1353 * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
1354 * ver 2.1 can co-exist with the SS_Info table. Except for
1355 * ASIC_InternalSS_Info ver 3.1, there is only one entry for each signal/SS
1356 * id. There is no plan to support multiple spread spectrum entries for
1357 * Evergreen.
1358 * @param [in] dcb, pointer to the DC BIOS object
1359 * @param [in] signal, as_signal_type to be converted to an SS info index
1360 * @param [in] index, index of the matching entry to retrieve
1361 * @param [out] ss_info, spread spectrum information structure
1362 * @return Bios parser result code
1363 */
1363static enum bp_result bios_parser_get_spread_spectrum_info(
1364 struct dc_bios *dcb,
1365 enum as_signal_type signal,
1366 uint32_t index,
1367 struct spread_spectrum_info *ss_info)
1368{
1369 struct bios_parser *bp = BP_FROM_DCB(dcb);
1370 enum bp_result result = BP_RESULT_UNSUPPORTED;
1371 uint32_t clk_id_ss = 0;
1372 ATOM_COMMON_TABLE_HEADER *header;
1373 struct atom_data_revision tbl_revision;
1374
1375 if (!ss_info) /* check for bad input */
1376 return BP_RESULT_BADINPUT;
1377 /* signal translation */
1378 clk_id_ss = signal_to_ss_id(signal);
1379
1380 if (!DATA_TABLES(ASIC_InternalSS_Info))
1381 if (!index)
1382 return get_ss_info_from_ss_info_table(bp, clk_id_ss,
1383 ss_info);
1384
1385 header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
1386 DATA_TABLES(ASIC_InternalSS_Info));
1387 get_atom_data_table_revision(header, &tbl_revision);
1388
1389 switch (tbl_revision.major) {
1390 case 2:
1391 switch (tbl_revision.minor) {
1392 case 1:
1393 /* there cannot be more than one entry for Internal
1394 * SS Info table version 2.1 */
1395 if (!index)
1396 return get_ss_info_from_tbl(bp, clk_id_ss,
1397 ss_info);
1398 break;
1399 default:
1400 break;
1401 }
1402 break;
1403
1404 case 3:
1405 switch (tbl_revision.minor) {
1406 case 1:
1407 return get_ss_info_v3_1(bp, clk_id_ss, index, ss_info);
1408 default:
1409 break;
1410 }
1411 break;
1412 default:
1413 break;
1414 }
1415 /* there cannot be more than one entry for the SS Info table */
1416 return result;
1417}
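
/*
 * Hypothetical caller sketch (not part of this file): a caller in this
 * file could pair bios_parser_get_ss_entry_number() with this function to
 * walk every matching entry for a signal. Variable names and the
 * apply_ss_settings() consumer below are illustrative only.
 *
 *	struct spread_spectrum_info info;
 *	uint32_t i, n = bios_parser_get_ss_entry_number(dcb, signal);
 *
 *	for (i = 0; i < n; i++)
 *		if (bios_parser_get_spread_spectrum_info(dcb, signal, i,
 *				&info) == BP_RESULT_OK)
 *			apply_ss_settings(&info);
 */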
1418
1419static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
1420 struct bios_parser *bp,
1421 uint32_t id,
1422 struct spread_spectrum_info *info);
1423
1424/**
1425 * get_ss_info_from_tbl
1426 * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
1427 * SS_Info table from the VBIOS.
1428 * There cannot be more than one entry for ASIC_InternalSS_Info Ver 2.1 or
1429 * SS_Info.
1430 *
1431 * @param bp, pointer to the BIOS parser
1432 * @param id, spread spectrum info index
1433 * @param ss_info, spread spectrum information structure
1434 * @return Bios parser result code
1435 */
1436static enum bp_result get_ss_info_from_tbl(
1437 struct bios_parser *bp,
1438 uint32_t id,
1439 struct spread_spectrum_info *ss_info)
1440{
1441 if (!ss_info) /* check for bad input; ss_info must not be NULL */
1442 return BP_RESULT_BADINPUT;
1443 /* the SS_Info table only supports DP and LVDS */
1444 if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
1445 return get_ss_info_from_ss_info_table(bp, id, ss_info);
1446 else
1447 return get_ss_info_from_internal_ss_info_tbl_V2_1(bp, id,
1448 ss_info);
1449}
1450
1451/**
1452 * get_ss_info_from_internal_ss_info_tbl_V2_1
1453 * Get spread spectrum information from the ASIC_InternalSS_Info table Ver 2.1
1454 * from the VBIOS.
1455 * There will not be multiple entries for Ver 2.1.
1456 *
1457 * @param id, spread spectrum info index
1458 * @param info, spread spectrum information structure
1459 * @return Bios parser result code
1460 */
1461static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
1462 struct bios_parser *bp,
1463 uint32_t id,
1464 struct spread_spectrum_info *info)
1465{
1466 enum bp_result result = BP_RESULT_UNSUPPORTED;
1467 ATOM_ASIC_INTERNAL_SS_INFO_V2 *header;
1468 ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
1469 uint32_t tbl_size, i;
1470
1471 if (!DATA_TABLES(ASIC_InternalSS_Info))
1472 return result;
1473
1474 header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
1475 DATA_TABLES(ASIC_InternalSS_Info));
1476
1477 memset(info, 0, sizeof(struct spread_spectrum_info));
1478
1479 tbl_size = (le16_to_cpu(header->sHeader.usStructureSize)
1480 - sizeof(ATOM_COMMON_TABLE_HEADER))
1481 / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1482
1483 tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
1484 &(header->asSpreadSpectrum[0]);
1485 for (i = 0; i < tbl_size; i++) {
1486 result = BP_RESULT_NORECORD;
1487
1488 if (tbl[i].ucClockIndication != (uint8_t)id)
1489 continue;
1490
1491 if (ATOM_EXTERNAL_SS_MASK
1492 & tbl[i].ucSpreadSpectrumMode) {
1493 info->type.EXTERNAL = true;
1494 }
1495 if (ATOM_SS_CENTRE_SPREAD_MODE_MASK
1496 & tbl[i].ucSpreadSpectrumMode) {
1497 info->type.CENTER_MODE = true;
1498 }
1499 info->type.STEP_AND_DELAY_INFO = false;
1500 /* convert [10KHz] into [KHz] */
1501 info->target_clock_range =
1502 le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
1503 info->spread_spectrum_percentage =
1504 (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
1505 info->spread_spectrum_range =
1506 (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
1507 result = BP_RESULT_OK;
1508 break;
1509 }
1510
1511 return result;
1512
1513}
1514
1515/**
1516 * get_ss_info_from_ss_info_table
1517 * Get spread spectrum information from the SS_Info table from the VBIOS.
1518 * If the pointer to info is NULL, the caller only wants to know the number
1519 * of entries that match the id.
1520 * For the SS_Info table, there should not be more than one matching entry.
1521 *
1522 * @param [in] id, spread spectrum id
1523 * @param [out] ss_info, spread spectrum information structure
1524 * @return Bios parser result code
1525 */
1526static enum bp_result get_ss_info_from_ss_info_table(
1527 struct bios_parser *bp,
1528 uint32_t id,
1529 struct spread_spectrum_info *ss_info)
1530{
1531 enum bp_result result = BP_RESULT_UNSUPPORTED;
1532 ATOM_SPREAD_SPECTRUM_INFO *tbl;
1533 ATOM_COMMON_TABLE_HEADER *header;
1534 uint32_t table_size;
1535 uint32_t i;
1536 uint32_t id_local = SS_ID_UNKNOWN;
1537 struct atom_data_revision revision;
1538
1539 /* the SS_Info table must exist */
1540 /* check for bad input; ss_info cannot be NULL */
1541 if (!DATA_TABLES(SS_Info) || !ss_info)
1542 return result;
1543
1544 header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(SS_Info));
1545 get_atom_data_table_revision(header, &revision);
1546
1547 tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
1548
1549 if (1 != revision.major || 2 > revision.minor)
1550 return result;
1551
1552 /* have to convert from Internal_SS format to SS_Info format */
1553 switch (id) {
1554 case ASIC_INTERNAL_SS_ON_DP:
1555 id_local = SS_ID_DP1;
1556 break;
1557 case ASIC_INTERNAL_SS_ON_LVDS:
1558 {
1559 struct embedded_panel_info panel_info;
1560
1561 if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
1562 == BP_RESULT_OK)
1563 id_local = panel_info.ss_id;
1564 break;
1565 }
1566 default:
1567 break;
1568 }
1569
1570 if (id_local == SS_ID_UNKNOWN)
1571 return result;
1572
1573 table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
1574 sizeof(ATOM_COMMON_TABLE_HEADER)) /
1575 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
1576
1577 for (i = 0; i < table_size; i++) {
1578 if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id)
1579 continue;
1580
1581 memset(ss_info, 0, sizeof(struct spread_spectrum_info));
1582
1583 if (ATOM_EXTERNAL_SS_MASK &
1584 tbl->asSS_Info[i].ucSpreadSpectrumType)
1585 ss_info->type.EXTERNAL = true;
1586
1587 if (ATOM_SS_CENTRE_SPREAD_MODE_MASK &
1588 tbl->asSS_Info[i].ucSpreadSpectrumType)
1589 ss_info->type.CENTER_MODE = true;
1590
1591 ss_info->type.STEP_AND_DELAY_INFO = true;
1592 ss_info->spread_spectrum_percentage =
1593 (uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage);
1594 ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step;
1595 ss_info->step_and_delay_info.delay =
1596 tbl->asSS_Info[i].ucSS_Delay;
1597 ss_info->step_and_delay_info.recommended_ref_div =
1598 tbl->asSS_Info[i].ucRecommendedRef_Div;
1599 ss_info->spread_spectrum_range =
1600 (uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000;
1601
1602 /* there will be only one entry for each display type in the
1603 * SS_Info table */
1604 result = BP_RESULT_OK;
1605 break;
1606 }
1607
1608 return result;
1609}
1610static enum bp_result get_embedded_panel_info_v1_2(
1611 struct bios_parser *bp,
1612 struct embedded_panel_info *info);
1613static enum bp_result get_embedded_panel_info_v1_3(
1614 struct bios_parser *bp,
1615 struct embedded_panel_info *info);
1616
1617static enum bp_result bios_parser_get_embedded_panel_info(
1618 struct dc_bios *dcb,
1619 struct embedded_panel_info *info)
1620{
1621 struct bios_parser *bp = BP_FROM_DCB(dcb);
1622 ATOM_COMMON_TABLE_HEADER *hdr;
1623
1624 if (!DATA_TABLES(LCD_Info))
1625 return BP_RESULT_FAILURE;
1626
1627 hdr = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(LCD_Info));
1628
1629 if (!hdr)
1630 return BP_RESULT_BADBIOSTABLE;
1631
1632 switch (hdr->ucTableFormatRevision) {
1633 case 1:
1634 switch (hdr->ucTableContentRevision) {
1635 case 0:
1636 case 1:
1637 case 2:
1638 return get_embedded_panel_info_v1_2(bp, info);
1639 case 3:
1640 return get_embedded_panel_info_v1_3(bp, info);
1641 default:
1642 break;
1643 }
1644 default:
1645 break;
1646 }
1647
1648 return BP_RESULT_FAILURE;
1649}
1650
1651static enum bp_result get_embedded_panel_info_v1_2(
1652 struct bios_parser *bp,
1653 struct embedded_panel_info *info)
1654{
1655 ATOM_LVDS_INFO_V12 *lvds;
1656
1657 if (!info)
1658 return BP_RESULT_BADINPUT;
1659
1660 if (!DATA_TABLES(LVDS_Info))
1661 return BP_RESULT_UNSUPPORTED;
1662
1663 lvds =
1664 GET_IMAGE(ATOM_LVDS_INFO_V12, DATA_TABLES(LVDS_Info));
1665
1666 if (!lvds)
1667 return BP_RESULT_BADBIOSTABLE;
1668
1669 if (1 != lvds->sHeader.ucTableFormatRevision
1670 || 2 > lvds->sHeader.ucTableContentRevision)
1671 return BP_RESULT_UNSUPPORTED;
1672
1673 memset(info, 0, sizeof(struct embedded_panel_info));
1674
1675 /* We need to convert from 10KHz units into KHz units*/
1676 info->lcd_timing.pixel_clk =
1677 le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
1678 /* usHActive does not include borders, according to VBIOS team*/
1679 info->lcd_timing.horizontal_addressable =
1680 le16_to_cpu(lvds->sLCDTiming.usHActive);
1681 /* usHBlanking_Time includes borders, so we should really be subtracting
1682 * borders during this translation, but LVDS generally
1683 * doesn't have borders, so we should be okay leaving this as is for
1684 * now. May need to revisit if we ever have LVDS with borders */
1685 info->lcd_timing.horizontal_blanking_time =
1686 le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
1687 /* usVActive does not include borders, according to VBIOS team*/
1688 info->lcd_timing.vertical_addressable =
1689 le16_to_cpu(lvds->sLCDTiming.usVActive);
1690 /* usVBlanking_Time includes borders, so we should really be subtracting
1691 * borders during this translation, but LVDS generally
1692 * doesn't have borders, so we should be okay leaving this as is for
1693 * now. May need to revisit if we ever have LVDS with borders */
1694 info->lcd_timing.vertical_blanking_time =
1695 le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
1696 info->lcd_timing.horizontal_sync_offset =
1697 le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
1698 info->lcd_timing.horizontal_sync_width =
1699 le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
1700 info->lcd_timing.vertical_sync_offset =
1701 le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
1702 info->lcd_timing.vertical_sync_width =
1703 le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
1704 info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
1705 info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
1706 info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
1707 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
1708 info->lcd_timing.misc_info.H_SYNC_POLARITY =
1709 ~(uint32_t)
1710 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
1711 info->lcd_timing.misc_info.V_SYNC_POLARITY =
1712 ~(uint32_t)
1713 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
1714 info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
1715 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
1716 info->lcd_timing.misc_info.H_REPLICATION_BY2 =
1717 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
1718 info->lcd_timing.misc_info.V_REPLICATION_BY2 =
1719 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
1720 info->lcd_timing.misc_info.COMPOSITE_SYNC =
1721 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
1722 info->lcd_timing.misc_info.INTERLACE =
1723 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
1724 info->lcd_timing.misc_info.DOUBLE_CLOCK =
1725 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
1726 info->ss_id = lvds->ucSS_Id;
1727
1728 {
1729 uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
1730 /* Get minimum supported refresh rate*/
1731 if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
1732 info->supported_rr.REFRESH_RATE_30HZ = 1;
1733 else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
1734 info->supported_rr.REFRESH_RATE_40HZ = 1;
1735 else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
1736 info->supported_rr.REFRESH_RATE_48HZ = 1;
1737 else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
1738 info->supported_rr.REFRESH_RATE_50HZ = 1;
1739 else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
1740 info->supported_rr.REFRESH_RATE_60HZ = 1;
1741 }
1742
1743 /* DRR panel support can be reported by the VBIOS */
1744 if (LCDPANEL_CAP_DRR_SUPPORTED
1745 & lvds->ucLCDPanel_SpecialHandlingCap)
1746 info->drr_enabled = 1;
1747
1748 if (ATOM_PANEL_MISC_DUAL & lvds->ucLVDS_Misc)
1749 info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
1750
1751 if (ATOM_PANEL_MISC_888RGB & lvds->ucLVDS_Misc)
1752 info->lcd_timing.misc_info.RGB888 = true;
1753
1754 info->lcd_timing.misc_info.GREY_LEVEL =
1755 (uint32_t) (ATOM_PANEL_MISC_GREY_LEVEL &
1756 lvds->ucLVDS_Misc) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;
1757
1758 if (ATOM_PANEL_MISC_SPATIAL & lvds->ucLVDS_Misc)
1759 info->lcd_timing.misc_info.SPATIAL = true;
1760
1761 if (ATOM_PANEL_MISC_TEMPORAL & lvds->ucLVDS_Misc)
1762 info->lcd_timing.misc_info.TEMPORAL = true;
1763
1764 if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc)
1765 info->lcd_timing.misc_info.API_ENABLED = true;
1766
1767 return BP_RESULT_OK;
1768}
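
/*
 * Note on the timing fields filled in above (a sketch of the usual
 * interpretation, not a statement about any particular panel): the
 * addressable and blanking values are complementary, so, for example, a
 * 1366-pixel-wide panel with usHBlanking_Time = 194 would have a total
 * horizontal period of 1366 + 194 = 1560 pixels per line.
 */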
1769
1770static enum bp_result get_embedded_panel_info_v1_3(
1771 struct bios_parser *bp,
1772 struct embedded_panel_info *info)
1773{
1774 ATOM_LCD_INFO_V13 *lvds;
1775
1776 if (!info)
1777 return BP_RESULT_BADINPUT;
1778
1779 if (!DATA_TABLES(LCD_Info))
1780 return BP_RESULT_UNSUPPORTED;
1781
1782 lvds = GET_IMAGE(ATOM_LCD_INFO_V13, DATA_TABLES(LCD_Info));
1783
1784 if (!lvds)
1785 return BP_RESULT_BADBIOSTABLE;
1786
1787 if (!((1 == lvds->sHeader.ucTableFormatRevision)
1788 && (3 <= lvds->sHeader.ucTableContentRevision)))
1789 return BP_RESULT_UNSUPPORTED;
1790
1791 memset(info, 0, sizeof(struct embedded_panel_info));
1792
1793 /* We need to convert from 10KHz units into KHz units */
1794 info->lcd_timing.pixel_clk =
1795 le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
1796 /* usHActive does not include borders, according to VBIOS team */
1797 info->lcd_timing.horizontal_addressable =
1798 le16_to_cpu(lvds->sLCDTiming.usHActive);
1799 /* usHBlanking_Time includes borders, so we should really be subtracting
1800 * borders during this translation, but LVDS generally
1801 * doesn't have borders, so we should be okay leaving this as is for
1802 * now. May need to revisit if we ever have LVDS with borders */
1803 info->lcd_timing.horizontal_blanking_time =
1804 le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
1805 /* usVActive does not include borders, according to VBIOS team*/
1806 info->lcd_timing.vertical_addressable =
1807 le16_to_cpu(lvds->sLCDTiming.usVActive);
1808 /* usVBlanking_Time includes borders, so we should really be subtracting
1809 * borders during this translation, but LVDS generally
1810 * doesn't have borders, so we should be okay leaving this as is for
1811 * now. May need to revisit if we ever have LVDS with borders */
1812 info->lcd_timing.vertical_blanking_time =
1813 le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
1814 info->lcd_timing.horizontal_sync_offset =
1815 le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
1816 info->lcd_timing.horizontal_sync_width =
1817 le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
1818 info->lcd_timing.vertical_sync_offset =
1819 le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
1820 info->lcd_timing.vertical_sync_width =
1821 le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
1822 info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
1823 info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
1824 info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
1825 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
1826 info->lcd_timing.misc_info.H_SYNC_POLARITY =
1827 ~(uint32_t)
1828 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
1829 info->lcd_timing.misc_info.V_SYNC_POLARITY =
1830 ~(uint32_t)
1831 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
1832 info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
1833 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
1834 info->lcd_timing.misc_info.H_REPLICATION_BY2 =
1835 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
1836 info->lcd_timing.misc_info.V_REPLICATION_BY2 =
1837 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
1838 info->lcd_timing.misc_info.COMPOSITE_SYNC =
1839 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
1840 info->lcd_timing.misc_info.INTERLACE =
1841 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
1842 info->lcd_timing.misc_info.DOUBLE_CLOCK =
1843 lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
1844 info->ss_id = lvds->ucSS_Id;
1845
1846 /* DRR panel support can be reported by the VBIOS */
1847 if (LCDPANEL_CAP_V13_DRR_SUPPORTED
1848 & lvds->ucLCDPanel_SpecialHandlingCap)
1849 info->drr_enabled = 1;
1850
1851 /* Get supported refresh rate*/
1852 if (info->drr_enabled == 1) {
1853 uint8_t min_rr =
1854 lvds->sRefreshRateSupport.ucMinRefreshRateForDRR;
1855 uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;
1856
1857 if (min_rr != 0) {
1858 if (SUPPORTED_LCD_REFRESHRATE_30Hz & min_rr)
1859 info->supported_rr.REFRESH_RATE_30HZ = 1;
1860 else if (SUPPORTED_LCD_REFRESHRATE_40Hz & min_rr)
1861 info->supported_rr.REFRESH_RATE_40HZ = 1;
1862 else if (SUPPORTED_LCD_REFRESHRATE_48Hz & min_rr)
1863 info->supported_rr.REFRESH_RATE_48HZ = 1;
1864 else if (SUPPORTED_LCD_REFRESHRATE_50Hz & min_rr)
1865 info->supported_rr.REFRESH_RATE_50HZ = 1;
1866 else if (SUPPORTED_LCD_REFRESHRATE_60Hz & min_rr)
1867 info->supported_rr.REFRESH_RATE_60HZ = 1;
1868 } else {
1869 if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
1870 info->supported_rr.REFRESH_RATE_30HZ = 1;
1871 else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
1872 info->supported_rr.REFRESH_RATE_40HZ = 1;
1873 else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
1874 info->supported_rr.REFRESH_RATE_48HZ = 1;
1875 else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
1876 info->supported_rr.REFRESH_RATE_50HZ = 1;
1877 else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
1878 info->supported_rr.REFRESH_RATE_60HZ = 1;
1879 }
1880 }
1881
1882 if (ATOM_PANEL_MISC_V13_DUAL & lvds->ucLCD_Misc)
1883 info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
1884
1885 if (ATOM_PANEL_MISC_V13_8BIT_PER_COLOR & lvds->ucLCD_Misc)
1886 info->lcd_timing.misc_info.RGB888 = true;
1887
1888 info->lcd_timing.misc_info.GREY_LEVEL =
1889 (uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL &
1890 lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;
1891
1892 return BP_RESULT_OK;
1893}
1894
1895/**
1896 * bios_parser_get_encoder_cap_info
1897 *
1898 * @brief
1899 * Get encoder capability information for the given object id
1900 *
1901 * @param object_id, Object id
1902 * @param info, encoder cap information structure
1903 *
1904 * @return Bios parser result code
1905 *
1906 */
1907static enum bp_result bios_parser_get_encoder_cap_info(
1908 struct dc_bios *dcb,
1909 struct graphics_object_id object_id,
1910 struct bp_encoder_cap_info *info)
1911{
1912 struct bios_parser *bp = BP_FROM_DCB(dcb);
1913 ATOM_OBJECT *object;
1914 ATOM_ENCODER_CAP_RECORD *record = NULL;
1915
1916 if (!info)
1917 return BP_RESULT_BADINPUT;
1918
1919 object = get_bios_object(bp, object_id);
1920
1921 if (!object)
1922 return BP_RESULT_BADINPUT;
1923
1924 record = get_encoder_cap_record(bp, object);
1925 if (!record)
1926 return BP_RESULT_NORECORD;
1927
1928 info->DP_HBR2_CAP = record->usHBR2Cap;
1929 info->DP_HBR2_EN = record->usHBR2En;
1930 return BP_RESULT_OK;
1931}
1932
1933/**
1934 * get_encoder_cap_record
1935 *
1936 * @brief
1937 * Get encoder cap record for the object
1938 *
1939 * @param object, ATOM object
1940 *
1941 * @return atom encoder cap record
1942 *
1943 * @note
1944 * search all records to find the ATOM_ENCODER_CAP_RECORD record
1945 */
1946static ATOM_ENCODER_CAP_RECORD *get_encoder_cap_record(
1947 struct bios_parser *bp,
1948 ATOM_OBJECT *object)
1949{
1950 ATOM_COMMON_RECORD_HEADER *header;
1951 uint32_t offset;
1952
1953 if (!object) {
1954 BREAK_TO_DEBUGGER(); /* Invalid object */
1955 return NULL;
1956 }
1957
1958 offset = le16_to_cpu(object->usRecordOffset)
1959 + bp->object_info_tbl_offset;
1960
1961 for (;;) {
1962 header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
1963
1964 if (!header)
1965 return NULL;
1966
1967 offset += header->ucRecordSize;
1968
1969 if (LAST_RECORD_TYPE == header->ucRecordType ||
1970 !header->ucRecordSize)
1971 break;
1972
1973 if (ATOM_ENCODER_CAP_RECORD_TYPE != header->ucRecordType)
1974 continue;
1975
1976 if (sizeof(ATOM_ENCODER_CAP_RECORD) <= header->ucRecordSize)
1977 return (ATOM_ENCODER_CAP_RECORD *)header;
1978 }
1979
1980 return NULL;
1981}
1982
1983static uint32_t get_ss_entry_number(
1984 struct bios_parser *bp,
1985 uint32_t id);
1986static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
1987 struct bios_parser *bp,
1988 uint32_t id);
1989static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
1990 struct bios_parser *bp,
1991 uint32_t id);
1992static uint32_t get_ss_entry_number_from_ss_info_tbl(
1993 struct bios_parser *bp,
1994 uint32_t id);
1995
1996/**
1997 * bios_parser_get_ss_entry_number
1998 * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
1999 * table in the VBIOS that match the SS id (converted from signal)
2000 *
2001 * @param[in] signal, as_signal_type to be converted to an SS id
2002 * @return number of SS entries that match the signal
2003 */
2004static uint32_t bios_parser_get_ss_entry_number(
2005 struct dc_bios *dcb,
2006 enum as_signal_type signal)
2007{
2008 struct bios_parser *bp = BP_FROM_DCB(dcb);
2009 uint32_t ss_id = 0;
2010 ATOM_COMMON_TABLE_HEADER *header;
2011 struct atom_data_revision revision;
2012
2013 ss_id = signal_to_ss_id(signal);
2014
2015 if (!DATA_TABLES(ASIC_InternalSS_Info))
2016 return get_ss_entry_number_from_ss_info_tbl(bp, ss_id);
2017
2018 header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
2019 DATA_TABLES(ASIC_InternalSS_Info));
2020 get_atom_data_table_revision(header, &revision);
2021
2022 switch (revision.major) {
2023 case 2:
2024 switch (revision.minor) {
2025 case 1:
2026 return get_ss_entry_number(bp, ss_id);
2027 default:
2028 break;
2029 }
2030 break;
2031 case 3:
2032 switch (revision.minor) {
2033 case 1:
2034 return
2035 get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
2036 bp, ss_id);
2037 default:
2038 break;
2039 }
2040 break;
2041 default:
2042 break;
2043 }
2044
2045 return 0;
2046}
2047
2048/**
2049 * get_ss_entry_number_from_ss_info_tbl
2050 * Get Number of spread spectrum entry from the SS_Info table from the VBIOS.
2051 *
2052 * @note There can only be one entry for each id for SS_Info Table
2053 *
2054 * @param [in] id, spread spectrum id
2055 * @return number of SS Entry that match the id
2056 */
2057static uint32_t get_ss_entry_number_from_ss_info_tbl(
2058 struct bios_parser *bp,
2059 uint32_t id)
2060{
2061 ATOM_SPREAD_SPECTRUM_INFO *tbl;
2062 ATOM_COMMON_TABLE_HEADER *header;
2063 uint32_t table_size;
2064 uint32_t i;
2065 uint32_t number = 0;
2066 uint32_t id_local = SS_ID_UNKNOWN;
2067 struct atom_data_revision revision;
2068
2069 /* the SS_Info table must exist */
2070 if (!DATA_TABLES(SS_Info))
2071 return number;
2072
2073 header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
2074 DATA_TABLES(SS_Info));
2075 get_atom_data_table_revision(header, &revision);
2076
2077 tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
2078 DATA_TABLES(SS_Info));
2079
2080 if (1 != revision.major || 2 > revision.minor)
2081 return number;
2082
2083 /* have to convert from Internal_SS format to SS_Info format */
2084 switch (id) {
2085 case ASIC_INTERNAL_SS_ON_DP:
2086 id_local = SS_ID_DP1;
2087 break;
2088 case ASIC_INTERNAL_SS_ON_LVDS: {
2089 struct embedded_panel_info panel_info;
2090
2091 if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
2092 == BP_RESULT_OK)
2093 id_local = panel_info.ss_id;
2094 break;
2095 }
2096 default:
2097 break;
2098 }
2099
2100 if (id_local == SS_ID_UNKNOWN)
2101 return number;
2102
2103 table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
2104 sizeof(ATOM_COMMON_TABLE_HEADER)) /
2105 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
2106
2107 for (i = 0; i < table_size; i++)
2108 if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
2109 number = 1;
2110 break;
2111 }
2112
2113 return number;
2114}
2115
2116/**
2117 * get_ss_entry_number
2118 * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
2119 * Ver 2.1 or SS_Info table in the VBIOS.
2120 * There cannot be more than one entry for ASIC_InternalSS_Info Ver 2.1 or
2121 * SS_Info.
2122 *
2123 * @param id, spread spectrum info index
2124 * @return number of SS entries that match the id
2125 */
2126static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
2127{
2128 if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
2129 return get_ss_entry_number_from_ss_info_tbl(bp, id);
2130
2131 return get_ss_entry_number_from_internal_ss_info_tbl_v2_1(bp, id);
2132}
2133
2134/**
2135 * get_ss_entry_number_from_internal_ss_info_tbl_v2_1
2136 * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
2137 * table Ver 2.1 in the VBIOS.
2138 * There will not be multiple entries for Ver 2.1.
2139 *
2140 * @param id, spread spectrum info index
2141 * @return number of SS entries that match the id
2142 */
2143static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
2144 struct bios_parser *bp,
2145 uint32_t id)
2146{
2147 ATOM_ASIC_INTERNAL_SS_INFO_V2 *header_include;
2148 ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
2149 uint32_t size;
2150 uint32_t i;
2151
2152 if (!DATA_TABLES(ASIC_InternalSS_Info))
2153 return 0;
2154
2155 header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
2156 DATA_TABLES(ASIC_InternalSS_Info));
2157
2158 size = (le16_to_cpu(header_include->sHeader.usStructureSize)
2159 - sizeof(ATOM_COMMON_TABLE_HEADER))
2160 / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
2161
2162 tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
2163 &header_include->asSpreadSpectrum[0];
2164 for (i = 0; i < size; i++)
2165 if (tbl[i].ucClockIndication == (uint8_t)id)
2166 return 1;
2167
2168 return 0;
2169}
2170/**
2171 * get_ss_entry_number_from_internal_ss_info_tbl_V3_1
2172 * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
2173 * table of the VBIOS that match the id
2174 *
2175 * @param[in] id, spread spectrum id
2176 * @return number of SS entries that match the id
2177 */
2178static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
2179 struct bios_parser *bp,
2180 uint32_t id)
2181{
2182 uint32_t number = 0;
2183 ATOM_ASIC_INTERNAL_SS_INFO_V3 *header_include;
2184 ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
2185 uint32_t size;
2186 uint32_t i;
2187
2188 if (!DATA_TABLES(ASIC_InternalSS_Info))
2189 return number;
2190
2191 header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
2192 DATA_TABLES(ASIC_InternalSS_Info));
2193 size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
2194 sizeof(ATOM_COMMON_TABLE_HEADER)) /
2195 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
2196
2197 tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
2198 &header_include->asSpreadSpectrum[0];
2199
2200 for (i = 0; i < size; i++)
2201 if (tbl[i].ucClockIndication == (uint8_t)id)
2202 number++;
2203
2204 return number;
2205}
2206
2207/**
2208 * bios_parser_get_gpio_pin_info
2209 * Get GPIO pin information for the given gpio id
2210 *
2211 * @param gpio_id, GPIO ID
2212 * @param info, GPIO pin information structure
2213 * @return Bios parser result code
2214 * @note
2215 * To get the GPIO pin info, we need to:
2216 * 1. get the GPIO_ID from another object table, see GetHPDInfo()
2217 * 2. in DATA_TABLE.GPIO_Pin_LUT, search all records to get the register A
2218 * offset/mask
2219 */
2220static enum bp_result bios_parser_get_gpio_pin_info(
2221 struct dc_bios *dcb,
2222 uint32_t gpio_id,
2223 struct gpio_pin_info *info)
2224{
2225 struct bios_parser *bp = BP_FROM_DCB(dcb);
2226 ATOM_GPIO_PIN_LUT *header;
2227 uint32_t count = 0;
2228 uint32_t i = 0;
2229
2230 if (!DATA_TABLES(GPIO_Pin_LUT))
2231 return BP_RESULT_BADBIOSTABLE;
2232
2233 header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
2234 if (!header)
2235 return BP_RESULT_BADBIOSTABLE;
2236
2237 if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
2238 > le16_to_cpu(header->sHeader.usStructureSize))
2239 return BP_RESULT_BADBIOSTABLE;
2240
2241 if (1 != header->sHeader.ucTableContentRevision)
2242 return BP_RESULT_UNSUPPORTED;
2243
2244 count = (le16_to_cpu(header->sHeader.usStructureSize)
2245 - sizeof(ATOM_COMMON_TABLE_HEADER))
2246 / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
2247 for (i = 0; i < count; ++i) {
2248 if (header->asGPIO_Pin[i].ucGPIO_ID != gpio_id)
2249 continue;
2250
2251 info->offset =
2252 (uint32_t) le16_to_cpu(header->asGPIO_Pin[i].usGpioPin_AIndex);
2253 info->offset_y = info->offset + 2;
2254 info->offset_en = info->offset + 1;
2255 info->offset_mask = info->offset - 1;
2256
2257 info->mask = (uint32_t) (1 <<
2258 header->asGPIO_Pin[i].ucGpioPinBitShift);
2259 info->mask_y = info->mask + 2;
2260 info->mask_en = info->mask + 1;
2261 info->mask_mask = info->mask - 1;
2262
2263 return BP_RESULT_OK;
2264 }
2265
2266 return BP_RESULT_NORECORD;
2267}
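
/*
 * Worked example of the offset/mask derivation above (hypothetical LUT
 * entry): with usGpioPin_AIndex = 0x10 and ucGpioPinBitShift = 3 the
 * function reports offset (A) = 0x10, offset_en = 0x11, offset_y = 0x12,
 * offset_mask = 0x0f, and mask = 1 << 3 = 0x08 with mask_en = 0x09,
 * mask_y = 0x0a and mask_mask = 0x07, mirroring the fixed A/EN/Y/MASK
 * register layout assumed by the code.
 */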
2268
2269static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
2270 ATOM_I2C_RECORD *record,
2271 struct graphics_object_i2c_info *info)
2272{
2273 ATOM_GPIO_I2C_INFO *header;
2274 uint32_t count = 0;
2275
2276 if (!info)
2277 return BP_RESULT_BADINPUT;
2278
2279 /* get the GPIO_I2C info */
2280 if (!DATA_TABLES(GPIO_I2C_Info))
2281 return BP_RESULT_BADBIOSTABLE;
2282
2283 header = GET_IMAGE(ATOM_GPIO_I2C_INFO, DATA_TABLES(GPIO_I2C_Info));
2284 if (!header)
2285 return BP_RESULT_BADBIOSTABLE;
2286
2287 if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_I2C_ASSIGMENT)
2288 > le16_to_cpu(header->sHeader.usStructureSize))
2289 return BP_RESULT_BADBIOSTABLE;
2290
2291 if (1 != header->sHeader.ucTableContentRevision)
2292 return BP_RESULT_UNSUPPORTED;
2293
2294 /* get data count */
2295 count = (le16_to_cpu(header->sHeader.usStructureSize)
2296 - sizeof(ATOM_COMMON_TABLE_HEADER))
2297 / sizeof(ATOM_GPIO_I2C_ASSIGMENT);
2298 if (count < record->sucI2cId.bfI2C_LineMux)
2299 return BP_RESULT_BADBIOSTABLE;
2300
2301 /* get the GPIO_I2C_INFO */
2302 info->i2c_hw_assist = record->sucI2cId.bfHW_Capable;
2303 info->i2c_line = record->sucI2cId.bfI2C_LineMux;
2304 info->i2c_engine_id = record->sucI2cId.bfHW_EngineID;
2305 info->i2c_slave_address = record->ucI2CAddr;
2306
2307 info->gpio_info.clk_mask_register_index =
2308 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkMaskRegisterIndex);
2309 info->gpio_info.clk_en_register_index =
2310 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkEnRegisterIndex);
2311 info->gpio_info.clk_y_register_index =
2312 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkY_RegisterIndex);
2313 info->gpio_info.clk_a_register_index =
2314 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkA_RegisterIndex);
2315 info->gpio_info.data_mask_register_index =
2316 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataMaskRegisterIndex);
2317 info->gpio_info.data_en_register_index =
2318 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataEnRegisterIndex);
2319 info->gpio_info.data_y_register_index =
2320 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataY_RegisterIndex);
2321 info->gpio_info.data_a_register_index =
2322 le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataA_RegisterIndex);
2323
2324 info->gpio_info.clk_mask_shift =
2325 header->asGPIO_Info[info->i2c_line].ucClkMaskShift;
2326 info->gpio_info.clk_en_shift =
2327 header->asGPIO_Info[info->i2c_line].ucClkEnShift;
2328 info->gpio_info.clk_y_shift =
2329 header->asGPIO_Info[info->i2c_line].ucClkY_Shift;
2330 info->gpio_info.clk_a_shift =
2331 header->asGPIO_Info[info->i2c_line].ucClkA_Shift;
2332 info->gpio_info.data_mask_shift =
2333 header->asGPIO_Info[info->i2c_line].ucDataMaskShift;
2334 info->gpio_info.data_en_shift =
2335 header->asGPIO_Info[info->i2c_line].ucDataEnShift;
2336 info->gpio_info.data_y_shift =
2337 header->asGPIO_Info[info->i2c_line].ucDataY_Shift;
2338 info->gpio_info.data_a_shift =
2339 header->asGPIO_Info[info->i2c_line].ucDataA_Shift;
2340
2341 return BP_RESULT_OK;
2342}
2343
2344static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
2345 struct graphics_object_id id)
2346{
2347 uint32_t offset;
2348 ATOM_OBJECT_TABLE *tbl;
2349 uint32_t i;
2350
2351 switch (id.type) {
2352 case OBJECT_TYPE_ENCODER:
2353 offset = le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
2354 break;
2355
2356 case OBJECT_TYPE_CONNECTOR:
2357 offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
2358 break;
2359
2360 case OBJECT_TYPE_ROUTER:
2361 offset = le16_to_cpu(bp->object_info_tbl.v1_1->usRouterObjectTableOffset);
2362 break;
2363
2364 case OBJECT_TYPE_GENERIC:
2365 if (bp->object_info_tbl.revision.minor < 3)
2366 return NULL;
2367 offset = le16_to_cpu(bp->object_info_tbl.v1_3->usMiscObjectTableOffset);
2368 break;
2369
2370 default:
2371 return NULL;
2372 }
2373
2374 offset += bp->object_info_tbl_offset;
2375
2376 tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
2377 if (!tbl)
2378 return NULL;
2379
2380 for (i = 0; i < tbl->ucNumberOfObjects; i++)
2381 if (dal_graphics_object_id_is_equal(id,
2382 object_id_from_bios_object_id(
2383 le16_to_cpu(tbl->asObjects[i].usObjectID))))
2384 return &tbl->asObjects[i];
2385
2386 return NULL;
2387}
2388
2389static uint32_t get_dest_obj_list(struct bios_parser *bp,
2390 ATOM_OBJECT *object, uint16_t **id_list)
2391{
2392 uint32_t offset;
2393 uint8_t *number;
2394
2395 if (!object) {
2396 BREAK_TO_DEBUGGER(); /* Invalid object id */
2397 return 0;
2398 }
2399
2400 offset = le16_to_cpu(object->usSrcDstTableOffset)
2401 + bp->object_info_tbl_offset;
2402
2403 number = GET_IMAGE(uint8_t, offset);
2404 if (!number)
2405 return 0;
2406
2407 offset += sizeof(uint8_t);
2408 offset += sizeof(uint16_t) * (*number);
2409
2410 number = GET_IMAGE(uint8_t, offset);
2411 if ((!number) || (!*number))
2412 return 0;
2413
2414 offset += sizeof(uint8_t);
2415 *id_list = (uint16_t *)get_image(&bp->base, offset, *number * sizeof(uint16_t));
2416
2417 if (!*id_list)
2418 return 0;
2419
2420 return *number;
2421}
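
/*
 * Layout of the SrcDstTable walked by get_dest_obj_list() above and
 * get_src_obj_list() below, as implied by the parsing code (informal
 * sketch):
 *
 *	u8  src_count
 *	u16 src_object_id[src_count]
 *	u8  dst_count
 *	u16 dst_object_id[dst_count]
 *
 * Both helpers return the relevant count and point *id_list at the
 * corresponding id array inside the BIOS image.
 */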
2422
2423static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
2424 uint16_t **id_list)
2425{
2426 uint32_t offset;
2427 uint8_t *number;
2428
2429 if (!object) {
2430 BREAK_TO_DEBUGGER(); /* Invalid object id */
2431 return 0;
2432 }
2433
2434 offset = le16_to_cpu(object->usSrcDstTableOffset)
2435 + bp->object_info_tbl_offset;
2436
2437 number = GET_IMAGE(uint8_t, offset);
2438 if (!number)
2439 return 0;
2440
2441 offset += sizeof(uint8_t);
2442 *id_list = (uint16_t *)get_image(&bp->base, offset, *number * sizeof(uint16_t));
2443
2444 if (!*id_list)
2445 return 0;
2446
2447 return *number;
2448}
2449
2450static uint32_t get_dst_number_from_object(struct bios_parser *bp,
2451 ATOM_OBJECT *object)
2452{
2453 uint32_t offset;
2454 uint8_t *number;
2455
2456 if (!object) {
2457 BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/
2458 return 0;
2459 }
2460
2461 offset = le16_to_cpu(object->usSrcDstTableOffset)
2462 + bp->object_info_tbl_offset;
2463
2464 number = GET_IMAGE(uint8_t, offset);
2465 if (!number)
2466 return 0;
2467
2468 offset += sizeof(uint8_t);
2469 offset += sizeof(uint16_t) * (*number);
2470
2471 number = GET_IMAGE(uint8_t, offset);
2472
2473 if (!number)
2474 return 0;
2475
2476 return *number;
2477}
2478
2479
2480static struct graphics_object_id object_id_from_bios_object_id(
2481 uint32_t bios_object_id)
2482{
2483 enum object_type type;
2484 enum object_enum_id enum_id;
2485 struct graphics_object_id go_id = { 0 };
2486
2487 type = object_type_from_bios_object_id(bios_object_id);
2488
2489 if (OBJECT_TYPE_UNKNOWN == type)
2490 return go_id;
2491
2492 enum_id = enum_id_from_bios_object_id(bios_object_id);
2493
2494 if (ENUM_ID_UNKNOWN == enum_id)
2495 return go_id;
2496
2497 go_id = dal_graphics_object_id_init(
2498 id_from_bios_object_id(type, bios_object_id), enum_id, type);
2499
2500 return go_id;
2501}
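
/*
 * Packing note (a summary of the helpers below, not new behaviour): a
 * BIOS object id is a single 16-bit word that packs the object type, the
 * enum id and the per-type object id into separate bit fields. The
 * OBJECT_TYPE_*, ENUM_ID_* and OBJECT_ID_* masks and shifts (from the
 * ATOM object id definitions) are used to extract each field in
 * object_type_from_bios_object_id(), enum_id_from_bios_object_id() and
 * id_from_bios_object_id() respectively.
 */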
2502
2503static enum object_type object_type_from_bios_object_id(uint32_t bios_object_id)
2504{
2505 uint32_t bios_object_type = (bios_object_id & OBJECT_TYPE_MASK)
2506 >> OBJECT_TYPE_SHIFT;
2507 enum object_type object_type;
2508
2509 switch (bios_object_type) {
2510 case GRAPH_OBJECT_TYPE_GPU:
2511 object_type = OBJECT_TYPE_GPU;
2512 break;
2513 case GRAPH_OBJECT_TYPE_ENCODER:
2514 object_type = OBJECT_TYPE_ENCODER;
2515 break;
2516 case GRAPH_OBJECT_TYPE_CONNECTOR:
2517 object_type = OBJECT_TYPE_CONNECTOR;
2518 break;
2519 case GRAPH_OBJECT_TYPE_ROUTER:
2520 object_type = OBJECT_TYPE_ROUTER;
2521 break;
2522 case GRAPH_OBJECT_TYPE_GENERIC:
2523 object_type = OBJECT_TYPE_GENERIC;
2524 break;
2525 default:
2526 object_type = OBJECT_TYPE_UNKNOWN;
2527 break;
2528 }
2529
2530 return object_type;
2531}
2532
2533static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id)
2534{
2535 uint32_t bios_enum_id =
2536 (bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
2537 enum object_enum_id id;
2538
2539 switch (bios_enum_id) {
2540 case GRAPH_OBJECT_ENUM_ID1:
2541 id = ENUM_ID_1;
2542 break;
2543 case GRAPH_OBJECT_ENUM_ID2:
2544 id = ENUM_ID_2;
2545 break;
2546 case GRAPH_OBJECT_ENUM_ID3:
2547 id = ENUM_ID_3;
2548 break;
2549 case GRAPH_OBJECT_ENUM_ID4:
2550 id = ENUM_ID_4;
2551 break;
2552 case GRAPH_OBJECT_ENUM_ID5:
2553 id = ENUM_ID_5;
2554 break;
2555 case GRAPH_OBJECT_ENUM_ID6:
2556 id = ENUM_ID_6;
2557 break;
2558 case GRAPH_OBJECT_ENUM_ID7:
2559 id = ENUM_ID_7;
2560 break;
2561 default:
2562 id = ENUM_ID_UNKNOWN;
2563 break;
2564 }
2565
2566 return id;
2567}
2568
2569static uint32_t id_from_bios_object_id(enum object_type type,
2570 uint32_t bios_object_id)
2571{
2572 switch (type) {
2573 case OBJECT_TYPE_GPU:
2574 return gpu_id_from_bios_object_id(bios_object_id);
2575 case OBJECT_TYPE_ENCODER:
2576 return (uint32_t)encoder_id_from_bios_object_id(bios_object_id);
2577 case OBJECT_TYPE_CONNECTOR:
2578 return (uint32_t)connector_id_from_bios_object_id(
2579 bios_object_id);
2580 case OBJECT_TYPE_GENERIC:
2581 return generic_id_from_bios_object_id(bios_object_id);
2582 default:
2583 return 0;
2584 }
2585}
2586
2587static enum connector_id connector_id_from_bios_object_id(
2588 uint32_t bios_object_id)
2589{
2590 uint32_t bios_connector_id = gpu_id_from_bios_object_id(bios_object_id);
2591
2592 enum connector_id id;
2593
2594 switch (bios_connector_id) {
2595 case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
2596 id = CONNECTOR_ID_SINGLE_LINK_DVII;
2597 break;
2598 case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
2599 id = CONNECTOR_ID_DUAL_LINK_DVII;
2600 break;
2601 case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
2602 id = CONNECTOR_ID_SINGLE_LINK_DVID;
2603 break;
2604 case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
2605 id = CONNECTOR_ID_DUAL_LINK_DVID;
2606 break;
2607 case CONNECTOR_OBJECT_ID_VGA:
2608 id = CONNECTOR_ID_VGA;
2609 break;
2610 case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
2611 id = CONNECTOR_ID_HDMI_TYPE_A;
2612 break;
2613 case CONNECTOR_OBJECT_ID_LVDS:
2614 id = CONNECTOR_ID_LVDS;
2615 break;
2616 case CONNECTOR_OBJECT_ID_PCIE_CONNECTOR:
2617 id = CONNECTOR_ID_PCIE;
2618 break;
2619 case CONNECTOR_OBJECT_ID_HARDCODE_DVI:
2620 id = CONNECTOR_ID_HARDCODE_DVI;
2621 break;
2622 case CONNECTOR_OBJECT_ID_DISPLAYPORT:
2623 id = CONNECTOR_ID_DISPLAY_PORT;
2624 break;
2625 case CONNECTOR_OBJECT_ID_eDP:
2626 id = CONNECTOR_ID_EDP;
2627 break;
2628 case CONNECTOR_OBJECT_ID_MXM:
2629 id = CONNECTOR_ID_MXM;
2630 break;
2631 default:
2632 id = CONNECTOR_ID_UNKNOWN;
2633 break;
2634 }
2635
2636 return id;
2637}
2638
2639static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id)
2640{
2641 uint32_t bios_encoder_id = gpu_id_from_bios_object_id(bios_object_id);
2642 enum encoder_id id;
2643
2644 switch (bios_encoder_id) {
2645 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
2646 id = ENCODER_ID_INTERNAL_LVDS;
2647 break;
2648 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
2649 id = ENCODER_ID_INTERNAL_TMDS1;
2650 break;
2651 case ENCODER_OBJECT_ID_INTERNAL_TMDS2:
2652 id = ENCODER_ID_INTERNAL_TMDS2;
2653 break;
2654 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
2655 id = ENCODER_ID_INTERNAL_DAC1;
2656 break;
2657 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
2658 id = ENCODER_ID_INTERNAL_DAC2;
2659 break;
2660 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
2661 id = ENCODER_ID_INTERNAL_LVTM1;
2662 break;
2663 case ENCODER_OBJECT_ID_HDMI_INTERNAL:
2664 id = ENCODER_ID_INTERNAL_HDMI;
2665 break;
2666 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
2667 id = ENCODER_ID_INTERNAL_KLDSCP_TMDS1;
2668 break;
2669 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
2670 id = ENCODER_ID_INTERNAL_KLDSCP_DAC1;
2671 break;
2672 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2673 id = ENCODER_ID_INTERNAL_KLDSCP_DAC2;
2674 break;
2675 case ENCODER_OBJECT_ID_MVPU_FPGA:
2676 id = ENCODER_ID_EXTERNAL_MVPU_FPGA;
2677 break;
2678 case ENCODER_OBJECT_ID_INTERNAL_DDI:
2679 id = ENCODER_ID_INTERNAL_DDI;
2680 break;
2681 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2682 id = ENCODER_ID_INTERNAL_UNIPHY;
2683 break;
2684 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2685 id = ENCODER_ID_INTERNAL_KLDSCP_LVTMA;
2686 break;
2687 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2688 id = ENCODER_ID_INTERNAL_UNIPHY1;
2689 break;
2690 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2691 id = ENCODER_ID_INTERNAL_UNIPHY2;
2692 break;
2693 case ENCODER_OBJECT_ID_ALMOND: /* ENCODER_OBJECT_ID_NUTMEG */
2694 id = ENCODER_ID_EXTERNAL_NUTMEG;
2695 break;
2696 case ENCODER_OBJECT_ID_TRAVIS:
2697 id = ENCODER_ID_EXTERNAL_TRAVIS;
2698 break;
2699 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2700 id = ENCODER_ID_INTERNAL_UNIPHY3;
2701 break;
2702 default:
2703 id = ENCODER_ID_UNKNOWN;
2704 ASSERT(0);
2705 break;
2706 }
2707
2708 return id;
2709}
2710
2711uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id)
2712{
2713 return (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
2714}
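/*
 * For reference: a BIOS object id packs three fields into one 16-bit value.
 * Assuming the usual AtomBIOS ObjectID.h layout (object id in the low byte,
 * enum id and object type in the higher bits), the full decode is simply:
 *
 *	uint32_t obj_id  = (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
 *	uint32_t enum_id = (bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 *	uint32_t type    = (bios_object_id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 *
 * object_id_from_bios_object_id() switches on the type field and then maps
 * the object id field through one of the *_id_from_bios_object_id() helpers
 * above.
 */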
2715
2716enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id)
2717{
2718 uint32_t bios_generic_id = gpu_id_from_bios_object_id(bios_object_id);
2719
2720 enum generic_id id;
2721
2722 switch (bios_generic_id) {
2723 case GENERIC_OBJECT_ID_MXM_OPM:
2724 id = GENERIC_ID_MXM_OPM;
2725 break;
2726 case GENERIC_OBJECT_ID_GLSYNC:
2727 id = GENERIC_ID_GLSYNC;
2728 break;
2729 case GENERIC_OBJECT_ID_STEREO_PIN:
2730 id = GENERIC_ID_STEREO;
2731 break;
2732 default:
2733 id = GENERIC_ID_UNKNOWN;
2734 break;
2735 }
2736
2737 return id;
2738}
2739
2740static struct device_id device_type_from_device_id(uint16_t device_id)
2741{
2742
2743 struct device_id result_device_id;
2744
2745 switch (device_id) {
2746 case ATOM_DEVICE_LCD1_SUPPORT:
2747 result_device_id.device_type = DEVICE_TYPE_LCD;
2748 result_device_id.enum_id = 1;
2749 break;
2750
2751 case ATOM_DEVICE_LCD2_SUPPORT:
2752 result_device_id.device_type = DEVICE_TYPE_LCD;
2753 result_device_id.enum_id = 2;
2754 break;
2755
2756 case ATOM_DEVICE_CRT1_SUPPORT:
2757 result_device_id.device_type = DEVICE_TYPE_CRT;
2758 result_device_id.enum_id = 1;
2759 break;
2760
2761 case ATOM_DEVICE_CRT2_SUPPORT:
2762 result_device_id.device_type = DEVICE_TYPE_CRT;
2763 result_device_id.enum_id = 2;
2764 break;
2765
2766 case ATOM_DEVICE_DFP1_SUPPORT:
2767 result_device_id.device_type = DEVICE_TYPE_DFP;
2768 result_device_id.enum_id = 1;
2769 break;
2770
2771 case ATOM_DEVICE_DFP2_SUPPORT:
2772 result_device_id.device_type = DEVICE_TYPE_DFP;
2773 result_device_id.enum_id = 2;
2774 break;
2775
2776 case ATOM_DEVICE_DFP3_SUPPORT:
2777 result_device_id.device_type = DEVICE_TYPE_DFP;
2778 result_device_id.enum_id = 3;
2779 break;
2780
2781 case ATOM_DEVICE_DFP4_SUPPORT:
2782 result_device_id.device_type = DEVICE_TYPE_DFP;
2783 result_device_id.enum_id = 4;
2784 break;
2785
2786 case ATOM_DEVICE_DFP5_SUPPORT:
2787 result_device_id.device_type = DEVICE_TYPE_DFP;
2788 result_device_id.enum_id = 5;
2789 break;
2790
2791 case ATOM_DEVICE_DFP6_SUPPORT:
2792 result_device_id.device_type = DEVICE_TYPE_DFP;
2793 result_device_id.enum_id = 6;
2794 break;
2795
2796 default:
2797 BREAK_TO_DEBUGGER(); /* Invalid device Id */
2798 result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
2799 result_device_id.enum_id = 0;
2800 }
2801 return result_device_id;
2802}
2803
2804static void get_atom_data_table_revision(
2805 ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
2806 struct atom_data_revision *tbl_revision)
2807{
2808 if (!tbl_revision)
2809 return;
2810
2811 /* Initialize the revision to 0, which is an invalid revision */
2812 tbl_revision->major = 0;
2813 tbl_revision->minor = 0;
2814
2815 if (!atom_data_tbl)
2816 return;
2817
2818 tbl_revision->major =
2819 (uint32_t) GET_DATA_TABLE_MAJOR_REVISION(atom_data_tbl);
2820 tbl_revision->minor =
2821 (uint32_t) GET_DATA_TABLE_MINOR_REVISION(atom_data_tbl);
2822}
2823
2824static uint32_t signal_to_ss_id(enum as_signal_type signal)
2825{
2826 uint32_t clk_id_ss = 0;
2827
2828 switch (signal) {
2829 case AS_SIGNAL_TYPE_DVI:
2830 clk_id_ss = ASIC_INTERNAL_SS_ON_TMDS;
2831 break;
2832 case AS_SIGNAL_TYPE_HDMI:
2833 clk_id_ss = ASIC_INTERNAL_SS_ON_HDMI;
2834 break;
2835 case AS_SIGNAL_TYPE_LVDS:
2836 clk_id_ss = ASIC_INTERNAL_SS_ON_LVDS;
2837 break;
2838 case AS_SIGNAL_TYPE_DISPLAY_PORT:
2839 clk_id_ss = ASIC_INTERNAL_SS_ON_DP;
2840 break;
2841 case AS_SIGNAL_TYPE_GPU_PLL:
2842 clk_id_ss = ASIC_INTERNAL_GPUPLL_SS;
2843 break;
2844 default:
2845 break;
2846 }
2847 return clk_id_ss;
2848}
2849
2850static uint32_t get_support_mask_for_device_id(struct device_id device_id)
2851{
2852 enum dal_device_type device_type = device_id.device_type;
2853 uint32_t enum_id = device_id.enum_id;
2854
2855 switch (device_type) {
2856 case DEVICE_TYPE_LCD:
2857 switch (enum_id) {
2858 case 1:
2859 return ATOM_DEVICE_LCD1_SUPPORT;
2860 case 2:
2861 return ATOM_DEVICE_LCD2_SUPPORT;
2862 default:
2863 break;
2864 }
2865 break;
2866 case DEVICE_TYPE_CRT:
2867 switch (enum_id) {
2868 case 1:
2869 return ATOM_DEVICE_CRT1_SUPPORT;
2870 case 2:
2871 return ATOM_DEVICE_CRT2_SUPPORT;
2872 default:
2873 break;
2874 }
2875 break;
2876 case DEVICE_TYPE_DFP:
2877 switch (enum_id) {
2878 case 1:
2879 return ATOM_DEVICE_DFP1_SUPPORT;
2880 case 2:
2881 return ATOM_DEVICE_DFP2_SUPPORT;
2882 case 3:
2883 return ATOM_DEVICE_DFP3_SUPPORT;
2884 case 4:
2885 return ATOM_DEVICE_DFP4_SUPPORT;
2886 case 5:
2887 return ATOM_DEVICE_DFP5_SUPPORT;
2888 case 6:
2889 return ATOM_DEVICE_DFP6_SUPPORT;
2890 default:
2891 break;
2892 }
2893 break;
2894 case DEVICE_TYPE_CV:
2895 switch (enum_id) {
2896 case 1:
2897 return ATOM_DEVICE_CV_SUPPORT;
2898 default:
2899 break;
2900 }
2901 break;
2902 case DEVICE_TYPE_TV:
2903 switch (enum_id) {
2904 case 1:
2905 return ATOM_DEVICE_TV1_SUPPORT;
2906 default:
2907 break;
2908 }
2909 break;
2910 default:
2911 break;
2912 }
2913
2914 /* Unidentified device ID, return empty support mask. */
2915 return 0;
2916}
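/*
 * Illustrative example: for every ATOM_DEVICE_*_SUPPORT bit that
 * device_type_from_device_id() recognizes, get_support_mask_for_device_id()
 * maps the resulting {type, enum_id} pair back to the same bit, e.g.:
 *
 *	struct device_id id = device_type_from_device_id(ATOM_DEVICE_DFP3_SUPPORT);
 *
 *	id.device_type == DEVICE_TYPE_DFP, id.enum_id == 3, and
 *	get_support_mask_for_device_id(id) == ATOM_DEVICE_DFP3_SUPPORT
 */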
2917
2918/*
2919 * Helper for reading data over a VBIOS-described I2C/DDC line
2920 */
2921
2922static bool i2c_read(
2923 struct bios_parser *bp,
2924 struct graphics_object_i2c_info *i2c_info,
2925 uint8_t *buffer,
2926 uint32_t length)
2927{
2928 struct ddc *ddc;
2929 uint8_t offset[2] = { 0, 0 };
2930 bool result = false;
2931 struct i2c_command cmd;
2932 struct gpio_ddc_hw_info hw_info = {
2933 i2c_info->i2c_hw_assist,
2934 i2c_info->i2c_line };
2935
2936 ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service,
2937 i2c_info->gpio_info.clk_a_register_index,
2938 (1 << i2c_info->gpio_info.clk_a_shift), &hw_info);
2939
2940 if (!ddc)
2941 return result;
2942
2943 /* Using SW engine */
2944 cmd.engine = I2C_COMMAND_ENGINE_SW;
2945 cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
2946
2947 {
2948 struct i2c_payload payloads[] = {
2949 {
2950 .address = i2c_info->i2c_slave_address >> 1,
2951 .data = offset,
2952 .length = sizeof(offset),
2953 .write = true
2954 },
2955 {
2956 .address = i2c_info->i2c_slave_address >> 1,
2957 .data = buffer,
2958 .length = length,
2959 .write = false
2960 }
2961 };
2962
2963 cmd.payloads = payloads;
2964 cmd.number_of_payloads = ARRAY_SIZE(payloads);
2965
2966 /* TODO route this through drm i2c_adapter */
2967 result = dal_i2caux_submit_i2c_command(
2968 ddc->ctx->i2caux,
2969 ddc,
2970 &cmd);
2971 }
2972
2973 dal_gpio_destroy_ddc(&ddc);
2974
2975 return result;
2976}
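/*
 * Note on the transaction shape above: the two payloads form a single
 * write-then-read i2c access -- a 2-byte write of offset 0x0000 followed by
 * a read of 'length' bytes -- which is the usual way to read an EEPROM-style
 * device from the start of its address space. The 7-bit slave address is the
 * 8-bit address stored in the VBIOS with the R/W bit dropped
 * (i2c_slave_address >> 1).
 */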
2977
2978/**
2979 * Read the external display connection info table through i2c and
2980 * validate its GUID and checksum.
2981 *
2982 * @return enum bp_result - whether all data was successfully read
2983 */
2984static enum bp_result get_ext_display_connection_info(
2985 struct bios_parser *bp,
2986 ATOM_OBJECT *opm_object,
2987 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl)
2988{
2989 bool config_tbl_present = false;
2990 ATOM_I2C_RECORD *i2c_record = NULL;
2991 uint32_t i = 0;
2992
2993 if (opm_object == NULL)
2994 return BP_RESULT_BADINPUT;
2995
2996 i2c_record = get_i2c_record(bp, opm_object);
2997
2998 if (i2c_record != NULL) {
2999 ATOM_GPIO_I2C_INFO *gpio_i2c_header;
3000 struct graphics_object_i2c_info i2c_info;
3001
3002 gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO,
3003 bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info);
3004
3005 if (NULL == gpio_i2c_header)
3006 return BP_RESULT_BADBIOSTABLE;
3007
3008 if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) !=
3009 BP_RESULT_OK)
3010 return BP_RESULT_BADBIOSTABLE;
3011
3012 if (i2c_read(bp,
3013 &i2c_info,
3014 (uint8_t *)ext_display_connection_info_tbl,
3015 sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) {
3016 config_tbl_present = true;
3017 }
3018 }
3019
3020 /* Validate GUID */
3021 if (config_tbl_present)
3022 for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) {
3023 if (ext_display_connection_info_tbl->ucGuid[i]
3024 != ext_display_connection_guid[i]) {
3025 config_tbl_present = false;
3026 break;
3027 }
3028 }
3029
3030 /* Validate checksum */
3031 if (config_tbl_present) {
3032 uint8_t check_sum = 0;
3033 uint8_t *buf =
3034 (uint8_t *)ext_display_connection_info_tbl;
3035
3036 for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO);
3037 i++) {
3038 check_sum += buf[i];
3039 }
3040
3041 if (check_sum != 0)
3042 config_tbl_present = false;
3043 }
3044
3045 if (config_tbl_present)
3046 return BP_RESULT_OK;
3047 else
3048 return BP_RESULT_FAILURE;
3049}
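/*
 * Checksum convention used above (common for AtomBIOS-style tables): the
 * table carries a checksum byte chosen so that all of its bytes sum to zero
 * modulo 256. As a standalone sketch, the same rule over an arbitrary buffer
 * would be:
 *
 *	static bool byte_sum_is_zero(const uint8_t *buf, size_t len)
 *	{
 *		uint8_t sum = 0;
 *
 *		while (len--)
 *			sum += *buf++;
 *
 *		return sum == 0;
 *	}
 */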
3050
3051/*
3052 * Gets the first device ID in the same group as the given ID for enumerating.
3053 * For instance, if any DFP device ID is passed, returns the device ID for DFP1.
3054 *
3055 * Returns the first device ID in the same group as the passed device ID, or 0
3056 * if no matching device group is found.
3057 */
3058static uint32_t enum_first_device_id(uint32_t dev_id)
3059{
3060 /* Return the first in the group that this ID belongs to. */
3061 if (dev_id & ATOM_DEVICE_CRT_SUPPORT)
3062 return ATOM_DEVICE_CRT1_SUPPORT;
3063 else if (dev_id & ATOM_DEVICE_DFP_SUPPORT)
3064 return ATOM_DEVICE_DFP1_SUPPORT;
3065 else if (dev_id & ATOM_DEVICE_LCD_SUPPORT)
3066 return ATOM_DEVICE_LCD1_SUPPORT;
3067 else if (dev_id & ATOM_DEVICE_TV_SUPPORT)
3068 return ATOM_DEVICE_TV1_SUPPORT;
3069 else if (dev_id & ATOM_DEVICE_CV_SUPPORT)
3070 return ATOM_DEVICE_CV_SUPPORT;
3071
3072 /* No group found for this device ID. */
3073
3074 dm_error("%s: incorrect input %u\n", __func__, dev_id);
3075 /* No matching support flag for given device ID */
3076 return 0;
3077}
3078
3079/*
3080 * Gets the next device ID in the group for a given device ID.
3081 *
3082 * dev_id - the device ID currently being enumerated.
3083 *
3084 * Returns the next device ID in the group, or 0 if no further device exists.
3085 */
3086static uint32_t enum_next_dev_id(uint32_t dev_id)
3087{
3088 /* Get next device ID in the group. */
3089 switch (dev_id) {
3090 case ATOM_DEVICE_CRT1_SUPPORT:
3091 return ATOM_DEVICE_CRT2_SUPPORT;
3092 case ATOM_DEVICE_LCD1_SUPPORT:
3093 return ATOM_DEVICE_LCD2_SUPPORT;
3094 case ATOM_DEVICE_DFP1_SUPPORT:
3095 return ATOM_DEVICE_DFP2_SUPPORT;
3096 case ATOM_DEVICE_DFP2_SUPPORT:
3097 return ATOM_DEVICE_DFP3_SUPPORT;
3098 case ATOM_DEVICE_DFP3_SUPPORT:
3099 return ATOM_DEVICE_DFP4_SUPPORT;
3100 case ATOM_DEVICE_DFP4_SUPPORT:
3101 return ATOM_DEVICE_DFP5_SUPPORT;
3102 case ATOM_DEVICE_DFP5_SUPPORT:
3103 return ATOM_DEVICE_DFP6_SUPPORT;
3104 }
3105
3106 /* Done enumerating through devices. */
3107 return 0;
3108}
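/*
 * Together, enum_first_device_id() and enum_next_dev_id() walk a device
 * group from its first member until 0 is returned. For any DFP input the
 * walk visits DFP1 through DFP6, e.g.:
 *
 *	uint32_t id = enum_first_device_id(ATOM_DEVICE_DFP4_SUPPORT);
 *
 *	while (id != 0)			visits DFP1, DFP2, ... DFP6 in turn
 *		id = enum_next_dev_id(id);
 *
 * get_patched_device_tag() below uses this pattern to find the first device
 * ID in the group that is still marked supported.
 */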
3109
3110/*
3111 * Fills in the new device tag record for a patched BIOS object.
3112 *
3113 * [IN] ext_display_path - external display path to copy the device tag from.
3114 * [IN] device_support - bit vector of supported device IDs.
3115 * [OUT] device_tag - device tag structure to fill with the patched data.
3116 *
3117 * Returns true if a compatible device ID was found, false otherwise.
3118 */
3119static bool get_patched_device_tag(
3120 struct bios_parser *bp,
3121 EXT_DISPLAY_PATH *ext_display_path,
3122 uint32_t device_support,
3123 ATOM_CONNECTOR_DEVICE_TAG *device_tag)
3124{
3125 uint32_t dev_id;
3126 /* Use fallback behaviour if not supported. */
3127 if (!bp->remap_device_tags) {
3128 device_tag->ulACPIDeviceEnum =
3129 cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
3130 device_tag->usDeviceID =
3131 cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag));
3132 return true;
3133 }
3134
3135 /* Find the first unused in the same group. */
3136 dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag));
3137 while (dev_id != 0) {
3138 /* Assign this device ID if supported. */
3139 if ((device_support & dev_id) != 0) {
3140 device_tag->ulACPIDeviceEnum =
3141 cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
3142 device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id);
3143 return true;
3144 }
3145
3146 dev_id = enum_next_dev_id(dev_id);
3147 }
3148
3149 /* No compatible device ID found. */
3150 return false;
3151}
3152
3153/*
3154 * Adds a device tag to a BIOS object's device tag record if a matching
3155 * device ID is supported.
3156 *
3157 * object - the BIOS object to add the device tag to.
3158 * ext_display_path - display path to retrieve the base device ID from.
3159 * device_support - pointer to the bit vector of supported device IDs.
3160 */
3161static void add_device_tag_from_ext_display_path(
3162 struct bios_parser *bp,
3163 ATOM_OBJECT *object,
3164 EXT_DISPLAY_PATH *ext_display_path,
3165 uint32_t *device_support)
3166{
3167 /* Get device tag record for object. */
3168 ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL;
3169 ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL;
3170 enum bp_result result =
3171 bios_parser_get_device_tag_record(
3172 bp, object, &device_tag_record);
3173
3174 if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE)
3175 && (result == BP_RESULT_OK)) {
3176 uint8_t index;
3177
3178 if ((device_tag_record->ucNumberOfDevice == 1) &&
3179 (le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) {
3180 /* Workaround bug in current VBIOS releases where
3181 * ucNumberOfDevice = 1 but there is no actual device
3182 * tag data. This w/a is temporary until the updated
3183 * VBIOS is distributed. */
3184 device_tag_record->ucNumberOfDevice =
3185 device_tag_record->ucNumberOfDevice - 1;
3186 }
3187
3188 /* Attempt to find a matching device ID. */
3189 index = device_tag_record->ucNumberOfDevice;
3190 device_tag = &device_tag_record->asDeviceTag[index];
3191 if (get_patched_device_tag(
3192 bp,
3193 ext_display_path,
3194 *device_support,
3195 device_tag)) {
3196 /* Update cached device support to remove assigned ID.
3197 */
3198 *device_support &= ~le16_to_cpu(device_tag->usDeviceID);
3199 device_tag_record->ucNumberOfDevice++;
3200 }
3201 }
3202}
3203
3204/*
3205 * Read out a single EXT_DISPLAY_PATH from the external display connection
3206 * info table. The specific entry is selected by the enum id encoded in the
3207 * passed bios_object_id.
3208 *
3209 * Returns the EXT_DISPLAY_PATH describing a single configuration table entry.
3210 */
3211
3212#define INVALID_CONNECTOR 0xffff
3213
3214static EXT_DISPLAY_PATH *get_ext_display_path_entry(
3215 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table,
3216 uint32_t bios_object_id)
3217{
3218 EXT_DISPLAY_PATH *ext_display_path;
3219 uint32_t ext_display_path_index =
3220 ((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1;
3221
3222 if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH)
3223 return NULL;
3224
3225 ext_display_path = &config_table->sPath[ext_display_path_index];
3226
3227 if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR)
3228 ext_display_path->usDeviceConnector = cpu_to_le16(0);
3229
3230 return ext_display_path;
3231}
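/*
 * The table index above is derived from the enum id field of the BIOS object
 * id, so an MXM connector with ENUM_ID_1 selects sPath[0], ENUM_ID_2 selects
 * sPath[1], and so on:
 *
 *	index = ((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1
 *
 * Indices past MAX_NUMBER_OF_EXT_DISPLAY_PATH are rejected, and a path whose
 * usDeviceConnector holds the 0xffff "invalid" marker is normalized to 0.
 */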
3232
3233/*
3234 * Get the AUX/DDC information of the input object id.
3235 *
3236 * Searches all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
3237 * record.
3238 */
3239static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record(
3240 struct bios_parser *bp,
3241 ATOM_OBJECT *object)
3242{
3243 uint32_t offset;
3244 ATOM_COMMON_RECORD_HEADER *header;
3245
3246 if (!object) {
3247 BREAK_TO_DEBUGGER();
3248 /* Invalid object */
3249 return NULL;
3250 }
3251
3252 offset = le16_to_cpu(object->usRecordOffset)
3253 + bp->object_info_tbl_offset;
3254
3255 for (;;) {
3256 header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
3257
3258 if (!header)
3259 return NULL;
3260
3261 if (LAST_RECORD_TYPE == header->ucRecordType ||
3262 0 == header->ucRecordSize)
3263 break;
3264
3265 if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ==
3266 header->ucRecordType &&
3267 sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <=
3268 header->ucRecordSize)
3269 return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header);
3270
3271 offset += header->ucRecordSize;
3272 }
3273
3274 return NULL;
3275}
3276
3277/*
3278 * Get the HPD pin LUT information of the input object id.
3279 *
3280 * Searches all records to find the ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
3281 * record.
3282 */
3283static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record(
3284 struct bios_parser *bp,
3285 ATOM_OBJECT *object)
3286{
3287 uint32_t offset;
3288 ATOM_COMMON_RECORD_HEADER *header;
3289
3290 if (!object) {
3291 BREAK_TO_DEBUGGER();
3292 /* Invalid object */
3293 return NULL;
3294 }
3295
3296 offset = le16_to_cpu(object->usRecordOffset)
3297 + bp->object_info_tbl_offset;
3298
3299 for (;;) {
3300 header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
3301
3302 if (!header)
3303 return NULL;
3304
3305 if (LAST_RECORD_TYPE == header->ucRecordType ||
3306 0 == header->ucRecordSize)
3307 break;
3308
3309 if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ==
3310 header->ucRecordType &&
3311 sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <=
3312 header->ucRecordSize)
3313 return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header;
3314
3315 offset += header->ucRecordSize;
3316 }
3317
3318 return NULL;
3319}
3320
3321/*
3322 * Patch the VBIOS connector info table with data from an external
3323 * display connection info table. This is necessary to support MXM
3324 * boards with an OPM (output personality module). With these designs,
3325 * the VBIOS connector info table specifies an MXM_CONNECTOR with a
3326 * unique ID. The driver retrieves the external connection info table
3327 * through i2c and then looks up the connector ID to find the real
3328 * connector type (e.g. DFP1).
3329 *
3330 */
3331static enum bp_result patch_bios_image_from_ext_display_connection_info(
3332 struct bios_parser *bp)
3333{
3334 ATOM_OBJECT_TABLE *connector_tbl;
3335 uint32_t connector_tbl_offset;
3336 struct graphics_object_id object_id;
3337 ATOM_OBJECT *object;
3338 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl;
3339 EXT_DISPLAY_PATH *ext_display_path;
3340 ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL;
3341 ATOM_I2C_RECORD *i2c_record = NULL;
3342 ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL;
3343 ATOM_HPD_INT_RECORD *hpd_record = NULL;
3344 ATOM_OBJECT_TABLE *encoder_table;
3345 uint32_t encoder_table_offset;
3346 ATOM_OBJECT *opm_object = NULL;
3347 uint32_t i = 0;
3348 struct graphics_object_id opm_object_id =
3349 dal_graphics_object_id_init(
3350 GENERIC_ID_MXM_OPM,
3351 ENUM_ID_1,
3352 OBJECT_TYPE_GENERIC);
3353 ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record;
3354 uint32_t cached_device_support =
3355 le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport);
3356
3357 uint32_t dst_number;
3358 uint16_t *dst_object_id_list;
3359
3360 opm_object = get_bios_object(bp, opm_object_id);
3361 if (!opm_object)
3362 return BP_RESULT_UNSUPPORTED;
3363
3364 memset(&ext_display_connection_info_tbl, 0,
3365 sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO));
3366
3367 connector_tbl_offset = bp->object_info_tbl_offset
3368 + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
3369 connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
3370
3371 /* Read Connector info table from EEPROM through i2c */
3372 if (get_ext_display_connection_info(bp,
3373 opm_object,
3374 &ext_display_connection_info_tbl) != BP_RESULT_OK) {
3375
3376 dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
3377 "%s: Failed to read Connection Info Table", __func__);
3378 return BP_RESULT_UNSUPPORTED;
3379 }
3380
3381 /* Get pointer to AUX/DDC and HPD LUTs */
3382 aux_ddc_lut_record =
3383 get_ext_connector_aux_ddc_lut_record(bp, opm_object);
3384 hpd_pin_lut_record =
3385 get_ext_connector_hpd_pin_lut_record(bp, opm_object);
3386
3387 if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL))
3388 return BP_RESULT_UNSUPPORTED;
3389
3390 /* Cache support bits for currently unmapped device types. */
3391 if (bp->remap_device_tags) {
3392 for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) {
3393 uint32_t j;
3394 /* Remove support for all non-MXM connectors. */
3395 object = &connector_tbl->asObjects[i];
3396 object_id = object_id_from_bios_object_id(
3397 le16_to_cpu(object->usObjectID));
3398 if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
3399 (CONNECTOR_ID_MXM == object_id.id))
3400 continue;
3401
3402 /* Remove support for all device tags. */
3403 if (bios_parser_get_device_tag_record(
3404 bp, object, &dev_tag_record) != BP_RESULT_OK)
3405 continue;
3406
3407 for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) {
3408 ATOM_CONNECTOR_DEVICE_TAG *device_tag =
3409 &dev_tag_record->asDeviceTag[j];
3410 cached_device_support &=
3411 ~le16_to_cpu(device_tag->usDeviceID);
3412 }
3413 }
3414 }
3415
3416 /* Find all MXM connector objects and patch them with connector info
3417 * from the external display connection info table. */
3418 for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
3419 uint32_t j;
3420
3421 object = &connector_tbl->asObjects[i];
3422 object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
3423 if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
3424 (CONNECTOR_ID_MXM != object_id.id))
3425 continue;
3426
3427 /* Get the correct connection info table entry based on the enum
3428 * id. */
3429 ext_display_path = get_ext_display_path_entry(
3430 &ext_display_connection_info_tbl,
3431 le16_to_cpu(object->usObjectID));
3432 if (!ext_display_path)
3433 return BP_RESULT_FAILURE;
3434
3435 /* Patch device connector ID */
3436 object->usObjectID =
3437 cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector));
3438
3439 /* Patch device tag, ulACPIDeviceEnum. */
3440 add_device_tag_from_ext_display_path(
3441 bp,
3442 object,
3443 ext_display_path,
3444 &cached_device_support);
3445
3446 /* Patch HPD info */
3447 if (ext_display_path->ucExtHPDPINLutIndex <
3448 MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) {
3449 hpd_record = get_hpd_record(bp, object);
3450 if (hpd_record) {
3451 uint8_t index =
3452 ext_display_path->ucExtHPDPINLutIndex;
3453 hpd_record->ucHPDIntGPIOID =
3454 hpd_pin_lut_record->ucHPDPINMap[index];
3455 } else {
3456 BREAK_TO_DEBUGGER();
3457 /* Invalid hpd record */
3458 return BP_RESULT_FAILURE;
3459 }
3460 }
3461
3462 /* Patch I2C/AUX info */
3463 if (ext_display_path->ucExtAUXDDCLutIndex <
3464 MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) {
3465 i2c_record = get_i2c_record(bp, object);
3466 if (i2c_record) {
3467 uint8_t index =
3468 ext_display_path->ucExtAUXDDCLutIndex;
3469 i2c_record->sucI2cId =
3470 aux_ddc_lut_record->ucAUXDDCMap[index];
3471 } else {
3472 BREAK_TO_DEBUGGER();
3473 /* Invalid I2C record */
3474 return BP_RESULT_FAILURE;
3475 }
3476 }
3477
3478 /* Merge with other MXM connectors that map to the same physical
3479 * connector. */
3480 for (j = i + 1;
3481 j < connector_tbl->ucNumberOfObjects; j++) {
3482 ATOM_OBJECT *next_object;
3483 struct graphics_object_id next_object_id;
3484 EXT_DISPLAY_PATH *next_ext_display_path;
3485
3486 next_object = &connector_tbl->asObjects[j];
3487 next_object_id = object_id_from_bios_object_id(
3488 le16_to_cpu(next_object->usObjectID));
3489
3490 if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) ||
3491 (CONNECTOR_ID_MXM != next_object_id.id))
3492 continue;
3493
3494 next_ext_display_path = get_ext_display_path_entry(
3495 &ext_display_connection_info_tbl,
3496 le16_to_cpu(next_object->usObjectID));
3497
3498 if (next_ext_display_path == NULL)
3499 return BP_RESULT_FAILURE;
3500
3501 /* Merge if using same connector. */
3502 if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) ==
3503 le16_to_cpu(ext_display_path->usDeviceConnector)) &&
3504 (le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) {
3505 /* Clear duplicate connector from table. */
3506 next_object->usObjectID = cpu_to_le16(0);
3507 add_device_tag_from_ext_display_path(
3508 bp,
3509 object,
3510 ext_display_path,
3511 &cached_device_support);
3512 }
3513 }
3514 }
3515
3516 /* Find all encoders which have an MXM object as their destination.
3517 * Replace the MXM object with the real connector Id from the external
3518 * display connection info table */
3519
3520 encoder_table_offset = bp->object_info_tbl_offset
3521 + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
3522 encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
3523
3524 for (i = 0; i < encoder_table->ucNumberOfObjects; i++) {
3525 uint32_t j;
3526
3527 object = &encoder_table->asObjects[i];
3528
3529 dst_number = get_dest_obj_list(bp, object, &dst_object_id_list);
3530
3531 for (j = 0; j < dst_number; j++) {
3532 object_id = object_id_from_bios_object_id(
3533 dst_object_id_list[j]);
3534
3535 if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
3536 (CONNECTOR_ID_MXM != object_id.id))
3537 continue;
3538
3539 /* Get the correct connection info table entry based on
3540 * the enum id. */
3541 ext_display_path =
3542 get_ext_display_path_entry(
3543 &ext_display_connection_info_tbl,
3544 dst_object_id_list[j]);
3545
3546 if (ext_display_path == NULL)
3547 return BP_RESULT_FAILURE;
3548
3549 dst_object_id_list[j] =
3550 le16_to_cpu(ext_display_path->usDeviceConnector);
3551 }
3552 }
3553
3554 return BP_RESULT_OK;
3555}
3556
3557/*
3558 * Check whether we need to patch the VBIOS connector info table with
3559 * data from an external display connection info table. This is
3560 * necessary to support MXM boards with an OPM (output personality
3561 * module). With these designs, the VBIOS connector info table
3562 * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
3563 * the external connection info table through i2c and then looks up the
3564 * connector ID to find the real connector type (e.g. DFP1).
3565 *
3566 */
3567
3568static void process_ext_display_connection_info(struct bios_parser *bp)
3569{
3570 ATOM_OBJECT_TABLE *connector_tbl;
3571 uint32_t connector_tbl_offset;
3572 struct graphics_object_id object_id;
3573 ATOM_OBJECT *object;
3574 bool mxm_connector_found = false;
3575 bool null_entry_found = false;
3576 uint32_t i = 0;
3577
3578 connector_tbl_offset = bp->object_info_tbl_offset +
3579 le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
3580 connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
3581
3582 /* Look for MXM connectors to determine whether we need patch the VBIOS
3583 * connector info table. Look for null entries to determine whether we
3584 * need to compact connector table. */
3585 for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
3586 object = &connector_tbl->asObjects[i];
3587 object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
3588
3589 if ((OBJECT_TYPE_CONNECTOR == object_id.type) &&
3590 (CONNECTOR_ID_MXM == object_id.id)) {
3591 /* Once we found MXM connector - we can break */
3592 mxm_connector_found = true;
3593 break;
3594 } else if (OBJECT_TYPE_CONNECTOR != object_id.type) {
3595 /* We need to continue looping - to check if MXM
3596 * connector present */
3597 null_entry_found = true;
3598 }
3599 }
3600
3601 /* Patch BIOS image */
3602 if (mxm_connector_found || null_entry_found) {
3603 uint32_t connectors_num = 0;
3604 uint8_t *original_bios;
3605 /* Step 1: Replace bios image with the new copy which will be
3606 * patched */
3607 bp->base.bios_local_image = dm_alloc(bp->base.bios_size);
3608 if (bp->base.bios_local_image == NULL) {
3609 BREAK_TO_DEBUGGER();
3610 /* Failed to alloc bp->base.bios_local_image */
3611 return;
3612 }
3613
3614 memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size);
3615 original_bios = bp->base.bios;
3616 bp->base.bios = bp->base.bios_local_image;
3617 connector_tbl =
3618 GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
3619
3620 /* Step 2: (only if MXM connector found) Patch BIOS image with
3621 * info from external module */
3622 if (mxm_connector_found &&
3623 patch_bios_image_from_ext_display_connection_info(bp) !=
3624 BP_RESULT_OK) {
3625 /* Patching the bios image has failed. We will copy
3626 * again original image provided and afterwards
3627 * only remove null entries */
3628 memmove(
3629 bp->base.bios_local_image,
3630 original_bios,
3631 bp->base.bios_size);
3632 }
3633
3634 /* Step 3: Compact connector table (remove null entries, valid
3635 * entries moved to beginning) */
3636 for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
3637 object = &connector_tbl->asObjects[i];
3638 object_id = object_id_from_bios_object_id(
3639 le16_to_cpu(object->usObjectID));
3640
3641 if (OBJECT_TYPE_CONNECTOR != object_id.type)
3642 continue;
3643
3644 if (i != connectors_num) {
3645 memmove(
3646 &connector_tbl->
3647 asObjects[connectors_num],
3648 object,
3649 sizeof(ATOM_OBJECT));
3650 }
3651 ++connectors_num;
3652 }
3653 connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num;
3654 }
3655}
3656
3657static void bios_parser_post_init(struct dc_bios *dcb)
3658{
3659 struct bios_parser *bp = BP_FROM_DCB(dcb);
3660
3661 process_ext_display_connection_info(bp);
3662}
3663
3664/**
3665 * bios_parser_set_scratch_critical_state
3666 *
3667 * @brief
3668 * update critical state bit in VBIOS scratch register
3669 *
3670 * @param
3671 * state - true to set the critical state bit, false to clear it
3672 */
3673static void bios_parser_set_scratch_critical_state(
3674 struct dc_bios *dcb,
3675 bool state)
3676{
3677 bios_set_scratch_critical_state(dcb, state);
3678}
3679
3680/*
3681 * get_integrated_info_v8
3682 *
3683 * @brief
3684 * Get V8 integrated BIOS information
3685 *
3686 * @param
3687 * bios_parser *bp - [in] BIOS parser handle to get the master data table
3688 * integrated_info *info - [out] store and output integrated info
3689 *
3690 * @return
3691 * enum bp_result - BP_RESULT_OK if information is available,
3692 * BP_RESULT_BADBIOSTABLE otherwise.
3693 */
3694static enum bp_result get_integrated_info_v8(
3695 struct bios_parser *bp,
3696 struct integrated_info *info)
3697{
3698 ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info_v8;
3699 uint32_t i;
3700
3701 info_v8 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_8,
3702 bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
3703
3704 if (info_v8 == NULL)
3705 return BP_RESULT_BADBIOSTABLE;
3706 info->boot_up_engine_clock = le32_to_cpu(info_v8->ulBootUpEngineClock) * 10;
3707 info->dentist_vco_freq = le32_to_cpu(info_v8->ulDentistVCOFreq) * 10;
3708 info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
3709
3710 for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
3711 /* Convert [10KHz] into [KHz] */
3712 info->disp_clk_voltage[i].max_supported_clk =
3713 le32_to_cpu(info_v8->sDISPCLK_Voltage[i].
3714 ulMaximumSupportedCLK) * 10;
3715 info->disp_clk_voltage[i].voltage_index =
3716 le32_to_cpu(info_v8->sDISPCLK_Voltage[i].ulVoltageIndex);
3717 }
3718
3719 info->boot_up_req_display_vector =
3720 le32_to_cpu(info_v8->ulBootUpReqDisplayVector);
3721 info->gpu_cap_info =
3722 le32_to_cpu(info_v8->ulGPUCapInfo);
3723
3724 /*
3725 * system_config: Bit[0] = 0 : PCIE power gating disabled
3726 * = 1 : PCIE power gating enabled
3727 * Bit[1] = 0 : DDR-PLL shut down disabled
3728 * = 1 : DDR-PLL shut down enabled
3729 * Bit[2] = 0 : DDR-PLL power down disabled
3730 * = 1 : DDR-PLL power down enabled
3731 */
3732 info->system_config = le32_to_cpu(info_v8->ulSystemConfig);
3733 info->cpu_cap_info = le32_to_cpu(info_v8->ulCPUCapInfo);
3734 info->boot_up_nb_voltage =
3735 le16_to_cpu(info_v8->usBootUpNBVoltage);
3736 info->ext_disp_conn_info_offset =
3737 le16_to_cpu(info_v8->usExtDispConnInfoOffset);
3738 info->memory_type = info_v8->ucMemoryType;
3739 info->ma_channel_number = info_v8->ucUMAChannelNumber;
3740 info->gmc_restore_reset_time =
3741 le32_to_cpu(info_v8->ulGMCRestoreResetTime);
3742
3743 info->minimum_n_clk =
3744 le32_to_cpu(info_v8->ulNbpStateNClkFreq[0]);
3745 for (i = 1; i < 4; ++i)
3746 info->minimum_n_clk =
3747 info->minimum_n_clk < le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]) ?
3748 info->minimum_n_clk : le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]);
3749
3750 info->idle_n_clk = le32_to_cpu(info_v8->ulIdleNClk);
3751 info->ddr_dll_power_up_time =
3752 le32_to_cpu(info_v8->ulDDR_DLL_PowerUpTime);
3753 info->ddr_pll_power_up_time =
3754 le32_to_cpu(info_v8->ulDDR_PLL_PowerUpTime);
3755 info->pcie_clk_ss_type = le16_to_cpu(info_v8->usPCIEClkSSType);
3756 info->lvds_ss_percentage =
3757 le16_to_cpu(info_v8->usLvdsSSPercentage);
3758 info->lvds_sspread_rate_in_10hz =
3759 le16_to_cpu(info_v8->usLvdsSSpreadRateIn10Hz);
3760 info->hdmi_ss_percentage =
3761 le16_to_cpu(info_v8->usHDMISSPercentage);
3762 info->hdmi_sspread_rate_in_10hz =
3763 le16_to_cpu(info_v8->usHDMISSpreadRateIn10Hz);
3764 info->dvi_ss_percentage =
3765 le16_to_cpu(info_v8->usDVISSPercentage);
3766 info->dvi_sspread_rate_in_10_hz =
3767 le16_to_cpu(info_v8->usDVISSpreadRateIn10Hz);
3768
3769 info->max_lvds_pclk_freq_in_single_link =
3770 le16_to_cpu(info_v8->usMaxLVDSPclkFreqInSingleLink);
3771 info->lvds_misc = info_v8->ucLvdsMisc;
3772 info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
3773 info_v8->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
3774 info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
3775 info_v8->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
3776 info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
3777 info_v8->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
3778 info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
3779 info_v8->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
3780 info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
3781 info_v8->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
3782 info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
3783 info_v8->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
3784 info->lvds_off_to_on_delay_in_4ms =
3785 info_v8->ucLVDSOffToOnDelay_in4Ms;
3786 info->lvds_bit_depth_control_val =
3787 le32_to_cpu(info_v8->ulLCDBitDepthControlVal);
3788
3789 for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
3790 /* Convert [10KHz] into [KHz] */
3791 info->avail_s_clk[i].supported_s_clk =
3792 le32_to_cpu(info_v8->sAvail_SCLK[i].ulSupportedSCLK) * 10;
3793 info->avail_s_clk[i].voltage_index =
3794 le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageIndex);
3795 info->avail_s_clk[i].voltage_id =
3796 le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageID);
3797 }
3798
3799 for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
3800 info->ext_disp_conn_info.gu_id[i] =
3801 info_v8->sExtDispConnInfo.ucGuid[i];
3802 }
3803
3804 for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
3805 info->ext_disp_conn_info.path[i].device_connector_id =
3806 object_id_from_bios_object_id(
3807 le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceConnector));
3808
3809 info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
3810 object_id_from_bios_object_id(
3811 le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
3812
3813 info->ext_disp_conn_info.path[i].device_tag =
3814 le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceTag);
3815 info->ext_disp_conn_info.path[i].device_acpi_enum =
3816 le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
3817 info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
3818 info_v8->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
3819 info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
3820 info_v8->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
3821 info->ext_disp_conn_info.path[i].channel_mapping.raw =
3822 info_v8->sExtDispConnInfo.sPath[i].ucChannelMapping;
3823 }
3824 info->ext_disp_conn_info.checksum =
3825 info_v8->sExtDispConnInfo.ucChecksum;
3826
3827 return BP_RESULT_OK;
3828}
3829
3830/*
3831 * get_integrated_info_v9
3832 *
3833 * @brief
3834 * Get V9 integrated BIOS information
3835 *
3836 * @param
3837 * bios_parser *bp - [in] BIOS parser handle to get the master data table
3838 * integrated_info *info - [out] store and output integrated info
3839 *
3840 * @return
3841 * enum bp_result - BP_RESULT_OK if information is available,
3842 * BP_RESULT_BADBIOSTABLE otherwise.
3843 */
3844static enum bp_result get_integrated_info_v9(
3845 struct bios_parser *bp,
3846 struct integrated_info *info)
3847{
3848 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info_v9;
3849 uint32_t i;
3850
3851 info_v9 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_9,
3852 bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
3853
3854 if (!info_v9)
3855 return BP_RESULT_BADBIOSTABLE;
3856
3857 info->boot_up_engine_clock = le32_to_cpu(info_v9->ulBootUpEngineClock) * 10;
3858 info->dentist_vco_freq = le32_to_cpu(info_v9->ulDentistVCOFreq) * 10;
3859 info->boot_up_uma_clock = le32_to_cpu(info_v9->ulBootUpUMAClock) * 10;
3860
3861 for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
3862 /* Convert [10KHz] into [KHz] */
3863 info->disp_clk_voltage[i].max_supported_clk =
3864 le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulMaximumSupportedCLK) * 10;
3865 info->disp_clk_voltage[i].voltage_index =
3866 le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulVoltageIndex);
3867 }
3868
3869 info->boot_up_req_display_vector =
3870 le32_to_cpu(info_v9->ulBootUpReqDisplayVector);
3871 info->gpu_cap_info = le32_to_cpu(info_v9->ulGPUCapInfo);
3872
3873 /*
3874 * system_config: Bit[0] = 0 : PCIE power gating disabled
3875 * = 1 : PCIE power gating enabled
3876 * Bit[1] = 0 : DDR-PLL shut down disabled
3877 * = 1 : DDR-PLL shut down enabled
3878 * Bit[2] = 0 : DDR-PLL power down disabled
3879 * = 1 : DDR-PLL power down enabled
3880 */
3881 info->system_config = le32_to_cpu(info_v9->ulSystemConfig);
3882 info->cpu_cap_info = le32_to_cpu(info_v9->ulCPUCapInfo);
3883 info->boot_up_nb_voltage = le16_to_cpu(info_v9->usBootUpNBVoltage);
3884 info->ext_disp_conn_info_offset = le16_to_cpu(info_v9->usExtDispConnInfoOffset);
3885 info->memory_type = info_v9->ucMemoryType;
3886 info->ma_channel_number = info_v9->ucUMAChannelNumber;
3887 info->gmc_restore_reset_time = le32_to_cpu(info_v9->ulGMCRestoreResetTime);
3888
3889 info->minimum_n_clk = le32_to_cpu(info_v9->ulNbpStateNClkFreq[0]);
3890 for (i = 1; i < 4; ++i)
3891 info->minimum_n_clk =
3892 info->minimum_n_clk < le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]) ?
3893 info->minimum_n_clk : le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]);
3894
3895 info->idle_n_clk = le32_to_cpu(info_v9->ulIdleNClk);
3896 info->ddr_dll_power_up_time = le32_to_cpu(info_v9->ulDDR_DLL_PowerUpTime);
3897 info->ddr_pll_power_up_time = le32_to_cpu(info_v9->ulDDR_PLL_PowerUpTime);
3898 info->pcie_clk_ss_type = le16_to_cpu(info_v9->usPCIEClkSSType);
3899 info->lvds_ss_percentage = le16_to_cpu(info_v9->usLvdsSSPercentage);
3900 info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v9->usLvdsSSpreadRateIn10Hz);
3901 info->hdmi_ss_percentage = le16_to_cpu(info_v9->usHDMISSPercentage);
3902 info->hdmi_sspread_rate_in_10hz = le16_to_cpu(info_v9->usHDMISSpreadRateIn10Hz);
3903 info->dvi_ss_percentage = le16_to_cpu(info_v9->usDVISSPercentage);
3904 info->dvi_sspread_rate_in_10_hz = le16_to_cpu(info_v9->usDVISSpreadRateIn10Hz);
3905
3906 info->max_lvds_pclk_freq_in_single_link =
3907 le16_to_cpu(info_v9->usMaxLVDSPclkFreqInSingleLink);
3908 info->lvds_misc = info_v9->ucLvdsMisc;
3909 info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
3910 info_v9->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
3911 info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
3912 info_v9->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
3913 info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
3914 info_v9->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
3915 info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
3916 info_v9->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
3917 info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
3918 info_v9->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
3919 info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
3920 info_v9->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
3921 info->lvds_off_to_on_delay_in_4ms =
3922 info_v9->ucLVDSOffToOnDelay_in4Ms;
3923 info->lvds_bit_depth_control_val =
3924 le32_to_cpu(info_v9->ulLCDBitDepthControlVal);
3925
3926 for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
3927 /* Convert [10KHz] into [KHz] */
3928 info->avail_s_clk[i].supported_s_clk =
3929 le32_to_cpu(info_v9->sAvail_SCLK[i].ulSupportedSCLK) * 10;
3930 info->avail_s_clk[i].voltage_index =
3931 le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageIndex);
3932 info->avail_s_clk[i].voltage_id =
3933 le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageID);
3934 }
3935
3936 for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
3937 info->ext_disp_conn_info.gu_id[i] =
3938 info_v9->sExtDispConnInfo.ucGuid[i];
3939 }
3940
3941 for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
3942 info->ext_disp_conn_info.path[i].device_connector_id =
3943 object_id_from_bios_object_id(
3944 le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceConnector));
3945
3946 info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
3947 object_id_from_bios_object_id(
3948 le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
3949
3950 info->ext_disp_conn_info.path[i].device_tag =
3951 le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceTag);
3952 info->ext_disp_conn_info.path[i].device_acpi_enum =
3953 le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
3954 info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
3955 info_v9->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
3956 info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
3957 info_v9->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
3958 info->ext_disp_conn_info.path[i].channel_mapping.raw =
3959 info_v9->sExtDispConnInfo.sPath[i].ucChannelMapping;
3960 }
3961 info->ext_disp_conn_info.checksum =
3962 info_v9->sExtDispConnInfo.ucChecksum;
3963
3964 return BP_RESULT_OK;
3965}
3966
3967/*
3968 * construct_integrated_info
3969 *
3970 * @brief
3971 * Get integrated BIOS information based on table revision
3972 *
3973 * @param
3974 * bios_parser *bp - [in] BIOS parser handle to get the master data table
3975 * integrated_info *info - [out] store and output integrated info
3976 *
3977 * @return
3978 * enum bp_result - BP_RESULT_OK if information is available,
3979 * BP_RESULT_BADBIOSTABLE otherwise.
3980 */
3981static enum bp_result construct_integrated_info(
3982 struct bios_parser *bp,
3983 struct integrated_info *info)
3984{
3985 enum bp_result result = BP_RESULT_BADBIOSTABLE;
3986
3987 ATOM_COMMON_TABLE_HEADER *header;
3988 struct atom_data_revision revision;
3989
3990 if (bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo) {
3991 header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
3992 bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
3993
3994 get_atom_data_table_revision(header, &revision);
3995
3996 /* Don't need to check major revision as they are all 1 */
3997 switch (revision.minor) {
3998 case 8:
3999 result = get_integrated_info_v8(bp, info);
4000 break;
4001 case 9:
4002 result = get_integrated_info_v9(bp, info);
4003 break;
4004 default:
4005 return result;
4006
4007 }
4008 }
4009
4010 /* Sort voltage table from low to high (insertion sort by adjacent swaps) */
4011 if (result == BP_RESULT_OK) {
4012 struct clock_voltage_caps temp = {0, 0};
4013 uint32_t i;
4014 uint32_t j;
4015
4016 for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
4017 for (j = i; j > 0; --j) {
4018 if (
4019 info->disp_clk_voltage[j].max_supported_clk <
4020 info->disp_clk_voltage[j-1].max_supported_clk) {
4021 /* swap j and j - 1*/
4022 temp = info->disp_clk_voltage[j-1];
4023 info->disp_clk_voltage[j-1] =
4024 info->disp_clk_voltage[j];
4025 info->disp_clk_voltage[j] = temp;
4026 }
4027 }
4028 }
4029
4030 }
4031
4032 return result;
4033}
4034
4035static struct integrated_info *bios_parser_create_integrated_info(
4036 struct dc_bios *dcb)
4037{
4038 struct bios_parser *bp = BP_FROM_DCB(dcb);
4039 struct integrated_info *info = NULL;
4040
4041 info = dm_alloc(sizeof(struct integrated_info));
4042
4043 if (info == NULL) {
4044 ASSERT_CRITICAL(0);
4045 return NULL;
4046 }
4047
4048 if (construct_integrated_info(bp, info) == BP_RESULT_OK)
4049 return info;
4050
4051 dm_free(info);
4052
4053 return NULL;
4054}
4055
4056/******************************************************************************/
4057
4058static const struct dc_vbios_funcs vbios_funcs = {
4059 .get_connectors_number = bios_parser_get_connectors_number,
4060
4061 .get_encoder_id = bios_parser_get_encoder_id,
4062
4063 .get_connector_id = bios_parser_get_connector_id,
4064
4065 .get_dst_number = bios_parser_get_dst_number,
4066
4067 .get_gpio_record = bios_parser_get_gpio_record,
4068
4069 .get_src_obj = bios_parser_get_src_obj,
4070
4071 .get_dst_obj = bios_parser_get_dst_obj,
4072
4073 .get_i2c_info = bios_parser_get_i2c_info,
4074
4075 .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
4076
4077 .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
4078
4079 .get_hpd_info = bios_parser_get_hpd_info,
4080
4081 .get_device_tag = bios_parser_get_device_tag,
4082
4083 .get_firmware_info = bios_parser_get_firmware_info,
4084
4085 .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
4086
4087 .get_ss_entry_number = bios_parser_get_ss_entry_number,
4088
4089 .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
4090
4091 .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
4092
4097 .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
4098
4099 /* bios scratch register communication */
4100 .is_accelerated_mode = bios_is_accelerated_mode,
4101
4102 .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
4103
4104 .is_device_id_supported = bios_parser_is_device_id_supported,
4105
4106 /* COMMANDS */
4107 .encoder_control = bios_parser_encoder_control,
4108
4109 .transmitter_control = bios_parser_transmitter_control,
4110
4111 .crt_control = bios_parser_crt_control, /* not used in DAL3. keep for now in case we need to support VGA on Bonaire */
4112
4113 .enable_crtc = bios_parser_enable_crtc,
4114
4115 .adjust_pixel_clock = bios_parser_adjust_pixel_clock,
4116
4117 .set_pixel_clock = bios_parser_set_pixel_clock,
4118
4119 .set_dce_clock = bios_parser_set_dce_clock,
4120
4121 .enable_spread_spectrum_on_ppll = bios_parser_enable_spread_spectrum_on_ppll,
4122
4123 .program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */
4124
4125 .crtc_source_select = bios_parser_crtc_source_select, /* still use. should probably retire and program directly */
4126
4127 .program_display_engine_pll = bios_parser_program_display_engine_pll,
4128
4129 .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
4130
4131 /* SW init and patch */
4132 .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
4133
4134 .bios_parser_destroy = bios_parser_destroy,
4135};
4136
4137static bool bios_parser_construct(
4138 struct bios_parser *bp,
4139 struct bp_init_data *init,
4140 enum dce_version dce_version)
4141{
4142 uint16_t *rom_header_offset = NULL;
4143 ATOM_ROM_HEADER *rom_header = NULL;
4144 ATOM_OBJECT_HEADER *object_info_tbl;
4145 struct atom_data_revision tbl_rev = {0};
4146
4147 if (!init)
4148 return false;
4149
4150 if (!init->bios)
4151 return false;
4152
4153 bp->base.funcs = &vbios_funcs;
4154 bp->base.bios = init->bios;
4155 bp->base.bios_size = bp->base.bios[BIOS_IMAGE_SIZE_OFFSET] * BIOS_IMAGE_SIZE_UNIT;
4156
4157 bp->base.ctx = init->ctx;
4158 bp->base.bios_local_image = NULL;
4159
4160 rom_header_offset =
4161 GET_IMAGE(uint16_t, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);
4162
4163 if (!rom_header_offset)
4164 return false;
4165
4166 rom_header = GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);
4167
4168 if (!rom_header)
4169 return false;
4170
4171 get_atom_data_table_revision(&rom_header->sHeader, &tbl_rev);
4172 if (tbl_rev.major >= 2 && tbl_rev.minor >= 2)
4173 return false;
4174
4175 bp->master_data_tbl =
4176 GET_IMAGE(ATOM_MASTER_DATA_TABLE,
4177 rom_header->usMasterDataTableOffset);
4178
4179 if (!bp->master_data_tbl)
4180 return false;
4181
4182 bp->object_info_tbl_offset = DATA_TABLES(Object_Header);
4183
4184 if (!bp->object_info_tbl_offset)
4185 return false;
4186
4187 object_info_tbl =
4188 GET_IMAGE(ATOM_OBJECT_HEADER, bp->object_info_tbl_offset);
4189
4190 if (!object_info_tbl)
4191 return false;
4192
4193 get_atom_data_table_revision(&object_info_tbl->sHeader,
4194 &bp->object_info_tbl.revision);
4195
4196 if (bp->object_info_tbl.revision.major == 1
4197 && bp->object_info_tbl.revision.minor >= 3) {
4198 ATOM_OBJECT_HEADER_V3 *tbl_v3;
4199
4200 tbl_v3 = GET_IMAGE(ATOM_OBJECT_HEADER_V3,
4201 bp->object_info_tbl_offset);
4202 if (!tbl_v3)
4203 return false;
4204
4205 bp->object_info_tbl.v1_3 = tbl_v3;
4206 } else if (bp->object_info_tbl.revision.major == 1
4207 && bp->object_info_tbl.revision.minor >= 1)
4208 bp->object_info_tbl.v1_1 = object_info_tbl;
4209 else
4210 return false;
4211
4212 dal_bios_parser_init_cmd_tbl(bp);
4213 dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);
4214
4215 bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
4216
4217 return true;
4218}
4219
4220/******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
new file mode 100644
index 000000000000..d6f16275048f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_BIOS_PARSER_H__
27#define __DAL_BIOS_PARSER_H__
28
29struct dc_bios *bios_parser_create(
30 struct bp_init_data *init,
31 enum dce_version dce_version);
32
33#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
new file mode 100644
index 000000000000..8e56d2f25dea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -0,0 +1,82 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "atom.h"
29
30#include "include/bios_parser_types.h"
31#include "bios_parser_helper.h"
32#include "command_table_helper.h"
33#include "command_table.h"
34#include "bios_parser_types_internal.h"
35
36uint8_t *get_image(struct dc_bios *bp,
37 uint32_t offset,
38 uint32_t size)
39{
40 if (bp->bios && offset + size < bp->bios_size)
41 return bp->bios + offset;
42 else
43 return NULL;
44}
45
46#include "reg_helper.h"
47
48#define CTX \
49 bios->ctx
50#define REG(reg)\
51 (bios->regs->reg)
52
53#undef FN
54#define FN(reg_name, field_name) \
55 ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name
56
57bool bios_is_accelerated_mode(
58 struct dc_bios *bios)
59{
60 uint32_t acc_mode;
61 REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode);
62 return (acc_mode == 1);
63}
64
65
66void bios_set_scratch_acc_mode_change(
67 struct dc_bios *bios)
68{
69 REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 1);
70}
71
72
73void bios_set_scratch_critical_state(
74 struct dc_bios *bios,
75 bool state)
76{
77 uint32_t critical_state = state ? 1 : 0;
78 REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critical_state);
79}
80
81
82
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
new file mode 100644
index 000000000000..a8fbb82b8c8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_BIOS_PARSER_HELPER_H__
27#define __DAL_BIOS_PARSER_HELPER_H__
28
29struct bios_parser;
30
31uint8_t *get_image(struct dc_bios *bp, uint32_t offset,
32 uint32_t size);
33
34bool bios_is_accelerated_mode(struct dc_bios *bios);
35void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
36void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
37
38#define GET_IMAGE(type, offset) ((type *) get_image(&bp->base, offset, sizeof(type)))
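/*
 * GET_IMAGE() gives a bounds-checked, typed view into the BIOS image and
 * evaluates to NULL when the requested structure would run past the end of
 * the image. It expands using a local 'bp' (struct bios_parser *), so it is
 * only usable inside bios_parser functions, e.g. (illustrative):
 *
 *	ATOM_COMMON_TABLE_HEADER *hdr =
 *		GET_IMAGE(ATOM_COMMON_TABLE_HEADER, bp->object_info_tbl_offset);
 *	if (!hdr)
 *		return BP_RESULT_BADBIOSTABLE;
 */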
39
40#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
new file mode 100644
index 000000000000..42272c35df2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/logger_interface.h"
28
29#include "bios_parser_interface.h"
30#include "bios_parser.h"
31
32
33struct dc_bios *dal_bios_parser_create(
34 struct bp_init_data *init,
35 enum dce_version dce_version)
36{
37 struct dc_bios *bios = NULL;
38
39 bios = bios_parser_create(init, dce_version);
40
41 return bios;
42}
43
44void dal_bios_parser_destroy(struct dc_bios **dcb)
45{
46 struct dc_bios *bios = *dcb;
47
48 bios->funcs->bios_parser_destroy(dcb);
49}
50
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
new file mode 100644
index 000000000000..5918923bfb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_BIOS_PARSER_TYPES_BIOS_H__
27#define __DAL_BIOS_PARSER_TYPES_BIOS_H__
28
29#include "dc_bios_types.h"
30#include "bios_parser_helper.h"
31
32struct atom_data_revision {
33 uint32_t major;
34 uint32_t minor;
35};
36
37struct object_info_table {
38 struct atom_data_revision revision;
39 union {
40 ATOM_OBJECT_HEADER *v1_1;
41 ATOM_OBJECT_HEADER_V3 *v1_3;
42 };
43};
44
45enum spread_spectrum_id {
46 SS_ID_UNKNOWN = 0,
47 SS_ID_DP1 = 0xf1,
48 SS_ID_DP2 = 0xf2,
49 SS_ID_LVLINK_2700MHZ = 0xf3,
50 SS_ID_LVLINK_1620MHZ = 0xf4
51};
52
53struct bios_parser {
54 struct dc_bios base;
55
56 struct object_info_table object_info_tbl;
57 uint32_t object_info_tbl_offset;
58 ATOM_MASTER_DATA_TABLE *master_data_tbl;
59
60 const struct bios_parser_helper *bios_helper;
61
62 const struct command_table_helper *cmd_helper;
63 struct cmd_tbl cmd_tbl;
64
65 bool remap_device_tags;
66};
67
68/* Bios Parser from DC Bios */
69#define BP_FROM_DCB(dc_bios) \
70 container_of(dc_bios, struct bios_parser, base)
71
72#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
new file mode 100644
index 000000000000..51f6052eac96
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -0,0 +1,2609 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "atom.h"
29
30#include "include/bios_parser_interface.h"
31
32#include "command_table.h"
33#include "command_table_helper.h"
34#include "bios_parser_helper.h"
35#include "bios_parser_types_internal.h"
36
37#define EXEC_BIOS_CMD_TABLE(command, params)\
38 (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
39 GetIndexIntoMasterTable(COMMAND, command), \
40 &params) == 0)
41
42#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
43 cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
44 GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
45
46#define BIOS_CMD_TABLE_PARA_REVISION(command)\
47 bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
48 GetIndexIntoMasterTable(COMMAND, command))
49
50static void init_dig_encoder_control(struct bios_parser *bp);
51static void init_transmitter_control(struct bios_parser *bp);
52static void init_set_pixel_clock(struct bios_parser *bp);
53static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
54static void init_adjust_display_pll(struct bios_parser *bp);
55static void init_dac_encoder_control(struct bios_parser *bp);
56static void init_dac_output_control(struct bios_parser *bp);
57static void init_blank_crtc(struct bios_parser *bp);
58static void init_set_crtc_timing(struct bios_parser *bp);
59static void init_set_crtc_overscan(struct bios_parser *bp);
60static void init_select_crtc_source(struct bios_parser *bp);
61static void init_enable_crtc(struct bios_parser *bp);
62static void init_enable_crtc_mem_req(struct bios_parser *bp);
63static void init_compute_memore_engine_pll(struct bios_parser *bp);
64static void init_external_encoder_control(struct bios_parser *bp);
65static void init_enable_disp_power_gating(struct bios_parser *bp);
66static void init_program_clock(struct bios_parser *bp);
67static void init_set_dce_clock(struct bios_parser *bp);
68
69void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
70{
71 init_dig_encoder_control(bp);
72 init_transmitter_control(bp);
73 init_set_pixel_clock(bp);
74 init_enable_spread_spectrum_on_ppll(bp);
75 init_adjust_display_pll(bp);
76 init_dac_encoder_control(bp);
77 init_dac_output_control(bp);
78 init_blank_crtc(bp);
79 init_set_crtc_timing(bp);
80 init_set_crtc_overscan(bp);
81 init_select_crtc_source(bp);
82 init_enable_crtc(bp);
83 init_enable_crtc_mem_req(bp);
84 init_program_clock(bp);
85 init_compute_memore_engine_pll(bp);
86 init_external_encoder_control(bp);
87 init_enable_disp_power_gating(bp);
88 init_set_dce_clock(bp);
89}
90
91static uint32_t bios_cmd_table_para_revision(void *cgs_device,
92 uint32_t index)
93{
94 uint8_t frev, crev;
95
96 if (cgs_atom_get_cmd_table_revs(cgs_device,
97 index,
98 &frev, &crev) != 0)
99 return 0;
100 return crev;
101}
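
dal_bios_parser_init_cmd_tbl() and the init_* helpers below install one handler per command table based on the parameter revision the VBIOS reports. A minimal standalone sketch of that revision-dispatch pattern, with hypothetical handler names:

#include <stdint.h>
#include <stdio.h>

typedef int (*encoder_control_fn)(uint32_t pixel_clock_khz);

static int encoder_control_sketch_v3(uint32_t clk) { (void)clk; return 3; }
static int encoder_control_sketch_v4(uint32_t clk) { (void)clk; return 4; }

/* Pick the handler that matches the parameter revision, mirroring the
 * switch in init_dig_encoder_control() below (there, crev 2 selects the
 * v3 parameter layout).
 */
static encoder_control_fn select_handler(uint8_t crev)
{
	switch (crev) {
	case 2: return encoder_control_sketch_v3;
	case 4: return encoder_control_sketch_v4;
	default: return NULL;	/* unknown revision: no handler installed */
	}
}

int main(void)
{
	encoder_control_fn fn = select_handler(4);

	if (fn)
		printf("handler returned %d\n", fn(148500));
	return 0;
}
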
102
103/*******************************************************************************
104 ********************************************************************************
105 **
106 ** D I G E N C O D E R C O N T R O L
107 **
108 ********************************************************************************
109 *******************************************************************************/
110static enum bp_result encoder_control_digx_v3(
111 struct bios_parser *bp,
112 struct bp_encoder_control *cntl);
113
114static enum bp_result encoder_control_digx_v4(
115 struct bios_parser *bp,
116 struct bp_encoder_control *cntl);
117
118#ifdef LATEST_ATOM_BIOS_SUPPORT
119static enum bp_result encoder_control_digx_v5(
120 struct bios_parser *bp,
121 struct bp_encoder_control *cntl);
122#endif
123
124static void init_encoder_control_dig_v1(struct bios_parser *bp);
125
126static void init_dig_encoder_control(struct bios_parser *bp)
127{
128 uint32_t version =
129 BIOS_CMD_TABLE_PARA_REVISION(DIGxEncoderControl);
130
131 switch (version) {
132 case 2:
133 bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v3;
134 break;
135 case 4:
136 bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v4;
137 break;
138
139#ifdef LATEST_ATOM_BIOS_SUPPORT
140 case 5:
141 bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v5;
142 break;
143#endif
144
145 default:
146 init_encoder_control_dig_v1(bp);
147 break;
148 }
149}
150
151static enum bp_result encoder_control_dig_v1(
152 struct bios_parser *bp,
153 struct bp_encoder_control *cntl);
154static enum bp_result encoder_control_dig1_v1(
155 struct bios_parser *bp,
156 struct bp_encoder_control *cntl);
157static enum bp_result encoder_control_dig2_v1(
158 struct bios_parser *bp,
159 struct bp_encoder_control *cntl);
160
161static void init_encoder_control_dig_v1(struct bios_parser *bp)
162{
163 struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
164
165 if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG1EncoderControl))
166 cmd_tbl->encoder_control_dig1 = encoder_control_dig1_v1;
167 else
168 cmd_tbl->encoder_control_dig1 = NULL;
169
170 if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG2EncoderControl))
171 cmd_tbl->encoder_control_dig2 = encoder_control_dig2_v1;
172 else
173 cmd_tbl->encoder_control_dig2 = NULL;
174
175 cmd_tbl->dig_encoder_control = encoder_control_dig_v1;
176}
177
178static enum bp_result encoder_control_dig_v1(
179 struct bios_parser *bp,
180 struct bp_encoder_control *cntl)
181{
182 enum bp_result result = BP_RESULT_FAILURE;
183 struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
184
185 if (cntl != NULL)
186 switch (cntl->engine_id) {
187 case ENGINE_ID_DIGA:
188 if (cmd_tbl->encoder_control_dig1 != NULL)
189 result =
190 cmd_tbl->encoder_control_dig1(bp, cntl);
191 break;
192 case ENGINE_ID_DIGB:
193 if (cmd_tbl->encoder_control_dig2 != NULL)
194 result =
195 cmd_tbl->encoder_control_dig2(bp, cntl);
196 break;
197
198 default:
199 break;
200 }
201
202 return result;
203}
204
205static enum bp_result encoder_control_dig1_v1(
206 struct bios_parser *bp,
207 struct bp_encoder_control *cntl)
208{
209 enum bp_result result = BP_RESULT_FAILURE;
210 DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
211
212 bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
213
214 if (EXEC_BIOS_CMD_TABLE(DIG1EncoderControl, params))
215 result = BP_RESULT_OK;
216
217 return result;
218}
219
220static enum bp_result encoder_control_dig2_v1(
221 struct bios_parser *bp,
222 struct bp_encoder_control *cntl)
223{
224 enum bp_result result = BP_RESULT_FAILURE;
225 DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
226
227 bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
228
229 if (EXEC_BIOS_CMD_TABLE(DIG2EncoderControl, params))
230 result = BP_RESULT_OK;
231
232 return result;
233}
234
235static enum bp_result encoder_control_digx_v3(
236 struct bios_parser *bp,
237 struct bp_encoder_control *cntl)
238{
239 enum bp_result result = BP_RESULT_FAILURE;
240 DIG_ENCODER_CONTROL_PARAMETERS_V3 params = {0};
241
242 if (LANE_COUNT_FOUR < cntl->lanes_number)
243 params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
244 else
245 params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
246
247 params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
248
249 /* We need to convert from KHz units into 10KHz units */
250 params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
251 params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
252 params.ucEncoderMode =
253 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
254 cntl->signal,
255 cntl->enable_dp_audio);
256 params.ucLaneNum = (uint8_t)(cntl->lanes_number);
257
258 if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
259 result = BP_RESULT_OK;
260
261 return result;
262}
263
264static enum bp_result encoder_control_digx_v4(
265 struct bios_parser *bp,
266 struct bp_encoder_control *cntl)
267{
268 enum bp_result result = BP_RESULT_FAILURE;
269 DIG_ENCODER_CONTROL_PARAMETERS_V4 params = {0};
270
271 if (LANE_COUNT_FOUR < cntl->lanes_number)
272 params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
273 else
274 params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
275
276 params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
277
278 /* We need to convert from KHz units into 10KHz units */
279 params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
280 params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
281 params.ucEncoderMode =
282 (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
283 cntl->signal,
284 cntl->enable_dp_audio));
285 params.ucLaneNum = (uint8_t)(cntl->lanes_number);
286
287 if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
288 result = BP_RESULT_OK;
289
290 return result;
291}
292
293#ifdef LATEST_ATOM_BIOS_SUPPORT
294static enum bp_result encoder_control_digx_v5(
295 struct bios_parser *bp,
296 struct bp_encoder_control *cntl)
297{
298 enum bp_result result = BP_RESULT_FAILURE;
299 ENCODER_STREAM_SETUP_PARAMETERS_V5 params = {0};
300
301 params.ucDigId = (uint8_t)(cntl->engine_id);
302 params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
303
304 params.ulPixelClock = cntl->pixel_clock / 10;
305 params.ucDigMode =
306 (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
307 cntl->signal,
308 cntl->enable_dp_audio));
309 params.ucLaneNum = (uint8_t)(cntl->lanes_number);
310
311 switch (cntl->color_depth) {
312 case COLOR_DEPTH_888:
313 params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
314 break;
315 case COLOR_DEPTH_101010:
316 params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
317 break;
318 case COLOR_DEPTH_121212:
319 params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
320 break;
321 case COLOR_DEPTH_161616:
322 params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
323 break;
324 default:
325 break;
326 }
327
328 if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
329 switch (cntl->color_depth) {
330 case COLOR_DEPTH_101010:
331 params.ulPixelClock =
332 (params.ulPixelClock * 30) / 24;
333 break;
334 case COLOR_DEPTH_121212:
335 params.ulPixelClock =
336 (params.ulPixelClock * 36) / 24;
337 break;
338 case COLOR_DEPTH_161616:
339 params.ulPixelClock =
340 (params.ulPixelClock * 48) / 24;
341 break;
342 default:
343 break;
344 }
345
346 if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
347 result = BP_RESULT_OK;
348
349 return result;
350}
351#endif
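
For HDMI deep color, encoder_control_digx_v5() above scales the pixel clock by bits-per-component over 24 (x30/24, x36/24, x48/24). A standalone sketch of that arithmetic with a worked 1080p example (hypothetical helper name, 10 kHz units as in the table parameters):

#include <stdint.h>
#include <stdio.h>

/* Scale a pixel clock (10 kHz units) by the HDMI deep-color ratio, the
 * same x30/24, x36/24, x48/24 factors used above.
 */
static uint32_t hdmi_deep_color_clock(uint32_t pix_clk_10khz, unsigned int bpc)
{
	switch (bpc) {
	case 10: return pix_clk_10khz * 30 / 24;	/* 1.25x */
	case 12: return pix_clk_10khz * 36 / 24;	/* 1.5x  */
	case 16: return pix_clk_10khz * 48 / 24;	/* 2x    */
	default: return pix_clk_10khz;			/* 8 bpc: unchanged */
	}
}

int main(void)
{
	/* 1080p60 is 148500 kHz -> 14850 in 10 kHz units; at 12 bits per
	 * component the TMDS clock becomes 14850 * 36 / 24 = 22275 (222.75 MHz).
	 */
	printf("%u\n", (unsigned)hdmi_deep_color_clock(14850, 12));
	return 0;
}
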
352
353/*******************************************************************************
354 ********************************************************************************
355 **
356 ** TRANSMITTER CONTROL
357 **
358 ********************************************************************************
359 *******************************************************************************/
360
361static enum bp_result transmitter_control_v2(
362 struct bios_parser *bp,
363 struct bp_transmitter_control *cntl);
364static enum bp_result transmitter_control_v3(
365 struct bios_parser *bp,
366 struct bp_transmitter_control *cntl);
367static enum bp_result transmitter_control_v4(
368 struct bios_parser *bp,
369 struct bp_transmitter_control *cntl);
370static enum bp_result transmitter_control_v1_5(
371 struct bios_parser *bp,
372 struct bp_transmitter_control *cntl);
373static enum bp_result transmitter_control_v1_6(
374 struct bios_parser *bp,
375 struct bp_transmitter_control *cntl);
376
377static void init_transmitter_control(struct bios_parser *bp)
378{
379 uint8_t frev;
380 uint8_t crev;
381
382 if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
383 frev, crev) != 0)
384 BREAK_TO_DEBUGGER();
385 switch (crev) {
386 case 2:
387 bp->cmd_tbl.transmitter_control = transmitter_control_v2;
388 break;
389 case 3:
390 bp->cmd_tbl.transmitter_control = transmitter_control_v3;
391 break;
392 case 4:
393 bp->cmd_tbl.transmitter_control = transmitter_control_v4;
394 break;
395 case 5:
396 bp->cmd_tbl.transmitter_control = transmitter_control_v1_5;
397 break;
398 case 6:
399 bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
400 break;
401 default:
402 bp->cmd_tbl.transmitter_control = NULL;
403 break;
404 }
405}
406
407static enum bp_result transmitter_control_v2(
408 struct bios_parser *bp,
409 struct bp_transmitter_control *cntl)
410{
411 enum bp_result result = BP_RESULT_FAILURE;
412 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 params;
413 enum connector_id connector_id =
414 dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
415
416 memset(&params, 0, sizeof(params));
417
418 switch (cntl->transmitter) {
419 case TRANSMITTER_UNIPHY_A:
420 case TRANSMITTER_UNIPHY_B:
421 case TRANSMITTER_UNIPHY_C:
422 case TRANSMITTER_UNIPHY_D:
423 case TRANSMITTER_UNIPHY_E:
424 case TRANSMITTER_UNIPHY_F:
425 case TRANSMITTER_TRAVIS_LCD:
426 break;
427 default:
428 return BP_RESULT_BADINPUT;
429 }
430
431 switch (cntl->action) {
432 case TRANSMITTER_CONTROL_INIT:
433 if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
434 (CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
435 /* on INIT this bit should be set according to the
436 * physical connector
437 * Bit0: dual link connector flag
438 * =0 connector is single link connector
439 * =1 connector is dual link connector
440 */
441 params.acConfig.fDualLinkConnector = 1;
442
443 /* connector object id */
444 params.usInitInfo =
445 cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
446 break;
447 case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
448 /* voltage swing and pre-emphasis */
449 params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
450 params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
451 break;
452 default:
453 /* if dual-link */
454 if (LANE_COUNT_FOUR < cntl->lanes_number) {
455 /* on ENABLE/DISABLE this bit should be set according to
456 * actual timing (number of lanes)
457 * Bit0: dual link connector flag
458 * =0 connector is single link connector
459 * =1 connector is dual link connector
460 */
461 params.acConfig.fDualLinkConnector = 1;
462
463 /* link rate, half for dual link
464 * We need to convert from KHz units into 20KHz units
465 */
466 params.usPixelClock =
467 cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
468 } else
469 /* link rate, half for dual link
470 * We need to convert from KHz units into 10KHz units
471 */
472 params.usPixelClock =
473 cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
474 break;
475 }
476
477 /* 00 - coherent mode
478 * 01 - incoherent mode
479 */
480
481 params.acConfig.fCoherentMode = cntl->coherent;
482
483 if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
484 || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
485 || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
486 /* Bit2: Transmitter Link selection
487 * =0 when bit0=0, single link A/C/E, when bit0=1,
488 * master link A/C/E
489 * =1 when bit0=0, single link B/D/F, when bit0=1,
490 * master link B/D/F
491 */
492 params.acConfig.ucLinkSel = 1;
493
494 if (ENGINE_ID_DIGB == cntl->engine_id)
495 /* Bit3: Transmitter data source selection
496 * =0 DIGA is data source.
497 * =1 DIGB is data source.
498 * This bit is only useful when ucAction= ATOM_ENABLE
499 */
500 params.acConfig.ucEncoderSel = 1;
501
502 if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
503 /* Bit4: DP connector flag
504 * =0 connector is non-DP connector
505 * =1 connector is DP connector
506 */
507 params.acConfig.fDPConnector = 1;
508
509 /* Bit[7:6]: Transmitter selection
510 * =0 UNIPHY_ENCODER: UNIPHYA/B
511 * =1 UNIPHY1_ENCODER: UNIPHYC/D
512 * =2 UNIPHY2_ENCODER: UNIPHYE/F
513 * =3 reserved
514 */
515 params.acConfig.ucTransmitterSel =
516 (uint8_t)bp->cmd_helper->transmitter_bp_to_atom(
517 cntl->transmitter);
518
519 params.ucAction = (uint8_t)cntl->action;
520
521 if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
522 result = BP_RESULT_OK;
523
524 return result;
525}
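
On the ENABLE/DISABLE path, the transmitter control tables take half the rate for dual link by converting the pixel clock into 20 kHz units instead of 10 kHz units. A standalone sketch of that selection, assuming LANE_COUNT_FOUR equals 4 (hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Assumed here: LANE_COUNT_FOUR == 4, the lane-count threshold used
 * above to detect a dual-link timing.
 */
#define LANE_COUNT_FOUR 4

/* Convert a pixel clock in kHz into the field the command table expects:
 * 10 kHz units for single link, 20 kHz units (i.e. half the rate) for
 * dual link.
 */
static uint16_t link_clock_field(uint32_t pixel_clock_khz, uint32_t lanes)
{
	if (lanes > LANE_COUNT_FOUR)
		return (uint16_t)(pixel_clock_khz / 20);	/* dual link */
	return (uint16_t)(pixel_clock_khz / 10);		/* single link */
}

int main(void)
{
	/* 268500 kHz on 8 lanes (dual link) -> 13425; on 4 lanes -> 26850 */
	printf("%u %u\n", (unsigned)link_clock_field(268500, 8),
	       (unsigned)link_clock_field(268500, 4));
	return 0;
}
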
526
527static enum bp_result transmitter_control_v3(
528 struct bios_parser *bp,
529 struct bp_transmitter_control *cntl)
530{
531 enum bp_result result = BP_RESULT_FAILURE;
532 DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 params;
533 uint32_t pll_id;
534 enum connector_id conn_id =
535 dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
536 const struct command_table_helper *cmd = bp->cmd_helper;
537 bool dual_link_conn = (CONNECTOR_ID_DUAL_LINK_DVII == conn_id)
538 || (CONNECTOR_ID_DUAL_LINK_DVID == conn_id);
539
540 memset(&params, 0, sizeof(params));
541
542 switch (cntl->transmitter) {
543 case TRANSMITTER_UNIPHY_A:
544 case TRANSMITTER_UNIPHY_B:
545 case TRANSMITTER_UNIPHY_C:
546 case TRANSMITTER_UNIPHY_D:
547 case TRANSMITTER_UNIPHY_E:
548 case TRANSMITTER_UNIPHY_F:
549 case TRANSMITTER_TRAVIS_LCD:
550 break;
551 default:
552 return BP_RESULT_BADINPUT;
553 }
554
555 if (!cmd->clock_source_id_to_atom(cntl->pll_id, &pll_id))
556 return BP_RESULT_BADINPUT;
557
558 /* fill information based on the action */
559 switch (cntl->action) {
560 case TRANSMITTER_CONTROL_INIT:
561 if (dual_link_conn) {
562 /* on INIT this bit should be set according to the
563 * physical connector
564 * Bit0: dual link connector flag
565 * =0 connector is single link connector
566 * =1 connector is dual link connector
567 */
568 params.acConfig.fDualLinkConnector = 1;
569 }
570
571 /* connector object id */
572 params.usInitInfo =
573 cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
574 break;
575 case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
576 /* voltage swing and pre-emphasis */
577 params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
578 params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
579 break;
580 default:
581 if (dual_link_conn && cntl->multi_path)
582 /* on ENABLE/DISABLE this bit should be set according to
583 * actual timing (number of lanes)
584 * Bit0: dual link connector flag
585 * =0 connector is single link connector
586 * =1 connector is dual link connector
587 */
588 params.acConfig.fDualLinkConnector = 1;
589
590 /* if dual-link */
591 if (LANE_COUNT_FOUR < cntl->lanes_number) {
592 /* on ENABLE/DISABLE this bit should be set according to
593 * actual timing (number of lanes)
594 * Bit0: dual link connector flag
595 * =0 connector is single link connector
596 * =1 connector is dual link connector
597 */
598 params.acConfig.fDualLinkConnector = 1;
599
600 /* link rate, half for dual link
601 * We need to convert from KHz units into 20KHz units
602 */
603 params.usPixelClock =
604 cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
605 } else {
606 /* link rate, half for dual link
607 * We need to convert from KHz units into 10KHz units
608 */
609 params.usPixelClock =
610 cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
611 }
612 break;
613 }
614
615 /* 00 - coherent mode
616 * 01 - incoherent mode
617 */
618
619 params.acConfig.fCoherentMode = cntl->coherent;
620
621 if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
622 || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
623 || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
624 /* Bit2: Transmitter Link selection
625 * =0 when bit0=0, single link A/C/E, when bit0=1,
626 * master link A/C/E
627 * =1 when bit0=0, single link B/D/F, when bit0=1,
628 * master link B/D/F
629 */
630 params.acConfig.ucLinkSel = 1;
631
632 if (ENGINE_ID_DIGB == cntl->engine_id)
633 /* Bit3: Transmitter data source selection
634 * =0 DIGA is data source.
635 * =1 DIGB is data source.
636 * This bit is only useful when ucAction= ATOM_ENABLE
637 */
638 params.acConfig.ucEncoderSel = 1;
639
640 /* Bit[7:6]: Transmitter selection
641 * =0 UNIPHY_ENCODER: UNIPHYA/B
642 * =1 UNIPHY1_ENCODER: UNIPHYC/D
643 * =2 UNIPHY2_ENCODER: UNIPHYE/F
644 * =3 reserved
645 */
646 params.acConfig.ucTransmitterSel =
647 (uint8_t)cmd->transmitter_bp_to_atom(cntl->transmitter);
648
649 params.ucLaneNum = (uint8_t)cntl->lanes_number;
650
651 params.acConfig.ucRefClkSource = (uint8_t)pll_id;
652
653 params.ucAction = (uint8_t)cntl->action;
654
655 if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
656 result = BP_RESULT_OK;
657
658 return result;
659}
660
661static enum bp_result transmitter_control_v4(
662 struct bios_parser *bp,
663 struct bp_transmitter_control *cntl)
664{
665 enum bp_result result = BP_RESULT_FAILURE;
666 DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 params;
667 uint32_t ref_clk_src_id;
668 enum connector_id conn_id =
669 dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
670 const struct command_table_helper *cmd = bp->cmd_helper;
671
672 memset(&params, 0, sizeof(params));
673
674 switch (cntl->transmitter) {
675 case TRANSMITTER_UNIPHY_A:
676 case TRANSMITTER_UNIPHY_B:
677 case TRANSMITTER_UNIPHY_C:
678 case TRANSMITTER_UNIPHY_D:
679 case TRANSMITTER_UNIPHY_E:
680 case TRANSMITTER_UNIPHY_F:
681 case TRANSMITTER_TRAVIS_LCD:
682 break;
683 default:
684 return BP_RESULT_BADINPUT;
685 }
686
687 if (!cmd->clock_source_id_to_ref_clk_src(cntl->pll_id, &ref_clk_src_id))
688 return BP_RESULT_BADINPUT;
689
690 switch (cntl->action) {
691 case TRANSMITTER_CONTROL_INIT:
692 {
693 if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
694 (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
695 /* on INIT this bit should be set according to the
696 * physical connector
697 * Bit0: dual link connector flag
698 * =0 connector is single link connector
699 * =1 connector is dual link connector
700 */
701 params.acConfig.fDualLinkConnector = 1;
702
703 /* connector object id */
704 params.usInitInfo =
705 cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
706 }
707 break;
708 case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
709 /* voltage swing and pre-emphasis */
710 params.asMode.ucLaneSel = (uint8_t)(cntl->lane_select);
711 params.asMode.ucLaneSet = (uint8_t)(cntl->lane_settings);
712 break;
713 default:
714 if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
715 (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
716 /* on ENABLE/DISABLE this bit should be set according to
717 * actual timing (number of lanes)
718 * Bit0: dual link connector flag
719 * =0 connector is single link connector
720 * =1 connector is dual link connector
721 */
722 params.acConfig.fDualLinkConnector = 1;
723
724 /* if dual-link */
725 if (LANE_COUNT_FOUR < cntl->lanes_number)
726 /* link rate, half for dual link
727 * We need to convert from KHz units into 20KHz units
728 */
729 params.usPixelClock =
730 cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
731 else {
732 /* link rate, half for dual link
733 * We need to convert from KHz units into 10KHz units
734 */
735 params.usPixelClock =
736 cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
737 }
738 break;
739 }
740
741 /* 00 - coherent mode
742 * 01 - incoherent mode
743 */
744
745 params.acConfig.fCoherentMode = cntl->coherent;
746
747 if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
748 || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
749 || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
750 /* Bit2: Transmitter Link selection
751 * =0 when bit0=0, single link A/C/E, when bit0=1,
752 * master link A/C/E
753 * =1 when bit0=0, single link B/D/F, when bit0=1,
754 * master link B/D/F
755 */
756 params.acConfig.ucLinkSel = 1;
757
758 if (ENGINE_ID_DIGB == cntl->engine_id)
759 /* Bit3: Transmitter data source selection
760 * =0 DIGA is data source.
761 * =1 DIGB is data source.
762 * This bit is only useful when ucAction= ATOM_ENABLE
763 */
764 params.acConfig.ucEncoderSel = 1;
765
766 /* Bit[7:6]: Transmitter selection
767 * =0 UNIPHY_ENCODER: UNIPHYA/B
768 * =1 UNIPHY1_ENCODER: UNIPHYC/D
769 * =2 UNIPHY2_ENCODER: UNIPHYE/F
770 * =3 reserved
771 */
772 params.acConfig.ucTransmitterSel =
773 (uint8_t)(cmd->transmitter_bp_to_atom(cntl->transmitter));
774 params.ucLaneNum = (uint8_t)(cntl->lanes_number);
775 params.acConfig.ucRefClkSource = (uint8_t)(ref_clk_src_id);
776 params.ucAction = (uint8_t)(cntl->action);
777
778 if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
779 result = BP_RESULT_OK;
780
781 return result;
782}
783
784static enum bp_result transmitter_control_v1_5(
785 struct bios_parser *bp,
786 struct bp_transmitter_control *cntl)
787{
788 enum bp_result result = BP_RESULT_FAILURE;
789 const struct command_table_helper *cmd = bp->cmd_helper;
790 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 params;
791
792 memset(&params, 0, sizeof(params));
793 params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
794 params.ucAction = (uint8_t)cntl->action;
795 params.ucLaneNum = (uint8_t)cntl->lanes_number;
796 params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
797
798 params.ucDigMode =
799 cmd->signal_type_to_atom_dig_mode(cntl->signal);
800 params.asConfig.ucPhyClkSrcId =
801 cmd->clock_source_id_to_atom_phy_clk_src_id(cntl->pll_id);
802 /* 00 - coherent mode */
803 params.asConfig.ucCoherentMode = cntl->coherent;
804 params.asConfig.ucHPDSel =
805 cmd->hpd_sel_to_atom(cntl->hpd_sel);
806 params.ucDigEncoderSel =
807 cmd->dig_encoder_sel_to_atom(cntl->engine_id);
808 params.ucDPLaneSet = (uint8_t) cntl->lane_settings;
809 params.usSymClock = cpu_to_le16((uint16_t) (cntl->pixel_clock / 10));
810 /*
811 * In the SI/TN case, the caller has to set usPixelClock as follows:
812 * DP mode: usPixelClock = DP_LINK_CLOCK/10
813 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
814 * DVI single link mode: usPixelClock = pixel clock
815 * DVI dual link mode: usPixelClock = pixel clock
816 * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
817 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
818 * LVDS mode: usPixelClock = pixel clock
819 */
820
821 if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
822 result = BP_RESULT_OK;
823
824 return result;
825}
826
827static enum bp_result transmitter_control_v1_6(
828 struct bios_parser *bp,
829 struct bp_transmitter_control *cntl)
830{
831 enum bp_result result = BP_RESULT_FAILURE;
832#ifdef LATEST_ATOM_BIOS_SUPPORT
833 const struct command_table_helper *cmd = bp->cmd_helper;
834 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 params;
835
836 memset(&params, 0, sizeof(params));
837 params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
838 params.ucAction = (uint8_t)cntl->action;
839
840 if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
841 params.ucDPLaneSet = (uint8_t)cntl->lane_settings;
842 else
843 params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
844
845 params.ucLaneNum = (uint8_t)cntl->lanes_number;
846 params.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
847 params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
848 params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
849 params.ulSymClock = cntl->pixel_clock/10;
850
851 /*
852 * In the SI/TN case, the caller has to set usPixelClock as follows:
853 * DP mode: usPixelClock = DP_LINK_CLOCK/10
854 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
855 * DVI single link mode: usPixelClock = pixel clock
856 * DVI dual link mode: usPixelClock = pixel clock
857 * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
858 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
859 * LVDS mode: usPixelClock = pixel clock
860 */
861 switch (cntl->signal) {
862 case SIGNAL_TYPE_HDMI_TYPE_A:
863 switch (cntl->color_depth) {
864 case COLOR_DEPTH_101010:
865 params.ulSymClock =
866 cpu_to_le16((le16_to_cpu(params.ulSymClock) * 30) / 24);
867 break;
868 case COLOR_DEPTH_121212:
869 params.ulSymClock =
870 cpu_to_le16((le16_to_cpu(params.ulSymClock) * 36) / 24);
871 break;
872 case COLOR_DEPTH_161616:
873 params.ulSymClock =
874 cpu_to_le16((le16_to_cpu(params.ulSymClock) * 48) / 24);
875 break;
876 default:
877 break;
878 }
879 break;
880 default:
881 break;
882 }
883
884 if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
885 result = BP_RESULT_OK;
886#endif
887 return result;
888}
889
890/*******************************************************************************
891 ********************************************************************************
892 **
893 ** SET PIXEL CLOCK
894 **
895 ********************************************************************************
896 *******************************************************************************/
897
898static enum bp_result set_pixel_clock_v3(
899 struct bios_parser *bp,
900 struct bp_pixel_clock_parameters *bp_params);
901static enum bp_result set_pixel_clock_v5(
902 struct bios_parser *bp,
903 struct bp_pixel_clock_parameters *bp_params);
904static enum bp_result set_pixel_clock_v6(
905 struct bios_parser *bp,
906 struct bp_pixel_clock_parameters *bp_params);
907static enum bp_result set_pixel_clock_v7(
908 struct bios_parser *bp,
909 struct bp_pixel_clock_parameters *bp_params);
910
911static void init_set_pixel_clock(struct bios_parser *bp)
912{
913 switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
914 case 3:
915 bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v3;
916 break;
917 case 5:
918 bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v5;
919 break;
920 case 6:
921 bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v6;
922 break;
923 case 7:
924 bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
925 break;
926 default:
927 bp->cmd_tbl.set_pixel_clock = NULL;
928 break;
929 }
930}
931
932static enum bp_result set_pixel_clock_v3(
933 struct bios_parser *bp,
934 struct bp_pixel_clock_parameters *bp_params)
935{
936 enum bp_result result = BP_RESULT_FAILURE;
937 PIXEL_CLOCK_PARAMETERS_V3 *params;
938 SET_PIXEL_CLOCK_PS_ALLOCATION allocation;
939
940 memset(&allocation, 0, sizeof(allocation));
941
942 if (CLOCK_SOURCE_ID_PLL1 == bp_params->pll_id)
943 allocation.sPCLKInput.ucPpll = ATOM_PPLL1;
944 else if (CLOCK_SOURCE_ID_PLL2 == bp_params->pll_id)
945 allocation.sPCLKInput.ucPpll = ATOM_PPLL2;
946 else
947 return BP_RESULT_BADINPUT;
948
949 allocation.sPCLKInput.usRefDiv =
950 cpu_to_le16((uint16_t)bp_params->reference_divider);
951 allocation.sPCLKInput.usFbDiv =
952 cpu_to_le16((uint16_t)bp_params->feedback_divider);
953 allocation.sPCLKInput.ucFracFbDiv =
954 (uint8_t)bp_params->fractional_feedback_divider;
955 allocation.sPCLKInput.ucPostDiv =
956 (uint8_t)bp_params->pixel_clock_post_divider;
957
958 /* We need to convert from KHz units into 10KHz units */
959 allocation.sPCLKInput.usPixelClock =
960 cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
961
962 params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
963 params->ucTransmitterId =
964 bp->cmd_helper->encoder_id_to_atom(
965 dal_graphics_object_id_get_encoder_id(
966 bp_params->encoder_object_id));
967 params->ucEncoderMode =
968 (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
969 bp_params->signal_type, false));
970
971 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
972 params->ucMiscInfo |= PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
973
974 if (bp_params->flags.USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK)
975 params->ucMiscInfo |= PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK;
976
977 if (CONTROLLER_ID_D1 != bp_params->controller_id)
978 params->ucMiscInfo |= PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
979
980 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, allocation))
981 result = BP_RESULT_OK;
982
983 return result;
984}
985
986#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V5
987/* video bios did not define this: */
988typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V5 {
989 PIXEL_CLOCK_PARAMETERS_V5 sPCLKInput;
990 /* Caller doesn't need to init this portion */
991 ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
992} SET_PIXEL_CLOCK_PS_ALLOCATION_V5;
993#endif
994
995#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V6
996/* video bios did not define this: */
997typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V6 {
998 PIXEL_CLOCK_PARAMETERS_V6 sPCLKInput;
999 /* Caller doesn't need to init this portion */
1000 ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
1001} SET_PIXEL_CLOCK_PS_ALLOCATION_V6;
1002#endif
1003
1004static enum bp_result set_pixel_clock_v5(
1005 struct bios_parser *bp,
1006 struct bp_pixel_clock_parameters *bp_params)
1007{
1008 enum bp_result result = BP_RESULT_FAILURE;
1009 SET_PIXEL_CLOCK_PS_ALLOCATION_V5 clk;
1010 uint8_t controller_id;
1011 uint32_t pll_id;
1012
1013 memset(&clk, 0, sizeof(clk));
1014
1015 if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
1016 && bp->cmd_helper->controller_id_to_atom(
1017 bp_params->controller_id, &controller_id)) {
1018 clk.sPCLKInput.ucCRTC = controller_id;
1019 clk.sPCLKInput.ucPpll = (uint8_t)pll_id;
1020 clk.sPCLKInput.ucRefDiv =
1021 (uint8_t)(bp_params->reference_divider);
1022 clk.sPCLKInput.usFbDiv =
1023 cpu_to_le16((uint16_t)(bp_params->feedback_divider));
1024 clk.sPCLKInput.ulFbDivDecFrac =
1025 cpu_to_le32(bp_params->fractional_feedback_divider);
1026 clk.sPCLKInput.ucPostDiv =
1027 (uint8_t)(bp_params->pixel_clock_post_divider);
1028 clk.sPCLKInput.ucTransmitterID =
1029 bp->cmd_helper->encoder_id_to_atom(
1030 dal_graphics_object_id_get_encoder_id(
1031 bp_params->encoder_object_id));
1032 clk.sPCLKInput.ucEncoderMode =
1033 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
1034 bp_params->signal_type, false);
1035
1036 /* We need to convert from KHz units into 10KHz units */
1037 clk.sPCLKInput.usPixelClock =
1038 cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
1039
1040 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
1041 clk.sPCLKInput.ucMiscInfo |=
1042 PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
1043
1044 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
1045 clk.sPCLKInput.ucMiscInfo |=
1046 PIXEL_CLOCK_MISC_REF_DIV_SRC;
1047
1048 /* clkV5.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0: 24bpp
1049 * =1:30bpp, =2:32bpp
1050 * the driver chooses to program it itself, i.e. here we program it
1051 * to 888 by default.
1052 */
1053
1054 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
1055 result = BP_RESULT_OK;
1056 }
1057
1058 return result;
1059}
1060
1061static enum bp_result set_pixel_clock_v6(
1062 struct bios_parser *bp,
1063 struct bp_pixel_clock_parameters *bp_params)
1064{
1065 enum bp_result result = BP_RESULT_FAILURE;
1066 SET_PIXEL_CLOCK_PS_ALLOCATION_V6 clk;
1067 uint8_t controller_id;
1068 uint32_t pll_id;
1069
1070 memset(&clk, 0, sizeof(clk));
1071
1072 if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
1073 && bp->cmd_helper->controller_id_to_atom(
1074 bp_params->controller_id, &controller_id)) {
1075 /* Note: VBIOS still wants to use the ucCRTC name, which is now
1076 * one byte inside a ULONG:
1077 *typedef struct _CRTC_PIXEL_CLOCK_FREQ
1078 *{
1079 * ULONG ulPixelClock:24;
1080 *  target pixel clock to drive the CRTC timing;
1081 *  0 means disable PPLL/DCPLL, expanded to 24 bits compared to the
1082 *  previous version.
1083 * ULONG ucCRTC:8;
1084 *  ATOM_CRTC1~6, indicates the CRTC controller to drive the
1085 *  pixel clock; not used for the DCPLL case.
1086 *}CRTC_PIXEL_CLOCK_FREQ;
1087 *union
1088 *{
1089 * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
1090 *  pixel clock and CRTC id frequency
1091 * ULONG ulDispEngClkFreq;  dispclk frequency
1092 *};
1093 */
1094 clk.sPCLKInput.ulCrtcPclkFreq.ucCRTC = controller_id;
1095 clk.sPCLKInput.ucPpll = (uint8_t) pll_id;
1096 clk.sPCLKInput.ucRefDiv =
1097 (uint8_t) bp_params->reference_divider;
1098 clk.sPCLKInput.usFbDiv =
1099 cpu_to_le16((uint16_t) bp_params->feedback_divider);
1100 clk.sPCLKInput.ulFbDivDecFrac =
1101 cpu_to_le32(bp_params->fractional_feedback_divider);
1102 clk.sPCLKInput.ucPostDiv =
1103 (uint8_t) bp_params->pixel_clock_post_divider;
1104 clk.sPCLKInput.ucTransmitterID =
1105 bp->cmd_helper->encoder_id_to_atom(
1106 dal_graphics_object_id_get_encoder_id(
1107 bp_params->encoder_object_id));
1108 clk.sPCLKInput.ucEncoderMode =
1109 (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
1110 bp_params->signal_type, false);
1111
1112 /* We need to convert from KHz units into 10KHz units */
1113 clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
1114 cpu_to_le32(bp_params->target_pixel_clock / 10);
1115
1116 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
1117 clk.sPCLKInput.ucMiscInfo |=
1118 PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
1119 }
1120
1121 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) {
1122 clk.sPCLKInput.ucMiscInfo |=
1123 PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
1124 }
1125
1126 /* clkV6.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0:
1127 * 24bpp =1:30bpp, =2:32bpp
1128 * the driver chooses to program it itself, i.e. here we pass the required
1129 * target rate that includes deep color.
1130 */
1131
1132 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
1133 result = BP_RESULT_OK;
1134 }
1135
1136 return result;
1137}
1138
1139static enum bp_result set_pixel_clock_v7(
1140 struct bios_parser *bp,
1141 struct bp_pixel_clock_parameters *bp_params)
1142{
1143 enum bp_result result = BP_RESULT_FAILURE;
1144#ifdef LATEST_ATOM_BIOS_SUPPORT
1145 PIXEL_CLOCK_PARAMETERS_V7 clk;
1146 uint8_t controller_id;
1147 uint32_t pll_id;
1148
1149 memset(&clk, 0, sizeof(clk));
1150
1151 if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
1152 && bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
1153 /* Note: VBIOS still wants to use the ucCRTC name, which is now
1154 * one byte inside a ULONG:
1155 *typedef struct _CRTC_PIXEL_CLOCK_FREQ
1156 *{
1157 * ULONG ulPixelClock:24;
1158 *  target pixel clock to drive the CRTC timing;
1159 *  0 means disable PPLL/DCPLL, expanded to 24 bits compared to the
1160 *  previous version.
1161 * ULONG ucCRTC:8;
1162 *  ATOM_CRTC1~6, indicates the CRTC controller to drive the
1163 *  pixel clock; not used for the DCPLL case.
1164 *}CRTC_PIXEL_CLOCK_FREQ;
1165 *union
1166 *{
1167 * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
1168 *  pixel clock and CRTC id frequency
1169 * ULONG ulDispEngClkFreq;  dispclk frequency
1170 *};
1171 */
1172 clk.ucCRTC = controller_id;
1173 clk.ucPpll = (uint8_t) pll_id;
1174 clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
1175 clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
1176
1177 /* We need to convert from KHz units into 100Hz units */
1178 clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10);
1179
1180 clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
1181
1182 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
1183 clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
1184
1185 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
1186 clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC;
1187
1188 if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
1189 clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
1190
1191 if (bp_params->flags.SUPPORT_YUV_420)
1192 clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
1193
1194 if (bp_params->flags.SET_XTALIN_REF_SRC)
1195 clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
1196
1197 if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
1198 clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
1199
1200 if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
1201 clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
1202
1203 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
1204 result = BP_RESULT_OK;
1205 }
1206#endif
1207 return result;
1208}
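
Unlike the earlier revisions, which pass usPixelClock in 10 kHz units, the v7 layout appears to take ulPixelClock in 100 Hz units, which is why the code above multiplies the kHz value by 10 instead of dividing it. A standalone sketch of the two conversions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t target_khz = 148500;	/* 148.5 MHz, 1080p60 */

	/* v3..v6 tables: usPixelClock in 10 kHz units */
	uint16_t pclk_10khz = (uint16_t)(target_khz / 10);	/* 14850 */
	/* v7 table: ulPixelClock in 100 Hz units */
	uint32_t pclk_100hz = target_khz * 10;			/* 1485000 */

	printf("%u %u\n", (unsigned)pclk_10khz, (unsigned)pclk_100hz);
	return 0;
}
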
1209
1210/*******************************************************************************
1211 ********************************************************************************
1212 **
1213 ** ENABLE PIXEL CLOCK SS
1214 **
1215 ********************************************************************************
1216 *******************************************************************************/
1217static enum bp_result enable_spread_spectrum_on_ppll_v1(
1218 struct bios_parser *bp,
1219 struct bp_spread_spectrum_parameters *bp_params,
1220 bool enable);
1221static enum bp_result enable_spread_spectrum_on_ppll_v2(
1222 struct bios_parser *bp,
1223 struct bp_spread_spectrum_parameters *bp_params,
1224 bool enable);
1225static enum bp_result enable_spread_spectrum_on_ppll_v3(
1226 struct bios_parser *bp,
1227 struct bp_spread_spectrum_parameters *bp_params,
1228 bool enable);
1229
1230static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
1231{
1232 switch (BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL)) {
1233 case 1:
1234 bp->cmd_tbl.enable_spread_spectrum_on_ppll =
1235 enable_spread_spectrum_on_ppll_v1;
1236 break;
1237 case 2:
1238 bp->cmd_tbl.enable_spread_spectrum_on_ppll =
1239 enable_spread_spectrum_on_ppll_v2;
1240 break;
1241 case 3:
1242 bp->cmd_tbl.enable_spread_spectrum_on_ppll =
1243 enable_spread_spectrum_on_ppll_v3;
1244 break;
1245 default:
1246 bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
1247 break;
1248 }
1249}
1250
1251static enum bp_result enable_spread_spectrum_on_ppll_v1(
1252 struct bios_parser *bp,
1253 struct bp_spread_spectrum_parameters *bp_params,
1254 bool enable)
1255{
1256 enum bp_result result = BP_RESULT_FAILURE;
1257 ENABLE_SPREAD_SPECTRUM_ON_PPLL params;
1258
1259 memset(&params, 0, sizeof(params));
1260
1261 if ((enable == true) && (bp_params->percentage > 0))
1262 params.ucEnable = ATOM_ENABLE;
1263 else
1264 params.ucEnable = ATOM_DISABLE;
1265
1266 params.usSpreadSpectrumPercentage =
1267 cpu_to_le16((uint16_t)bp_params->percentage);
1268 params.ucSpreadSpectrumStep =
1269 (uint8_t)bp_params->ver1.step;
1270 params.ucSpreadSpectrumDelay =
1271 (uint8_t)bp_params->ver1.delay;
1272 /* convert back to unit of 10KHz */
1273 params.ucSpreadSpectrumRange =
1274 (uint8_t)(bp_params->ver1.range / 10000);
1275
1276 if (bp_params->flags.EXTERNAL_SS)
1277 params.ucSpreadSpectrumType |= ATOM_EXTERNAL_SS_MASK;
1278
1279 if (bp_params->flags.CENTER_SPREAD)
1280 params.ucSpreadSpectrumType |= ATOM_SS_CENTRE_SPREAD_MODE;
1281
1282 if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
1283 params.ucPpll = ATOM_PPLL1;
1284 else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
1285 params.ucPpll = ATOM_PPLL2;
1286 else
1287 BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
1288
1289 if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
1290 result = BP_RESULT_OK;
1291
1292 return result;
1293}
1294
1295static enum bp_result enable_spread_spectrum_on_ppll_v2(
1296 struct bios_parser *bp,
1297 struct bp_spread_spectrum_parameters *bp_params,
1298 bool enable)
1299{
1300 enum bp_result result = BP_RESULT_FAILURE;
1301 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 params;
1302
1303 memset(&params, 0, sizeof(params));
1304
1305 if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
1306 params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P1PLL;
1307 else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
1308 params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P2PLL;
1309 else
1310 BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
1311
1312 if ((enable == true) && (bp_params->percentage > 0)) {
1313 params.ucEnable = ATOM_ENABLE;
1314
1315 params.usSpreadSpectrumPercentage =
1316 cpu_to_le16((uint16_t)(bp_params->percentage));
1317 params.usSpreadSpectrumStep =
1318 cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
1319
1320 if (bp_params->flags.EXTERNAL_SS)
1321 params.ucSpreadSpectrumType |=
1322 ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD;
1323
1324 if (bp_params->flags.CENTER_SPREAD)
1325 params.ucSpreadSpectrumType |=
1326 ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD;
1327
1328 /* Both amounts need to be left shifted first before bit
1329 * comparison. Otherwise, the result will always be zero here
1330 */
1331 params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
1332 ((bp_params->ds.feedback_amount <<
1333 ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
1334 ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
1335 ((bp_params->ds.nfrac_amount <<
1336 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
1337 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK)));
1338 } else
1339 params.ucEnable = ATOM_DISABLE;
1340
1341 if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
1342 result = BP_RESULT_OK;
1343
1344 return result;
1345}
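
usSpreadSpectrumAmount packs two values into one 16-bit field: the feedback-divider amount and the NFRAC amount, each shifted into position and masked before being OR'd together. A generic sketch of that packing, with made-up shift/mask values standing in for the ATOM_PPLL_SS_AMOUNT_* constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout, stand-ins for the real ATOM constants:
 * low byte = feedback-divider amount, bits 11:8 = NFRAC amount.
 */
#define SS_FBDIV_SHIFT	0
#define SS_FBDIV_MASK	0x00FF
#define SS_NFRAC_SHIFT	8
#define SS_NFRAC_MASK	0x0F00

static uint16_t pack_ss_amount(uint32_t fbdiv_amount, uint32_t nfrac_amount)
{
	/* Shift each amount into place first, then mask, then OR; masking
	 * before shifting would zero the NFRAC contribution.
	 */
	return (uint16_t)(((fbdiv_amount << SS_FBDIV_SHIFT) & SS_FBDIV_MASK) |
			  ((nfrac_amount << SS_NFRAC_SHIFT) & SS_NFRAC_MASK));
}

int main(void)
{
	/* feedback amount 0x2D, nfrac 0x3 -> 0x032D under this layout */
	printf("0x%04X\n", (unsigned)pack_ss_amount(0x2D, 0x3));
	return 0;
}
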
1346
1347static enum bp_result enable_spread_spectrum_on_ppll_v3(
1348 struct bios_parser *bp,
1349 struct bp_spread_spectrum_parameters *bp_params,
1350 bool enable)
1351{
1352 enum bp_result result = BP_RESULT_FAILURE;
1353 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 params;
1354
1355 memset(&params, 0, sizeof(params));
1356
1357 switch (bp_params->pll_id) {
1358 case CLOCK_SOURCE_ID_PLL0:
1359 /* ATOM_PPLL_SS_TYPE_V3_P0PLL; this is pixel clock only,
1360 * not for SI display clock.
1361 */
1362 params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
1363 break;
1364 case CLOCK_SOURCE_ID_PLL1:
1365 params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P1PLL;
1366 break;
1367
1368 case CLOCK_SOURCE_ID_PLL2:
1369 params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P2PLL;
1370 break;
1371
1372 case CLOCK_SOURCE_ID_DCPLL:
1373 params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
1374 break;
1375
1376 default:
1377 BREAK_TO_DEBUGGER();
1378 /* Unexpected PLL value!! */
1379 return result;
1380 }
1381
1382 if (enable == true) {
1383 params.ucEnable = ATOM_ENABLE;
1384
1385 params.usSpreadSpectrumAmountFrac =
1386 cpu_to_le16((uint16_t)(bp_params->ds_frac_amount));
1387 params.usSpreadSpectrumStep =
1388 cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
1389
1390 if (bp_params->flags.EXTERNAL_SS)
1391 params.ucSpreadSpectrumType |=
1392 ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD;
1393 if (bp_params->flags.CENTER_SPREAD)
1394 params.ucSpreadSpectrumType |=
1395 ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD;
1396
1397 /* Both amounts need to be left shifted first before bit
1398 * comparison. Otherwise, the result will always be zero here
1399 */
1400 params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
1401 ((bp_params->ds.feedback_amount <<
1402 ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT) &
1403 ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) |
1404 ((bp_params->ds.nfrac_amount <<
1405 ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT) &
1406 ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK)));
1407 } else
1408 params.ucEnable = ATOM_DISABLE;
1409
1410 if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
1411 result = BP_RESULT_OK;
1412
1413 return result;
1414}
1415
1416/*******************************************************************************
1417 ********************************************************************************
1418 **
1419 ** ADJUST DISPLAY PLL
1420 **
1421 ********************************************************************************
1422 *******************************************************************************/
1423
1424static enum bp_result adjust_display_pll_v2(
1425 struct bios_parser *bp,
1426 struct bp_adjust_pixel_clock_parameters *bp_params);
1427static enum bp_result adjust_display_pll_v3(
1428 struct bios_parser *bp,
1429 struct bp_adjust_pixel_clock_parameters *bp_params);
1430
1431static void init_adjust_display_pll(struct bios_parser *bp)
1432{
1433 switch (BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll)) {
1434 case 2:
1435 bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v2;
1436 break;
1437 case 3:
1438 bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
1439 break;
1440 default:
1441 bp->cmd_tbl.adjust_display_pll = NULL;
1442 break;
1443 }
1444}
1445
1446static enum bp_result adjust_display_pll_v2(
1447 struct bios_parser *bp,
1448 struct bp_adjust_pixel_clock_parameters *bp_params)
1449{
1450 enum bp_result result = BP_RESULT_FAILURE;
1451 ADJUST_DISPLAY_PLL_PS_ALLOCATION params = { 0 };
1452
1453 /* We need to convert from KHz units into 10KHz units and then convert
1454 * output pixel clock back 10KHz-->KHz */
1455 uint32_t pixel_clock_10KHz_in = bp_params->pixel_clock / 10;
1456
1457 params.usPixelClock = cpu_to_le16((uint16_t)(pixel_clock_10KHz_in));
1458 params.ucTransmitterID =
1459 bp->cmd_helper->encoder_id_to_atom(
1460 dal_graphics_object_id_get_encoder_id(
1461 bp_params->encoder_object_id));
1462 params.ucEncodeMode =
1463 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
1464 bp_params->signal_type, false);
1465 return result;
1466}
1467
1468static enum bp_result adjust_display_pll_v3(
1469 struct bios_parser *bp,
1470 struct bp_adjust_pixel_clock_parameters *bp_params)
1471{
1472 enum bp_result result = BP_RESULT_FAILURE;
1473 ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 params;
1474 uint32_t pixel_clk_10_kHz_in = bp_params->pixel_clock / 10;
1475
1476 memset(&params, 0, sizeof(params));
1477
1478 /* We need to convert from KHz units into 10KHz units and then convert
1479 * output pixel clock back 10KHz-->KHz */
1480 params.sInput.usPixelClock = cpu_to_le16((uint16_t)pixel_clk_10_kHz_in);
1481 params.sInput.ucTransmitterID =
1482 bp->cmd_helper->encoder_id_to_atom(
1483 dal_graphics_object_id_get_encoder_id(
1484 bp_params->encoder_object_id));
1485 params.sInput.ucEncodeMode =
1486 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
1487 bp_params->signal_type, false);
1488
1489 if (bp_params->ss_enable == true)
1490 params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE;
1491
1492 if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
1493 params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK;
1494
1495 if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
1496 /* Convert output pixel clock back 10KHz-->KHz: multiply
1497 * original pixel clock in KHz by ratio
1498 * [output pxlClk/input pxlClk] */
1499 uint64_t pixel_clk_10_khz_out =
1500 (uint64_t)le32_to_cpu(params.sOutput.ulDispPllFreq);
1501 uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
1502
1503 if (pixel_clk_10_kHz_in != 0) {
1504 bp_params->adjusted_pixel_clock =
1505 div_u64(pixel_clk * pixel_clk_10_khz_out,
1506 pixel_clk_10_kHz_in);
1507 } else {
1508 bp_params->adjusted_pixel_clock = 0;
1509 BREAK_TO_DEBUGGER();
1510 }
1511
1512 bp_params->reference_divider = params.sOutput.ucRefDiv;
1513 bp_params->pixel_clock_post_divider = params.sOutput.ucPostDiv;
1514
1515 result = BP_RESULT_OK;
1516 }
1517
1518 return result;
1519}
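
adjust_display_pll_v3() rescales the caller's pixel clock by the ratio of the VBIOS output frequency to the input frequency (both in 10 kHz units), so the adjusted value stays in kHz. A standalone sketch of that ratio with a worked example (plain 64-bit division standing in for div_u64):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* adjusted_khz = pixel_clock_khz * (out_10khz / in_10khz), computed in
 * 64 bits so the multiply cannot overflow before the divide (the driver
 * uses div_u64 for the same reason).
 */
static uint64_t adjust_pixel_clock(uint32_t pixel_clock_khz,
				   uint32_t in_10khz, uint32_t out_10khz)
{
	if (in_10khz == 0)
		return 0;
	return (uint64_t)pixel_clock_khz * out_10khz / in_10khz;
}

int main(void)
{
	/* 148500 kHz requested; if the VBIOS returns 14860 (10 kHz units)
	 * for a 14850 input, the adjusted clock is 148600 kHz.
	 */
	printf("%" PRIu64 "\n", adjust_pixel_clock(148500, 14850, 14860));
	return 0;
}
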
1520
1521/*******************************************************************************
1522 ********************************************************************************
1523 **
1524 ** DAC ENCODER CONTROL
1525 **
1526 ********************************************************************************
1527 *******************************************************************************/
1528
1529static enum bp_result dac1_encoder_control_v1(
1530 struct bios_parser *bp,
1531 bool enable,
1532 uint32_t pixel_clock,
1533 uint8_t dac_standard);
1534static enum bp_result dac2_encoder_control_v1(
1535 struct bios_parser *bp,
1536 bool enable,
1537 uint32_t pixel_clock,
1538 uint8_t dac_standard);
1539
1540static void init_dac_encoder_control(struct bios_parser *bp)
1541{
1542 switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1EncoderControl)) {
1543 case 1:
1544 bp->cmd_tbl.dac1_encoder_control = dac1_encoder_control_v1;
1545 break;
1546 default:
1547 bp->cmd_tbl.dac1_encoder_control = NULL;
1548 break;
1549 }
1550 switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2EncoderControl)) {
1551 case 1:
1552 bp->cmd_tbl.dac2_encoder_control = dac2_encoder_control_v1;
1553 break;
1554 default:
1555 bp->cmd_tbl.dac2_encoder_control = NULL;
1556 break;
1557 }
1558}
1559
1560static void dac_encoder_control_prepare_params(
1561 DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
1562 bool enable,
1563 uint32_t pixel_clock,
1564 uint8_t dac_standard)
1565{
1566 params->ucDacStandard = dac_standard;
1567 if (enable)
1568 params->ucAction = ATOM_ENABLE;
1569 else
1570 params->ucAction = ATOM_DISABLE;
1571
1572 /* We need to convert from KHz units into 10KHz units
1573 * it looks as if TvControl does not care about the pixel clock
1574 */
1575 params->usPixelClock = cpu_to_le16((uint16_t)(pixel_clock / 10));
1576}
1577
1578static enum bp_result dac1_encoder_control_v1(
1579 struct bios_parser *bp,
1580 bool enable,
1581 uint32_t pixel_clock,
1582 uint8_t dac_standard)
1583{
1584 enum bp_result result = BP_RESULT_FAILURE;
1585 DAC_ENCODER_CONTROL_PS_ALLOCATION params;
1586
1587 dac_encoder_control_prepare_params(
1588 &params,
1589 enable,
1590 pixel_clock,
1591 dac_standard);
1592
1593 if (EXEC_BIOS_CMD_TABLE(DAC1EncoderControl, params))
1594 result = BP_RESULT_OK;
1595
1596 return result;
1597}
1598
1599static enum bp_result dac2_encoder_control_v1(
1600 struct bios_parser *bp,
1601 bool enable,
1602 uint32_t pixel_clock,
1603 uint8_t dac_standard)
1604{
1605 enum bp_result result = BP_RESULT_FAILURE;
1606 DAC_ENCODER_CONTROL_PS_ALLOCATION params;
1607
1608 dac_encoder_control_prepare_params(
1609 &params,
1610 enable,
1611 pixel_clock,
1612 dac_standard);
1613
1614 if (EXEC_BIOS_CMD_TABLE(DAC2EncoderControl, params))
1615 result = BP_RESULT_OK;
1616
1617 return result;
1618}
1619
1620/*******************************************************************************
1621 ********************************************************************************
1622 **
1623 ** DAC OUTPUT CONTROL
1624 **
1625 ********************************************************************************
1626 *******************************************************************************/
1627static enum bp_result dac1_output_control_v1(
1628 struct bios_parser *bp,
1629 bool enable);
1630static enum bp_result dac2_output_control_v1(
1631 struct bios_parser *bp,
1632 bool enable);
1633
1634static void init_dac_output_control(struct bios_parser *bp)
1635{
1636 switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1OutputControl)) {
1637 case 1:
1638 bp->cmd_tbl.dac1_output_control = dac1_output_control_v1;
1639 break;
1640 default:
1641 bp->cmd_tbl.dac1_output_control = NULL;
1642 break;
1643 }
1644 switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2OutputControl)) {
1645 case 1:
1646 bp->cmd_tbl.dac2_output_control = dac2_output_control_v1;
1647 break;
1648 default:
1649 bp->cmd_tbl.dac2_output_control = NULL;
1650 break;
1651 }
1652}
1653
1654static enum bp_result dac1_output_control_v1(
1655 struct bios_parser *bp, bool enable)
1656{
1657 enum bp_result result = BP_RESULT_FAILURE;
1658 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
1659
1660 if (enable)
1661 params.ucAction = ATOM_ENABLE;
1662 else
1663 params.ucAction = ATOM_DISABLE;
1664
1665 if (EXEC_BIOS_CMD_TABLE(DAC1OutputControl, params))
1666 result = BP_RESULT_OK;
1667
1668 return result;
1669}
1670
1671static enum bp_result dac2_output_control_v1(
1672 struct bios_parser *bp, bool enable)
1673{
1674 enum bp_result result = BP_RESULT_FAILURE;
1675 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
1676
1677 if (enable)
1678 params.ucAction = ATOM_ENABLE;
1679 else
1680 params.ucAction = ATOM_DISABLE;
1681
1682 if (EXEC_BIOS_CMD_TABLE(DAC2OutputControl, params))
1683 result = BP_RESULT_OK;
1684
1685 return result;
1686}
1687
1688/*******************************************************************************
1689 ********************************************************************************
1690 **
1691 ** BLANK CRTC
1692 **
1693 ********************************************************************************
1694 *******************************************************************************/
1695
1696static enum bp_result blank_crtc_v1(
1697 struct bios_parser *bp,
1698 struct bp_blank_crtc_parameters *bp_params,
1699 bool blank);
1700
1701static void init_blank_crtc(struct bios_parser *bp)
1702{
1703 switch (BIOS_CMD_TABLE_PARA_REVISION(BlankCRTC)) {
1704 case 1:
1705 bp->cmd_tbl.blank_crtc = blank_crtc_v1;
1706 break;
1707 default:
1708 bp->cmd_tbl.blank_crtc = NULL;
1709 break;
1710 }
1711}
1712
1713static enum bp_result blank_crtc_v1(
1714 struct bios_parser *bp,
1715 struct bp_blank_crtc_parameters *bp_params,
1716 bool blank)
1717{
1718 enum bp_result result = BP_RESULT_FAILURE;
1719 BLANK_CRTC_PARAMETERS params = {0};
1720 uint8_t atom_controller_id;
1721
1722 if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
1723 &atom_controller_id)) {
1724 params.ucCRTC = (uint8_t)atom_controller_id;
1725
1726 if (blank)
1727 params.ucBlanking = ATOM_BLANKING;
1728 else
1729 params.ucBlanking = ATOM_BLANKING_OFF;
1730 params.usBlackColorRCr =
1731 cpu_to_le16((uint16_t)bp_params->black_color_rcr);
1732 params.usBlackColorGY =
1733 cpu_to_le16((uint16_t)bp_params->black_color_gy);
1734 params.usBlackColorBCb =
1735 cpu_to_le16((uint16_t)bp_params->black_color_bcb);
1736
1737 if (EXEC_BIOS_CMD_TABLE(BlankCRTC, params))
1738 result = BP_RESULT_OK;
1739 } else
1740 /* Current ASICs do not support more than two CRTCs; update
1741 * this if needed.
1742 */
1743 result = BP_RESULT_BADINPUT;
1744
1745 return result;
1746}
1747
1748/*******************************************************************************
1749 ********************************************************************************
1750 **
1751 ** SET CRTC TIMING
1752 **
1753 ********************************************************************************
1754 *******************************************************************************/
1755
1756static enum bp_result set_crtc_using_dtd_timing_v3(
1757 struct bios_parser *bp,
1758 struct bp_hw_crtc_timing_parameters *bp_params);
1759static enum bp_result set_crtc_timing_v1(
1760 struct bios_parser *bp,
1761 struct bp_hw_crtc_timing_parameters *bp_params);
1762
1763static void init_set_crtc_timing(struct bios_parser *bp)
1764{
1765 uint32_t dtd_version =
1766 BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_UsingDTDTiming);
1767 if (dtd_version > 2)
1768 switch (dtd_version) {
1769 case 3:
1770 bp->cmd_tbl.set_crtc_timing =
1771 set_crtc_using_dtd_timing_v3;
1772 break;
1773 default:
1774 bp->cmd_tbl.set_crtc_timing = NULL;
1775 break;
1776 }
1777 else
1778 switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing)) {
1779 case 1:
1780 bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
1781 break;
1782 default:
1783 bp->cmd_tbl.set_crtc_timing = NULL;
1784 break;
1785 }
1786}
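
Every init_* routine in this file follows the same pattern: read the command table's parameter revision and bind the matching vX implementation, or leave the hook NULL. init_set_crtc_timing() above adds a twist by preferring the DTD table when its revision is newer than 2. A minimal standalone sketch of that selection logic; the stand-in functions and revision values are assumptions purely for illustration:

#include <stdio.h>

typedef const char *(*set_timing_fn)(void);

/* Stand-ins for the real set_crtc_timing_v1 / set_crtc_using_dtd_timing_v3. */
static const char *set_timing_legacy_v1(void) { return "SetCRTC_Timing v1"; }
static const char *set_timing_dtd_v3(void)    { return "SetCRTC_UsingDTDTiming v3"; }

/* Mirrors init_set_crtc_timing(): prefer the DTD table if its parameter
 * revision is newer than 2, otherwise fall back to the legacy table; an
 * unknown revision leaves the hook unset (NULL). */
static set_timing_fn pick_set_crtc_timing(unsigned int dtd_rev,
					  unsigned int legacy_rev)
{
	if (dtd_rev > 2)
		return dtd_rev == 3 ? set_timing_dtd_v3 : NULL;

	return legacy_rev == 1 ? set_timing_legacy_v1 : NULL;
}

int main(void)
{
	set_timing_fn fn;

	fn = pick_set_crtc_timing(3, 1);
	printf("dtd rev 3, legacy rev 1 -> %s\n", fn ? fn() : "none");

	fn = pick_set_crtc_timing(1, 1);
	printf("dtd rev 1, legacy rev 1 -> %s\n", fn ? fn() : "none");

	return 0;
}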
1787
1788static enum bp_result set_crtc_timing_v1(
1789 struct bios_parser *bp,
1790 struct bp_hw_crtc_timing_parameters *bp_params)
1791{
1792 enum bp_result result = BP_RESULT_FAILURE;
1793 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION params = {0};
1794 uint8_t atom_controller_id;
1795
1796 if (bp->cmd_helper->controller_id_to_atom(
1797 bp_params->controller_id, &atom_controller_id))
1798 params.ucCRTC = atom_controller_id;
1799
1800 params.usH_Total = cpu_to_le16((uint16_t)(bp_params->h_total));
1801 params.usH_Disp = cpu_to_le16((uint16_t)(bp_params->h_addressable));
1802 params.usH_SyncStart = cpu_to_le16((uint16_t)(bp_params->h_sync_start));
1803 params.usH_SyncWidth = cpu_to_le16((uint16_t)(bp_params->h_sync_width));
1804 params.usV_Total = cpu_to_le16((uint16_t)(bp_params->v_total));
1805 params.usV_Disp = cpu_to_le16((uint16_t)(bp_params->v_addressable));
1806 params.usV_SyncStart =
1807 cpu_to_le16((uint16_t)(bp_params->v_sync_start));
1808 params.usV_SyncWidth =
1809 cpu_to_le16((uint16_t)(bp_params->v_sync_width));
1810
1811 /* VBIOS expects only zero in these fields for this call; for underscan,
1812 * use the separate ProgramOverscan entry instead. Note: the mode
1813 * 1776x1000 with 72x44 overscan (i.e. 1920x1080) is fine at 30 Hz in
1814 * DAL2, but the same mode at 60 Hz shows corruption.
1815 * DAL1 does not allow the mode 1776x1000@60.
1816 */
1817 params.ucOverscanRight = (uint8_t)bp_params->h_overscan_right;
1818 params.ucOverscanLeft = (uint8_t)bp_params->h_overscan_left;
1819 params.ucOverscanBottom = (uint8_t)bp_params->v_overscan_bottom;
1820 params.ucOverscanTop = (uint8_t)bp_params->v_overscan_top;
1821
1822 if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
1823 params.susModeMiscInfo.usAccess =
1824 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
1825
1826 if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
1827 params.susModeMiscInfo.usAccess =
1828 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
1829
1830 if (bp_params->flags.INTERLACE) {
1831 params.susModeMiscInfo.usAccess =
1832 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
1833
1834 /* original DAL code has this condition to apply this for
1835 * non-TV/CV only due to complex MV testing for possible
1836 * impact
1837 * if (pACParameters->signal != SignalType_YPbPr &&
1838 * pACParameters->signal != SignalType_Composite &&
1839 * pACParameters->signal != SignalType_SVideo)
1840 */
1841 /* HW will deduct 0.5 line from the 2nd field,
1842 * i.e. for 1080i it is 2 lines for the 1st field and 2.5
1843 * lines for the 2nd field. We need the input as 5 instead
1844 * of 4, but it is 4 both in EDID data
1845 * (spec CEA-861) and the CEA timing table.
1846 */
1847 params.usV_SyncStart =
1848 cpu_to_le16((uint16_t)(bp_params->v_sync_start + 1));
1849 }
1850
1851 if (bp_params->flags.HORZ_COUNT_BY_TWO)
1852 params.susModeMiscInfo.usAccess =
1853 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
1854
1855 if (EXEC_BIOS_CMD_TABLE(SetCRTC_Timing, params))
1856 result = BP_RESULT_OK;
1857
1858 return result;
1859}
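
As the comment above notes, for interlaced modes the hardware deducts half a line from the second field, so the vertical sync start handed to the VBIOS is bumped by one line (5 instead of the 4 carried by EDID/CEA-861 data). A minimal standalone sketch of just that adjustment; the example timing value is an assumption, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the interlace handling in set_crtc_timing_v1(): bump the vertical
 * sync start by one line when the mode is interlaced. */
static uint16_t vbios_v_sync_start(uint32_t v_sync_start, int interlaced)
{
	return (uint16_t)(interlaced ? v_sync_start + 1 : v_sync_start);
}

int main(void)
{
	uint32_t v_sync_start = 542;	/* example value only, not a real timing */

	printf("progressive: %u\n", vbios_v_sync_start(v_sync_start, 0));
	printf("interlaced:  %u\n", vbios_v_sync_start(v_sync_start, 1));
	return 0;
}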
1860
1861static enum bp_result set_crtc_using_dtd_timing_v3(
1862 struct bios_parser *bp,
1863 struct bp_hw_crtc_timing_parameters *bp_params)
1864{
1865 enum bp_result result = BP_RESULT_FAILURE;
1866 SET_CRTC_USING_DTD_TIMING_PARAMETERS params = {0};
1867 uint8_t atom_controller_id;
1868
1869 if (bp->cmd_helper->controller_id_to_atom(
1870 bp_params->controller_id, &atom_controller_id))
1871 params.ucCRTC = atom_controller_id;
1872
1873 /* bios usH_Size wants h addressable size */
1874 params.usH_Size = cpu_to_le16((uint16_t)bp_params->h_addressable);
1875 /* bios usH_Blanking_Time wants borders included in blanking */
1876 params.usH_Blanking_Time =
1877 cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable));
1878 /* bios usV_Size wants v addressable size */
1879 params.usV_Size = cpu_to_le16((uint16_t)bp_params->v_addressable);
1880 /* bios usV_Blanking_Time wants borders included in blanking */
1881 params.usV_Blanking_Time =
1882 cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable));
1883 /* bios usHSyncOffset is the offset from the end of h addressable,
1884 * our horizontalSyncStart is the offset from the beginning
1885 * of h addressable */
1886 params.usH_SyncOffset =
1887 cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable));
1888 params.usH_SyncWidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
1889 /* bios usV_SyncOffset is the offset from the end of v addressable,
1890 * our verticalSyncStart is the offset from the beginning of
1891 * v addressable */
1892 params.usV_SyncOffset =
1893 cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable));
1894 params.usV_SyncWidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
1895
1896 /* We assume that the overscan from the original timing does not get
1897 * bigger than 255; we will program all the borders in the Set CRTC
1898 * Overscan call below.
1899 */
1900
1901 if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
1902 params.susModeMiscInfo.usAccess =
1903 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
1904
1905 if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
1906 params.susModeMiscInfo.usAccess =
1907 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
1908
1909 if (bp_params->flags.INTERLACE) {
1910 params.susModeMiscInfo.usAccess =
1911 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
1912
1913 /* original DAL code has this condition to apply this
1914 * for non-TV/CV only
1915 * due to complex MV testing for possible impact
1916 * if ( pACParameters->signal != SignalType_YPbPr &&
1917 * pACParameters->signal != SignalType_Composite &&
1918 * pACParameters->signal != SignalType_SVideo)
1919 */
1920 {
1921 /* HW will deduct 0.5 line from the 2nd field,
1922 * i.e. for 1080i it is 2 lines for the 1st field and
1923 * 2.5 lines for the 2nd field. We need the input as 5
1924 * instead of 4,
1925 * but it is 4 both in EDID data (spec CEA-861)
1926 * and the CEA timing table.
1927 */
1928 params.usV_SyncOffset =
1929 cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1);
1930
1931 }
1932 }
1933
1934 if (bp_params->flags.HORZ_COUNT_BY_TWO)
1935 params.susModeMiscInfo.usAccess =
1936 cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
1937
1938 if (EXEC_BIOS_CMD_TABLE(SetCRTC_UsingDTDTiming, params))
1939 result = BP_RESULT_OK;
1940
1941 return result;
1942}
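
To make the offset/blanking conversions above concrete, here is a small standalone sketch that derives the DTD-style fields from a full CRTC timing, using the same arithmetic as set_crtc_using_dtd_timing_v3(). The 1080p numbers are illustrative assumptions, not values pulled from the driver:

#include <stdint.h>
#include <stdio.h>

struct crtc_timing {		/* simplified stand-in for bp_params */
	uint32_t h_total, h_addressable, h_sync_start, h_sync_width;
	uint32_t v_total, v_addressable, v_sync_start, v_sync_width;
};

int main(void)
{
	/* Illustrative 1080p60-style timing (assumed values). */
	const struct crtc_timing t = {
		.h_total = 2200, .h_addressable = 1920,
		.h_sync_start = 2008, .h_sync_width = 44,
		.v_total = 1125, .v_addressable = 1080,
		.v_sync_start = 1084, .v_sync_width = 5,
	};

	/* blanking = total - addressable; the sync offset is measured from
	 * the end of the addressable region rather than from its start. */
	uint16_t h_blank  = (uint16_t)(t.h_total - t.h_addressable);		/* 280 */
	uint16_t v_blank  = (uint16_t)(t.v_total - t.v_addressable);		/* 45  */
	uint16_t h_offset = (uint16_t)(t.h_sync_start - t.h_addressable);	/* 88  */
	uint16_t v_offset = (uint16_t)(t.v_sync_start - t.v_addressable);	/* 4   */

	printf("H: size %u blank %u sync offset %u width %u\n",
	       t.h_addressable, h_blank, h_offset, t.h_sync_width);
	printf("V: size %u blank %u sync offset %u width %u\n",
	       t.v_addressable, v_blank, v_offset, t.v_sync_width);
	return 0;
}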
1943
1944/*******************************************************************************
1945 ********************************************************************************
1946 **
1947 ** SET CRTC OVERSCAN
1948 **
1949 ********************************************************************************
1950 *******************************************************************************/
1951
1952static enum bp_result set_crtc_overscan_v1(
1953 struct bios_parser *bp,
1954 struct bp_hw_crtc_overscan_parameters *bp_params);
1955
1956static void init_set_crtc_overscan(struct bios_parser *bp)
1957{
1958 switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_OverScan)) {
1959 case 1:
1960 bp->cmd_tbl.set_crtc_overscan = set_crtc_overscan_v1;
1961 break;
1962 default:
1963 bp->cmd_tbl.set_crtc_overscan = NULL;
1964 break;
1965 }
1966}
1967
1968static enum bp_result set_crtc_overscan_v1(
1969 struct bios_parser *bp,
1970 struct bp_hw_crtc_overscan_parameters *bp_params)
1971{
1972 enum bp_result result = BP_RESULT_FAILURE;
1973 SET_CRTC_OVERSCAN_PARAMETERS params = {0};
1974 uint8_t atom_controller_id;
1975
1976 if (bp->cmd_helper->controller_id_to_atom(
1977 bp_params->controller_id, &atom_controller_id))
1978 params.ucCRTC = atom_controller_id;
1979 else
1980 return BP_RESULT_BADINPUT;
1981
1982 params.usOverscanRight =
1983 cpu_to_le16((uint16_t)bp_params->h_overscan_right);
1984 params.usOverscanLeft =
1985 cpu_to_le16((uint16_t)bp_params->h_overscan_left);
1986 params.usOverscanBottom =
1987 cpu_to_le16((uint16_t)bp_params->v_overscan_bottom);
1988 params.usOverscanTop =
1989 cpu_to_le16((uint16_t)bp_params->v_overscan_top);
1990
1991 if (EXEC_BIOS_CMD_TABLE(SetCRTC_OverScan, params))
1992 result = BP_RESULT_OK;
1993
1994 return result;
1995}
1996
1997/*******************************************************************************
1998 ********************************************************************************
1999 **
2000 ** SELECT CRTC SOURCE
2001 **
2002 ********************************************************************************
2003 *******************************************************************************/
2004
2005static enum bp_result select_crtc_source_v2(
2006 struct bios_parser *bp,
2007 struct bp_crtc_source_select *bp_params);
2008static enum bp_result select_crtc_source_v3(
2009 struct bios_parser *bp,
2010 struct bp_crtc_source_select *bp_params);
2011
2012static void init_select_crtc_source(struct bios_parser *bp)
2013{
2014 switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
2015 case 2:
2016 bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
2017 break;
2018 case 3:
2019 bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
2020 break;
2021 default:
2022 bp->cmd_tbl.select_crtc_source = NULL;
2023 break;
2024 }
2025}
2026
2027static enum bp_result select_crtc_source_v2(
2028 struct bios_parser *bp,
2029 struct bp_crtc_source_select *bp_params)
2030{
2031 enum bp_result result = BP_RESULT_FAILURE;
2032 SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
2033 uint8_t atom_controller_id;
2034 uint32_t atom_engine_id;
2035 enum signal_type s = bp_params->signal;
2036
2037 memset(&params, 0, sizeof(params));
2038
2039 /* set controller id */
2040 if (bp->cmd_helper->controller_id_to_atom(
2041 bp_params->controller_id, &atom_controller_id))
2042 params.ucCRTC = atom_controller_id;
2043 else
2044 return BP_RESULT_FAILURE;
2045
2046 /* set encoder id */
2047 if (bp->cmd_helper->engine_bp_to_atom(
2048 bp_params->engine_id, &atom_engine_id))
2049 params.ucEncoderID = (uint8_t)atom_engine_id;
2050 else
2051 return BP_RESULT_FAILURE;
2052
2053 if (SIGNAL_TYPE_EDP == s ||
2054 (SIGNAL_TYPE_DISPLAY_PORT == s &&
2055 SIGNAL_TYPE_LVDS == bp_params->sink_signal))
2056 s = SIGNAL_TYPE_LVDS;
2057
2058 params.ucEncodeMode =
2059 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
2060 s, bp_params->enable_dp_audio);
2061
2062 if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
2063 result = BP_RESULT_OK;
2064
2065 return result;
2066}
2067
2068static enum bp_result select_crtc_source_v3(
2069 struct bios_parser *bp,
2070 struct bp_crtc_source_select *bp_params)
2071{
2072 enum bp_result result = BP_RESULT_FAILURE;
2073 SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
2074 uint8_t atom_controller_id;
2075 uint32_t atom_engine_id;
2076 enum signal_type s = bp_params->signal;
2077
2078 memset(&params, 0, sizeof(params));
2079
2080 if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
2081 &atom_controller_id))
2082 params.ucCRTC = atom_controller_id;
2083 else
2084 return result;
2085
2086 if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
2087 &atom_engine_id))
2088 params.ucEncoderID = (uint8_t)atom_engine_id;
2089 else
2090 return result;
2091
2092 if (SIGNAL_TYPE_EDP == s ||
2093 (SIGNAL_TYPE_DISPLAY_PORT == s &&
2094 SIGNAL_TYPE_LVDS == bp_params->sink_signal))
2095 s = SIGNAL_TYPE_LVDS;
2096
2097 params.ucEncodeMode =
2098 bp->cmd_helper->encoder_mode_bp_to_atom(
2099 s, bp_params->enable_dp_audio);
2100 /* Needed for VBIOS Random Spatial Dithering feature */
2101 params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth);
2102
2103 if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
2104 result = BP_RESULT_OK;
2105
2106 return result;
2107}
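
Both SelectCRTC_Source variants map eDP, and DP driven into an LVDS sink (i.e. through a DP-to-LVDS bridge), onto the LVDS encode mode before asking the helper for the ATOM value. A minimal standalone sketch of that remapping rule; the cut-down signal enum is an assumption for the example, not the driver's real signal_type:

#include <stdio.h>

/* Cut-down stand-in for the driver's signal_type enum (assumption). */
enum sig { SIG_DP, SIG_DP_MST, SIG_EDP, SIG_LVDS, SIG_HDMI };

/* Mirrors the rule in select_crtc_source_v2/v3(): eDP, or DP feeding an
 * LVDS sink, is treated as LVDS when picking the ATOM encode mode. */
static enum sig remap_for_encode_mode(enum sig signal, enum sig sink_signal)
{
	if (signal == SIG_EDP ||
	    (signal == SIG_DP && sink_signal == SIG_LVDS))
		return SIG_LVDS;
	return signal;
}

int main(void)
{
	printf("eDP             -> %d\n", remap_for_encode_mode(SIG_EDP, SIG_EDP));
	printf("DP to LVDS sink -> %d\n", remap_for_encode_mode(SIG_DP, SIG_LVDS));
	printf("plain DP        -> %d\n", remap_for_encode_mode(SIG_DP, SIG_DP));
	return 0;
}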
2108
2109/*******************************************************************************
2110 ********************************************************************************
2111 **
2112 ** ENABLE CRTC
2113 **
2114 ********************************************************************************
2115 *******************************************************************************/
2116
2117static enum bp_result enable_crtc_v1(
2118 struct bios_parser *bp,
2119 enum controller_id controller_id,
2120 bool enable);
2121
2122static void init_enable_crtc(struct bios_parser *bp)
2123{
2124 switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)) {
2125 case 1:
2126 bp->cmd_tbl.enable_crtc = enable_crtc_v1;
2127 break;
2128 default:
2129 bp->cmd_tbl.enable_crtc = NULL;
2130 break;
2131 }
2132}
2133
2134static enum bp_result enable_crtc_v1(
2135 struct bios_parser *bp,
2136 enum controller_id controller_id,
2137 bool enable)
2138{
2139 enum bp_result result = BP_RESULT_FAILURE;
2140 ENABLE_CRTC_PARAMETERS params = {0};
2141 uint8_t id;
2142
2143 if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
2144 params.ucCRTC = id;
2145 else
2146 return BP_RESULT_BADINPUT;
2147
2148 if (enable)
2149 params.ucEnable = ATOM_ENABLE;
2150 else
2151 params.ucEnable = ATOM_DISABLE;
2152
2153 if (EXEC_BIOS_CMD_TABLE(EnableCRTC, params))
2154 result = BP_RESULT_OK;
2155
2156 return result;
2157}
2158
2159/*******************************************************************************
2160 ********************************************************************************
2161 **
2162 ** ENABLE CRTC MEM REQ
2163 **
2164 ********************************************************************************
2165 *******************************************************************************/
2166
2167static enum bp_result enable_crtc_mem_req_v1(
2168 struct bios_parser *bp,
2169 enum controller_id controller_id,
2170 bool enable);
2171
2172static void init_enable_crtc_mem_req(struct bios_parser *bp)
2173{
2174 switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTCMemReq)) {
2175 case 1:
2176 bp->cmd_tbl.enable_crtc_mem_req = enable_crtc_mem_req_v1;
2177 break;
2178 default:
2179 bp->cmd_tbl.enable_crtc_mem_req = NULL;
2180 break;
2181 }
2182}
2183
2184static enum bp_result enable_crtc_mem_req_v1(
2185 struct bios_parser *bp,
2186 enum controller_id controller_id,
2187 bool enable)
2188{
2189 enum bp_result result = BP_RESULT_BADINPUT;
2190 ENABLE_CRTC_PARAMETERS params = {0};
2191 uint8_t id;
2192
2193 if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) {
2194 params.ucCRTC = id;
2195
2196 if (enable)
2197 params.ucEnable = ATOM_ENABLE;
2198 else
2199 params.ucEnable = ATOM_DISABLE;
2200
2201 if (EXEC_BIOS_CMD_TABLE(EnableCRTCMemReq, params))
2202 result = BP_RESULT_OK;
2203 else
2204 result = BP_RESULT_FAILURE;
2205 }
2206
2207 return result;
2208}
2209
2210/*******************************************************************************
2211 ********************************************************************************
2212 **
2213 ** DISPLAY PLL
2214 **
2215 ********************************************************************************
2216 *******************************************************************************/
2217
2218static enum bp_result program_clock_v5(
2219 struct bios_parser *bp,
2220 struct bp_pixel_clock_parameters *bp_params);
2221static enum bp_result program_clock_v6(
2222 struct bios_parser *bp,
2223 struct bp_pixel_clock_parameters *bp_params);
2224
2225static void init_program_clock(struct bios_parser *bp)
2226{
2227 switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
2228 case 5:
2229 bp->cmd_tbl.program_clock = program_clock_v5;
2230 break;
2231 case 6:
2232 bp->cmd_tbl.program_clock = program_clock_v6;
2233 break;
2234 default:
2235 bp->cmd_tbl.program_clock = NULL;
2236 break;
2237 }
2238}
2239
2240static enum bp_result program_clock_v5(
2241 struct bios_parser *bp,
2242 struct bp_pixel_clock_parameters *bp_params)
2243{
2244 enum bp_result result = BP_RESULT_FAILURE;
2245
2246 SET_PIXEL_CLOCK_PS_ALLOCATION_V5 params;
2247 uint32_t atom_pll_id;
2248
2249 memset(&params, 0, sizeof(params));
2250 if (!bp->cmd_helper->clock_source_id_to_atom(
2251 bp_params->pll_id, &atom_pll_id)) {
2252 BREAK_TO_DEBUGGER(); /* Invalid Input! */
2253 return BP_RESULT_BADINPUT;
2254 }
2255
2256 /* We need to convert from KHz units into 10KHz units */
2257 params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id;
2258 params.sPCLKInput.usPixelClock =
2259 cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10));
2260 params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID;
2261
2262 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
2263 params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
2264
2265 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params))
2266 result = BP_RESULT_OK;
2267
2268 return result;
2269}
2270
2271static enum bp_result program_clock_v6(
2272 struct bios_parser *bp,
2273 struct bp_pixel_clock_parameters *bp_params)
2274{
2275 enum bp_result result = BP_RESULT_FAILURE;
2276
2277 SET_PIXEL_CLOCK_PS_ALLOCATION_V6 params;
2278 uint32_t atom_pll_id;
2279
2280 memset(&params, 0, sizeof(params));
2281
2282 if (!bp->cmd_helper->clock_source_id_to_atom(
2283 bp_params->pll_id, &atom_pll_id)) {
2284 BREAK_TO_DEBUGGER(); /* Invalid Input! */
2285 return BP_RESULT_BADINPUT;
2286 }
2287
2288 /* We need to convert from KHz units into 10KHz units */
2289 params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id;
2290 params.sPCLKInput.ulDispEngClkFreq =
2291 cpu_to_le32(bp_params->target_pixel_clock / 10);
2292
2293 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
2294 params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
2295
2296 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) {
2297 /* True display clock is returned by VBIOS if DFS bypass
2298 * is enabled. */
2299 bp_params->dfs_bypass_display_clock =
2300 (uint32_t)(le32_to_cpu(params.sPCLKInput.ulDispEngClkFreq) * 10);
2301 result = BP_RESULT_OK;
2302 }
2303
2304 return result;
2305}
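
program_clock_v6() divides the target clock by 10 on the way into the VBIOS and multiplies the returned DFS-bypass display clock by 10 on the way back out. A standalone sketch of that unit round trip; the 300 MHz figure is just an example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t target_khz = 300000;		/* example: 300 MHz expressed in KHz */

	/* KHz -> 10KHz units, as handed to SetPixelClock. */
	uint32_t vbios_units = target_khz / 10;

	/* Pretend the VBIOS wrote back the true display clock in the same
	 * field (what happens when DFS bypass is enabled). */
	uint32_t returned_units = vbios_units;

	/* 10KHz units -> KHz, as stored in dfs_bypass_display_clock. */
	uint32_t dfs_bypass_khz = returned_units * 10;

	printf("in: %u KHz -> %u (10KHz units) -> out: %u KHz\n",
	       target_khz, vbios_units, dfs_bypass_khz);
	return 0;
}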
2306
2307/*******************************************************************************
2308 ********************************************************************************
2309 **
2310 ** COMPUTE MEMORY ENGINE PLL
2311 **
2312 ********************************************************************************
2313 *******************************************************************************/
2314
2315static enum bp_result compute_memore_engine_pll_v4(
2316 struct bios_parser *bp,
2317 struct bp_display_clock_parameters *bp_params);
2318
2319static void init_compute_memore_engine_pll(struct bios_parser *bp)
2320{
2321 switch (BIOS_CMD_TABLE_PARA_REVISION(ComputeMemoryEnginePLL)) {
2322 case 4:
2323 bp->cmd_tbl.compute_memore_engine_pll =
2324 compute_memore_engine_pll_v4;
2325 break;
2326 default:
2327 bp->cmd_tbl.compute_memore_engine_pll = NULL;
2328 break;
2329 }
2330}
2331
2332static enum bp_result compute_memore_engine_pll_v4(
2333 struct bios_parser *bp,
2334 struct bp_display_clock_parameters *bp_params)
2335{
2336 enum bp_result result = BP_RESULT_FAILURE;
2337 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 params;
2338
2339 memset(&params, 0, sizeof(params));
2340
2341 params.ulClock = cpu_to_le32(bp_params->target_display_clock / 10);
2342
2343 /* Initialize this to the target clock in case this call fails */
2344 bp_params->actual_display_clock = bp_params->target_display_clock;
2345
2346 if (EXEC_BIOS_CMD_TABLE(ComputeMemoryEnginePLL, params)) {
2347 /* Convert from 10KHz units back to KHz */
2348 bp_params->actual_display_clock =
2349 le32_to_cpu(params.ulClock) * 10;
2350 bp_params->actual_post_divider_id = params.ucPostDiv;
2351 result = BP_RESULT_OK;
2352 }
2353
2354 return result;
2355}
2356
2357/*******************************************************************************
2358 ********************************************************************************
2359 **
2360 ** EXTERNAL ENCODER CONTROL
2361 **
2362 ********************************************************************************
2363 *******************************************************************************/
2364
2365static enum bp_result external_encoder_control_v3(
2366 struct bios_parser *bp,
2367 struct bp_external_encoder_control *cntl);
2368
2369static void init_external_encoder_control(
2370 struct bios_parser *bp)
2371{
2372 switch (BIOS_CMD_TABLE_PARA_REVISION(ExternalEncoderControl)) {
2373 case 3:
2374 bp->cmd_tbl.external_encoder_control =
2375 external_encoder_control_v3;
2376 break;
2377 default:
2378 bp->cmd_tbl.external_encoder_control = NULL;
2379 break;
2380 }
2381}
2382
2383static enum bp_result external_encoder_control_v3(
2384 struct bios_parser *bp,
2385 struct bp_external_encoder_control *cntl)
2386{
2387 enum bp_result result = BP_RESULT_FAILURE;
2388
2389 /* we need to use the _PS_Alloc struct */
2390 EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 params;
2391 EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *cntl_params;
2392 struct graphics_object_id encoder;
2393 bool is_input_signal_dp = false;
2394
2395 memset(&params, 0, sizeof(params));
2396
2397 cntl_params = &params.sExtEncoder;
2398
2399 encoder = cntl->encoder_id;
2400
2401 /* check if encoder supports external encoder control table */
2402 switch (dal_graphics_object_id_get_encoder_id(encoder)) {
2403 case ENCODER_ID_EXTERNAL_NUTMEG:
2404 case ENCODER_ID_EXTERNAL_TRAVIS:
2405 is_input_signal_dp = true;
2406 break;
2407
2408 default:
2409 BREAK_TO_DEBUGGER();
2410 return BP_RESULT_BADINPUT;
2411 }
2412
2413 /* Fill information based on the action
2414 *
2415 * Bit[6:4]: indicate external encoder, applied to all functions.
2416 * =0: external encoder1, mapped to external encoder enum id1
2417 * =1: external encoder2, mapped to external encoder enum id2
2418 *
2419 * enum ObjectEnumId
2420 * {
2421 * EnumId_Unknown = 0,
2422 * EnumId_1,
2423 * EnumId_2,
2424 * };
2425 */
2426 cntl_params->ucConfig = (uint8_t)((encoder.enum_id - 1) << 4);
2427
2428 switch (cntl->action) {
2429 case EXTERNAL_ENCODER_CONTROL_INIT:
2430 /* output display connector type. Only valid in encoder
2431 * initialization */
2432 cntl_params->usConnectorId =
2433 cpu_to_le16((uint16_t)cntl->connector_obj_id.id);
2434 break;
2435 case EXTERNAL_ENCODER_CONTROL_SETUP:
2436 /* EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 pixel clock unit in
2437 * 10KHz
2438 * output display device pixel clock frequency in unit of 10KHz.
2439 * Only valid in setup and enableoutput
2440 */
2441 cntl_params->usPixelClock =
2442 cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
2443 /* Indicate the display output signal type driven by the external
2444 * encoder; only valid in setup and enableoutput */
2445 cntl_params->ucEncoderMode =
2446 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
2447 cntl->signal, false);
2448
2449 if (is_input_signal_dp) {
2450 /* Bit[0]: indicate link rate, =1: 2.7GHz, =0: 1.62GHz,
2451 * only valid in encoder setup with DP mode. */
2452 if (LINK_RATE_HIGH == cntl->link_rate)
2453 cntl_params->ucConfig |= 1;
2454 /* Output color depth: indicates the encoder data bpc format
2455 * in DP mode; only valid in encoder setup in DP mode.
2456 */
2457 cntl_params->ucBitPerColor =
2458 (uint8_t)(cntl->color_depth);
2459 }
2460 /* Indicate how many lanes are used by the external encoder; only
2461 * valid in encoder setup and enableoutput. */
2462 cntl_params->ucLaneNum = (uint8_t)(cntl->lanes_number);
2463 break;
2464 case EXTERNAL_ENCODER_CONTROL_ENABLE:
2465 cntl_params->usPixelClock =
2466 cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
2467 cntl_params->ucEncoderMode =
2468 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
2469 cntl->signal, false);
2470 cntl_params->ucLaneNum = (uint8_t)cntl->lanes_number;
2471 break;
2472 default:
2473 break;
2474 }
2475
2476 cntl_params->ucAction = (uint8_t)cntl->action;
2477
2478 if (EXEC_BIOS_CMD_TABLE(ExternalEncoderControl, params))
2479 result = BP_RESULT_OK;
2480
2481 return result;
2482}
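
The ucConfig byte built above packs the external encoder instance into bits [6:4] (enum_id minus one) and, for DP, the link-rate select into bit 0. A standalone sketch of that packing, following the bit positions described in the comments; the helper name and layout constants here are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the ucConfig packing in
 * external_encoder_control_v3(): bits [6:4] select external encoder 1/2
 * (enum_id 1 -> 0, enum_id 2 -> 1); bit 0 selects 2.7 GHz when the high
 * link rate is requested in DP mode. */
static uint8_t pack_ext_encoder_config(uint32_t enum_id, int dp_high_link_rate)
{
	uint8_t config = (uint8_t)((enum_id - 1) << 4);

	if (dp_high_link_rate)
		config |= 1;

	return config;
}

int main(void)
{
	printf("encoder enum 1, 1.62 GHz: 0x%02x\n", pack_ext_encoder_config(1, 0));
	printf("encoder enum 2, 2.7 GHz:  0x%02x\n", pack_ext_encoder_config(2, 1));
	return 0;
}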
2483
2484/*******************************************************************************
2485 ********************************************************************************
2486 **
2487 ** ENABLE DISPLAY POWER GATING
2488 **
2489 ********************************************************************************
2490 *******************************************************************************/
2491
2492static enum bp_result enable_disp_power_gating_v2_1(
2493 struct bios_parser *bp,
2494 enum controller_id crtc_id,
2495 enum bp_pipe_control_action action);
2496
2497static void init_enable_disp_power_gating(
2498 struct bios_parser *bp)
2499{
2500 switch (BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)) {
2501 case 1:
2502 bp->cmd_tbl.enable_disp_power_gating =
2503 enable_disp_power_gating_v2_1;
2504 break;
2505 default:
2506 bp->cmd_tbl.enable_disp_power_gating = NULL;
2507 break;
2508 }
2509}
2510
2511static enum bp_result enable_disp_power_gating_v2_1(
2512 struct bios_parser *bp,
2513 enum controller_id crtc_id,
2514 enum bp_pipe_control_action action)
2515{
2516 enum bp_result result = BP_RESULT_FAILURE;
2517
2518 ENABLE_DISP_POWER_GATING_PS_ALLOCATION params = {0};
2519 uint8_t atom_crtc_id;
2520
2521 if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
2522 params.ucDispPipeId = atom_crtc_id;
2523 else
2524 return BP_RESULT_BADINPUT;
2525
2526 params.ucEnable =
2527 bp->cmd_helper->disp_power_gating_action_to_atom(action);
2528
2529 if (EXEC_BIOS_CMD_TABLE(EnableDispPowerGating, params))
2530 result = BP_RESULT_OK;
2531
2532 return result;
2533}
2534
2535/*******************************************************************************
2536 ********************************************************************************
2537 **
2538 ** SET DCE CLOCK
2539 **
2540 ********************************************************************************
2541 *******************************************************************************/
2542#ifdef LATEST_ATOM_BIOS_SUPPORT
2543static enum bp_result set_dce_clock_v2_1(
2544 struct bios_parser *bp,
2545 struct bp_set_dce_clock_parameters *bp_params);
2546#endif
2547
2548static void init_set_dce_clock(struct bios_parser *bp)
2549{
2550#ifdef LATEST_ATOM_BIOS_SUPPORT
2551 switch (BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock)) {
2552 case 1:
2553 bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
2554 break;
2555 default:
2556 bp->cmd_tbl.set_dce_clock = NULL;
2557 break;
2558 }
2559#endif
2560}
2561
2562#ifdef LATEST_ATOM_BIOS_SUPPORT
2563static enum bp_result set_dce_clock_v2_1(
2564 struct bios_parser *bp,
2565 struct bp_set_dce_clock_parameters *bp_params)
2566{
2567 enum bp_result result = BP_RESULT_FAILURE;
2568
2569 SET_DCE_CLOCK_PS_ALLOCATION_V2_1 params;
2570 uint32_t atom_pll_id;
2571 uint32_t atom_clock_type;
2572 const struct command_table_helper *cmd = bp->cmd_helper;
2573
2574 memset(&params, 0, sizeof(params));
2575
2576 if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
2577 !cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type))
2578 return BP_RESULT_BADINPUT;
2579
2580 params.asParam.ucDCEClkSrc = atom_pll_id;
2581 params.asParam.ucDCEClkType = atom_clock_type;
2582
2583 if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
2584 if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
2585 params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
2586
2587 if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
2588 params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
2589
2590 if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
2591 params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
2592
2593 if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
2594 params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
2595 }
2596 else
2597 /* only program clock frequency if display clock is used; VBIOS will program DPREFCLK */
2598 /* We need to convert from KHz units into 10KHz units */
2599 params.asParam.ulDCEClkFreq = cpu_to_le32(bp_params->target_clock_frequency / 10);
2600
2601 if (EXEC_BIOS_CMD_TABLE(SetDCEClock, params)) {
2602 /* Convert from 10KHz units back to KHz */
2603 bp_params->target_clock_frequency = le32_to_cpu(params.asParam.ulDCEClkFreq) * 10;
2604 result = BP_RESULT_OK;
2605 }
2606
2607 return result;
2608}
2609#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
new file mode 100644
index 000000000000..e1cd21b6e968
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_COMMAND_TABLE_H__
27#define __DAL_COMMAND_TABLE_H__
28
29struct bios_parser;
30struct bp_encoder_control;
31
32struct cmd_tbl {
33 enum bp_result (*dig_encoder_control)(
34 struct bios_parser *bp,
35 struct bp_encoder_control *control);
36 enum bp_result (*encoder_control_dig1)(
37 struct bios_parser *bp,
38 struct bp_encoder_control *control);
39 enum bp_result (*encoder_control_dig2)(
40 struct bios_parser *bp,
41 struct bp_encoder_control *control);
42 enum bp_result (*transmitter_control)(
43 struct bios_parser *bp,
44 struct bp_transmitter_control *control);
45 enum bp_result (*set_pixel_clock)(
46 struct bios_parser *bp,
47 struct bp_pixel_clock_parameters *bp_params);
48 enum bp_result (*enable_spread_spectrum_on_ppll)(
49 struct bios_parser *bp,
50 struct bp_spread_spectrum_parameters *bp_params,
51 bool enable);
52 enum bp_result (*adjust_display_pll)(
53 struct bios_parser *bp,
54 struct bp_adjust_pixel_clock_parameters *bp_params);
55 enum bp_result (*dac1_encoder_control)(
56 struct bios_parser *bp,
57 bool enable,
58 uint32_t pixel_clock,
59 uint8_t dac_standard);
60 enum bp_result (*dac2_encoder_control)(
61 struct bios_parser *bp,
62 bool enable,
63 uint32_t pixel_clock,
64 uint8_t dac_standard);
65 enum bp_result (*dac1_output_control)(
66 struct bios_parser *bp,
67 bool enable);
68 enum bp_result (*dac2_output_control)(
69 struct bios_parser *bp,
70 bool enable);
71 enum bp_result (*blank_crtc)(
72 struct bios_parser *bp,
73 struct bp_blank_crtc_parameters *bp_params,
74 bool blank);
75 enum bp_result (*set_crtc_timing)(
76 struct bios_parser *bp,
77 struct bp_hw_crtc_timing_parameters *bp_params);
78 enum bp_result (*set_crtc_overscan)(
79 struct bios_parser *bp,
80 struct bp_hw_crtc_overscan_parameters *bp_params);
81 enum bp_result (*select_crtc_source)(
82 struct bios_parser *bp,
83 struct bp_crtc_source_select *bp_params);
84 enum bp_result (*enable_crtc)(
85 struct bios_parser *bp,
86 enum controller_id controller_id,
87 bool enable);
88 enum bp_result (*enable_crtc_mem_req)(
89 struct bios_parser *bp,
90 enum controller_id controller_id,
91 bool enable);
92 enum bp_result (*program_clock)(
93 struct bios_parser *bp,
94 struct bp_pixel_clock_parameters *bp_params);
95 enum bp_result (*compute_memore_engine_pll)(
96 struct bios_parser *bp,
97 struct bp_display_clock_parameters *bp_params);
98 enum bp_result (*external_encoder_control)(
99 struct bios_parser *bp,
100 struct bp_external_encoder_control *cntl);
101 enum bp_result (*enable_disp_power_gating)(
102 struct bios_parser *bp,
103 enum controller_id crtc_id,
104 enum bp_pipe_control_action action);
105 enum bp_result (*set_dce_clock)(
106 struct bios_parser *bp,
107 struct bp_set_dce_clock_parameters *bp_params);
108};
109
110void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp);
111
112#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
new file mode 100644
index 000000000000..40d9a9921c45
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -0,0 +1,288 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "atom.h"
29
30#include "include/bios_parser_types.h"
31
32#include "command_table_helper.h"
33
34bool dal_bios_parser_init_cmd_tbl_helper(
35 const struct command_table_helper **h,
36 enum dce_version dce)
37{
38 switch (dce) {
39 case DCE_VERSION_8_0:
40 *h = dal_cmd_tbl_helper_dce80_get_table();
41 return true;
42
43 case DCE_VERSION_10_0:
44 *h = dal_cmd_tbl_helper_dce110_get_table();
45 return true;
46
47 case DCE_VERSION_11_0:
48 *h = dal_cmd_tbl_helper_dce110_get_table();
49 return true;
50
51 case DCE_VERSION_11_2:
52 *h = dal_cmd_tbl_helper_dce112_get_table();
53 return true;
54
55 default:
56 /* Unsupported DCE */
57 BREAK_TO_DEBUGGER();
58 return false;
59 }
60}
61
62/* real implementations */
63
64bool dal_cmd_table_helper_controller_id_to_atom(
65 enum controller_id id,
66 uint8_t *atom_id)
67{
68 if (atom_id == NULL) {
69 BREAK_TO_DEBUGGER();
70 return false;
71 }
72
73 switch (id) {
74 case CONTROLLER_ID_D0:
75 *atom_id = ATOM_CRTC1;
76 return true;
77 case CONTROLLER_ID_D1:
78 *atom_id = ATOM_CRTC2;
79 return true;
80 case CONTROLLER_ID_D2:
81 *atom_id = ATOM_CRTC3;
82 return true;
83 case CONTROLLER_ID_D3:
84 *atom_id = ATOM_CRTC4;
85 return true;
86 case CONTROLLER_ID_D4:
87 *atom_id = ATOM_CRTC5;
88 return true;
89 case CONTROLLER_ID_D5:
90 *atom_id = ATOM_CRTC6;
91 return true;
92 case CONTROLLER_ID_UNDERLAY0:
93 *atom_id = ATOM_UNDERLAY_PIPE0;
94 return true;
95 case CONTROLLER_ID_UNDEFINED:
96 *atom_id = ATOM_CRTC_INVALID;
97 return true;
98 default:
99 /* Wrong controller id */
100 BREAK_TO_DEBUGGER();
101 return false;
102 }
103}
104
105/**
106* dal_cmd_table_helper_transmitter_bp_to_atom
107*
108* @brief
109* Translate the Transmitter to the corresponding ATOM BIOS value
110*
111* @param
112* input transmitter
113* output digitalTransmitter
114* // =00: Digital Transmitter1 ( UNIPHY linkAB )
115* // =01: Digital Transmitter2 ( UNIPHY linkCD )
116* // =02: Digital Transmitter3 ( UNIPHY linkEF )
117*/
118uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
119 enum transmitter t)
120{
121 switch (t) {
122 case TRANSMITTER_UNIPHY_A:
123 case TRANSMITTER_UNIPHY_B:
124 case TRANSMITTER_TRAVIS_LCD:
125 return 0;
126 case TRANSMITTER_UNIPHY_C:
127 case TRANSMITTER_UNIPHY_D:
128 return 1;
129 case TRANSMITTER_UNIPHY_E:
130 case TRANSMITTER_UNIPHY_F:
131 return 2;
132 default:
133 /* Invalid Transmitter Type! */
134 BREAK_TO_DEBUGGER();
135 return 0;
136 }
137}
138
139uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
140 enum signal_type s,
141 bool enable_dp_audio)
142{
143 switch (s) {
144 case SIGNAL_TYPE_DVI_SINGLE_LINK:
145 case SIGNAL_TYPE_DVI_DUAL_LINK:
146 return ATOM_ENCODER_MODE_DVI;
147 case SIGNAL_TYPE_HDMI_TYPE_A:
148 return ATOM_ENCODER_MODE_HDMI;
149 case SIGNAL_TYPE_LVDS:
150 return ATOM_ENCODER_MODE_LVDS;
151 case SIGNAL_TYPE_EDP:
152 case SIGNAL_TYPE_DISPLAY_PORT_MST:
153 case SIGNAL_TYPE_DISPLAY_PORT:
154 case SIGNAL_TYPE_VIRTUAL:
155 if (enable_dp_audio)
156 return ATOM_ENCODER_MODE_DP_AUDIO;
157 else
158 return ATOM_ENCODER_MODE_DP;
159 case SIGNAL_TYPE_RGB:
160 return ATOM_ENCODER_MODE_CRT;
161 default:
162 return ATOM_ENCODER_MODE_CRT;
163 }
164}
165
166void dal_cmd_table_helper_assign_control_parameter(
167 const struct command_table_helper *h,
168 struct bp_encoder_control *control,
169 DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param)
170{
171 /* There are three transmitter blocks, each with two 4-lane links:
172 * A+B, C+D, E+F. UNIPHY A, C and E are enumerated as link 0 in each
173 * transmitter block; B, D and F as link 1. The third transmitter block
174 * has non-splittable links (UNIPHY E and UNIPHY F cannot be configured
175 * separately to drive two different streams).
176 */
177 if ((control->transmitter == TRANSMITTER_UNIPHY_B) ||
178 (control->transmitter == TRANSMITTER_UNIPHY_D) ||
179 (control->transmitter == TRANSMITTER_UNIPHY_F)) {
180 /* Bit2: Link Select
181 * =0: PHY linkA/C/E
182 * =1: PHY linkB/D/F
183 */
184 ctrl_param->acConfig.ucLinkSel = 1;
185 }
186
187 /* Bit[4:3]: Transmitter Selection
188 * =00: Digital Transmitter1 ( UNIPHY linkAB )
189 * =01: Digital Transmitter2 ( UNIPHY linkCD )
190 * =02: Digital Transmitter3 ( UNIPHY linkEF )
191 * =03: Reserved
192 */
193 ctrl_param->acConfig.ucTransmitterSel =
194 (uint8_t)(h->transmitter_bp_to_atom(control->transmitter));
195
196 /* We need to convert from KHz units into 10KHz units */
197 ctrl_param->ucAction = h->encoder_action_to_atom(control->action);
198 ctrl_param->usPixelClock = cpu_to_le16((uint16_t)(control->pixel_clock / 10));
199 ctrl_param->ucEncoderMode =
200 (uint8_t)(h->encoder_mode_bp_to_atom(
201 control->signal, control->enable_dp_audio));
202 ctrl_param->ucLaneNum = (uint8_t)(control->lanes_number);
203}
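
The helper above derives two selector fields from the transmitter: the link select (B/D/F are link 1 within their block) and the transmitter block select (A+B = 0, C+D = 1, E+F = 2, via transmitter_bp_to_atom). A standalone sketch of that mapping; the cut-down PHY enum and its ordering are assumptions for the example:

#include <stdint.h>
#include <stdio.h>

/* Cut-down transmitter list (assumption, mirroring UNIPHY A..F). */
enum phy { PHY_A, PHY_B, PHY_C, PHY_D, PHY_E, PHY_F };

/* Mirrors dal_cmd_table_helper_assign_control_parameter(): links B/D/F are
 * link 1 within their transmitter block. */
static uint8_t link_sel(enum phy p)
{
	return (p == PHY_B || p == PHY_D || p == PHY_F) ? 1 : 0;
}

/* Block index: A,B -> 0; C,D -> 1; E,F -> 2 (same result as
 * dal_cmd_table_helper_transmitter_bp_to_atom for these PHYs). */
static uint8_t transmitter_sel(enum phy p)
{
	return (uint8_t)(p / 2);
}

int main(void)
{
	for (enum phy p = PHY_A; p <= PHY_F; p++)
		printf("UNIPHY %c: link %u, transmitter block %u\n",
		       'A' + (int)p, link_sel(p), transmitter_sel(p));
	return 0;
}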
204
205bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
206 enum clock_source_id id,
207 uint32_t *ref_clk_src_id)
208{
209 if (ref_clk_src_id == NULL) {
210 BREAK_TO_DEBUGGER();
211 return false;
212 }
213
214 switch (id) {
215 case CLOCK_SOURCE_ID_PLL1:
216 *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
217 return true;
218 case CLOCK_SOURCE_ID_PLL2:
219 *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
220 return true;
221 case CLOCK_SOURCE_ID_DCPLL:
222 *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
223 return true;
224 case CLOCK_SOURCE_ID_EXTERNAL:
225 *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
226 return true;
227 case CLOCK_SOURCE_ID_UNDEFINED:
228 *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
229 return true;
230 default:
231 /* Unsupported clock source id */
232 BREAK_TO_DEBUGGER();
233 return false;
234 }
235}
236
237uint8_t dal_cmd_table_helper_encoder_id_to_atom(
238 enum encoder_id id)
239{
240 switch (id) {
241 case ENCODER_ID_INTERNAL_LVDS:
242 return ENCODER_OBJECT_ID_INTERNAL_LVDS;
243 case ENCODER_ID_INTERNAL_TMDS1:
244 return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
245 case ENCODER_ID_INTERNAL_TMDS2:
246 return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
247 case ENCODER_ID_INTERNAL_DAC1:
248 return ENCODER_OBJECT_ID_INTERNAL_DAC1;
249 case ENCODER_ID_INTERNAL_DAC2:
250 return ENCODER_OBJECT_ID_INTERNAL_DAC2;
251 case ENCODER_ID_INTERNAL_LVTM1:
252 return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
253 case ENCODER_ID_INTERNAL_HDMI:
254 return ENCODER_OBJECT_ID_HDMI_INTERNAL;
255 case ENCODER_ID_EXTERNAL_TRAVIS:
256 return ENCODER_OBJECT_ID_TRAVIS;
257 case ENCODER_ID_EXTERNAL_NUTMEG:
258 return ENCODER_OBJECT_ID_NUTMEG;
259 case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
260 return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
261 case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
262 return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
263 case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
264 return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
265 case ENCODER_ID_EXTERNAL_MVPU_FPGA:
266 return ENCODER_OBJECT_ID_MVPU_FPGA;
267 case ENCODER_ID_INTERNAL_DDI:
268 return ENCODER_OBJECT_ID_INTERNAL_DDI;
269 case ENCODER_ID_INTERNAL_UNIPHY:
270 return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
271 case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
272 return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
273 case ENCODER_ID_INTERNAL_UNIPHY1:
274 return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
275 case ENCODER_ID_INTERNAL_UNIPHY2:
276 return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
277 case ENCODER_ID_INTERNAL_UNIPHY3:
278 return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
279 case ENCODER_ID_INTERNAL_WIRELESS:
280 return ENCODER_OBJECT_ID_INTERNAL_VCE;
281 case ENCODER_ID_UNKNOWN:
282 return ENCODER_OBJECT_ID_NONE;
283 default:
284 /* Invalid encoder id */
285 BREAK_TO_DEBUGGER();
286 return ENCODER_OBJECT_ID_NONE;
287 }
288}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
new file mode 100644
index 000000000000..1fab634b66be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -0,0 +1,90 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_COMMAND_TABLE_HELPER_H__
27#define __DAL_COMMAND_TABLE_HELPER_H__
28
29#include "dce80/command_table_helper_dce80.h"
30#include "dce110/command_table_helper_dce110.h"
31#include "dce112/command_table_helper_dce112.h"
32
33struct command_table_helper {
34 bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
35 uint8_t (*encoder_action_to_atom)(
36 enum bp_encoder_control_action action);
37 uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
38 bool enable_dp_audio);
39 bool (*engine_bp_to_atom)(enum engine_id engine_id,
40 uint32_t *atom_engine_id);
41 void (*assign_control_parameter)(
42 const struct command_table_helper *h,
43 struct bp_encoder_control *control,
44 DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
45 bool (*clock_source_id_to_atom)(enum clock_source_id id,
46 uint32_t *atom_pll_id);
47 bool (*clock_source_id_to_ref_clk_src)(
48 enum clock_source_id id,
49 uint32_t *ref_clk_src_id);
50 uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
51 uint8_t (*encoder_id_to_atom)(enum encoder_id id);
52 uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
53 enum clock_source_id id);
54 uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
55 uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
56 uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
57 uint8_t (*phy_id_to_atom)(enum transmitter t);
58 uint8_t (*disp_power_gating_action_to_atom)(
59 enum bp_pipe_control_action action);
60 bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
61 uint32_t *atom_clock_type);
62 uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
63};
64
65bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
66 enum dce_version dce);
67
68bool dal_cmd_table_helper_controller_id_to_atom(
69 enum controller_id id,
70 uint8_t *atom_id);
71
72uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
73 enum signal_type s,
74 bool enable_dp_audio);
75
76void dal_cmd_table_helper_assign_control_parameter(
77 const struct command_table_helper *h,
78 struct bp_encoder_control *control,
79	DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
80
81bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
82 enum clock_source_id id,
83 uint32_t *ref_clk_src_id);
84
85uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
86 enum transmitter t);
87
88uint8_t dal_cmd_table_helper_encoder_id_to_atom(
89 enum encoder_id id);
90#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
new file mode 100644
index 000000000000..dff82dd11721
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -0,0 +1,364 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "atom.h"
29
30#include "include/bios_parser_types.h"
31
32#include "../command_table_helper.h"
33
34static uint8_t phy_id_to_atom(enum transmitter t)
35{
36 uint8_t atom_phy_id;
37
38 switch (t) {
39 case TRANSMITTER_UNIPHY_A:
40 atom_phy_id = ATOM_PHY_ID_UNIPHYA;
41 break;
42 case TRANSMITTER_UNIPHY_B:
43 atom_phy_id = ATOM_PHY_ID_UNIPHYB;
44 break;
45 case TRANSMITTER_UNIPHY_C:
46 atom_phy_id = ATOM_PHY_ID_UNIPHYC;
47 break;
48 case TRANSMITTER_UNIPHY_D:
49 atom_phy_id = ATOM_PHY_ID_UNIPHYD;
50 break;
51 case TRANSMITTER_UNIPHY_E:
52 atom_phy_id = ATOM_PHY_ID_UNIPHYE;
53 break;
54 case TRANSMITTER_UNIPHY_F:
55 atom_phy_id = ATOM_PHY_ID_UNIPHYF;
56 break;
57 case TRANSMITTER_UNIPHY_G:
58 atom_phy_id = ATOM_PHY_ID_UNIPHYG;
59 break;
60 default:
61 atom_phy_id = ATOM_PHY_ID_UNIPHYA;
62 break;
63 }
64 return atom_phy_id;
65}
66
67static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
68{
69 uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
70
71 switch (s) {
72 case SIGNAL_TYPE_DISPLAY_PORT:
73 case SIGNAL_TYPE_EDP:
74 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
75 break;
76 case SIGNAL_TYPE_LVDS:
77 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
78 break;
79 case SIGNAL_TYPE_DVI_SINGLE_LINK:
80 case SIGNAL_TYPE_DVI_DUAL_LINK:
81 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
82 break;
83 case SIGNAL_TYPE_HDMI_TYPE_A:
84 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
85 break;
86 case SIGNAL_TYPE_DISPLAY_PORT_MST:
87 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
88 break;
89 default:
90 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
91 break;
92 }
93
94 return atom_dig_mode;
95}
96
97static uint8_t clock_source_id_to_atom_phy_clk_src_id(
98 enum clock_source_id id)
99{
100 uint8_t atom_phy_clk_src_id = 0;
101
102 switch (id) {
103 case CLOCK_SOURCE_ID_PLL0:
104 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
105 break;
106 case CLOCK_SOURCE_ID_PLL1:
107 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
108 break;
109 case CLOCK_SOURCE_ID_PLL2:
110 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
111 break;
112 case CLOCK_SOURCE_ID_EXTERNAL:
113 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
114 break;
115 default:
116 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
117 break;
118 }
119
120 return atom_phy_clk_src_id >> 2;
121}
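
clock_source_id_to_atom_phy_clk_src_id() returns the raw two-bit selector: the ATOM_TRANSMITTER_CONFIG_V5_* reference-clock defines (by assumption here) carry the PLL select in bits [3:2], so shifting right by two strips the field position. A standalone sketch with the define values assumed for illustration:

#include <stdint.h>
#include <stdio.h>

/* Assumed values standing in for the ATOM_TRANSMITTER_CONFIG_V5_* defines,
 * with the PLL selector placed in bits [3:2] of the config byte. */
#define CFG_V5_P0PLL          0x00
#define CFG_V5_P1PLL          0x04
#define CFG_V5_P2PLL          0x08
#define CFG_V5_REFCLK_SRC_EXT 0x0c

/* Shifting right by two, as the helper above does, turns the positioned
 * bit-field into the raw 0..3 selector value the caller wants. */
static uint8_t to_raw_selector(uint8_t positioned)
{
	return positioned >> 2;
}

int main(void)
{
	printf("P0PLL -> %u\n", to_raw_selector(CFG_V5_P0PLL));
	printf("P1PLL -> %u\n", to_raw_selector(CFG_V5_P1PLL));
	printf("P2PLL -> %u\n", to_raw_selector(CFG_V5_P2PLL));
	printf("EXT   -> %u\n", to_raw_selector(CFG_V5_REFCLK_SRC_EXT));
	return 0;
}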
122
123static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
124{
125 uint8_t atom_hpd_sel = 0;
126
127 switch (id) {
128 case HPD_SOURCEID1:
129 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
130 break;
131 case HPD_SOURCEID2:
132 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
133 break;
134 case HPD_SOURCEID3:
135 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
136 break;
137 case HPD_SOURCEID4:
138 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
139 break;
140 case HPD_SOURCEID5:
141 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
142 break;
143 case HPD_SOURCEID6:
144 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
145 break;
146 case HPD_SOURCEID_UNKNOWN:
147 default:
148 atom_hpd_sel = 0;
149 break;
150 }
151 return atom_hpd_sel >> 4;
152}
153
154static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
155{
156 uint8_t atom_dig_encoder_sel = 0;
157
158 switch (id) {
159 case ENGINE_ID_DIGA:
160 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
161 break;
162 case ENGINE_ID_DIGB:
163 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
164 break;
165 case ENGINE_ID_DIGC:
166 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
167 break;
168 case ENGINE_ID_DIGD:
169 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
170 break;
171 case ENGINE_ID_DIGE:
172 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
173 break;
174 case ENGINE_ID_DIGF:
175 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
176 break;
177 case ENGINE_ID_DIGG:
178 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
179 break;
180 case ENGINE_ID_UNKNOWN:
181 /* No DIG_FRONT is associated with the DIG_BACKEND */
182 atom_dig_encoder_sel = 0;
183 break;
184 default:
185 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
186 break;
187 }
188
189 return atom_dig_encoder_sel;
190}
191
192static bool clock_source_id_to_atom(
193 enum clock_source_id id,
194 uint32_t *atom_pll_id)
195{
196 bool result = true;
197
198 if (atom_pll_id != NULL)
199 switch (id) {
200 case CLOCK_SOURCE_ID_PLL0:
201 *atom_pll_id = ATOM_PPLL0;
202 break;
203 case CLOCK_SOURCE_ID_PLL1:
204 *atom_pll_id = ATOM_PPLL1;
205 break;
206 case CLOCK_SOURCE_ID_PLL2:
207 *atom_pll_id = ATOM_PPLL2;
208 break;
209 case CLOCK_SOURCE_ID_EXTERNAL:
210 *atom_pll_id = ATOM_PPLL_INVALID;
211 break;
212 case CLOCK_SOURCE_ID_DFS:
213 *atom_pll_id = ATOM_EXT_PLL1;
214 break;
215 case CLOCK_SOURCE_ID_VCE:
216 /* for VCE encoding,
217 * we need to pass in ATOM_PPLL_INVALID
218 */
219 *atom_pll_id = ATOM_PPLL_INVALID;
220 break;
221 case CLOCK_SOURCE_ID_DP_DTO:
222 /* When programming DP DTO PLL ID should be invalid */
223 *atom_pll_id = ATOM_PPLL_INVALID;
224 break;
225 case CLOCK_SOURCE_ID_UNDEFINED:
226 /* Should not happen */
227 *atom_pll_id = ATOM_PPLL_INVALID;
228 result = false;
229 break;
230 default:
231 result = false;
232 break;
233 }
234
235 return result;
236}
237
238static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
239{
240 bool result = false;
241
242 if (atom_engine_id != NULL)
243 switch (id) {
244 case ENGINE_ID_DIGA:
245 *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
246 result = true;
247 break;
248 case ENGINE_ID_DIGB:
249 *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
250 result = true;
251 break;
252 case ENGINE_ID_DIGC:
253 *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
254 result = true;
255 break;
256 case ENGINE_ID_DIGD:
257 *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
258 result = true;
259 break;
260 case ENGINE_ID_DIGE:
261 *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
262 result = true;
263 break;
264 case ENGINE_ID_DIGF:
265 *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
266 result = true;
267 break;
268 case ENGINE_ID_DIGG:
269 *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
270 result = true;
271 break;
272 case ENGINE_ID_DACA:
273 *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
274 result = true;
275 break;
276 default:
277 break;
278 }
279
280 return result;
281}
282
283static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
284{
285 uint8_t atom_action = 0;
286
287 switch (action) {
288 case ENCODER_CONTROL_ENABLE:
289 atom_action = ATOM_ENABLE;
290 break;
291 case ENCODER_CONTROL_DISABLE:
292 atom_action = ATOM_DISABLE;
293 break;
294 case ENCODER_CONTROL_SETUP:
295 atom_action = ATOM_ENCODER_CMD_SETUP;
296 break;
297 case ENCODER_CONTROL_INIT:
298 atom_action = ATOM_ENCODER_INIT;
299 break;
300 default:
301		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
302 break;
303 }
304
305 return atom_action;
306}
307
308static uint8_t disp_power_gating_action_to_atom(
309 enum bp_pipe_control_action action)
310{
311 uint8_t atom_pipe_action = 0;
312
313 switch (action) {
314 case ASIC_PIPE_DISABLE:
315 atom_pipe_action = ATOM_DISABLE;
316 break;
317 case ASIC_PIPE_ENABLE:
318 atom_pipe_action = ATOM_ENABLE;
319 break;
320 case ASIC_PIPE_INIT:
321 atom_pipe_action = ATOM_INIT;
322 break;
323 default:
324		ASSERT_CRITICAL(false); /* Unhandled action in driver! */
325 break;
326 }
327
328 return atom_pipe_action;
329}
330
331/* function table */
332static const struct command_table_helper command_table_helper_funcs = {
333 .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
334 .encoder_action_to_atom = encoder_action_to_atom,
335 .engine_bp_to_atom = engine_bp_to_atom,
336 .clock_source_id_to_atom = clock_source_id_to_atom,
337 .clock_source_id_to_atom_phy_clk_src_id =
338 clock_source_id_to_atom_phy_clk_src_id,
339 .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
340 .hpd_sel_to_atom = hpd_sel_to_atom,
341 .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
342 .phy_id_to_atom = phy_id_to_atom,
343 .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
344 .assign_control_parameter = NULL,
345 .clock_source_id_to_ref_clk_src = NULL,
346 .transmitter_bp_to_atom = NULL,
347 .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
348 .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
349};
350
351/*
352 * dal_cmd_tbl_helper_dce110_get_table
353 *
354 * @brief
355 * Get the DCE 11.0 command table helper function table
356 *
357 * @return
358 * const struct command_table_helper * - pointer to the table of helper functions
359 *
360 */
361const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void)
362{
363 return &command_table_helper_funcs;
364}
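For reference, the per-ASIC tables returned by dal_cmd_tbl_helper_dce80/dce110/dce112_get_table() are presumably selected by a DCE-version dispatcher elsewhere in the bios parser; a minimal sketch of such a selection follows (the dispatcher name and the enum dce_version values are assumptions, only the *_get_table() functions come from this patch):

static const struct command_table_helper *select_cmd_tbl_helper(
	enum dce_version dce)
{
	switch (dce) {
	case DCE_VERSION_8_0:
		return dal_cmd_tbl_helper_dce80_get_table();
	case DCE_VERSION_11_0:
		return dal_cmd_tbl_helper_dce110_get_table();
	case DCE_VERSION_11_2:
		return dal_cmd_tbl_helper_dce112_get_table();
	default:
		return NULL; /* unsupported DCE version */
	}
}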
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
new file mode 100644
index 000000000000..eb60c2ead992
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_COMMAND_TABLE_HELPER_DCE110_H__
27#define __DAL_COMMAND_TABLE_HELPER_DCE110_H__
28
29struct command_table_helper;
30
31/* Initialize command table helper functions */
32const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void);
33
34#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
new file mode 100644
index 000000000000..8b72aa588b86
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -0,0 +1,418 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "atom.h"
29
30#include "include/bios_parser_types.h"
31
32#include "../command_table_helper.h"
33
34static uint8_t phy_id_to_atom(enum transmitter t)
35{
36 uint8_t atom_phy_id;
37
38 switch (t) {
39 case TRANSMITTER_UNIPHY_A:
40 atom_phy_id = ATOM_PHY_ID_UNIPHYA;
41 break;
42 case TRANSMITTER_UNIPHY_B:
43 atom_phy_id = ATOM_PHY_ID_UNIPHYB;
44 break;
45 case TRANSMITTER_UNIPHY_C:
46 atom_phy_id = ATOM_PHY_ID_UNIPHYC;
47 break;
48 case TRANSMITTER_UNIPHY_D:
49 atom_phy_id = ATOM_PHY_ID_UNIPHYD;
50 break;
51 case TRANSMITTER_UNIPHY_E:
52 atom_phy_id = ATOM_PHY_ID_UNIPHYE;
53 break;
54 case TRANSMITTER_UNIPHY_F:
55 atom_phy_id = ATOM_PHY_ID_UNIPHYF;
56 break;
57 case TRANSMITTER_UNIPHY_G:
58 atom_phy_id = ATOM_PHY_ID_UNIPHYG;
59 break;
60 default:
61 atom_phy_id = ATOM_PHY_ID_UNIPHYA;
62 break;
63 }
64 return atom_phy_id;
65}
66
67static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
68{
69 uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
70
71 switch (s) {
72 case SIGNAL_TYPE_DISPLAY_PORT:
73 case SIGNAL_TYPE_EDP:
74 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
75 break;
76 case SIGNAL_TYPE_DVI_SINGLE_LINK:
77 case SIGNAL_TYPE_DVI_DUAL_LINK:
78 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
79 break;
80 case SIGNAL_TYPE_HDMI_TYPE_A:
81 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
82 break;
83 case SIGNAL_TYPE_DISPLAY_PORT_MST:
84 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
85 break;
86 default:
87 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
88 break;
89 }
90
91 return atom_dig_mode;
92}
93
94static uint8_t clock_source_id_to_atom_phy_clk_src_id(
95 enum clock_source_id id)
96{
97 uint8_t atom_phy_clk_src_id = 0;
98
99 switch (id) {
100 case CLOCK_SOURCE_ID_PLL0:
101 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
102 break;
103 case CLOCK_SOURCE_ID_PLL1:
104 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
105 break;
106 case CLOCK_SOURCE_ID_PLL2:
107 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
108 break;
109 case CLOCK_SOURCE_ID_EXTERNAL:
110 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
111 break;
112 default:
113 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
114 break;
115 }
116
117 return atom_phy_clk_src_id >> 2;
118}
119
120static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
121{
122 uint8_t atom_hpd_sel = 0;
123
124 switch (id) {
125 case HPD_SOURCEID1:
126 atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
127 break;
128 case HPD_SOURCEID2:
129 atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
130 break;
131 case HPD_SOURCEID3:
132 atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
133 break;
134 case HPD_SOURCEID4:
135 atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
136 break;
137 case HPD_SOURCEID5:
138 atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
139 break;
140 case HPD_SOURCEID6:
141 atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
142 break;
143 case HPD_SOURCEID_UNKNOWN:
144 default:
145 atom_hpd_sel = 0;
146 break;
147 }
148 return atom_hpd_sel;
149}
150
151static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
152{
153 uint8_t atom_dig_encoder_sel = 0;
154
155 switch (id) {
156 case ENGINE_ID_DIGA:
157 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
158 break;
159 case ENGINE_ID_DIGB:
160 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
161 break;
162 case ENGINE_ID_DIGC:
163 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
164 break;
165 case ENGINE_ID_DIGD:
166 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
167 break;
168 case ENGINE_ID_DIGE:
169 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
170 break;
171 case ENGINE_ID_DIGF:
172 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
173 break;
174 case ENGINE_ID_DIGG:
175 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
176 break;
177 case ENGINE_ID_UNKNOWN:
178 /* No DIG_FRONT is associated to DIG_BACKEND */
179 atom_dig_encoder_sel = 0;
180 break;
181 default:
182 atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
183 break;
184 }
185
186 return atom_dig_encoder_sel;
187}
188
189static bool clock_source_id_to_atom(
190 enum clock_source_id id,
191 uint32_t *atom_pll_id)
192{
193 bool result = true;
194
195 if (atom_pll_id != NULL)
196 switch (id) {
197 case CLOCK_SOURCE_COMBO_PHY_PLL0:
198 *atom_pll_id = ATOM_COMBOPHY_PLL0;
199 break;
200 case CLOCK_SOURCE_COMBO_PHY_PLL1:
201 *atom_pll_id = ATOM_COMBOPHY_PLL1;
202 break;
203 case CLOCK_SOURCE_COMBO_PHY_PLL2:
204 *atom_pll_id = ATOM_COMBOPHY_PLL2;
205 break;
206 case CLOCK_SOURCE_COMBO_PHY_PLL3:
207 *atom_pll_id = ATOM_COMBOPHY_PLL3;
208 break;
209 case CLOCK_SOURCE_COMBO_PHY_PLL4:
210 *atom_pll_id = ATOM_COMBOPHY_PLL4;
211 break;
212 case CLOCK_SOURCE_COMBO_PHY_PLL5:
213 *atom_pll_id = ATOM_COMBOPHY_PLL5;
214 break;
215 case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
216 *atom_pll_id = ATOM_PPLL0;
217 break;
218 case CLOCK_SOURCE_ID_DFS:
219 *atom_pll_id = ATOM_GCK_DFS;
220 break;
221 case CLOCK_SOURCE_ID_VCE:
222 *atom_pll_id = ATOM_DP_DTO;
223 break;
224 case CLOCK_SOURCE_ID_DP_DTO:
225 *atom_pll_id = ATOM_DP_DTO;
226 break;
227 case CLOCK_SOURCE_ID_UNDEFINED:
228 /* Should not happen */
229 *atom_pll_id = ATOM_PPLL_INVALID;
230 result = false;
231 break;
232 default:
233 result = false;
234 break;
235 }
236
237 return result;
238}
239
240static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
241{
242 bool result = false;
243
244 if (atom_engine_id != NULL)
245 switch (id) {
246 case ENGINE_ID_DIGA:
247 *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
248 result = true;
249 break;
250 case ENGINE_ID_DIGB:
251 *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
252 result = true;
253 break;
254 case ENGINE_ID_DIGC:
255 *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
256 result = true;
257 break;
258 case ENGINE_ID_DIGD:
259 *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
260 result = true;
261 break;
262 case ENGINE_ID_DIGE:
263 *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
264 result = true;
265 break;
266 case ENGINE_ID_DIGF:
267 *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
268 result = true;
269 break;
270 case ENGINE_ID_DIGG:
271 *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
272 result = true;
273 break;
274 case ENGINE_ID_DACA:
275 *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
276 result = true;
277 break;
278 default:
279 break;
280 }
281
282 return result;
283}
284
285static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
286{
287 uint8_t atom_action = 0;
288
289 switch (action) {
290 case ENCODER_CONTROL_ENABLE:
291 atom_action = ATOM_ENABLE;
292 break;
293 case ENCODER_CONTROL_DISABLE:
294 atom_action = ATOM_DISABLE;
295 break;
296 case ENCODER_CONTROL_SETUP:
297 atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
298 break;
299 case ENCODER_CONTROL_INIT:
300 atom_action = ATOM_ENCODER_INIT;
301 break;
302 default:
303		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
304 break;
305 }
306
307 return atom_action;
308}
309
310static uint8_t disp_power_gating_action_to_atom(
311 enum bp_pipe_control_action action)
312{
313 uint8_t atom_pipe_action = 0;
314
315 switch (action) {
316 case ASIC_PIPE_DISABLE:
317 atom_pipe_action = ATOM_DISABLE;
318 break;
319 case ASIC_PIPE_ENABLE:
320 atom_pipe_action = ATOM_ENABLE;
321 break;
322 case ASIC_PIPE_INIT:
323 atom_pipe_action = ATOM_INIT;
324 break;
325 default:
326		ASSERT_CRITICAL(false); /* Unhandled action in driver! */
327 break;
328 }
329
330 return atom_pipe_action;
331}
332
333static bool dc_clock_type_to_atom(
334 enum bp_dce_clock_type id,
335 uint32_t *atom_clock_type)
336{
337 bool retCode = true;
338
339 if (atom_clock_type != NULL) {
340 switch (id) {
341 case DCECLOCK_TYPE_DISPLAY_CLOCK:
342 *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
343 break;
344
345 case DCECLOCK_TYPE_DPREFCLK:
346 *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
347 break;
348
349 default:
350			ASSERT_CRITICAL(false); /* Unhandled clock type in driver! */
351 break;
352 }
353 }
354
355 return retCode;
356}
357
358static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
359{
360 uint8_t atomColorDepth = 0;
361
362 switch (id) {
363 case TRANSMITTER_COLOR_DEPTH_24:
364 atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
365 break;
366 case TRANSMITTER_COLOR_DEPTH_30:
367 atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
368 break;
369 case TRANSMITTER_COLOR_DEPTH_36:
370 atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
371 break;
372 case TRANSMITTER_COLOR_DEPTH_48:
373 atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
374 break;
375 default:
376		ASSERT_CRITICAL(false); /* Unhandled color depth in driver! */
377 break;
378 }
379
380 return atomColorDepth;
381}
382
383/* function table */
384static const struct command_table_helper command_table_helper_funcs = {
385 .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
386 .encoder_action_to_atom = encoder_action_to_atom,
387 .engine_bp_to_atom = engine_bp_to_atom,
388 .clock_source_id_to_atom = clock_source_id_to_atom,
389 .clock_source_id_to_atom_phy_clk_src_id =
390 clock_source_id_to_atom_phy_clk_src_id,
391 .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
392 .hpd_sel_to_atom = hpd_sel_to_atom,
393 .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
394 .phy_id_to_atom = phy_id_to_atom,
395 .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
396 .assign_control_parameter = NULL,
397 .clock_source_id_to_ref_clk_src = NULL,
398 .transmitter_bp_to_atom = NULL,
399 .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
400 .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
401 .dc_clock_type_to_atom = dc_clock_type_to_atom,
402 .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
403};
404
405/*
406 * dal_cmd_tbl_helper_dce112_get_table
407 *
408 * @brief
409 * Get the DCE 11.2 command table helper function table
410 *
411 * @return
412 * const struct command_table_helper * - pointer to the table of helper functions
413 *
414 */
415const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void)
416{
417 return &command_table_helper_funcs;
418}
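The PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_* values mapped by transmitter_color_depth_to_atom() above correspond to the standard HDMI deep-colour clock ratios (5:4 for 30 bpp, 3:2 for 36 bpp, 2:1 for 48 bpp). A hedged sketch of how a caller might apply those ratios to a 24 bpp pixel clock (the helper below is purely illustrative and not part of this patch):

static uint32_t scale_pix_clk_for_deep_color(uint32_t pix_clk_khz,
	enum transmitter_color_depth depth)
{
	switch (depth) {
	case TRANSMITTER_COLOR_DEPTH_30:
		return pix_clk_khz * 5 / 4;
	case TRANSMITTER_COLOR_DEPTH_36:
		return pix_clk_khz * 3 / 2;
	case TRANSMITTER_COLOR_DEPTH_48:
		return pix_clk_khz * 2;
	case TRANSMITTER_COLOR_DEPTH_24:
	default:
		return pix_clk_khz;
	}
}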
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
new file mode 100644
index 000000000000..dc3660951355
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_COMMAND_TABLE_HELPER_DCE112_H__
27#define __DAL_COMMAND_TABLE_HELPER_DCE112_H__
28
29struct command_table_helper;
30
31/* Initialize command table helper functions */
32const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void);
33
34#endif /* __DAL_COMMAND_TABLE_HELPER_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
new file mode 100644
index 000000000000..295e16ef3f73
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "atom.h"
29
30#include "include/grph_object_id.h"
31#include "include/grph_object_defs.h"
32#include "include/bios_parser_types.h"
33
34#include "../command_table_helper.h"
35
36static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
37{
38 uint8_t atom_action = 0;
39
40 switch (action) {
41 case ENCODER_CONTROL_ENABLE:
42 atom_action = ATOM_ENABLE;
43 break;
44 case ENCODER_CONTROL_DISABLE:
45 atom_action = ATOM_DISABLE;
46 break;
47 case ENCODER_CONTROL_SETUP:
48 atom_action = ATOM_ENCODER_CMD_SETUP;
49 break;
50 case ENCODER_CONTROL_INIT:
51 atom_action = ATOM_ENCODER_INIT;
52 break;
53 default:
54		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
55 break;
56 }
57
58 return atom_action;
59}
60
61static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
62{
63 bool result = false;
64
65 if (atom_engine_id != NULL)
66 switch (id) {
67 case ENGINE_ID_DIGA:
68 *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
69 result = true;
70 break;
71 case ENGINE_ID_DIGB:
72 *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
73 result = true;
74 break;
75 case ENGINE_ID_DIGC:
76 *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
77 result = true;
78 break;
79 case ENGINE_ID_DIGD:
80 *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
81 result = true;
82 break;
83 case ENGINE_ID_DIGE:
84 *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
85 result = true;
86 break;
87 case ENGINE_ID_DIGF:
88 *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
89 result = true;
90 break;
91 case ENGINE_ID_DIGG:
92 *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
93 result = true;
94 break;
95 case ENGINE_ID_DACA:
96 *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
97 result = true;
98 break;
99 default:
100 break;
101 }
102
103 return result;
104}
105
106static bool clock_source_id_to_atom(
107 enum clock_source_id id,
108 uint32_t *atom_pll_id)
109{
110 bool result = true;
111
112 if (atom_pll_id != NULL)
113 switch (id) {
114 case CLOCK_SOURCE_ID_PLL0:
115 *atom_pll_id = ATOM_PPLL0;
116 break;
117 case CLOCK_SOURCE_ID_PLL1:
118 *atom_pll_id = ATOM_PPLL1;
119 break;
120 case CLOCK_SOURCE_ID_PLL2:
121 *atom_pll_id = ATOM_PPLL2;
122 break;
123 case CLOCK_SOURCE_ID_EXTERNAL:
124 *atom_pll_id = ATOM_PPLL_INVALID;
125 break;
126 case CLOCK_SOURCE_ID_DFS:
127 *atom_pll_id = ATOM_EXT_PLL1;
128 break;
129 case CLOCK_SOURCE_ID_VCE:
130 /* for VCE encoding,
131 * we need to pass in ATOM_PPLL_INVALID
132 */
133 *atom_pll_id = ATOM_PPLL_INVALID;
134 break;
135 case CLOCK_SOURCE_ID_DP_DTO:
136 /* When programming DP DTO PLL ID should be invalid */
137 *atom_pll_id = ATOM_PPLL_INVALID;
138 break;
139 case CLOCK_SOURCE_ID_UNDEFINED:
140 BREAK_TO_DEBUGGER(); /* check when this will happen! */
141 *atom_pll_id = ATOM_PPLL_INVALID;
142 result = false;
143 break;
144 default:
145 result = false;
146 break;
147 }
148
149 return result;
150}
151
152static uint8_t clock_source_id_to_atom_phy_clk_src_id(
153 enum clock_source_id id)
154{
155 uint8_t atom_phy_clk_src_id = 0;
156
157 switch (id) {
158 case CLOCK_SOURCE_ID_PLL0:
159 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
160 break;
161 case CLOCK_SOURCE_ID_PLL1:
162 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
163 break;
164 case CLOCK_SOURCE_ID_PLL2:
165 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
166 break;
167 case CLOCK_SOURCE_ID_EXTERNAL:
168 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
169 break;
170 default:
171 atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
172 break;
173 }
174
175 return atom_phy_clk_src_id >> 2;
176}
177
178static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
179{
180 uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
181
182 switch (s) {
183 case SIGNAL_TYPE_DISPLAY_PORT:
184 case SIGNAL_TYPE_EDP:
185 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
186 break;
187 case SIGNAL_TYPE_LVDS:
188 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
189 break;
190 case SIGNAL_TYPE_DVI_SINGLE_LINK:
191 case SIGNAL_TYPE_DVI_DUAL_LINK:
192 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
193 break;
194 case SIGNAL_TYPE_HDMI_TYPE_A:
195 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
196 break;
197 case SIGNAL_TYPE_DISPLAY_PORT_MST:
198 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
199 break;
200 default:
201 atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
202 break;
203 }
204
205 return atom_dig_mode;
206}
207
208static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
209{
210 uint8_t atom_hpd_sel = 0;
211
212 switch (id) {
213 case HPD_SOURCEID1:
214 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
215 break;
216 case HPD_SOURCEID2:
217 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
218 break;
219 case HPD_SOURCEID3:
220 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
221 break;
222 case HPD_SOURCEID4:
223 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
224 break;
225 case HPD_SOURCEID5:
226 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
227 break;
228 case HPD_SOURCEID6:
229 atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
230 break;
231 case HPD_SOURCEID_UNKNOWN:
232 default:
233 atom_hpd_sel = 0;
234 break;
235 }
236 return atom_hpd_sel >> 4;
237}
238
239static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
240{
241 uint8_t atom_dig_encoder_sel = 0;
242
243 switch (id) {
244 case ENGINE_ID_DIGA:
245 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
246 break;
247 case ENGINE_ID_DIGB:
248 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
249 break;
250 case ENGINE_ID_DIGC:
251 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
252 break;
253 case ENGINE_ID_DIGD:
254 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
255 break;
256 case ENGINE_ID_DIGE:
257 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
258 break;
259 case ENGINE_ID_DIGF:
260 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
261 break;
262 case ENGINE_ID_DIGG:
263 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
264 break;
265 default:
266 atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
267 break;
268 }
269
270 return atom_dig_encoder_sel;
271}
272
273static uint8_t phy_id_to_atom(enum transmitter t)
274{
275 uint8_t atom_phy_id;
276
277 switch (t) {
278 case TRANSMITTER_UNIPHY_A:
279 atom_phy_id = ATOM_PHY_ID_UNIPHYA;
280 break;
281 case TRANSMITTER_UNIPHY_B:
282 atom_phy_id = ATOM_PHY_ID_UNIPHYB;
283 break;
284 case TRANSMITTER_UNIPHY_C:
285 atom_phy_id = ATOM_PHY_ID_UNIPHYC;
286 break;
287 case TRANSMITTER_UNIPHY_D:
288 atom_phy_id = ATOM_PHY_ID_UNIPHYD;
289 break;
290 case TRANSMITTER_UNIPHY_E:
291 atom_phy_id = ATOM_PHY_ID_UNIPHYE;
292 break;
293 case TRANSMITTER_UNIPHY_F:
294 atom_phy_id = ATOM_PHY_ID_UNIPHYF;
295 break;
296 case TRANSMITTER_UNIPHY_G:
297 atom_phy_id = ATOM_PHY_ID_UNIPHYG;
298 break;
299 default:
300 atom_phy_id = ATOM_PHY_ID_UNIPHYA;
301 break;
302 }
303 return atom_phy_id;
304}
305
306static uint8_t disp_power_gating_action_to_atom(
307 enum bp_pipe_control_action action)
308{
309 uint8_t atom_pipe_action = 0;
310
311 switch (action) {
312 case ASIC_PIPE_DISABLE:
313 atom_pipe_action = ATOM_DISABLE;
314 break;
315 case ASIC_PIPE_ENABLE:
316 atom_pipe_action = ATOM_ENABLE;
317 break;
318 case ASIC_PIPE_INIT:
319 atom_pipe_action = ATOM_INIT;
320 break;
321 default:
322		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
323 break;
324 }
325
326 return atom_pipe_action;
327}
328
329static const struct command_table_helper command_table_helper_funcs = {
330 .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
331 .encoder_action_to_atom = encoder_action_to_atom,
332 .engine_bp_to_atom = engine_bp_to_atom,
333 .clock_source_id_to_atom = clock_source_id_to_atom,
334 .clock_source_id_to_atom_phy_clk_src_id =
335 clock_source_id_to_atom_phy_clk_src_id,
336 .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
337 .hpd_sel_to_atom = hpd_sel_to_atom,
338 .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
339 .phy_id_to_atom = phy_id_to_atom,
340 .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
341 .assign_control_parameter =
342 dal_cmd_table_helper_assign_control_parameter,
343 .clock_source_id_to_ref_clk_src =
344 dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
345 .transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
346 .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
347 .encoder_mode_bp_to_atom =
348 dal_cmd_table_helper_encoder_mode_bp_to_atom,
349};
350
351const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void)
352{
353 return &command_table_helper_funcs;
354}
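Note that, unlike the DCE 11.0/11.2 tables above, this DCE 8.0 table fills in .assign_control_parameter, .clock_source_id_to_ref_clk_src and .transmitter_bp_to_atom with the shared dal_cmd_table_helper_* implementations rather than leaving them NULL, so code using these optional hooks presumably has to guard against NULL on the newer ASICs. A minimal sketch of such a guard (the caller context, local variable and hook signature are assumptions):

	uint8_t atom_transmitter = 0;

	/* transmitter_bp_to_atom is NULL on DCE 11.x, so check before calling */
	if (cmd_helper->transmitter_bp_to_atom)
		atom_transmitter = cmd_helper->transmitter_bp_to_atom(transmitter);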
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
new file mode 100644
index 000000000000..e675c359e306
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_COMMAND_TABLE_HELPER_DCE80_H__
27#define __DAL_COMMAND_TABLE_HELPER_DCE80_H__
28
29struct command_table_helper;
30
31const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void);
32
33#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
new file mode 100644
index 000000000000..4001933e7808
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the 'calcs' sub-component of DAL.
3# It calculates bandwidth and watermark values for HW programming
4#
5
6BW_CALCS = bandwidth_calcs.o bw_fixed.o gamma_calcs.o
7
8AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))
9
10AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bandwidth_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/bandwidth_calcs.c
new file mode 100644
index 000000000000..0b2bb3992f1a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/bandwidth_calcs.c
@@ -0,0 +1,3108 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "bandwidth_calcs.h"
28#include "dc.h"
29#include "core_types.h"
30
31/*******************************************************************************
32 * Private Functions
33 ******************************************************************************/
34
35static void calculate_bandwidth(
36 const struct bw_calcs_dceip *dceip,
37 const struct bw_calcs_vbios *vbios,
38 struct bw_calcs_data *data)
39
40{
41 const int32_t pixels_per_chunk = 512;
42 const int32_t high = 2;
43 const int32_t mid = 1;
44 const int32_t low = 0;
45 const uint32_t s_low = 0;
46 const uint32_t s_mid1 = 1;
47 const uint32_t s_mid2 = 2;
48 const uint32_t s_mid3 = 3;
49 const uint32_t s_mid4 = 4;
50 const uint32_t s_mid5 = 5;
51 const uint32_t s_mid6 = 6;
52 const uint32_t s_high = 7;
53 const uint32_t bus_efficiency = 1;
54 const uint32_t dmif_chunk_buff_margin = 1;
55
56 uint32_t max_chunks_fbc_mode;
57 int32_t num_cursor_lines;
58
59 int32_t i, j, k;
60 struct bw_fixed yclk[3];
61 struct bw_fixed sclk[8];
62 bool d0_underlay_enable;
63 bool d1_underlay_enable;
64 bool fbc_enabled;
65 bool lpt_enabled;
66 enum bw_defines sclk_message;
67 enum bw_defines yclk_message;
68 enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
69 enum bw_defines tiling_mode[maximum_number_of_surfaces];
70 enum bw_defines surface_type[maximum_number_of_surfaces];
71 enum bw_defines voltage;
72 enum bw_defines pipe_check;
73 enum bw_defines hsr_check;
74 enum bw_defines vsr_check;
75 enum bw_defines lb_size_check;
76 enum bw_defines fbc_check;
77 enum bw_defines rotation_check;
78 enum bw_defines mode_check;
79 enum bw_defines nbp_state_change_enable_blank;
80 /*initialize variables*/
81 int32_t number_of_displays_enabled = 0;
82 int32_t number_of_displays_enabled_with_margin = 0;
83 int32_t number_of_aligned_displays_with_no_margin = 0;
84
85 yclk[low] = vbios->low_yclk;
86 yclk[mid] = vbios->mid_yclk;
87 yclk[high] = vbios->high_yclk;
88 sclk[s_low] = vbios->low_sclk;
89 sclk[s_mid1] = vbios->mid1_sclk;
90 sclk[s_mid2] = vbios->mid2_sclk;
91 sclk[s_mid3] = vbios->mid3_sclk;
92 sclk[s_mid4] = vbios->mid4_sclk;
93 sclk[s_mid5] = vbios->mid5_sclk;
94 sclk[s_mid6] = vbios->mid6_sclk;
95 sclk[s_high] = vbios->high_sclk;
96 /*''''''''''''''''''*/
97 /* surface assignment:*/
98 /* 0: d0 underlay or underlay luma*/
99 /* 1: d0 underlay chroma*/
100 /* 2: d1 underlay or underlay luma*/
101 /* 3: d1 underlay chroma*/
102 /* 4: d0 graphics*/
103 /* 5: d1 graphics*/
104 /* 6: d2 graphics*/
105 /* 7: d3 graphics, same mode as d2*/
106 /* 8: d4 graphics, same mode as d2*/
107 /* 9: d5 graphics, same mode as d2*/
108 /* ...*/
109 /* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/
110 /* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/
111 /* underlay luma and chroma surface parameters from spreadsheet*/
112
113
114
115
116 if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
117 else {
118 d0_underlay_enable = 1;
119 }
120 if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
121 else {
122 d1_underlay_enable = 1;
123 }
124 data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
125 switch (data->underlay_surface_type) {
126 case bw_def_420:
127 surface_type[0] = bw_def_underlay420_luma;
128 surface_type[2] = bw_def_underlay420_luma;
129 data->bytes_per_pixel[0] = 1;
130 data->bytes_per_pixel[2] = 1;
131 surface_type[1] = bw_def_underlay420_chroma;
132 surface_type[3] = bw_def_underlay420_chroma;
133 data->bytes_per_pixel[1] = 2;
134 data->bytes_per_pixel[3] = 2;
135 data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component;
136 data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component;
137 data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component;
138 data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component;
139 break;
140 case bw_def_422:
141 surface_type[0] = bw_def_underlay422;
142 surface_type[2] = bw_def_underlay422;
143 data->bytes_per_pixel[0] = 2;
144 data->bytes_per_pixel[2] = 2;
145 data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component;
146 data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component;
147 break;
148 default:
149 surface_type[0] = bw_def_underlay444;
150 surface_type[2] = bw_def_underlay444;
151 data->bytes_per_pixel[0] = 4;
152 data->bytes_per_pixel[2] = 4;
153 data->lb_size_per_component[0] = dceip->lb_size_per_component444;
154 data->lb_size_per_component[2] = dceip->lb_size_per_component444;
155 break;
156 }
157 if (d0_underlay_enable) {
158 switch (data->underlay_surface_type) {
159 case bw_def_420:
160 data->enable[0] = 1;
161 data->enable[1] = 1;
162 break;
163 default:
164 data->enable[0] = 1;
165 data->enable[1] = 0;
166 break;
167 }
168 }
169 else {
170 data->enable[0] = 0;
171 data->enable[1] = 0;
172 }
173 if (d1_underlay_enable) {
174 switch (data->underlay_surface_type) {
175 case bw_def_420:
176 data->enable[2] = 1;
177 data->enable[3] = 1;
178 break;
179 default:
180 data->enable[2] = 1;
181 data->enable[3] = 0;
182 break;
183 }
184 }
185 else {
186 data->enable[2] = 0;
187 data->enable[3] = 0;
188 }
189 data->use_alpha[0] = 0;
190 data->use_alpha[1] = 0;
191 data->use_alpha[2] = 0;
192 data->use_alpha[3] = 0;
193 data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable;
194 data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable;
195 data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable;
196 data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable;
197 /*underlay0 same and graphics display pipe0*/
198 data->interlace_mode[0] = data->interlace_mode[4];
199 data->interlace_mode[1] = data->interlace_mode[4];
200 /*underlay1 same and graphics display pipe1*/
201 data->interlace_mode[2] = data->interlace_mode[5];
202 data->interlace_mode[3] = data->interlace_mode[5];
203 /*underlay0 same and graphics display pipe0*/
204 data->h_total[0] = data->h_total[4];
205 data->v_total[0] = data->v_total[4];
206 data->h_total[1] = data->h_total[4];
207 data->v_total[1] = data->v_total[4];
208 /*underlay1 same and graphics display pipe1*/
209 data->h_total[2] = data->h_total[5];
210 data->v_total[2] = data->v_total[5];
211 data->h_total[3] = data->h_total[5];
212 data->v_total[3] = data->v_total[5];
213 /*underlay0 same and graphics display pipe0*/
214 data->pixel_rate[0] = data->pixel_rate[4];
215 data->pixel_rate[1] = data->pixel_rate[4];
216 /*underlay1 same and graphics display pipe1*/
217 data->pixel_rate[2] = data->pixel_rate[5];
218 data->pixel_rate[3] = data->pixel_rate[5];
219 if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) {
220 tiling_mode[0] = bw_def_linear;
221 tiling_mode[1] = bw_def_linear;
222 tiling_mode[2] = bw_def_linear;
223 tiling_mode[3] = bw_def_linear;
224 }
225 else {
226 tiling_mode[0] = bw_def_landscape;
227 tiling_mode[1] = bw_def_landscape;
228 tiling_mode[2] = bw_def_landscape;
229 tiling_mode[3] = bw_def_landscape;
230 }
231 data->lb_bpc[0] = data->underlay_lb_bpc;
232 data->lb_bpc[1] = data->underlay_lb_bpc;
233 data->lb_bpc[2] = data->underlay_lb_bpc;
234 data->lb_bpc[3] = data->underlay_lb_bpc;
235 data->compression_rate[0] = bw_int_to_fixed(1);
236 data->compression_rate[1] = bw_int_to_fixed(1);
237 data->compression_rate[2] = bw_int_to_fixed(1);
238 data->compression_rate[3] = bw_int_to_fixed(1);
239 data->access_one_channel_only[0] = 0;
240 data->access_one_channel_only[1] = 0;
241 data->access_one_channel_only[2] = 0;
242 data->access_one_channel_only[3] = 0;
243 data->cursor_width_pixels[0] = bw_int_to_fixed(0);
244 data->cursor_width_pixels[1] = bw_int_to_fixed(0);
245 data->cursor_width_pixels[2] = bw_int_to_fixed(0);
246 data->cursor_width_pixels[3] = bw_int_to_fixed(0);
247 /* graphics surface parameters from spreadsheet*/
248 fbc_enabled = 0;
249 lpt_enabled = 0;
250 for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
251 if (i < data->number_of_displays + 4) {
252 if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
253 data->enable[i] = 0;
254 data->use_alpha[i] = 0;
255 }
256 else if (i == 4 && data->d0_underlay_mode == bw_def_blend) {
257 data->enable[i] = 1;
258 data->use_alpha[i] = 1;
259 }
260 else if (i == 4) {
261 data->enable[i] = 1;
262 data->use_alpha[i] = 0;
263 }
264 else if (i == 5 && data->d1_underlay_mode == bw_def_underlay_only) {
265 data->enable[i] = 0;
266 data->use_alpha[i] = 0;
267 }
268 else if (i == 5 && data->d1_underlay_mode == bw_def_blend) {
269 data->enable[i] = 1;
270 data->use_alpha[i] = 1;
271 }
272 else {
273 data->enable[i] = 1;
274 data->use_alpha[i] = 0;
275 }
276 }
277 else {
278 data->enable[i] = 0;
279 data->use_alpha[i] = 0;
280 }
281 data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable;
282 surface_type[i] = bw_def_graphics;
283 data->lb_size_per_component[i] = dceip->lb_size_per_component444;
284 if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) {
285 tiling_mode[i] = bw_def_linear;
286 }
287 else {
288 tiling_mode[i] = bw_def_tiled;
289 }
290 data->lb_bpc[i] = data->graphics_lb_bpc;
291 if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) {
292 data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate);
293 data->access_one_channel_only[i] = data->lpt_en[i];
294 }
295 else {
296 data->compression_rate[i] = bw_int_to_fixed(1);
297 data->access_one_channel_only[i] = 0;
298 }
299 if (data->fbc_en[i] == 1) {
300 fbc_enabled = 1;
301 if (data->lpt_en[i] == 1) {
302 lpt_enabled = 1;
303 }
304 }
305 data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
306 }
307 /* display_write_back420*/
308 data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0;
309 data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0;
310 if (data->d1_display_write_back_dwb_enable == 1) {
311 data->enable[maximum_number_of_surfaces - 2] = 1;
312 data->enable[maximum_number_of_surfaces - 1] = 1;
313 }
314 else {
315 data->enable[maximum_number_of_surfaces - 2] = 0;
316 data->enable[maximum_number_of_surfaces - 1] = 0;
317 }
318 surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma;
319 surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma;
320 data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component;
321 data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component;
322 data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1;
323 data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2;
324 data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5];
325 data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5];
326 data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
327 data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
328 data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
329 data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
330 data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
331 data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
332 tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear;
333 tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear;
334 data->lb_bpc[maximum_number_of_surfaces - 2] = 8;
335 data->lb_bpc[maximum_number_of_surfaces - 1] = 8;
336 data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
337 data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
338 data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0;
339 data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0;
340 /*assume display pipe1 has dwb enabled*/
341 data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5];
342 data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5];
343 data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5];
344 data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5];
345 data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5];
346 data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5];
347 data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5];
348 data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5];
349 data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5];
350 data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5];
351 data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5];
352 data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5];
353 data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
354 data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
355 data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
356 data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
357 data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono;
358 data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono;
359 data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
360 data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
361 data->use_alpha[maximum_number_of_surfaces - 2] = 0;
362 data->use_alpha[maximum_number_of_surfaces - 1] = 0;
363 /*mode check calculations:*/
364 /* mode within dce ip capabilities*/
365 /* fbc*/
366 /* hsr*/
367 /* vsr*/
368 /* lb size*/
369 /*effective scaling source and ratios:*/
370 /*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/
371	/*420 chroma has half the width, height, and horizontal and vertical scaling ratios of luma*/
372 /*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/
373 /*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/
374 /*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/
375 /*in interlace mode there is 2:1 vertical downscaling for each field*/
376 /*in panning or bezel adjustment mode the source width has an extra 128 pixels*/
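	/*
	 * For example, with panning enabled a 1920 pixel wide source rounds up
	 * to floor2(1920 - 1, 128) + 256 = 1792 + 256 = 2048 pixels, while
	 * without panning it rounds up to ceil2(1920, 128) = 1920.
	 */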
377 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
378 if (data->enable[i]) {
379 if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) {
380 data->h_taps[i] = bw_int_to_fixed(1);
381 data->v_taps[i] = bw_int_to_fixed(1);
382 }
383 if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) {
384 data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2));
385 data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2));
386 data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2));
387 data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2));
388 data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2));
389 }
390 else {
391 data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i];
392 data->src_width_after_surface_type = data->src_width[i];
393 data->src_height_after_surface_type = data->src_height[i];
394 data->hsr_after_surface_type = data->h_scale_ratio[i];
395 data->vsr_after_surface_type = data->v_scale_ratio[i];
396 }
397 if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
398 data->src_width_after_rotation = data->src_height_after_surface_type;
399 data->src_height_after_rotation = data->src_width_after_surface_type;
400 data->hsr_after_rotation = data->vsr_after_surface_type;
401 data->vsr_after_rotation = data->hsr_after_surface_type;
402 }
403 else {
404 data->src_width_after_rotation = data->src_width_after_surface_type;
405 data->src_height_after_rotation = data->src_height_after_surface_type;
406 data->hsr_after_rotation = data->hsr_after_surface_type;
407 data->vsr_after_rotation = data->vsr_after_surface_type;
408 }
409 switch (data->stereo_mode[i]) {
410 case bw_def_top_bottom:
411 data->source_width_pixels[i] = data->src_width_after_rotation;
412 data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation);
413 data->hsr_after_stereo = data->hsr_after_rotation;
414 data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation);
415 break;
416 case bw_def_side_by_side:
417 data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation);
418 data->source_height_pixels = data->src_height_after_rotation;
419 data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation);
420 data->vsr_after_stereo = data->vsr_after_rotation;
421 break;
422 default:
423 data->source_width_pixels[i] = data->src_width_after_rotation;
424 data->source_height_pixels = data->src_height_after_rotation;
425 data->hsr_after_stereo = data->hsr_after_rotation;
426 data->vsr_after_stereo = data->vsr_after_rotation;
427 break;
428 }
429 data->hsr[i] = data->hsr_after_stereo;
430 if (data->interlace_mode[i]) {
431 data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2));
432 }
433 else {
434 data->vsr[i] = data->vsr_after_stereo;
435 }
436 if (data->panning_and_bezel_adjustment != bw_def_none) {
437 data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256));
438 }
439 else {
440 data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128));
441 }
442 data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels;
443 }
444 }
445 /*mode support checks:*/
446 /*the number of graphics and underlay pipes is limited by the ip support*/
447 /*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/
448 /*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/
449 /*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/
450 /*the number of lines in the line buffer has to exceed the number of vertical taps*/
451 /*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/
452 /*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
453	/*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 24.011719 / 3, rounded up to a multiple of 48*/
454	/*frame buffer compression is not supported with stereo mode, rotation, or non-888 formats*/
455	/*rotation is not supported with linear or stereo modes*/
456 if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) {
457 pipe_check = bw_def_ok;
458 }
459 else {
460 pipe_check = bw_def_notok;
461 }
462 hsr_check = bw_def_ok;
463 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
464 if (data->enable[i]) {
465 if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
466 if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
467 hsr_check = bw_def_hsr_mtn_4;
468 }
469 else {
470 if (bw_mtn(data->hsr[i], data->h_taps[i])) {
471 hsr_check = bw_def_hsr_mtn_h_taps;
472 }
473 else {
474 if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
475 hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr;
476 }
477 }
478 }
479 }
480 }
481 }
482 vsr_check = bw_def_ok;
483 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
484 if (data->enable[i]) {
485 if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) {
486 if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) {
487 vsr_check = bw_def_vsr_mtn_4;
488 }
489 else {
490 if (bw_mtn(data->vsr[i], data->v_taps[i])) {
491 vsr_check = bw_def_vsr_mtn_v_taps;
492 }
493 }
494 }
495 }
496 }
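	/*
	 * For example, an 8 bit per component source 1920 pixels wide (with no
	 * pre-downscaling) gives a line buffer pitch of
	 * ceil2(24.011719 / 3 * ceil2(1920, 8), 48) = ceil2(15367.5, 48) = 15408.
	 */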
497 lb_size_check = bw_def_ok;
498 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
499 if (data->enable[i]) {
500 if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
501 data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
502 }
503 else {
504 data->source_width_in_lb = data->source_width_pixels[i];
505 }
506 switch (data->lb_bpc[i]) {
507 case 8:
508 data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
509 break;
510 case 10:
511 data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
512 break;
513 default:
514 data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48));
515 break;
516 }
517 data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
518			/*clamp the partitions to the maximum number supported by the lb*/
519 if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
520 data->lb_partitions_max[i] = bw_int_to_fixed(10);
521 }
522 else {
523 data->lb_partitions_max[i] = bw_int_to_fixed(7);
524 }
525 data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]);
526 if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) {
527 lb_size_check = bw_def_notok;
528 }
529 }
530 }
531 fbc_check = bw_def_ok;
532 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
533 if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) {
534 fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo;
535 }
536 }
537 rotation_check = bw_def_ok;
538 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
539 if (data->enable[i]) {
540 if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && (tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) {
541 rotation_check = bw_def_invalid_linear_or_stereo_mode;
542 }
543 }
544 }
545 if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) {
546 mode_check = bw_def_ok;
547 }
548 else {
549 mode_check = bw_def_notok;
550 }
551 /*number of memory channels for write-back client*/
552 data->number_of_dram_wrchannels = vbios->number_of_dram_channels;
553 data->number_of_dram_channels = vbios->number_of_dram_channels;
554 /*modify number of memory channels if lpt mode is enabled*/
555 /* low power tiling mode register*/
556 /* 0 = use channel 0*/
557 /* 1 = use channel 0 and 1*/
558 /* 2 = use channel 0,1,2,3*/
559 if ((fbc_enabled == 1 && lpt_enabled == 1)) {
560 data->dram_efficiency = bw_int_to_fixed(1);
561 if (dceip->low_power_tiling_mode == 0) {
562 data->number_of_dram_channels = 1;
563 }
564 else if (dceip->low_power_tiling_mode == 1) {
565 data->number_of_dram_channels = 2;
566 }
567 else if (dceip->low_power_tiling_mode == 2) {
568 data->number_of_dram_channels = 4;
569 }
570 else {
571 data->number_of_dram_channels = 1;
572 }
573 }
574 else {
575 data->dram_efficiency = bw_frc_to_fixed(8, 10);
576 }
577 /*memory request size and latency hiding:*/
578 /*request size is normally 64 bytes, 2-line interleaved, with full latency hiding*/
579 /*the display write-back requests are single line*/
580 /*for tiled graphics surfaces, or underlay surfaces with width higher than the maximum size for full efficiency, request size is 32 bytes in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. only half of the bytes in the request size are useful in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/
581 /*for underlay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/
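/*for example, in the code below a tiled graphics surface at 4 bytes per pixel whose rotation is orthogonal to the tiling grain*/
/*gets 32 byte requests of which only 16 bytes are useful, with 2-line interleaving and 2 lines of latency hiding, while the*/
/*same surface rotated parallel to the tiling grain keeps full 64 byte requests*/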
582 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
583 if (data->enable[i]) {
584 if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) {
585 if ((i < 4)) {
586 /*underlay portrait tiling mode is not supported*/
587 data->orthogonal_rotation[i] = 1;
588 }
589 else {
590 /*graphics portrait tiling mode*/
591 if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) {
592 data->orthogonal_rotation[i] = 0;
593 }
594 else {
595 data->orthogonal_rotation[i] = 1;
596 }
597 }
598 }
599 else {
600 if ((i < 4)) {
601 /*only underlay landscape tiling mode is supported*/
602 if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) {
603 data->orthogonal_rotation[i] = 0;
604 }
605 else {
606 data->orthogonal_rotation[i] = 1;
607 }
608 }
609 else {
610 /*graphics landscape tiling mode*/
611 if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) {
612 data->orthogonal_rotation[i] = 0;
613 }
614 else {
615 data->orthogonal_rotation[i] = 1;
616 }
617 }
618 }
619 if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) {
620 data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling;
621 }
622 else {
623 data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling;
624 }
625 if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
626 data->bytes_per_request[i] = bw_int_to_fixed(64);
627 data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
628 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1);
629 data->latency_hiding_lines[i] = bw_int_to_fixed(1);
630 }
631 else if (tiling_mode[i] == bw_def_linear) {
632 data->bytes_per_request[i] = bw_int_to_fixed(64);
633 data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
634 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
635 data->latency_hiding_lines[i] = bw_int_to_fixed(2);
636 }
637 else {
638 if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) {
639 switch (data->bytes_per_pixel[i]) {
640 case 8:
641 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
642 data->latency_hiding_lines[i] = bw_int_to_fixed(2);
643 if (data->orthogonal_rotation[i]) {
644 data->bytes_per_request[i] = bw_int_to_fixed(32);
645 data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
646 }
647 else {
648 data->bytes_per_request[i] = bw_int_to_fixed(64);
649 data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
650 }
651 break;
652 case 4:
653 if (data->orthogonal_rotation[i]) {
654 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
655 data->latency_hiding_lines[i] = bw_int_to_fixed(2);
656 data->bytes_per_request[i] = bw_int_to_fixed(32);
657 data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
658 }
659 else {
660 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
661 data->latency_hiding_lines[i] = bw_int_to_fixed(2);
662 data->bytes_per_request[i] = bw_int_to_fixed(64);
663 data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
664 }
665 break;
666 case 2:
667 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
668 data->latency_hiding_lines[i] = bw_int_to_fixed(2);
669 data->bytes_per_request[i] = bw_int_to_fixed(32);
670 data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
671 break;
672 default:
673 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
674 data->latency_hiding_lines[i] = bw_int_to_fixed(2);
675 data->bytes_per_request[i] = bw_int_to_fixed(32);
676 data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
677 break;
678 }
679 }
680 else {
681 data->bytes_per_request[i] = bw_int_to_fixed(64);
682 data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
683 if (data->orthogonal_rotation[i]) {
684 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
685 data->latency_hiding_lines[i] = bw_int_to_fixed(4);
686 }
687 else {
688 switch (data->bytes_per_pixel[i]) {
689 case 4:
690 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
691 data->latency_hiding_lines[i] = bw_int_to_fixed(2);
692 break;
693 case 2:
694 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4);
695 data->latency_hiding_lines[i] = bw_int_to_fixed(4);
696 break;
697 default:
698 data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
699 data->latency_hiding_lines[i] = bw_int_to_fixed(4);
700 break;
701 }
702 }
703 }
704 }
705 }
706 }
707 /*requested peak bandwidth:*/
708 /*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/
709 /*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/
710 /*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/
711 /**/
712 /*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/
713 /*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/
714 /**/
715 /*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/
716 /*rounded up to even and divided by the line times for initialization, which is normally three.*/
717 /*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/
718 /*rounded up to line pairs if not doing line buffer prefetching.*/
719 /**/
720 /*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/
721 /*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/
722 /**/
723 /*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/
724 /*vertical scale ratio and the number of vertical taps increased by one. add one more for possible odd line*/
725 /*panning/bezel adjustment mode.*/
726 /**/
727 /*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/
728 /*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/
729 /*furthermore, there is only one line time for initialization.*/
730 /**/
731 /*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/
732 /*the ceiling of the vertical scale ratio.*/
733 /**/
734 /*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.*/
735 /**/
736 /*the horizontal blank and chunk granularity factor is indirectly used to indicate the interval of time required to transfer the source pixels.*/
737 /*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/
738 /*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/
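/*in the notation of the loop below this reduces to:*/
/* request_bandwidth = max(lb_lines_in_per_line_out_in_beginning_of_frame, lb_lines_in_per_line_out_in_middle_of_frame)*/
/*  * source_width_rounded_up_to_chunks / (h_total / pixel_rate) * bytes_per_pixel / useful_bytes_per_request*/
/*  * lines_interleaved_in_mem_access / latency_hiding_lines*/
/* display_bandwidth = request_bandwidth * bytes_per_request*/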
739 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
740 if (data->enable[i]) {
741 data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1));
742 if (data->panning_and_bezel_adjustment == bw_def_any_lines) {
743 data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
744 }
745 if (data->stereo_mode[i] == bw_def_top_bottom) {
746 v_filter_init_mode[i] = bw_def_manual;
747 data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
748 }
749 else {
750 v_filter_init_mode[i] = bw_def_auto;
751 }
752 if (data->stereo_mode[i] == bw_def_top_bottom) {
753 data->num_lines_at_frame_start = bw_int_to_fixed(1);
754 }
755 else {
756 data->num_lines_at_frame_start = bw_int_to_fixed(3);
757 }
758 if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) {
759 data->line_buffer_prefetch[i] = 0;
760 }
761 else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) {
762 data->line_buffer_prefetch[i] = 1;
763 }
764 else {
765 data->line_buffer_prefetch[i] = 0;
766 }
767 data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start);
768 if (data->line_buffer_prefetch[i] == 1) {
769 data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]);
770 }
771 else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) {
772 data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1);
773 }
774 else if (bw_leq(data->vsr[i], bw_frc_to_fixed(4, 3))) {
775 data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3));
776 }
777 else if (bw_leq(data->vsr[i], bw_frc_to_fixed(6, 4))) {
778 data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4));
779 }
780 else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) {
781 data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2);
782 }
783 else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) {
784 data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3);
785 }
786 else {
787 data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4);
788 }
789 if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) {
790 data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1);
791 }
792 else {
793 data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
794 }
795 data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]);
796 data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]);
797 }
798 }
799 /*outstanding chunk request limit*/
800 /*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/
801 /*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/
802 /**/
803 /*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/
804 /**/
805 /*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/
806 /*and underlay.*/
807 /**/
808 /*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/
809 /*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/
810 /*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/
811 /*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/
812 /*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/
813 /*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/
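/*in the code below, when that limiting applies:*/
/* adjusted_data_buffer_size = min(data_buffer_size, ceil(min_dmif_size_in_time * display_bandwidth, memory_chunk_size_in_bytes))*/
/* outstanding_chunk_request_limit = ceil(adjusted_data_buffer_size / pipe_chunk_size_in_bytes)*/
/*with a fixed limit of 127 when only a single graphics pipe and no underlay are enabled*/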
814 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
815 if (data->enable[i]) {
816 if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) {
817 data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin;
818 }
819 else {
820 data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin;
821 }
822 }
823 if (data->fbc_en[i] == 1) {
824 max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin;
825 }
826 }
827 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
828 if (data->enable[i]) {
829 switch (surface_type[i]) {
830 case bw_def_display_write_back420_luma:
831 data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size);
832 break;
833 case bw_def_display_write_back420_chroma:
834 data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size);
835 break;
836 case bw_def_underlay420_luma:
837 data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
838 break;
839 case bw_def_underlay420_chroma:
840 data->data_buffer_size[i] = bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2));
841 break;
842 case bw_def_underlay422:case bw_def_underlay444:
843 if (data->orthogonal_rotation[i] == 0) {
844 data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
845 }
846 else {
847 data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size));
848 }
849 break;
850 default:
851 if (data->fbc_en[i] == 1) {
852 /*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/
853 if (data->number_of_displays == 1) {
854 data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
855 }
856 else {
857 data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
858 }
859 }
860 else {
861 /*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/
862 if (data->number_of_displays == 1) {
863 data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
864 }
865 else {
866 data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
867 }
868 }
869 break;
870 }
871 if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
872 data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
873 data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
874 }
875 else {
876 data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i]));
877 data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i]));
878 }
879 }
880 }
881 data->min_dmif_size_in_time = bw_int_to_fixed(9999);
882 data->min_mcifwr_size_in_time = bw_int_to_fixed(9999);
883 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
884 if (data->enable[i]) {
885 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
886 if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) {
887 data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
888 }
889 }
890 else {
891 if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) {
892 data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
893 }
894 }
895 }
896 }
897 data->total_requests_for_dmif_size = bw_int_to_fixed(0);
898 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
899 if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
900 data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i]));
901 }
902 }
903 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
904 if (data->enable[i]) {
905 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) {
906 data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i]));
907 }
908 else {
909 data->adjusted_data_buffer_size[i] = data->data_buffer_size[i];
910 }
911 }
912 }
913 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
914 if (data->enable[i]) {
915 if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) {
916 /*set maximum chunk limit if only one graphics pipe is enabled*/
917 data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
918 }
919 else {
920 data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
921 /*clamp maximum chunk limit in the graphics display pipe*/
922 if ((i >= 4)) {
923 data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
924 }
925 }
926 }
927 }
928 /*outstanding pte request limit*/
929 /*in tiling mode with no rotation the sg pte requests are 8 useful ptes, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/
930 /*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/
931 /*in linear mode the pte requests are 8 useful ptes, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/
932 /*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/
933 /*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/
934 /*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/
935 /*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/
936 /*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful ptes per pte request*/
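/*worked example: tiling with no rotation at 4 bytes per pixel gives a 32x32 scatter-gather page and 8 useful ptes per*/
/*pte request, so with the 256 pixel chunk width assumed above pte_request_per_chunk = 256 / 32 / 8 = 1*/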
937 if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) {
938 data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
939 }
940 else {
941 data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
942 }
943 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
944 if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
945 if (tiling_mode[i] == bw_def_linear) {
946 data->useful_pte_per_pte_request = bw_int_to_fixed(8);
947 data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i]));
948 data->scatter_gather_page_height[i] = bw_int_to_fixed(1);
949 data->scatter_gather_pte_request_rows = bw_int_to_fixed(1);
950 data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
951 }
952 else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) {
953 data->useful_pte_per_pte_request = bw_int_to_fixed(8);
954 switch (data->bytes_per_pixel[i]) {
955 case 4:
956 data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
957 data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
958 break;
959 case 2:
960 data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
961 data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
962 break;
963 default:
964 data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
965 data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
966 break;
967 }
968 data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
969 data->scatter_gather_row_height = data->scatter_gather_page_height[i];
970 }
971 else {
972 data->useful_pte_per_pte_request = bw_int_to_fixed(1);
973 switch (data->bytes_per_pixel[i]) {
974 case 4:
975 data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
976 data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
977 break;
978 case 2:
979 data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
980 data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
981 break;
982 default:
983 data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
984 data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
985 break;
986 }
987 data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
988 data->scatter_gather_row_height = data->scatter_gather_page_height[i];
989 }
990 data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request);
991 data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]);
992 data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]);
993 if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) {
994 data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank;
995 }
996 else {
997 data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1))));
998 }
999 }
1000 }
1001 /*pitch padding recommended for efficiency in linear mode*/
1002 /*in linear mode graphics or underlay with scatter gather, a pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/
1003 /*if that is the case it is recommended to pad the pitch by at least 256 pixels*/
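/*for example, with 8 dram banks and 2 dram channels any pitch that is a multiple of 256 * 8 * 2 = 4096 bytes counts as*/
/*inefficient, and padding the pitch by at least 256 pixels is recommended*/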
1004 data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels));
1005
1006 /*pixel transfer time*/
1007 /*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipes' data buffer size in memory in the time for data transfer*/
1008 /*for dmif, pte and cursor requests have to be included.*/
1009 /*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/
1010 /*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/
1011 /*the page close-open time is determined by trc and the number of page close-opens*/
1012 /*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/
1013 /*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/
1014 /*otherwise, the bytes per page close-open is the chunk size because that is the arbitration slice.*/
1015 /*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/
1016 /*cursor requests outstanding are limited to a group of two source lines. each group costs a page close-open time for dmif reads*/
1017 /*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/
1018 /*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/
1019 /*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/
1020 /*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/
1021 /*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the first pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/
1022 /*the source pixels for the first output pixel is 4 times the source width if the scaler vertical filter initialization value is greater than 4, 512 if it is greater than 2, and 0 otherwise.*/
1023 /*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/
1024 /*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/
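/*in the code below the source pixels for the first output pixel are therefore 4 * source_width when v_filter_init > 4,*/
/*512 when v_filter_init > 2 and 0 otherwise, and the corresponding source data is*/
/* src_pixels * bytes_per_pixel * bytes_per_request / useful_bytes_per_request*/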
1025 data->cursor_total_data = bw_int_to_fixed(0);
1026 data->cursor_total_request_groups = bw_int_to_fixed(0);
1027 data->scatter_gather_total_pte_requests = bw_int_to_fixed(0);
1028 data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0);
1029 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1030 if (data->enable[i]) {
1031 data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4)));
1032 if (dceip->large_cursor == 1) {
1033 data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1)));
1034 }
1035 else {
1036 data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1)));
1037 }
1038 if (data->scatter_gather_enable_for_pipe[i]) {
1039 data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]);
1040 data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1)));
1041 }
1042 }
1043 }
1044 data->tile_width_in_pixels = bw_int_to_fixed(8);
1045 data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
1046 data->mcifwr_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
1047 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1048 if (data->enable[i]) {
1049 if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) {
1050 data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i])));
1051 }
1052 else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) {
1053 data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice;
1054 }
1055 else {
1056 data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i];
1057 }
1058 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
1059 data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
1060 }
1061 else {
1062 data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
1063 }
1064 }
1065 }
1066 data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000));
1067 data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000));
1068 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1069 if (data->enable[i]) {
1070 data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1071 }
1072 }
1073 data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0);
1074 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1075 if (data->enable[i]) {
1076 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
1077 data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i]));
1078 }
1079 }
1080 }
1081 data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
1082 data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
1083 data->total_display_reads_required_data = bw_int_to_fixed(0);
1084 data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
1085 data->total_display_writes_required_data = bw_int_to_fixed(0);
1086 data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0);
1087 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1088 if (data->enable[i]) {
1089 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
1090 data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i];
1091 /*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/
1092 /*pseudo-channel may be read independently of one another.*/
1093 /*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/
1094 /*the 64 or 32 byte sized data is stored in one pseudo-channel.*/
1095 /*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/
1096 /*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/
1097 /*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/
1098 /*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/
1099 /*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/
1100 /*the memory efficiency will be 50% for the 32 byte sized data.*/
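/*for example, assuming a 64 bit wide gddr5 channel the burst returns 8 * 64 / 8 = 64 bytes, so the 32 byte requests*/
/*below are scaled by ceil(64 / 32) = 2, which is the 50% efficiency described above*/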
1101 if (vbios->memory_type == bw_def_hbm) {
1102 data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i];
1103 }
1104 else {
1105 data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1)));
1106 }
1107 data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data);
1108 data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data);
1109 }
1110 else {
1111 data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]);
1112 data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1))));
1113 }
1114 }
1115 }
1116 data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
1117 data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
1118 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1119 if (data->enable[i]) {
1120 if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) {
1121 data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]);
1122 }
1123 else {
1124 if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) {
1125 data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512);
1126 }
1127 else {
1128 data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0);
1129 }
1130 }
1131 data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1132 data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i])));
1133 data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1134 data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
1135 }
1136 }
1137 for (i = 0; i <= 2; i++) {
1138 for (j = 0; j <= 7; j++) {
1139 data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
1140 if (data->d1_display_write_back_dwb_enable == 1) {
1141 data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
1142 }
1143 }
1144 }
1145 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1146 for (j = 0; j <= 2; j++) {
1147 for (k = 0; k <= 7; k++) {
1148 if (data->enable[i]) {
1149 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
1150 /*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/
1151 /*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/
1152 data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor)))));
1153 data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i]));
1154 /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
1155 /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
1156 /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
1157 /*immediately serviced without a gap in the urgent requests.*/
1158 /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
1159 if (surface_type[i] == bw_def_graphics) {
1160 switch (data->lb_bpc[i]) {
1161 case 6:
1162 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
1163 break;
1164 case 8:
1165 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
1166 break;
1167 case 10:
1168 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
1169 break;
1170 default:
1171 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
1172 break;
1173 }
1174 if (data->use_alpha[i] == 1) {
1175 data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
1176 }
1177 }
1178 else {
1179 switch (data->lb_bpc[i]) {
1180 case 6:
1181 data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
1182 break;
1183 case 8:
1184 data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
1185 break;
1186 case 10:
1187 data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
1188 break;
1189 default:
1190 data->v_scaler_efficiency = bw_int_to_fixed(3);
1191 break;
1192 }
1193 }
1194 if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
1195 data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
1196 }
1197 else {
1198 data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
1199 }
1200 data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))));
1201 }
1202 else {
1203 data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]));
1204 /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
1205 /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
1206 /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
1207 /*immediately serviced without a gap in the urgent requests.*/
1208 /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
1209 data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))));
1210 }
1211 }
1212 }
1213 }
1214 }
1215 /*cpu c-state and p-state change enable*/
1216 /*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/
1217 /*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/
1218 /*condition for the blackout duration:*/
1219 /* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/
1220 /*condition for the blackout recovery:*/
1221 /* recovery time > dmif burst time + 2 * urgent latency*/
1222 /* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/
1223 /* / (dispclk - display bw)*/
1224 /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
1225 /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is two or more*/
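/*in the loop below the cursor latency hiding therefore works out to*/
/* (cursor_dcp_buffer_lines - 1 or - 3) * h_total / vsr / pixel_rate*/
/*and is treated as unlimited (9999) when the cursor is gated before stutter/dram clock state change or has zero width*/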
1226 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1227 if (data->enable[i]) {
1228 if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
1229 if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) {
1230 data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
1231 }
1232 else {
1233 data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
1234 }
1235 }
1236 else {
1237 data->cursor_latency_hiding[i] = bw_int_to_fixed(9999);
1238 }
1239 }
1240 }
1241 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1242 if (data->enable[i]) {
1243 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
1244 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1245 }
1246 else {
1247 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1248 }
1249 data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]);
1250 }
1251 }
1252 for (i = 0; i <= 2; i++) {
1253 for (j = 0; j <= 7; j++) {
1254 data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999);
1255 data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0);
1256 data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0);
1257 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1258 if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) {
1259 if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
1260 data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
1261 data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k]))));
1262 if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) {
1263 data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
1264 }
1265 else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
1266 data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
1267 }
1268 }
1269 else {
1270 data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
1271 data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
1272 if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) {
1273 data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
1274 }
1275 else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
1276 data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
1277 }
1278 }
1279 }
1280 }
1281 }
1282 }
1283 if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) {
1284 data->cpup_state_change_enable = bw_def_yes;
1285 if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) {
1286 data->cpuc_state_change_enable = bw_def_yes;
1287 }
1288 else {
1289 data->cpuc_state_change_enable = bw_def_no;
1290 }
1291 }
1292 else {
1293 data->cpup_state_change_enable = bw_def_no;
1294 data->cpuc_state_change_enable = bw_def_no;
1295 }
1296 /*nb p-state change enable*/
1297 /*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/
1298 /*below the maximum.*/
1299 /*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/
1300 /*minus the dmif burst time, minus the source line transfer time*/
1301 /*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/
1302 /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
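/*for the dmif surfaces in the loop below this gives*/
/* dram_speed_change_margin = maximum_latency_hiding_with_cursor - nbp_state_change_latency - dmif_burst_time*/
/*  - dram_speed_change_line_source_transfer_time*/
/*with an additional mcifwr burst time subtracted for the write-back surfaces*/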
1303 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1304 if (data->enable[i]) {
1305 if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
1306 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
1307 }
1308 else {
1309 /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
1310 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
1311 }
1312 data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
1313 }
1314 }
1315 /*initialize variables*/
1316 number_of_displays_enabled = 0;
1317 number_of_displays_enabled_with_margin = 0;
1318 for (k = 0; k < maximum_number_of_surfaces; k++) {
1319 if (data->enable[k]) {
1320 number_of_displays_enabled = number_of_displays_enabled + 1;
1321 }
1322 }
1323 data->display_pstate_change_enable[maximum_number_of_surfaces - 1] = 0;
1324 for (i = 0; i <= 2; i++) {
1325 for (j = 0; j <= 7; j++) {
1326 data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
1327 data->dram_speed_change_margin = bw_int_to_fixed(9999);
1328 data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0);
1329 data->num_displays_with_margin[i][j] = 0;
1330 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1331 if (data->enable[k]) {
1332 if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
1333 data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
1334 if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
1335 /*determine the minimum dram clock change margin for each set of clock frequencies*/
1336 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
1337 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
1338 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
1339 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
1340 data->display_pstate_change_enable[k] = 1;
1341 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1342 }
1343 }
1344 }
1345 else {
1346 data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
1347 if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
1348 /*determine the minimum dram clock change margin for each display pipe*/
1349 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
1350 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
1351 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
1352 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
1353 data->display_pstate_change_enable[k] = 1;
1354 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1355 }
1356 }
1357 }
1358 }
1359 }
1360 }
1361 }
1362 /*determine the number of displays with margin to switch in the v_active region*/
1363 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1364 if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) {
1365 number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
1366 }
1367 }
1368 /*determine the number of displays that don't have any dram clock change margin, but*/
1369 /*have the same resolution. these displays can switch in a common vblank region if*/
1370 /*their frames are aligned.*/
1371 data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999);
1372 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1373 if (data->enable[k]) {
1374 if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
1375 data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
1376 data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
1377 }
1378 else {
1379 data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
1380 data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
1381 }
1382 }
1383 }
1384 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1385 data->displays_with_same_mode[i] = bw_int_to_fixed(0);
1386 if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
1387 for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
1388 if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
1389 data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
1390 }
1391 }
1392 }
1393 }
1394 /*compute the maximum number of aligned displays with no margin*/
1395 number_of_aligned_displays_with_no_margin = 0;
1396 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1397 number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i]));
1398 }
1399 /*dram clock change is possible, if all displays have positive margin except for one display or a group of*/
1400 /*aligned displays with the same timing.*/
1401 /*the display(s) with the negative margin can be switched in the v_blank region while the other*/
1402 /*displays are in v_blank or v_active.*/
1403 if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
1404 data->nbp_state_change_enable = bw_def_yes;
1405 }
1406 else {
1407 data->nbp_state_change_enable = bw_def_no;
1408 }
1409 /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
1410 if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) {
1411 nbp_state_change_enable_blank = bw_def_yes;
1412 }
1413 else {
1414 nbp_state_change_enable_blank = bw_def_no;
1415 }
1416 /*required yclk(pclk)*/
1417 /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
1418 /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
1419 /*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/
1420 data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999);
1421 /* number of cursor lines stored in the cursor data return buffer*/
1422 num_cursor_lines = 0;
1423 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1424 if (data->enable[i]) {
1425 if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) {
1426 /*compute number of cursor lines stored in data return buffer*/
1427 if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) {
1428 num_cursor_lines = 4;
1429 }
1430 else {
1431 num_cursor_lines = 2;
1432 }
1433 data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i]));
1434 }
1435 }
1436 }
1437 /*compute minimum time to read one chunk from the dmif buffer*/
1438 if ((number_of_displays_enabled > 2)) {
1439 data->chunk_request_delay = 0;
1440 }
1441 else {
1442 data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk));
1443 }
1444 data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time);
1445 data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay));
1446 data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency);
1447 data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer);
1448 data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer);
1449 data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips);
1450 data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time);
1451 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
1452 data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
1453 yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
1454 data->y_clk_level = high;
1455 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1456 }
1457 else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
1458 data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
1459 yclk_message = bw_def_exceeded_allowed_page_close_open;
1460 data->y_clk_level = high;
1461 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1462 }
1463 else {
1464 data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
1465 if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
1466 yclk_message = bw_fixed_to_int(vbios->low_yclk);
1467 data->y_clk_level = low;
1468 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1469 }
1470 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
1471 yclk_message = bw_fixed_to_int(vbios->mid_yclk);
1472 data->y_clk_level = mid;
1473 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1474 }
1475 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
1476 yclk_message = bw_fixed_to_int(vbios->high_yclk);
1477 data->y_clk_level = high;
1478 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1479 }
1480 else {
1481 yclk_message = bw_def_exceeded_allowed_maximum_bw;
1482 data->y_clk_level = high;
1483 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1484 }
1485 }
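/*
 * illustrative sketch, not part of the original file: every rung of the
 * yclk selection ladder above compares the required dram bandwidth against
 * the same expression evaluated at a candidate yclk.  the helper below
 * factors out that expression, assuming the struct bw_fixed type and bw_*
 * helpers used in this file; parameter names are hypothetical.
 */
static struct bw_fixed available_dram_bandwidth_sketch(
	struct bw_fixed dram_efficiency,
	struct bw_fixed yclk,
	uint32_t dram_channel_width_in_bits,
	uint32_t number_of_dram_channels)
{
	/* bandwidth = efficiency * yclk * channel width / 8 * channel count */
	return bw_mul(bw_div(bw_mul(bw_mul(dram_efficiency, yclk),
		bw_int_to_fixed(dram_channel_width_in_bits)),
		bw_int_to_fixed(8)),
		bw_int_to_fixed(number_of_dram_channels));
}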
1486 /*required sclk*/
1487 /*sclk requirement only makes sense if the total pte requests fit in the scatter-gather saw queue size*/
1488 /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
1489 /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/
1490 /*for dmif, pte and cursor requests have to be included.*/
1491 data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
1492 data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
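/*
 * illustrative sketch, not part of the original file: the two assignments
 * above share the same shape, data volume divided by the time available for
 * the transfer and by the usable bus width.  restated with the struct
 * bw_fixed type and bw_* helpers used in this file; parameter names are
 * hypothetical.
 */
static struct bw_fixed required_sclk_for_transfer_sketch(
	struct bw_fixed total_required_data,
	struct bw_fixed time_for_data_transfer,
	struct bw_fixed data_return_bus_width,
	uint32_t bus_efficiency)
{
	/* sclk = data / time / (bus width * bus efficiency) */
	return bw_div(bw_div(total_required_data, time_for_data_transfer),
		bw_mul(data_return_bus_width, bw_int_to_fixed(bus_efficiency)));
}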
1493 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
1494 data->required_sclk = bw_int_to_fixed(9999);
1495 sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
1496 data->sclk_level = s_high;
1497 }
1498 else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
1499 data->required_sclk = bw_int_to_fixed(9999);
1500 sclk_message = bw_def_exceeded_allowed_page_close_open;
1501 data->sclk_level = s_high;
1502 }
1503 else {
1504 data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
1505 if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
1506 sclk_message = bw_def_low;
1507 data->sclk_level = s_low;
1508 data->required_sclk = vbios->low_sclk;
1509 }
1510 else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
1511 sclk_message = bw_def_mid;
1512 data->sclk_level = s_mid1;
1513 data->required_sclk = vbios->mid1_sclk;
1514 }
1515 else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
1516 sclk_message = bw_def_mid;
1517 data->sclk_level = s_mid2;
1518 data->required_sclk = vbios->mid2_sclk;
1519 }
1520 else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
1521 sclk_message = bw_def_mid;
1522 data->sclk_level = s_mid3;
1523 data->required_sclk = vbios->mid3_sclk;
1524 }
1525 else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
1526 sclk_message = bw_def_mid;
1527 data->sclk_level = s_mid4;
1528 data->required_sclk = vbios->mid4_sclk;
1529 }
1530 else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
1531 sclk_message = bw_def_mid;
1532 data->sclk_level = s_mid5;
1533 data->required_sclk = vbios->mid5_sclk;
1534 }
1535 else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
1536 sclk_message = bw_def_mid;
1537 data->sclk_level = s_mid6;
1538 data->required_sclk = vbios->mid6_sclk;
1539 }
1540 else if (bw_ltn(data->required_sclk, sclk[s_high])) {
1541 sclk_message = bw_def_high;
1542 data->sclk_level = s_high;
1543 data->required_sclk = vbios->high_sclk;
1544 }
1545 else {
1546 sclk_message = bw_def_exceeded_allowed_maximum_sclk;
1547 data->sclk_level = s_high;
1548 /*required_sclk = high_sclk*/
1549 }
1550 }
1551 /*dispclk*/
1552 /*if dispclk is set to the maximum, ramping is not required. dispclk required without ramping is less than the dispclk required with ramping.*/
1553 /*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
1554 /*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
1555 /*if that does not happen either, dispclk required is the dispclk required with ramping.*/
1556 /*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request throughput and for dram/nb p-state change if enabled.*/
1557 /*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
1558 /*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out is not 2 or 4.*/
1559 /*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
1560 /*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
1561 /*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
1562 /*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
1563 /*the scaling limits factor itself is also clamped to at least 1*/
1564 /*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
1565 data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
1566 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1567 if (data->enable[i]) {
1568 if (surface_type[i] == bw_def_graphics) {
1569 switch (data->lb_bpc[i]) {
1570 case 6:
1571 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
1572 break;
1573 case 8:
1574 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
1575 break;
1576 case 10:
1577 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
1578 break;
1579 default:
1580 data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
1581 break;
1582 }
1583 if (data->use_alpha[i] == 1) {
1584 data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
1585 }
1586 }
1587 else {
1588 switch (data->lb_bpc[i]) {
1589 case 6:
1590 data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
1591 break;
1592 case 8:
1593 data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
1594 break;
1595 case 10:
1596 data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
1597 break;
1598 default:
1599 data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
1600 break;
1601 }
1602 }
1603 if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
1604 data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
1605 }
1606 else {
1607 data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
1608 }
1609 data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk);
1610 data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput)));
1611 data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput));
1612 }
1613 }
1614 data->total_dispclk_required_with_ramping = bw_int_to_fixed(0);
1615 data->total_dispclk_required_without_ramping = bw_int_to_fixed(0);
1616 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1617 if (data->enable[i]) {
1618 if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) {
1619 data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i];
1620 }
1621 if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) {
1622 data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i];
1623 }
1624 }
1625 }
1626 data->total_read_request_bandwidth = bw_int_to_fixed(0);
1627 data->total_write_request_bandwidth = bw_int_to_fixed(0);
1628 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1629 if (data->enable[i]) {
1630 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
1631 data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]);
1632 }
1633 else {
1634 data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]);
1635 }
1636 }
1637 }
1638 data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency);
1639 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth);
1640 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth);
1641 if (data->cpuc_state_change_enable == bw_def_yes) {
1642 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
1643 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
1644 }
1645 if (data->cpup_state_change_enable == bw_def_yes) {
1646 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1647 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1648 }
1649 if (data->nbp_state_change_enable == bw_def_yes) {
1650 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1651 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1652 }
1653 if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
1654 data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth;
1655 }
1656 else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
1657 data->dispclk = vbios->high_voltage_max_dispclk;
1658 }
1659 else {
1660 data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth;
1661 }
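/*
 * illustrative sketch, not part of the original file: the three-way choice
 * above prefers the dispclk required with ramping when it fits under the
 * high-voltage maximum, falls back to that maximum when only the no-ramping
 * requirement fits, and otherwise reports the (unsupported) no-ramping
 * requirement.  assuming the struct bw_fixed type and bw_* helpers used in
 * this file; parameter names are hypothetical.
 */
static struct bw_fixed choose_dispclk_sketch(
	struct bw_fixed required_with_ramping,
	struct bw_fixed required_without_ramping,
	struct bw_fixed high_voltage_max_dispclk)
{
	if (bw_ltn(required_with_ramping, high_voltage_max_dispclk))
		return required_with_ramping;
	if (bw_ltn(required_without_ramping, high_voltage_max_dispclk))
		return high_voltage_max_dispclk;
	return required_without_ramping;
}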
1662 /* required core voltage*/
1663 /* the core voltage required is low if sclk, yclk (pclk) and dispclk are within the low limits*/
1664 /* otherwise, the core voltage required is medium if yclk (pclk) is within the medium limit and sclk and dispclk are within the medium limit*/
1665 /* otherwise, the core voltage required is high if the three clocks are within the high limits*/
1666 /* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/
1667 if (pipe_check == bw_def_notok) {
1668 voltage = bw_def_na;
1669 }
1670 else if (mode_check == bw_def_notok) {
1671 voltage = bw_def_notok;
1672 }
1673 else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) {
1674 voltage = bw_def_0_72;
1675 }
1676 else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) {
1677 voltage = bw_def_0_8;
1678 }
1679 else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) {
1680 if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) {
1681 voltage = bw_def_high_no_nbp_state_change;
1682 }
1683 else {
1684 voltage = bw_def_0_9;
1685 }
1686 }
1687 else {
1688 voltage = bw_def_notok;
1689 }
1690 if (voltage == bw_def_0_72) {
1691 data->max_phyclk = vbios->low_voltage_max_phyclk;
1692 }
1693 else if (voltage == bw_def_0_8) {
1694 data->max_phyclk = vbios->mid_voltage_max_phyclk;
1695 }
1696 else {
1697 data->max_phyclk = vbios->high_voltage_max_phyclk;
1698 }
1699 /*required blackout recovery time*/
1700 data->blackout_recovery_time = bw_int_to_fixed(0);
1701 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1702 if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) {
1703 if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
1704 data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]));
1705 if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) {
1706 data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
1707 }
1708 }
1709 else {
1710 data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]));
1711 if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) {
1712 data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
1713 }
1714 }
1715 }
1716 }
1717 /*sclk deep sleep*/
1718 /*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but should not be set to less than the request bandwidth.*/
1719 /*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the parallel rotation underlay,*/
1720 /*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
1721 /*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8 bpp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
1722 /*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
1723 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1724 if (data->enable[i]) {
1725 if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
1726 data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
1727 }
1728 else if (surface_type[i] == bw_def_graphics) {
1729 data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
1730 }
1731 else if (data->orthogonal_rotation[i] == 0) {
1732 data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
1733 }
1734 else {
1735 data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
1736 }
1737 }
1738 }
1739 data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
1740 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1741 if (data->enable[i]) {
1742 if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
1743 data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
1744 }
1745 }
1746 }
1747 data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
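/*
 * illustrative sketch, not part of the original file: the deep-sleep sclk
 * above is the dispclk with a 15% margin, spread over the smallest data
 * fifo entry, but never below the total read request bandwidth.  restated
 * with the struct bw_fixed type and bw_* helpers used in this file;
 * parameter names are hypothetical.
 */
static struct bw_fixed sclk_deep_sleep_sketch(
	struct bw_fixed dispclk,
	struct bw_fixed min_pixels_per_data_fifo_entry,
	struct bw_fixed total_read_request_bandwidth)
{
	/* max(1.15 * dispclk / smallest fifo entry, read request bandwidth) */
	return bw_max2(bw_div(bw_mul(dispclk, bw_frc_to_fixed(115, 100)),
		min_pixels_per_data_fifo_entry),
		total_read_request_bandwidth);
}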
1748 /*urgent, stutter and nb-p_state watermark*/
1749 /*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
1750 /*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel. it does not apply to the writeback.*/
1751 /*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/
1752 /*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/
1753 /*blackout_duration is added to the urgent watermark*/
1754 data->chunk_request_time = bw_int_to_fixed(0);
1755 data->cursor_request_time = bw_int_to_fixed(0);
1756 /*compute total time to request one chunk from each active display pipe*/
1757 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1758 if (data->enable[i]) {
1759 data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2))))));
1760 }
1761 }
1762 /*compute total time to request cursor data*/
1763 data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level]))));
1764 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1765 if (data->enable[i]) {
1766 data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i]));
1767 if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
1768 data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
1769 data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
1770 data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
1771 /*unconditionally remove blackout time from the nb p-state watermark*/
1772 if ((data->display_pstate_change_enable[i] == 1)) {
1773 data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
1774 }
1775 else {
1776 /*maximize the watermark to force the switch in the v_blank region of the frame*/
1777 data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
1778 }
1779 }
1780 else {
1781 data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
1782 data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
1783 data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
1784 if ((data->display_pstate_change_enable[i] == 1)) {
1785 data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
1786 }
1787 else {
1788 /*maximize the watermark to force the switch in the v_blank region of the frame*/
1789 data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
1790 }
1791 }
1792 }
1793 }
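/*
 * illustrative sketch, not part of the original file: both the dmif and the
 * mcifwr branches above build the urgent watermark from the same six terms;
 * only the latency and burst-time inputs differ.  assuming the struct
 * bw_fixed type and bw_* helpers used in this file; parameter names are
 * hypothetical.
 */
static struct bw_fixed urgent_watermark_sketch(
	struct bw_fixed urgent_latency,
	struct bw_fixed burst_time,
	struct bw_fixed pixel_transfer_time,
	struct bw_fixed line_source_transfer_time,
	struct bw_fixed blackout_duration,
	struct bw_fixed chunk_request_time,
	struct bw_fixed cursor_request_time)
{
	/* latency + burst time + dominant transfer time + blackout + chunk + cursor */
	return bw_add(bw_add(bw_add(bw_add(bw_add(urgent_latency, burst_time),
		bw_max2(pixel_transfer_time, line_source_transfer_time)),
		blackout_duration), chunk_request_time), cursor_request_time);
}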
1794 /*stutter mode enable*/
1795 /*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/
1796 /*display pipe.*/
1797 data->stutter_mode_enable = data->cpuc_state_change_enable;
1798 if (data->number_of_displays > 1) {
1799 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1800 if (data->enable[i]) {
1801 if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
1802 data->stutter_mode_enable = bw_def_no;
1803 }
1804 }
1805 }
1806 }
1807 /*performance metrics*/
1808 /* display read access efficiency (%)*/
1809 /* display write back access efficiency (%)*/
1810 /* stutter efficiency (%)*/
1811 /* extra underlay pitch recommended for efficiency (pixels)*/
1812 /* immediate flip time (us)*/
1813 /* latency for other clients due to urgent display read (us)*/
1814 /* latency for other clients due to urgent display write (us)*/
1815 /* average bandwidth consumed by display (no compression) (gb/s)*/
1816 /* required dram bandwidth (gb/s)*/
1817 /* required sclk (m_hz)*/
1818 /* required rd urgent latency (us)*/
1819 /* nb p-state change margin (us)*/
1820 /*dmif and mcifwr dram access efficiency*/
1821 /*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/
1822 data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
1823 if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
1824 data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
1825 }
1826 else {
1827 data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
1828 }
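/*
 * illustrative sketch, not part of the original file: the two efficiency
 * terms above are the ratio of the ideal dram access time (data divided by
 * bandwidth) to the page close-open time, capped at 100%.  assuming the
 * struct bw_fixed type and bw_* helpers used in this file; parameter names
 * are hypothetical.
 */
static struct bw_fixed dram_access_efficiency_sketch(
	struct bw_fixed required_dram_access_data,
	struct bw_fixed dram_bandwidth,
	struct bw_fixed total_page_close_open_time)
{
	return bw_min2(bw_div(bw_div(required_dram_access_data, dram_bandwidth),
		total_page_close_open_time),
		bw_int_to_fixed(1));
}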
1829 /*average bandwidth*/
1830 /*the average bandwidth with no compression is the source width times the bytes per pixel, divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
1831 /*the average bandwidth with compression is the same, divided by the compression ratio*/
1832 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1833 if (data->enable[i]) {
1834 data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1835 data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
1836 }
1837 }
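/*
 * illustrative sketch, not part of the original file: the per-surface
 * average bandwidth above is bytes per line divided by the line time,
 * scaled by the vertical scale ratio and the request overhead, then divided
 * by the compression rate.  assuming the struct bw_fixed type and bw_*
 * helpers used in this file; parameter names are hypothetical.
 */
static struct bw_fixed average_bandwidth_sketch(
	struct bw_fixed source_width_in_pixels,
	uint32_t bytes_per_pixel,
	struct bw_fixed h_total,
	struct bw_fixed pixel_rate,
	struct bw_fixed vsr,
	struct bw_fixed bytes_per_request,
	struct bw_fixed useful_bytes_per_request,
	struct bw_fixed compression_rate)
{
	struct bw_fixed line_time = bw_div(h_total, pixel_rate);
	struct bw_fixed no_compression = bw_div(bw_mul(bw_mul(bw_div(
		bw_mul(source_width_in_pixels, bw_int_to_fixed(bytes_per_pixel)),
		line_time), vsr), bytes_per_request),
		useful_bytes_per_request);

	return bw_div(no_compression, compression_rate);
}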
1838 data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
1839 data->total_average_bandwidth = bw_int_to_fixed(0);
1840 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1841 if (data->enable[i]) {
1842 data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
1843 data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
1844 }
1845 }
1846 /*stutter efficiency*/
1847 /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
1848 /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
1849 /*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/
1850 /*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/
1851 /*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/
1852 /*compute the time to read all the data from the dmif buffer to the lb (dram refresh period)*/
1853 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1854 if (data->enable[i]) {
1855 data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i]))));
1856 data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]);
1857 }
1858 }
1859 data->min_stutter_refresh_duration = bw_int_to_fixed(9999);
1860 data->total_stutter_dmif_buffer_size = 0;
1861 data->total_bytes_requested = 0;
1862 data->min_stutter_dmif_buffer_size = 9999;
1863 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1864 if (data->enable[i]) {
1865 if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) {
1866 data->min_stutter_refresh_duration = data->stutter_refresh_duration[i];
1867 data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i])))));
1868 data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]);
1869 }
1870 data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
1871 }
1872 }
1873 data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
1874 data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
1875 data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
1876 data->time_in_self_refresh = data->min_stutter_refresh_duration;
1877 if (data->d1_display_write_back_dwb_enable == 1) {
1878 data->stutter_efficiency = bw_int_to_fixed(0);
1879 }
1880 else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) {
1881 data->stutter_efficiency = bw_int_to_fixed(0);
1882 }
1883 else {
1884 /*compute stutter efficiency assuming 60 hz refresh rate*/
1885 data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100)));
1886 }
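/*
 * illustrative sketch, not part of the original file: the expression above
 * is the percentage of a 60 hz frame (16666.6667 us, written as
 * bw_frc_to_fixed(166666667, 10000)) that is not spent exiting self refresh
 * or bursting data, clamped at zero.  assuming the struct bw_fixed type and
 * bw_* helpers used in this file; parameter names are hypothetical.
 */
static struct bw_fixed stutter_efficiency_sketch(
	struct bw_fixed self_refresh_exit_latency,
	struct bw_fixed stutter_burst_time,
	uint32_t num_stutter_bursts)
{
	struct bw_fixed frame_time = bw_frc_to_fixed(166666667, 10000);
	struct bw_fixed busy_time = bw_mul(
		bw_add(self_refresh_exit_latency, stutter_burst_time),
		bw_int_to_fixed(num_stutter_bursts));

	/* efficiency(%) = max(0, (1 - busy/frame) * 100) */
	return bw_max2(bw_int_to_fixed(0),
		bw_mul(bw_sub(bw_int_to_fixed(1),
			bw_div(busy_time, frame_time)),
			bw_int_to_fixed(100)));
}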
1887 /*immediate flip time*/
1888 /*if scatter gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
1889 /*otherwise, it may take just one urgent memory trip*/
1890 data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
1891 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1892 if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
1893 data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
1894 if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
1895 data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
1896 }
1897 }
1898 }
1899 data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
1900 /*worst latency for other clients*/
1901 /*it is the urgent latency plus the urgent burst time*/
1902 data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
1903 if (data->d1_display_write_back_dwb_enable == 1) {
1904 data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
1905 }
1906 else {
1907 data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
1908 }
1909		/*dmif mc urgent latency supported in high sclk and yclk*/
1910 data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
1911 /*dram speed/p-state change margin*/
1912 /*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
1913 data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
1914 data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
1915 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1916 if (data->enable[i]) {
1917 data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
1918 data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
1919 }
1920 }
1921 /*sclk required vs urgent latency*/
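	/* sweep candidate urgent latencies i = 1..5: the required sclk is the total
	 * display read data divided by the read-buffer time remaining after the
	 * urgent trips and by the effective data-return bus width; entries where the
	 * pipe check failed or where no time remains beyond the page close-open
	 * overhead are marked not applicable. */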
1922 for (i = 1; i <= 5; i++) {
1923 data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
1924 if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
1925 data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
1926 }
1927 else {
1928 data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
1929 }
1930 }
1931 /*output link bit per pixel supported*/
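	/* HDMI: bpp = min(600, max_phyclk) / pixel_rate * 24;
	 * DP with 4 lanes: bpp = link_rate * 4 / pixel_rate * 8, evaluated at the
	 * HBR (270), HBR2 (540) and HBR3 (810) link rates reachable by the PHY clock. */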
1932 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1933 data->output_bpphdmi[k] = bw_def_na;
1934 data->output_bppdp4_lane_hbr[k] = bw_def_na;
1935 data->output_bppdp4_lane_hbr2[k] = bw_def_na;
1936 data->output_bppdp4_lane_hbr3[k] = bw_def_na;
1937 if (data->enable[k]) {
1938 data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
1939 if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
1940 data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
1941 }
1942 if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) {
1943 data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
1944 }
1945 if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) {
1946 data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
1947 }
1948 }
1949 }
1950}
1951
1952/*******************************************************************************
1953 * Public functions
1954 ******************************************************************************/
1955void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
1956 struct bw_calcs_vbios *bw_vbios,
1957 enum bw_calcs_version version)
1958{
1959 struct bw_calcs_dceip dceip = { 0 };
1960 struct bw_calcs_vbios vbios = { 0 };
1961
1962 dceip.version = version;
1963
1964 switch (version) {
1965 case BW_CALCS_VERSION_CARRIZO:
1966 vbios.memory_type = bw_def_gddr5;
1967 vbios.dram_channel_width_in_bits = 64;
1968 vbios.number_of_dram_channels = 2;
1969 vbios.number_of_dram_banks = 8;
1970 vbios.high_yclk = bw_int_to_fixed(1600);
1971 vbios.mid_yclk = bw_int_to_fixed(1600);
1972 vbios.low_yclk = bw_frc_to_fixed(66666, 100);
1973 vbios.low_sclk = bw_int_to_fixed(200);
1974 vbios.mid1_sclk = bw_int_to_fixed(300);
1975 vbios.mid2_sclk = bw_int_to_fixed(300);
1976 vbios.mid3_sclk = bw_int_to_fixed(300);
1977 vbios.mid4_sclk = bw_int_to_fixed(300);
1978 vbios.mid5_sclk = bw_int_to_fixed(300);
1979 vbios.mid6_sclk = bw_int_to_fixed(300);
1980 vbios.high_sclk = bw_frc_to_fixed(62609, 100);
1981 vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
1982 vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
1983 vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
1984 vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
1985 vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
1986 vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
1987 vbios.data_return_bus_width = bw_int_to_fixed(32);
1988 vbios.trc = bw_int_to_fixed(50);
1989 vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
1990 vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
1991 vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
1992 vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
1993 vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
1994 vbios.scatter_gather_enable = true;
1995 vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
1996 vbios.cursor_width = 32;
1997 vbios.average_compression_rate = 4;
1998 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
1999 vbios.blackout_duration = bw_int_to_fixed(18); /* us */
2000 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
2001
2002 dceip.large_cursor = false;
2003 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2004 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
2005 dceip.cursor_max_outstanding_group_num = 1;
2006 dceip.lines_interleaved_into_lb = 2;
2007 dceip.chunk_width = 256;
2008 dceip.number_of_graphics_pipes = 3;
2009 dceip.number_of_underlay_pipes = 1;
2010 dceip.low_power_tiling_mode = 0;
2011 dceip.display_write_back_supported = false;
2012 dceip.argb_compression_support = false;
2013 dceip.underlay_vscaler_efficiency6_bit_per_component =
2014 bw_frc_to_fixed(35556, 10000);
2015 dceip.underlay_vscaler_efficiency8_bit_per_component =
2016 bw_frc_to_fixed(34286, 10000);
2017 dceip.underlay_vscaler_efficiency10_bit_per_component =
2018 bw_frc_to_fixed(32, 10);
2019 dceip.underlay_vscaler_efficiency12_bit_per_component =
2020 bw_int_to_fixed(3);
2021 dceip.graphics_vscaler_efficiency6_bit_per_component =
2022 bw_frc_to_fixed(35, 10);
2023 dceip.graphics_vscaler_efficiency8_bit_per_component =
2024 bw_frc_to_fixed(34286, 10000);
2025 dceip.graphics_vscaler_efficiency10_bit_per_component =
2026 bw_frc_to_fixed(32, 10);
2027 dceip.graphics_vscaler_efficiency12_bit_per_component =
2028 bw_int_to_fixed(3);
2029 dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
2030 dceip.max_dmif_buffer_allocated = 2;
2031 dceip.graphics_dmif_size = 12288;
2032 dceip.underlay_luma_dmif_size = 19456;
2033 dceip.underlay_chroma_dmif_size = 23552;
2034 dceip.pre_downscaler_enabled = true;
2035 dceip.underlay_downscale_prefetch_enabled = true;
2036 dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
2037 dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
2038 dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
2039 dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
2040 bw_int_to_fixed(0);
2041 dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
2042 82176);
2043 dceip.underlay420_chroma_lb_size_per_component =
2044 bw_int_to_fixed(164352);
2045 dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
2046 82176);
2047 dceip.cursor_chunk_width = bw_int_to_fixed(64);
2048 dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
2049 dceip.underlay_maximum_width_efficient_for_tiling =
2050 bw_int_to_fixed(1920);
2051 dceip.underlay_maximum_height_efficient_for_tiling =
2052 bw_int_to_fixed(1080);
2053 dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
2054 bw_frc_to_fixed(3, 10);
2055 dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
2056 bw_int_to_fixed(25);
2057 dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2058 2);
2059 dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
2060 bw_int_to_fixed(128);
2061 dceip.limit_excessive_outstanding_dmif_requests = true;
2062 dceip.linear_mode_line_request_alternation_slice =
2063 bw_int_to_fixed(64);
2064 dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
2065 32;
2066 dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
2067 dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
2068 dceip.request_efficiency = bw_frc_to_fixed(8, 10);
2069 dceip.dispclk_per_request = bw_int_to_fixed(2);
2070 dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
2071 dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
2072 dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
2073			dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug */
2074 break;
2075 case BW_CALCS_VERSION_POLARIS10:
2076 vbios.memory_type = bw_def_gddr5;
2077 vbios.dram_channel_width_in_bits = 32;
2078 vbios.number_of_dram_channels = 8;
2079 vbios.number_of_dram_banks = 8;
2080 vbios.high_yclk = bw_int_to_fixed(6000);
2081 vbios.mid_yclk = bw_int_to_fixed(3200);
2082 vbios.low_yclk = bw_int_to_fixed(1000);
2083 vbios.low_sclk = bw_int_to_fixed(300);
2084 vbios.mid1_sclk = bw_int_to_fixed(400);
2085 vbios.mid2_sclk = bw_int_to_fixed(500);
2086 vbios.mid3_sclk = bw_int_to_fixed(600);
2087 vbios.mid4_sclk = bw_int_to_fixed(700);
2088 vbios.mid5_sclk = bw_int_to_fixed(800);
2089 vbios.mid6_sclk = bw_int_to_fixed(974);
2090 vbios.high_sclk = bw_int_to_fixed(1154);
2091 vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
2092 vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
2093 vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
2094 vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
2095 vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
2096 vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
2097 vbios.data_return_bus_width = bw_int_to_fixed(32);
2098 vbios.trc = bw_int_to_fixed(48);
2099 vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
2100 vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
2101 vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
2102 vbios.nbp_state_change_latency = bw_int_to_fixed(45);
2103 vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
2104 vbios.scatter_gather_enable = true;
2105 vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
2106 vbios.cursor_width = 32;
2107 vbios.average_compression_rate = 4;
2108 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
2109 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2110 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2111
2112 dceip.large_cursor = false;
2113 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2114 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
2115 dceip.cursor_max_outstanding_group_num = 1;
2116 dceip.lines_interleaved_into_lb = 2;
2117 dceip.chunk_width = 256;
2118 dceip.number_of_graphics_pipes = 6;
2119 dceip.number_of_underlay_pipes = 0;
2120 dceip.low_power_tiling_mode = 0;
2121 dceip.display_write_back_supported = false;
2122 dceip.argb_compression_support = true;
2123 dceip.underlay_vscaler_efficiency6_bit_per_component =
2124 bw_frc_to_fixed(35556, 10000);
2125 dceip.underlay_vscaler_efficiency8_bit_per_component =
2126 bw_frc_to_fixed(34286, 10000);
2127 dceip.underlay_vscaler_efficiency10_bit_per_component =
2128 bw_frc_to_fixed(32, 10);
2129 dceip.underlay_vscaler_efficiency12_bit_per_component =
2130 bw_int_to_fixed(3);
2131 dceip.graphics_vscaler_efficiency6_bit_per_component =
2132 bw_frc_to_fixed(35, 10);
2133 dceip.graphics_vscaler_efficiency8_bit_per_component =
2134 bw_frc_to_fixed(34286, 10000);
2135 dceip.graphics_vscaler_efficiency10_bit_per_component =
2136 bw_frc_to_fixed(32, 10);
2137 dceip.graphics_vscaler_efficiency12_bit_per_component =
2138 bw_int_to_fixed(3);
2139 dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
2140 dceip.max_dmif_buffer_allocated = 4;
2141 dceip.graphics_dmif_size = 12288;
2142 dceip.underlay_luma_dmif_size = 19456;
2143 dceip.underlay_chroma_dmif_size = 23552;
2144 dceip.pre_downscaler_enabled = true;
2145 dceip.underlay_downscale_prefetch_enabled = true;
2146 dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
2147 dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
2148 dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
2149 dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
2150 bw_int_to_fixed(1);
2151 dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
2152 82176);
2153 dceip.underlay420_chroma_lb_size_per_component =
2154 bw_int_to_fixed(164352);
2155 dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
2156 82176);
2157 dceip.cursor_chunk_width = bw_int_to_fixed(64);
2158 dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
2159 dceip.underlay_maximum_width_efficient_for_tiling =
2160 bw_int_to_fixed(1920);
2161 dceip.underlay_maximum_height_efficient_for_tiling =
2162 bw_int_to_fixed(1080);
2163 dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
2164 bw_frc_to_fixed(3, 10);
2165 dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
2166 bw_int_to_fixed(25);
2167 dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2168 2);
2169 dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
2170 bw_int_to_fixed(128);
2171 dceip.limit_excessive_outstanding_dmif_requests = true;
2172 dceip.linear_mode_line_request_alternation_slice =
2173 bw_int_to_fixed(64);
2174 dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
2175 32;
2176 dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
2177 dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
2178 dceip.request_efficiency = bw_frc_to_fixed(8, 10);
2179 dceip.dispclk_per_request = bw_int_to_fixed(2);
2180 dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
2181 dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
2182 dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
2183 dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
2184 break;
2185 case BW_CALCS_VERSION_POLARIS11:
2186 vbios.memory_type = bw_def_gddr5;
2187 vbios.dram_channel_width_in_bits = 32;
2188 vbios.number_of_dram_channels = 4;
2189 vbios.number_of_dram_banks = 8;
2190 vbios.high_yclk = bw_int_to_fixed(6000);
2191 vbios.mid_yclk = bw_int_to_fixed(3200);
2192 vbios.low_yclk = bw_int_to_fixed(1000);
2193 vbios.low_sclk = bw_int_to_fixed(300);
2194 vbios.mid1_sclk = bw_int_to_fixed(400);
2195 vbios.mid2_sclk = bw_int_to_fixed(500);
2196 vbios.mid3_sclk = bw_int_to_fixed(600);
2197 vbios.mid4_sclk = bw_int_to_fixed(700);
2198 vbios.mid5_sclk = bw_int_to_fixed(800);
2199 vbios.mid6_sclk = bw_int_to_fixed(974);
2200 vbios.high_sclk = bw_int_to_fixed(1154);
2201 vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
2202 vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
2203 vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
2204 vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
2205 vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
2206 vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
2207 vbios.data_return_bus_width = bw_int_to_fixed(32);
2208 vbios.trc = bw_int_to_fixed(48);
2209 vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
2210 vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
2211 vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
2212 vbios.nbp_state_change_latency = bw_int_to_fixed(45);
2213 vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
2214 vbios.scatter_gather_enable = true;
2215 vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
2216 vbios.cursor_width = 32;
2217 vbios.average_compression_rate = 4;
2218 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
2219 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2220 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2221
2222 dceip.large_cursor = false;
2223 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2224 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
2225 dceip.cursor_max_outstanding_group_num = 1;
2226 dceip.lines_interleaved_into_lb = 2;
2227 dceip.chunk_width = 256;
2228 dceip.number_of_graphics_pipes = 5;
2229 dceip.number_of_underlay_pipes = 0;
2230 dceip.low_power_tiling_mode = 0;
2231 dceip.display_write_back_supported = false;
2232 dceip.argb_compression_support = true;
2233 dceip.underlay_vscaler_efficiency6_bit_per_component =
2234 bw_frc_to_fixed(35556, 10000);
2235 dceip.underlay_vscaler_efficiency8_bit_per_component =
2236 bw_frc_to_fixed(34286, 10000);
2237 dceip.underlay_vscaler_efficiency10_bit_per_component =
2238 bw_frc_to_fixed(32, 10);
2239 dceip.underlay_vscaler_efficiency12_bit_per_component =
2240 bw_int_to_fixed(3);
2241 dceip.graphics_vscaler_efficiency6_bit_per_component =
2242 bw_frc_to_fixed(35, 10);
2243 dceip.graphics_vscaler_efficiency8_bit_per_component =
2244 bw_frc_to_fixed(34286, 10000);
2245 dceip.graphics_vscaler_efficiency10_bit_per_component =
2246 bw_frc_to_fixed(32, 10);
2247 dceip.graphics_vscaler_efficiency12_bit_per_component =
2248 bw_int_to_fixed(3);
2249 dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
2250 dceip.max_dmif_buffer_allocated = 4;
2251 dceip.graphics_dmif_size = 12288;
2252 dceip.underlay_luma_dmif_size = 19456;
2253 dceip.underlay_chroma_dmif_size = 23552;
2254 dceip.pre_downscaler_enabled = true;
2255 dceip.underlay_downscale_prefetch_enabled = true;
2256 dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
2257 dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
2258 dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
2259 dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
2260 bw_int_to_fixed(1);
2261 dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
2262 82176);
2263 dceip.underlay420_chroma_lb_size_per_component =
2264 bw_int_to_fixed(164352);
2265 dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
2266 82176);
2267 dceip.cursor_chunk_width = bw_int_to_fixed(64);
2268 dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
2269 dceip.underlay_maximum_width_efficient_for_tiling =
2270 bw_int_to_fixed(1920);
2271 dceip.underlay_maximum_height_efficient_for_tiling =
2272 bw_int_to_fixed(1080);
2273 dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
2274 bw_frc_to_fixed(3, 10);
2275 dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
2276 bw_int_to_fixed(25);
2277 dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2278 2);
2279 dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
2280 bw_int_to_fixed(128);
2281 dceip.limit_excessive_outstanding_dmif_requests = true;
2282 dceip.linear_mode_line_request_alternation_slice =
2283 bw_int_to_fixed(64);
2284 dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
2285 32;
2286 dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
2287 dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
2288 dceip.request_efficiency = bw_frc_to_fixed(8, 10);
2289 dceip.dispclk_per_request = bw_int_to_fixed(2);
2290 dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
2291 dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
2292 dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
2293 dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
2294 break;
2295 case BW_CALCS_VERSION_STONEY:
2296 vbios.memory_type = bw_def_gddr5;
2297 vbios.dram_channel_width_in_bits = 64;
2298 vbios.number_of_dram_channels = 1;
2299 vbios.number_of_dram_banks = 8;
2300 vbios.high_yclk = bw_int_to_fixed(1866);
2301 vbios.mid_yclk = bw_int_to_fixed(1866);
2302 vbios.low_yclk = bw_int_to_fixed(1333);
2303 vbios.low_sclk = bw_int_to_fixed(200);
2304 vbios.mid1_sclk = bw_int_to_fixed(600);
2305 vbios.mid2_sclk = bw_int_to_fixed(600);
2306 vbios.mid3_sclk = bw_int_to_fixed(600);
2307 vbios.mid4_sclk = bw_int_to_fixed(600);
2308 vbios.mid5_sclk = bw_int_to_fixed(600);
2309 vbios.mid6_sclk = bw_int_to_fixed(600);
2310 vbios.high_sclk = bw_int_to_fixed(800);
2311 vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
2312 vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
2313 vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
2314 vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
2315 vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
2316 vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
2317 vbios.data_return_bus_width = bw_int_to_fixed(32);
2318 vbios.trc = bw_int_to_fixed(50);
2319 vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
2320 vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
2321 vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
2322 vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
2323 vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
2324 vbios.scatter_gather_enable = true;
2325 vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
2326 vbios.cursor_width = 32;
2327 vbios.average_compression_rate = 4;
2328 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
2329 vbios.blackout_duration = bw_int_to_fixed(18); /* us */
2330 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
2331
2332 dceip.large_cursor = false;
2333 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2334 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
2335 dceip.cursor_max_outstanding_group_num = 1;
2336 dceip.lines_interleaved_into_lb = 2;
2337 dceip.chunk_width = 256;
2338 dceip.number_of_graphics_pipes = 2;
2339 dceip.number_of_underlay_pipes = 1;
2340 dceip.low_power_tiling_mode = 0;
2341 dceip.display_write_back_supported = false;
2342 dceip.argb_compression_support = true;
2343 dceip.underlay_vscaler_efficiency6_bit_per_component =
2344 bw_frc_to_fixed(35556, 10000);
2345 dceip.underlay_vscaler_efficiency8_bit_per_component =
2346 bw_frc_to_fixed(34286, 10000);
2347 dceip.underlay_vscaler_efficiency10_bit_per_component =
2348 bw_frc_to_fixed(32, 10);
2349 dceip.underlay_vscaler_efficiency12_bit_per_component =
2350 bw_int_to_fixed(3);
2351 dceip.graphics_vscaler_efficiency6_bit_per_component =
2352 bw_frc_to_fixed(35, 10);
2353 dceip.graphics_vscaler_efficiency8_bit_per_component =
2354 bw_frc_to_fixed(34286, 10000);
2355 dceip.graphics_vscaler_efficiency10_bit_per_component =
2356 bw_frc_to_fixed(32, 10);
2357 dceip.graphics_vscaler_efficiency12_bit_per_component =
2358 bw_int_to_fixed(3);
2359 dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
2360 dceip.max_dmif_buffer_allocated = 2;
2361 dceip.graphics_dmif_size = 12288;
2362 dceip.underlay_luma_dmif_size = 19456;
2363 dceip.underlay_chroma_dmif_size = 23552;
2364 dceip.pre_downscaler_enabled = true;
2365 dceip.underlay_downscale_prefetch_enabled = true;
2366 dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
2367 dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
2368 dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
2369 dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
2370 bw_int_to_fixed(0);
2371 dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
2372 82176);
2373 dceip.underlay420_chroma_lb_size_per_component =
2374 bw_int_to_fixed(164352);
2375 dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
2376 82176);
2377 dceip.cursor_chunk_width = bw_int_to_fixed(64);
2378 dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
2379 dceip.underlay_maximum_width_efficient_for_tiling =
2380 bw_int_to_fixed(1920);
2381 dceip.underlay_maximum_height_efficient_for_tiling =
2382 bw_int_to_fixed(1080);
2383 dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
2384 bw_frc_to_fixed(3, 10);
2385 dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
2386 bw_int_to_fixed(25);
2387 dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2388 2);
2389 dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
2390 bw_int_to_fixed(128);
2391 dceip.limit_excessive_outstanding_dmif_requests = true;
2392 dceip.linear_mode_line_request_alternation_slice =
2393 bw_int_to_fixed(64);
2394 dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
2395 32;
2396 dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
2397 dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
2398 dceip.request_efficiency = bw_frc_to_fixed(8, 10);
2399 dceip.dispclk_per_request = bw_int_to_fixed(2);
2400 dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
2401 dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
2402 dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
2403 dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
2404 break;
2405 default:
2406 break;
2407 }
2408 *bw_dceip = dceip;
2409 *bw_vbios = vbios;
2410
2411}
2412
2413/**
2414 * Compare calculated (required) clocks against the clocks available at
2415 * maximum voltage (max Performance Level).
2416 */
2417static bool is_display_configuration_supported(
2418 const struct bw_calcs_vbios *vbios,
2419 const struct bw_calcs_output *calcs_output)
2420{
2421 uint32_t int_max_clk;
2422
2423 int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk);
2424 int_max_clk *= 1000; /* MHz to kHz */
2425 if (calcs_output->dispclk_khz > int_max_clk)
2426 return false;
2427
2428 int_max_clk = bw_fixed_to_int(vbios->high_sclk);
2429 int_max_clk *= 1000; /* MHz to kHz */
2430 if (calcs_output->required_sclk > int_max_clk)
2431 return false;
2432
2433 return true;
2434}
2435
2436static void populate_initial_data(
2437 const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data)
2438{
2439 int i, j;
2440 int num_displays = 0;
2441
2442 data->underlay_surface_type = bw_def_420;
2443 data->panning_and_bezel_adjustment = bw_def_none;
2444 data->graphics_lb_bpc = 10;
2445 data->underlay_lb_bpc = 8;
2446 data->underlay_tiling_mode = bw_def_tiled;
2447 data->graphics_tiling_mode = bw_def_tiled;
2448 data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
2449 data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
2450
2451 /* Pipes with underlay first */
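	/* surface array layout: display (graphics) surfaces are stored starting at
	 * index 4, hence the [num_displays + 4] indexing below; the bottom (underlay)
	 * pipe data for display n is written into indices 2 * n and 2 * n + 1. */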
2452 for (i = 0; i < pipe_count; i++) {
2453 if (!pipe[i].stream || !pipe[i].bottom_pipe)
2454 continue;
2455
2456 ASSERT(pipe[i].surface);
2457
2458 if (num_displays == 0) {
2459 if (!pipe[i].surface->public.visible)
2460 data->d0_underlay_mode = bw_def_underlay_only;
2461 else
2462 data->d0_underlay_mode = bw_def_blend;
2463 } else {
2464 if (!pipe[i].surface->public.visible)
2465 data->d1_underlay_mode = bw_def_underlay_only;
2466 else
2467 data->d1_underlay_mode = bw_def_blend;
2468 }
2469
2470 data->fbc_en[num_displays + 4] = false;
2471 data->lpt_en[num_displays + 4] = false;
2472 data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.h_total);
2473 data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.v_total);
2474 data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->public.timing.pix_clk_khz, 1000);
2475 data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.width);
2476 data->pitch_in_pixels[num_displays + 4] = bw_int_to_fixed(pipe[i].surface->public.plane_size.grph.surface_pitch);
2477 data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.height);
2478 data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.h_taps);
2479 data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.v_taps);
2480 data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.horz.value);
2481 data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.vert.value);
2482 switch (pipe[i].surface->public.rotation) {
2483 case ROTATION_ANGLE_0:
2484 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
2485 break;
2486 case ROTATION_ANGLE_90:
2487 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
2488 break;
2489 case ROTATION_ANGLE_180:
2490 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
2491 break;
2492 case ROTATION_ANGLE_270:
2493 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
2494 break;
2495 default:
2496 break;
2497 }
2498 switch (pipe[i].surface->public.format) {
2499 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
2500 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
2501 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
2502 data->bytes_per_pixel[num_displays + 4] = 2;
2503 break;
2504 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
2505 case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
2506 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
2507 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
2508 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
2509 data->bytes_per_pixel[num_displays + 4] = 4;
2510 break;
2511 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
2512 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
2513 data->bytes_per_pixel[num_displays + 4] = 8;
2514 break;
2515 default:
2516 data->bytes_per_pixel[num_displays + 4] = 4;
2517 break;
2518 }
2519 data->interlace_mode[num_displays + 4] = false;
2520 data->stereo_mode[num_displays + 4] = bw_def_mono;
2521
2522
2523 for (j = 0; j < 2; j++) {
2524 data->fbc_en[num_displays * 2 + j] = false;
2525 data->lpt_en[num_displays * 2 + j] = false;
2526
2527 data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.viewport.height);
2528 data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.viewport.width);
2529 data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
2530 pipe[i].bottom_pipe->surface->public.plane_size.grph.surface_pitch);
2531 data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.taps.h_taps);
2532 data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.taps.v_taps);
2533 data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
2534 pipe[i].bottom_pipe->scl_data.ratios.horz.value);
2535 data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
2536 pipe[i].bottom_pipe->scl_data.ratios.vert.value);
2537 switch (pipe[i].bottom_pipe->surface->public.rotation) {
2538 case ROTATION_ANGLE_0:
2539 data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0);
2540 break;
2541 case ROTATION_ANGLE_90:
2542 data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90);
2543 break;
2544 case ROTATION_ANGLE_180:
2545 data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180);
2546 break;
2547 case ROTATION_ANGLE_270:
2548 data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270);
2549 break;
2550 default:
2551 break;
2552 }
2553 data->stereo_mode[num_displays * 2 + j] = bw_def_mono;
2554 }
2555
2556 num_displays++;
2557 }
2558
2559 /* Pipes without underlay after */
2560 for (i = 0; i < pipe_count; i++) {
2561 if (!pipe[i].stream || pipe[i].bottom_pipe)
2562 continue;
2563
2564
2565 data->fbc_en[num_displays + 4] = false;
2566 data->lpt_en[num_displays + 4] = false;
2567 data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.h_total);
2568 data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.v_total);
2569 data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->public.timing.pix_clk_khz, 1000);
2570 if (pipe[i].surface) {
2571 data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.width);
2572 data->pitch_in_pixels[num_displays + 4] = bw_int_to_fixed(pipe[i].surface->public.plane_size.grph.surface_pitch);
2573 data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.height);
2574 data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.h_taps);
2575 data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.v_taps);
2576 data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.horz.value);
2577 data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.vert.value);
2578 switch (pipe[i].surface->public.rotation) {
2579 case ROTATION_ANGLE_0:
2580 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
2581 break;
2582 case ROTATION_ANGLE_90:
2583 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
2584 break;
2585 case ROTATION_ANGLE_180:
2586 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
2587 break;
2588 case ROTATION_ANGLE_270:
2589 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
2590 break;
2591 default:
2592 break;
2593 }
2594 switch (pipe[i].surface->public.format) {
2595 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
2596 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
2597 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
2598 data->bytes_per_pixel[num_displays + 4] = 2;
2599 break;
2600 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
2601 case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
2602 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
2603 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
2604 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
2605 data->bytes_per_pixel[num_displays + 4] = 4;
2606 break;
2607 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
2608 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
2609 data->bytes_per_pixel[num_displays + 4] = 8;
2610 break;
2611 default:
2612 data->bytes_per_pixel[num_displays + 4] = 4;
2613 break;
2614 }
2615 } else {
2616 data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.h_addressable);
2617 data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
2618 data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.v_addressable);
2619 data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
2620 data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
2621 data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
2622 data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
2623 data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
2624 data->bytes_per_pixel[num_displays + 4] = 4;
2625 }
2626
2627 data->interlace_mode[num_displays + 4] = false;
2628 data->stereo_mode[num_displays + 4] = bw_def_mono;
2629 num_displays++;
2630 }
2631
2632 data->number_of_displays = num_displays;
2633}
2634
2635/**
2636 * Return:
2637 * true - Display(s) configuration supported.
2638 * In this case 'calcs_output' contains data for HW programming
2639 * false - Display(s) configuration not supported (not enough bandwidth).
2640 */
2641
2642bool bw_calcs(struct dc_context *ctx,
2643 const struct bw_calcs_dceip *dceip,
2644 const struct bw_calcs_vbios *vbios,
2645 const struct pipe_ctx pipe[],
2646 int pipe_count,
2647 struct bw_calcs_output *calcs_output)
2648{
2649 struct bw_calcs_data *data = dm_alloc(sizeof(struct bw_calcs_data));
2650
2651 populate_initial_data(pipe, pipe_count, data);
2652
2653		/*TODO: this should be taken out of calcs output and assigned during timing sync for pplib use*/
2654 calcs_output->all_displays_in_sync = false;
2655
2656 if (data->number_of_displays != 0) {
2657 uint8_t yclk_lvl, sclk_lvl;
2658 struct bw_fixed high_sclk = vbios->high_sclk;
2659 struct bw_fixed mid1_sclk = vbios->mid1_sclk;
2660 struct bw_fixed mid2_sclk = vbios->mid2_sclk;
2661 struct bw_fixed mid3_sclk = vbios->mid3_sclk;
2662 struct bw_fixed mid4_sclk = vbios->mid4_sclk;
2663 struct bw_fixed mid5_sclk = vbios->mid5_sclk;
2664 struct bw_fixed mid6_sclk = vbios->mid6_sclk;
2665 struct bw_fixed low_sclk = vbios->low_sclk;
2666 struct bw_fixed high_yclk = vbios->high_yclk;
2667 struct bw_fixed mid_yclk = vbios->mid_yclk;
2668 struct bw_fixed low_yclk = vbios->low_yclk;
2669
2670 calculate_bandwidth(dceip, vbios, data);
2671
2672 yclk_lvl = data->y_clk_level;
2673 sclk_lvl = data->sclk_level;
2674
2675 calcs_output->nbp_state_change_enable =
2676 data->nbp_state_change_enable;
2677 calcs_output->cpuc_state_change_enable =
2678 data->cpuc_state_change_enable;
2679 calcs_output->cpup_state_change_enable =
2680 data->cpup_state_change_enable;
2681 calcs_output->stutter_mode_enable =
2682 data->stutter_mode_enable;
2683 calcs_output->dispclk_khz =
2684 bw_fixed_to_int(bw_mul(data->dispclk,
2685 bw_int_to_fixed(1000)));
2686 calcs_output->blackout_recovery_time_us =
2687 bw_fixed_to_int(data->blackout_recovery_time);
2688 calcs_output->required_sclk =
2689 bw_fixed_to_int(bw_mul(data->required_sclk,
2690 bw_int_to_fixed(1000)));
2691 calcs_output->required_sclk_deep_sleep =
2692 bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
2693 bw_int_to_fixed(1000)));
2694 if (yclk_lvl == 0)
2695 calcs_output->required_yclk = bw_fixed_to_int(
2696 bw_mul(low_yclk, bw_int_to_fixed(1000)));
2697 else if (yclk_lvl == 1)
2698 calcs_output->required_yclk = bw_fixed_to_int(
2699 bw_mul(mid_yclk, bw_int_to_fixed(1000)));
2700 else
2701 calcs_output->required_yclk = bw_fixed_to_int(
2702 bw_mul(high_yclk, bw_int_to_fixed(1000)));
2703
2704 /* units: nanosecond, 16bit storage. */
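		/* calculate_bandwidth() reports watermarks in microseconds; the
		 * bw_int_to_fixed(1000) factor below converts them to nanoseconds. */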
2705
2706 calcs_output->nbp_state_change_wm_ns[0].a_mark =
2707 bw_fixed_to_int(bw_mul(data->
2708 nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
2709 calcs_output->nbp_state_change_wm_ns[1].a_mark =
2710 bw_fixed_to_int(bw_mul(data->
2711 nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
2712 calcs_output->nbp_state_change_wm_ns[2].a_mark =
2713 bw_fixed_to_int(bw_mul(data->
2714 nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
2715
2716 if (ctx->dc->caps.max_slave_planes) {
2717 calcs_output->nbp_state_change_wm_ns[3].a_mark =
2718 bw_fixed_to_int(bw_mul(data->
2719 nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
2720 calcs_output->nbp_state_change_wm_ns[4].a_mark =
2721 bw_fixed_to_int(bw_mul(data->
2722 nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
2723 } else {
2724 calcs_output->nbp_state_change_wm_ns[3].a_mark =
2725 bw_fixed_to_int(bw_mul(data->
2726 nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
2727 calcs_output->nbp_state_change_wm_ns[4].a_mark =
2728 bw_fixed_to_int(bw_mul(data->
2729 nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
2730 }
2731 calcs_output->nbp_state_change_wm_ns[5].a_mark =
2732 bw_fixed_to_int(bw_mul(data->
2733 nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
2734
2735
2736
2737 calcs_output->stutter_exit_wm_ns[0].a_mark =
2738 bw_fixed_to_int(bw_mul(data->
2739 stutter_exit_watermark[4], bw_int_to_fixed(1000)));
2740 calcs_output->stutter_exit_wm_ns[1].a_mark =
2741 bw_fixed_to_int(bw_mul(data->
2742 stutter_exit_watermark[5], bw_int_to_fixed(1000)));
2743 calcs_output->stutter_exit_wm_ns[2].a_mark =
2744 bw_fixed_to_int(bw_mul(data->
2745 stutter_exit_watermark[6], bw_int_to_fixed(1000)));
2746 if (ctx->dc->caps.max_slave_planes) {
2747 calcs_output->stutter_exit_wm_ns[3].a_mark =
2748 bw_fixed_to_int(bw_mul(data->
2749 stutter_exit_watermark[0], bw_int_to_fixed(1000)));
2750 calcs_output->stutter_exit_wm_ns[4].a_mark =
2751 bw_fixed_to_int(bw_mul(data->
2752 stutter_exit_watermark[1], bw_int_to_fixed(1000)));
2753 } else {
2754 calcs_output->stutter_exit_wm_ns[3].a_mark =
2755 bw_fixed_to_int(bw_mul(data->
2756 stutter_exit_watermark[7], bw_int_to_fixed(1000)));
2757 calcs_output->stutter_exit_wm_ns[4].a_mark =
2758 bw_fixed_to_int(bw_mul(data->
2759 stutter_exit_watermark[8], bw_int_to_fixed(1000)));
2760 }
2761 calcs_output->stutter_exit_wm_ns[5].a_mark =
2762 bw_fixed_to_int(bw_mul(data->
2763 stutter_exit_watermark[9], bw_int_to_fixed(1000)));
2764
2765
2766
2767 calcs_output->urgent_wm_ns[0].a_mark =
2768 bw_fixed_to_int(bw_mul(data->
2769 urgent_watermark[4], bw_int_to_fixed(1000)));
2770 calcs_output->urgent_wm_ns[1].a_mark =
2771 bw_fixed_to_int(bw_mul(data->
2772 urgent_watermark[5], bw_int_to_fixed(1000)));
2773 calcs_output->urgent_wm_ns[2].a_mark =
2774 bw_fixed_to_int(bw_mul(data->
2775 urgent_watermark[6], bw_int_to_fixed(1000)));
2776 if (ctx->dc->caps.max_slave_planes) {
2777 calcs_output->urgent_wm_ns[3].a_mark =
2778 bw_fixed_to_int(bw_mul(data->
2779 urgent_watermark[0], bw_int_to_fixed(1000)));
2780 calcs_output->urgent_wm_ns[4].a_mark =
2781 bw_fixed_to_int(bw_mul(data->
2782 urgent_watermark[1], bw_int_to_fixed(1000)));
2783 } else {
2784 calcs_output->urgent_wm_ns[3].a_mark =
2785 bw_fixed_to_int(bw_mul(data->
2786 urgent_watermark[7], bw_int_to_fixed(1000)));
2787 calcs_output->urgent_wm_ns[4].a_mark =
2788 bw_fixed_to_int(bw_mul(data->
2789 urgent_watermark[8], bw_int_to_fixed(1000)));
2790 }
2791 calcs_output->urgent_wm_ns[5].a_mark =
2792 bw_fixed_to_int(bw_mul(data->
2793 urgent_watermark[9], bw_int_to_fixed(1000)));
2794
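		/* the a_mark watermarks above use the unmodified vbios clock table;
		 * the remaining mark sets below are obtained by temporarily overriding
		 * entries in the vbios table (which ones depend on the DCE version),
		 * re-running calculate_bandwidth(), and restoring the original values
		 * at the end. */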
2795 if (dceip->version != BW_CALCS_VERSION_CARRIZO) {
2796 ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
2797 ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
2798 ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
2799 calculate_bandwidth(dceip, vbios, data);
2800
2801 calcs_output->nbp_state_change_wm_ns[0].b_mark =
2802 bw_fixed_to_int(bw_mul(data->
2803						nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
2804 calcs_output->nbp_state_change_wm_ns[1].b_mark =
2805 bw_fixed_to_int(bw_mul(data->
2806 nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
2807 calcs_output->nbp_state_change_wm_ns[2].b_mark =
2808 bw_fixed_to_int(bw_mul(data->
2809 nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
2810
2811 if (ctx->dc->caps.max_slave_planes) {
2812 calcs_output->nbp_state_change_wm_ns[3].b_mark =
2813 bw_fixed_to_int(bw_mul(data->
2814 nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
2815 calcs_output->nbp_state_change_wm_ns[4].b_mark =
2816 bw_fixed_to_int(bw_mul(data->
2817 nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
2818 } else {
2819 calcs_output->nbp_state_change_wm_ns[3].b_mark =
2820 bw_fixed_to_int(bw_mul(data->
2821 nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
2822 calcs_output->nbp_state_change_wm_ns[4].b_mark =
2823 bw_fixed_to_int(bw_mul(data->
2824 nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
2825 }
2826 calcs_output->nbp_state_change_wm_ns[5].b_mark =
2827 bw_fixed_to_int(bw_mul(data->
2828 nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
2829
2830
2831
2832 calcs_output->stutter_exit_wm_ns[0].b_mark =
2833 bw_fixed_to_int(bw_mul(data->
2834 stutter_exit_watermark[4], bw_int_to_fixed(1000)));
2835 calcs_output->stutter_exit_wm_ns[1].b_mark =
2836 bw_fixed_to_int(bw_mul(data->
2837 stutter_exit_watermark[5], bw_int_to_fixed(1000)));
2838 calcs_output->stutter_exit_wm_ns[2].b_mark =
2839 bw_fixed_to_int(bw_mul(data->
2840 stutter_exit_watermark[6], bw_int_to_fixed(1000)));
2841 if (ctx->dc->caps.max_slave_planes) {
2842 calcs_output->stutter_exit_wm_ns[3].b_mark =
2843 bw_fixed_to_int(bw_mul(data->
2844 stutter_exit_watermark[0], bw_int_to_fixed(1000)));
2845 calcs_output->stutter_exit_wm_ns[4].b_mark =
2846 bw_fixed_to_int(bw_mul(data->
2847 stutter_exit_watermark[1], bw_int_to_fixed(1000)));
2848 } else {
2849 calcs_output->stutter_exit_wm_ns[3].b_mark =
2850 bw_fixed_to_int(bw_mul(data->
2851 stutter_exit_watermark[7], bw_int_to_fixed(1000)));
2852 calcs_output->stutter_exit_wm_ns[4].b_mark =
2853 bw_fixed_to_int(bw_mul(data->
2854 stutter_exit_watermark[8], bw_int_to_fixed(1000)));
2855 }
2856 calcs_output->stutter_exit_wm_ns[5].b_mark =
2857 bw_fixed_to_int(bw_mul(data->
2858 stutter_exit_watermark[9], bw_int_to_fixed(1000)));
2859
2860
2861
2862 calcs_output->urgent_wm_ns[0].b_mark =
2863 bw_fixed_to_int(bw_mul(data->
2864 urgent_watermark[4], bw_int_to_fixed(1000)));
2865 calcs_output->urgent_wm_ns[1].b_mark =
2866 bw_fixed_to_int(bw_mul(data->
2867 urgent_watermark[5], bw_int_to_fixed(1000)));
2868 calcs_output->urgent_wm_ns[2].b_mark =
2869 bw_fixed_to_int(bw_mul(data->
2870 urgent_watermark[6], bw_int_to_fixed(1000)));
2871 if (ctx->dc->caps.max_slave_planes) {
2872 calcs_output->urgent_wm_ns[3].b_mark =
2873 bw_fixed_to_int(bw_mul(data->
2874 urgent_watermark[0], bw_int_to_fixed(1000)));
2875 calcs_output->urgent_wm_ns[4].b_mark =
2876 bw_fixed_to_int(bw_mul(data->
2877 urgent_watermark[1], bw_int_to_fixed(1000)));
2878 } else {
2879 calcs_output->urgent_wm_ns[3].b_mark =
2880 bw_fixed_to_int(bw_mul(data->
2881 urgent_watermark[7], bw_int_to_fixed(1000)));
2882 calcs_output->urgent_wm_ns[4].b_mark =
2883 bw_fixed_to_int(bw_mul(data->
2884 urgent_watermark[8], bw_int_to_fixed(1000)));
2885 }
2886 calcs_output->urgent_wm_ns[5].b_mark =
2887 bw_fixed_to_int(bw_mul(data->
2888 urgent_watermark[9], bw_int_to_fixed(1000)));
2889
2890 ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
2891 ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
2892 ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
2893 ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
2894 calculate_bandwidth(dceip, vbios, data);
2895
2896 calcs_output->nbp_state_change_wm_ns[0].c_mark =
2897 bw_fixed_to_int(bw_mul(data->
2898 nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
2899 calcs_output->nbp_state_change_wm_ns[1].c_mark =
2900 bw_fixed_to_int(bw_mul(data->
2901 nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
2902 calcs_output->nbp_state_change_wm_ns[2].c_mark =
2903 bw_fixed_to_int(bw_mul(data->
2904 nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
2905 if (ctx->dc->caps.max_slave_planes) {
2906 calcs_output->nbp_state_change_wm_ns[3].c_mark =
2907 bw_fixed_to_int(bw_mul(data->
2908 nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
2909 calcs_output->nbp_state_change_wm_ns[4].c_mark =
2910 bw_fixed_to_int(bw_mul(data->
2911 nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
2912 } else {
2913 calcs_output->nbp_state_change_wm_ns[3].c_mark =
2914 bw_fixed_to_int(bw_mul(data->
2915 nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
2916 calcs_output->nbp_state_change_wm_ns[4].c_mark =
2917 bw_fixed_to_int(bw_mul(data->
2918 nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
2919 }
2920 calcs_output->nbp_state_change_wm_ns[5].c_mark =
2921 bw_fixed_to_int(bw_mul(data->
2922 nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
2923
2924
2925 calcs_output->stutter_exit_wm_ns[0].c_mark =
2926 bw_fixed_to_int(bw_mul(data->
2927 stutter_exit_watermark[4], bw_int_to_fixed(1000)));
2928 calcs_output->stutter_exit_wm_ns[1].c_mark =
2929 bw_fixed_to_int(bw_mul(data->
2930 stutter_exit_watermark[5], bw_int_to_fixed(1000)));
2931 calcs_output->stutter_exit_wm_ns[2].c_mark =
2932 bw_fixed_to_int(bw_mul(data->
2933 stutter_exit_watermark[6], bw_int_to_fixed(1000)));
2934 if (ctx->dc->caps.max_slave_planes) {
2935 calcs_output->stutter_exit_wm_ns[3].c_mark =
2936 bw_fixed_to_int(bw_mul(data->
2937 stutter_exit_watermark[0], bw_int_to_fixed(1000)));
2938 calcs_output->stutter_exit_wm_ns[4].c_mark =
2939 bw_fixed_to_int(bw_mul(data->
2940 stutter_exit_watermark[1], bw_int_to_fixed(1000)));
2941 } else {
2942 calcs_output->stutter_exit_wm_ns[3].c_mark =
2943 bw_fixed_to_int(bw_mul(data->
2944 stutter_exit_watermark[7], bw_int_to_fixed(1000)));
2945 calcs_output->stutter_exit_wm_ns[4].c_mark =
2946 bw_fixed_to_int(bw_mul(data->
2947 stutter_exit_watermark[8], bw_int_to_fixed(1000)));
2948 }
2949 calcs_output->stutter_exit_wm_ns[5].c_mark =
2950 bw_fixed_to_int(bw_mul(data->
2951 stutter_exit_watermark[9], bw_int_to_fixed(1000)));
2952
2953 calcs_output->urgent_wm_ns[0].c_mark =
2954 bw_fixed_to_int(bw_mul(data->
2955 urgent_watermark[4], bw_int_to_fixed(1000)));
2956 calcs_output->urgent_wm_ns[1].c_mark =
2957 bw_fixed_to_int(bw_mul(data->
2958 urgent_watermark[5], bw_int_to_fixed(1000)));
2959 calcs_output->urgent_wm_ns[2].c_mark =
2960 bw_fixed_to_int(bw_mul(data->
2961 urgent_watermark[6], bw_int_to_fixed(1000)));
2962 if (ctx->dc->caps.max_slave_planes) {
2963 calcs_output->urgent_wm_ns[3].c_mark =
2964 bw_fixed_to_int(bw_mul(data->
2965 urgent_watermark[0], bw_int_to_fixed(1000)));
2966 calcs_output->urgent_wm_ns[4].c_mark =
2967 bw_fixed_to_int(bw_mul(data->
2968 urgent_watermark[1], bw_int_to_fixed(1000)));
2969 } else {
2970 calcs_output->urgent_wm_ns[3].c_mark =
2971 bw_fixed_to_int(bw_mul(data->
2972 urgent_watermark[7], bw_int_to_fixed(1000)));
2973 calcs_output->urgent_wm_ns[4].c_mark =
2974 bw_fixed_to_int(bw_mul(data->
2975 urgent_watermark[8], bw_int_to_fixed(1000)));
2976 }
2977 calcs_output->urgent_wm_ns[5].c_mark =
2978 bw_fixed_to_int(bw_mul(data->
2979 urgent_watermark[9], bw_int_to_fixed(1000)));
2980 }
2981
2982 if (dceip->version == BW_CALCS_VERSION_CARRIZO) {
2983 ((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk;
2984 ((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk;
2985 ((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk;
2986 ((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk;
2987 ((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk;
2988 ((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk;
2989 ((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk;
2990 ((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk;
2991 ((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk;
2992 } else {
2993 ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
2994 ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
2995 ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
2996 ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
2997 }
2998
2999 calculate_bandwidth(dceip, vbios, data);
3000
3001 calcs_output->nbp_state_change_wm_ns[0].d_mark =
3002 bw_fixed_to_int(bw_mul(data->
3003 nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
3004 calcs_output->nbp_state_change_wm_ns[1].d_mark =
3005 bw_fixed_to_int(bw_mul(data->
3006 nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
3007 calcs_output->nbp_state_change_wm_ns[2].d_mark =
3008 bw_fixed_to_int(bw_mul(data->
3009 nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
3010 if (ctx->dc->caps.max_slave_planes) {
3011 calcs_output->nbp_state_change_wm_ns[3].d_mark =
3012 bw_fixed_to_int(bw_mul(data->
3013 nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
3014 calcs_output->nbp_state_change_wm_ns[4].d_mark =
3015 bw_fixed_to_int(bw_mul(data->
3016 nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
3017 } else {
3018 calcs_output->nbp_state_change_wm_ns[3].d_mark =
3019 bw_fixed_to_int(bw_mul(data->
3020 nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
3021 calcs_output->nbp_state_change_wm_ns[4].d_mark =
3022 bw_fixed_to_int(bw_mul(data->
3023 nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
3024 }
3025 calcs_output->nbp_state_change_wm_ns[5].d_mark =
3026 bw_fixed_to_int(bw_mul(data->
3027 nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
3028
3029 calcs_output->stutter_exit_wm_ns[0].d_mark =
3030 bw_fixed_to_int(bw_mul(data->
3031 stutter_exit_watermark[4], bw_int_to_fixed(1000)));
3032 calcs_output->stutter_exit_wm_ns[1].d_mark =
3033 bw_fixed_to_int(bw_mul(data->
3034 stutter_exit_watermark[5], bw_int_to_fixed(1000)));
3035 calcs_output->stutter_exit_wm_ns[2].d_mark =
3036 bw_fixed_to_int(bw_mul(data->
3037 stutter_exit_watermark[6], bw_int_to_fixed(1000)));
3038 if (ctx->dc->caps.max_slave_planes) {
3039 calcs_output->stutter_exit_wm_ns[3].d_mark =
3040 bw_fixed_to_int(bw_mul(data->
3041 stutter_exit_watermark[0], bw_int_to_fixed(1000)));
3042 calcs_output->stutter_exit_wm_ns[4].d_mark =
3043 bw_fixed_to_int(bw_mul(data->
3044 stutter_exit_watermark[1], bw_int_to_fixed(1000)));
3045 } else {
3046 calcs_output->stutter_exit_wm_ns[3].d_mark =
3047 bw_fixed_to_int(bw_mul(data->
3048 stutter_exit_watermark[7], bw_int_to_fixed(1000)));
3049 calcs_output->stutter_exit_wm_ns[4].d_mark =
3050 bw_fixed_to_int(bw_mul(data->
3051 stutter_exit_watermark[8], bw_int_to_fixed(1000)));
3052 }
3053 calcs_output->stutter_exit_wm_ns[5].d_mark =
3054 bw_fixed_to_int(bw_mul(data->
3055 stutter_exit_watermark[9], bw_int_to_fixed(1000)));
3056
3057
3058 calcs_output->urgent_wm_ns[0].d_mark =
3059 bw_fixed_to_int(bw_mul(data->
3060 urgent_watermark[4], bw_int_to_fixed(1000)));
3061 calcs_output->urgent_wm_ns[1].d_mark =
3062 bw_fixed_to_int(bw_mul(data->
3063 urgent_watermark[5], bw_int_to_fixed(1000)));
3064 calcs_output->urgent_wm_ns[2].d_mark =
3065 bw_fixed_to_int(bw_mul(data->
3066 urgent_watermark[6], bw_int_to_fixed(1000)));
3067 if (ctx->dc->caps.max_slave_planes) {
3068 calcs_output->urgent_wm_ns[3].d_mark =
3069 bw_fixed_to_int(bw_mul(data->
3070 urgent_watermark[0], bw_int_to_fixed(1000)));
3071 calcs_output->urgent_wm_ns[4].d_mark =
3072 bw_fixed_to_int(bw_mul(data->
3073 urgent_watermark[1], bw_int_to_fixed(1000)));
3074 } else {
3075 calcs_output->urgent_wm_ns[3].d_mark =
3076 bw_fixed_to_int(bw_mul(data->
3077 urgent_watermark[7], bw_int_to_fixed(1000)));
3078 calcs_output->urgent_wm_ns[4].d_mark =
3079 bw_fixed_to_int(bw_mul(data->
3080 urgent_watermark[8], bw_int_to_fixed(1000)));
3081 }
3082 calcs_output->urgent_wm_ns[5].d_mark =
3083 bw_fixed_to_int(bw_mul(data->
3084 urgent_watermark[9], bw_int_to_fixed(1000)));
3085
3086 ((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk;
3087 ((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk;
3088 ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
3089 ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
3090 ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
3091 ((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk;
3092 ((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk;
3093 ((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk;
3094 ((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk;
3095 ((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk;
3096 } else {
3097 calcs_output->nbp_state_change_enable = true;
3098 calcs_output->cpuc_state_change_enable = true;
3099 calcs_output->cpup_state_change_enable = true;
3100 calcs_output->stutter_mode_enable = true;
3101 calcs_output->dispclk_khz = 0;
3102 calcs_output->required_sclk = 0;
3103 }
3104
3105 dm_free(data);
3106
3107 return is_display_configuration_supported(vbios, calcs_output);
3108}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
new file mode 100644
index 000000000000..fbf2adcc5ff8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
@@ -0,0 +1,299 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26#include "bw_fixed.h"
27
28#define BITS_PER_FRACTIONAL_PART 24
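/*
 * bw_fixed holds a signed 64-bit value with 24 fractional bits:
 * bw_int_to_fixed(1) is 1 << 24, and integer inputs are asserted to lie
 * within the [MIN_I32, MAX_I32] range defined below.
 */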
29
30#define MIN_I32 \
31 (int64_t)(-(1LL << (63 - BITS_PER_FRACTIONAL_PART)))
32
33#define MAX_I32 \
34 (int64_t)((1ULL << (63 - BITS_PER_FRACTIONAL_PART)) - 1)
35
36#define MIN_I64 \
37 (int64_t)(-(1LL << 63))
38
39#define MAX_I64 \
40 (int64_t)((1ULL << 63) - 1)
41
42#define FRACTIONAL_PART_MASK \
43 ((1ULL << BITS_PER_FRACTIONAL_PART) - 1)
44
45#define GET_INTEGER_PART(x) \
46 ((x) >> BITS_PER_FRACTIONAL_PART)
47
48#define GET_FRACTIONAL_PART(x) \
49 (FRACTIONAL_PART_MASK & (x))
50
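/*
 * Editorial sketch, not part of the original patch: with
 * BITS_PER_FRACTIONAL_PART == 24, struct bw_fixed holds a signed 64-bit
 * fixed-point value with 24 fractional bits.  A minimal usage example,
 * assuming the declarations from bw_fixed.h included above (the function
 * name below is purely illustrative):
 */
static inline void bw_fixed_encoding_example(void)
{
	struct bw_fixed three = bw_int_to_fixed(3);	/* .value == 3 << 24 == 0x03000000 */
	struct bw_fixed half = bw_frc_to_fixed(1, 2);	/* .value == 0x00800000, i.e. 0.5 */
	struct bw_fixed prod = bw_mul(three, half);	/* 1.5, stored as 0x01800000 */

	ASSERT(bw_fixed_to_int(prod) == 1);		/* integer part truncates 1.5 to 1 */
	ASSERT(GET_FRACTIONAL_PART(prod.value) == 0x800000);
}
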
51static uint64_t abs_i64(int64_t arg)
52{
53 if (arg >= 0)
54 return (uint64_t)(arg);
55 else
56 return (uint64_t)(-arg);
57}
58
59struct bw_fixed bw_min3(struct bw_fixed v1, struct bw_fixed v2, struct bw_fixed v3)
60{
61 return bw_min2(bw_min2(v1, v2), v3);
62}
63
64struct bw_fixed bw_max3(struct bw_fixed v1, struct bw_fixed v2, struct bw_fixed v3)
65{
66 return bw_max2(bw_max2(v1, v2), v3);
67}
68
69struct bw_fixed bw_int_to_fixed(int64_t value)
70{
71 struct bw_fixed res;
72 ASSERT(value < MAX_I32 && value > MIN_I32);
73 res.value = value << BITS_PER_FRACTIONAL_PART;
74 return res;
75}
76
77int32_t bw_fixed_to_int(struct bw_fixed value)
78{
79 return GET_INTEGER_PART(value.value);
80}
81
82struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
83{
84 struct bw_fixed res;
85 bool arg1_negative = numerator < 0;
86 bool arg2_negative = denominator < 0;
87 uint64_t arg1_value;
88 uint64_t arg2_value;
89 uint64_t remainder;
90
91 /* determine integer part */
92 uint64_t res_value;
93
94 ASSERT(denominator != 0);
95
96 arg1_value = abs_i64(numerator);
97 arg2_value = abs_i64(denominator);
98 res_value = div64_u64_rem(arg1_value, arg2_value, &remainder);
99
100 ASSERT(res_value <= MAX_I32);
101
102 /* determine fractional part */
103 {
104 uint32_t i = BITS_PER_FRACTIONAL_PART;
105
106 do
107 {
108 remainder <<= 1;
109
110 res_value <<= 1;
111
112 if (remainder >= arg2_value)
113 {
114 res_value |= 1;
115 remainder -= arg2_value;
116 }
117 } while (--i != 0);
118 }
119
120 /* round up LSB */
121 {
122 uint64_t summand = (remainder << 1) >= arg2_value;
123
124 ASSERT(res_value <= MAX_I64 - summand);
125
126 res_value += summand;
127 }
128
129 res.value = (int64_t)(res_value);
130
131 if (arg1_negative ^ arg2_negative)
132 res.value = -res.value;
133 return res;
134}
135
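/*
 * Editorial worked example, not part of the original patch:
 * bw_frc_to_fixed(1, 3) first gets the integer part 1 / 3 == 0 with
 * remainder 1, then the 24 shift-and-subtract steps above produce the
 * fractional bits 0b0101...01 == 0x555555, and the final check
 * (remainder << 1) >= denominator decides whether to round the least
 * significant bit up (here it does not).  The result is
 * 0x555555 / 2^24 ~= 0.333333.
 */
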
136struct bw_fixed bw_min2(const struct bw_fixed arg1, const struct bw_fixed arg2)
137{
138 return (arg1.value <= arg2.value) ? arg1 : arg2;
139}
140
141struct bw_fixed bw_max2(const struct bw_fixed arg1, const struct bw_fixed arg2)
142{
143 return (arg2.value <= arg1.value) ? arg1 : arg2;
144}
145
146struct bw_fixed bw_floor2(
147 const struct bw_fixed arg,
148 const struct bw_fixed significance)
149{
150 struct bw_fixed result;
151 int64_t multiplicand;
152
153 multiplicand = div64_s64(arg.value, abs_i64(significance.value));
154 result.value = abs_i64(significance.value) * multiplicand;
155 ASSERT(abs_i64(result.value) <= abs_i64(arg.value));
156 return result;
157}
158
159struct bw_fixed bw_ceil2(
160 const struct bw_fixed arg,
161 const struct bw_fixed significance)
162{
163 struct bw_fixed result;
164 int64_t multiplicand;
165
166 multiplicand = div64_s64(arg.value, abs_i64(significance.value));
167 result.value = abs_i64(significance.value) * multiplicand;
168 if (abs_i64(result.value) < abs_i64(arg.value)) {
169 if (arg.value < 0)
170 result.value -= abs_i64(significance.value);
171 else
172 result.value += abs_i64(significance.value);
173 }
174 return result;
175}
176
177struct bw_fixed bw_add(const struct bw_fixed arg1, const struct bw_fixed arg2)
178{
179 struct bw_fixed res;
180
181 res.value = arg1.value + arg2.value;
182
183 return res;
184}
185
186struct bw_fixed bw_sub(const struct bw_fixed arg1, const struct bw_fixed arg2)
187{
188 struct bw_fixed res;
189
190 res.value = arg1.value - arg2.value;
191
192 return res;
193}
194
195struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2)
196{
197 struct bw_fixed res;
198
199 bool arg1_negative = arg1.value < 0;
200 bool arg2_negative = arg2.value < 0;
201
202 uint64_t arg1_value = abs_i64(arg1.value);
203 uint64_t arg2_value = abs_i64(arg2.value);
204
205 uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
206 uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
207
208 uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
209 uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
210
211 uint64_t tmp;
212
213 res.value = arg1_int * arg2_int;
214
215 ASSERT(res.value <= MAX_I32);
216
217 res.value <<= BITS_PER_FRACTIONAL_PART;
218
219 tmp = arg1_int * arg2_fra;
220
221 ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
222
223 res.value += tmp;
224
225 tmp = arg2_int * arg1_fra;
226
227 ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
228
229 res.value += tmp;
230
231 tmp = arg1_fra * arg2_fra;
232
233 tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
234 (tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value));
235
236 ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
237
238 res.value += tmp;
239
240 if (arg1_negative ^ arg2_negative)
241 res.value = -res.value;
242 return res;
243}
244
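/*
 * Editorial note, not part of the original patch: bw_mul() expands both
 * operands into integer and fractional parts,
 *
 *   (a_int + a_frc/2^24) * (b_int + b_frc/2^24)
 *     = a_int*b_int + (a_int*b_frc + b_int*a_frc)/2^24 + a_frc*b_frc/2^48,
 *
 * accumulating the first term shifted left by 24 bits, the two cross
 * terms as-is, and the last term shifted right by 24 bits with a rounding
 * adjustment (the comparison against bw_frc_to_fixed(1, 2) above).
 */
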
245struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw_fixed arg2)
246{
247 struct bw_fixed res = bw_frc_to_fixed(arg1.value, arg2.value);
248 return res;
249}
250
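/*
 * Editorial note, not part of the original patch: div64_u64_rem() below
 * is an unsigned division, so bw_mod() is only meaningful for
 * non-negative operands.
 */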
251struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
252{
253 struct bw_fixed res;
254 div64_u64_rem(arg1.value, arg2.value, &res.value);
255 return res;
256}
257struct bw_fixed fixed31_32_to_bw_fixed(int64_t raw)
258{
259 struct bw_fixed result = { 0 };
260
261 if (raw < 0) {
262 raw = -raw;
263 result.value = -(raw >> (32 - BITS_PER_FRACTIONAL_PART));
264 } else {
265 result.value = raw >> (32 - BITS_PER_FRACTIONAL_PART);
266 }
267
268 return result;
269}
270
271bool bw_equ(const struct bw_fixed arg1, const struct bw_fixed arg2)
272{
273 return arg1.value == arg2.value;
274}
275
276bool bw_neq(const struct bw_fixed arg1, const struct bw_fixed arg2)
277{
278 return arg1.value != arg2.value;
279}
280
281bool bw_leq(const struct bw_fixed arg1, const struct bw_fixed arg2)
282{
283 return arg1.value <= arg2.value;
284}
285
286bool bw_meq(const struct bw_fixed arg1, const struct bw_fixed arg2)
287{
288 return arg1.value >= arg2.value;
289}
290
291bool bw_ltn(const struct bw_fixed arg1, const struct bw_fixed arg2)
292{
293 return arg1.value < arg2.value;
294}
295
296bool bw_mtn(const struct bw_fixed arg1, const struct bw_fixed arg2)
297{
298 return arg1.value > arg2.value;
299}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/gamma_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/gamma_calcs.c
new file mode 100644
index 000000000000..854796aa0c71
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/gamma_calcs.c
@@ -0,0 +1,1382 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "gamma_calcs.h"
28
29struct curve_config {
30 uint32_t offset;
31 int8_t segments[16];
32 int8_t begin;
33};
34
35static bool build_custom_float(
36 struct fixed31_32 value,
37 const struct custom_float_format *format,
38 bool *negative,
39 uint32_t *mantissa,
40 uint32_t *exponenta)
41{
42 uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
43
44 const struct fixed31_32 mantissa_constant_plus_max_fraction =
45 dal_fixed31_32_from_fraction(
46 (1LL << (format->mantissa_bits + 1)) - 1,
47 1LL << format->mantissa_bits);
48
49 struct fixed31_32 mantiss;
50
51 if (dal_fixed31_32_eq(
52 value,
53 dal_fixed31_32_zero)) {
54 *negative = false;
55 *mantissa = 0;
56 *exponenta = 0;
57 return true;
58 }
59
60 if (dal_fixed31_32_lt(
61 value,
62 dal_fixed31_32_zero)) {
63 *negative = format->sign;
64 value = dal_fixed31_32_neg(value);
65 } else {
66 *negative = false;
67 }
68
69 if (dal_fixed31_32_lt(
70 value,
71 dal_fixed31_32_one)) {
72 uint32_t i = 1;
73
74 do {
75 value = dal_fixed31_32_shl(value, 1);
76 ++i;
77 } while (dal_fixed31_32_lt(
78 value,
79 dal_fixed31_32_one));
80
81 --i;
82
83 if (exp_offset <= i) {
84 *mantissa = 0;
85 *exponenta = 0;
86 return true;
87 }
88
89 *exponenta = exp_offset - i;
90 } else if (dal_fixed31_32_le(
91 mantissa_constant_plus_max_fraction,
92 value)) {
93 uint32_t i = 1;
94
95 do {
96 value = dal_fixed31_32_shr(value, 1);
97 ++i;
98 } while (dal_fixed31_32_lt(
99 mantissa_constant_plus_max_fraction,
100 value));
101
102 *exponenta = exp_offset + i - 1;
103 } else {
104 *exponenta = exp_offset;
105 }
106
107 mantiss = dal_fixed31_32_sub(
108 value,
109 dal_fixed31_32_one);
110
111 if (dal_fixed31_32_lt(
112 mantiss,
113 dal_fixed31_32_zero) ||
114 dal_fixed31_32_lt(
115 dal_fixed31_32_one,
116 mantiss))
117 mantiss = dal_fixed31_32_zero;
118 else
119 mantiss = dal_fixed31_32_shl(
120 mantiss,
121 format->mantissa_bits);
122
123 *mantissa = dal_fixed31_32_floor(mantiss);
124
125 return true;
126}
127
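/*
 * Editorial worked example, not part of the original patch: for the
 * 6-bit-exponent / 12-bit-mantissa format used later in this file,
 * exp_offset is (1 << 5) - 1 == 31.  Converting the value 1.0 takes the
 * final "else" branch above (1.0 is neither < 1.0 nor >= 8191/4096), so
 * exponenta == 31 and mantissa == 0; setup_custom_float() below then
 * packs this as 31 << 12 == 0x1f000.
 */
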
128static bool setup_custom_float(
129 const struct custom_float_format *format,
130 bool negative,
131 uint32_t mantissa,
132 uint32_t exponenta,
133 uint32_t *result)
134{
135 uint32_t i = 0;
136 uint32_t j = 0;
137
138 uint32_t value = 0;
139
140	/* verification code:
141	 * once the calculation is known to be correct this can be removed
142	 */
143
144 const uint32_t mantissa_mask =
145 (1 << (format->mantissa_bits + 1)) - 1;
146
147 const uint32_t exponenta_mask =
148 (1 << (format->exponenta_bits + 1)) - 1;
149
150 if (mantissa & ~mantissa_mask) {
151 BREAK_TO_DEBUGGER();
152 mantissa = mantissa_mask;
153 }
154
155 if (exponenta & ~exponenta_mask) {
156 BREAK_TO_DEBUGGER();
157 exponenta = exponenta_mask;
158 }
159
160 /* end of verification code */
161
162 while (i < format->mantissa_bits) {
163 uint32_t mask = 1 << i;
164
165 if (mantissa & mask)
166 value |= mask;
167
168 ++i;
169 }
170
171 while (j < format->exponenta_bits) {
172 uint32_t mask = 1 << j;
173
174 if (exponenta & mask)
175 value |= mask << i;
176
177 ++j;
178 }
179
180 if (negative && format->sign)
181 value |= 1 << (i + j);
182
183 *result = value;
184
185 return true;
186}
187
188static bool convert_to_custom_float_format_ex(
189 struct fixed31_32 value,
190 const struct custom_float_format *format,
191 struct custom_float_value *result)
192{
193 return build_custom_float(
194 value, format,
195 &result->negative, &result->mantissa, &result->exponenta) &&
196 setup_custom_float(
197 format, result->negative, result->mantissa, result->exponenta,
198 &result->value);
199}
200
201static bool round_custom_float_6_12(
202 struct hw_x_point *x)
203{
204 struct custom_float_format fmt;
205
206 struct custom_float_value value;
207
208 fmt.exponenta_bits = 6;
209 fmt.mantissa_bits = 12;
210 fmt.sign = true;
211
212 if (!convert_to_custom_float_format_ex(
213 x->x, &fmt, &value))
214 return false;
215
216 x->adjusted_x = x->x;
217
218 if (value.mantissa) {
219 BREAK_TO_DEBUGGER();
220
221 return false;
222 }
223
224 return true;
225}
226
227static bool build_hw_curve_configuration(
228 const struct curve_config *curve_config,
229 struct gamma_curve *gamma_curve,
230 struct curve_points *curve_points,
231 struct hw_x_point *points,
232 uint32_t *number_of_points)
233{
234 const int8_t max_regions_number = ARRAY_SIZE(curve_config->segments);
235
236 int8_t i;
237
238 uint8_t segments_calculation[8] = { 0 };
239
240 struct fixed31_32 region1 = dal_fixed31_32_zero;
241 struct fixed31_32 region2;
242 struct fixed31_32 increment;
243
244 uint32_t index = 0;
245 uint32_t segments = 0;
246 uint32_t max_number;
247
248 bool result = false;
249
250 if (!number_of_points) {
251 BREAK_TO_DEBUGGER();
252 return false;
253 }
254
255 max_number = *number_of_points;
256
257 i = 0;
258
259 while (i != max_regions_number) {
260 gamma_curve[i].offset = 0;
261 gamma_curve[i].segments_num = 0;
262
263 ++i;
264 }
265
266 i = 0;
267
268 while (i != max_regions_number) {
269		/* segment counts must form an uninterrupted sequence */
270 if (curve_config->segments[i] == -1)
271 break;
272
273 ASSERT(curve_config->segments[i] >= 0);
274
275 segments += (1 << curve_config->segments[i]);
276
277 ++i;
278 }
279
280 if (segments > max_number) {
281 BREAK_TO_DEBUGGER();
282 } else {
283 int32_t divisor;
284 uint32_t offset = 0;
285 int8_t begin = curve_config->begin;
286 int32_t region_number = 0;
287
288 i = begin;
289
290 while ((index < max_number) &&
291 (region_number < max_regions_number) &&
292 (i <= 1)) {
293 int32_t j = 0;
294
295 segments = curve_config->segments[region_number];
296 divisor = 1 << segments;
297
298 if (segments == -1) {
299 if (i > 0) {
300 region1 = dal_fixed31_32_shl(
301 dal_fixed31_32_one,
302 i - 1);
303 region2 = dal_fixed31_32_shl(
304 dal_fixed31_32_one,
305 i);
306 } else {
307 region1 = dal_fixed31_32_shr(
308 dal_fixed31_32_one,
309 -(i - 1));
310 region2 = dal_fixed31_32_shr(
311 dal_fixed31_32_one,
312 -i);
313 }
314
315 break;
316 }
317
318 if (i > -1) {
319 region1 = dal_fixed31_32_shl(
320 dal_fixed31_32_one,
321 i);
322 region2 = dal_fixed31_32_shl(
323 dal_fixed31_32_one,
324 i + 1);
325 } else {
326 region1 = dal_fixed31_32_shr(
327 dal_fixed31_32_one,
328 -i);
329 region2 = dal_fixed31_32_shr(
330 dal_fixed31_32_one,
331 -(i + 1));
332 }
333
334 gamma_curve[region_number].offset = offset;
335 gamma_curve[region_number].segments_num = segments;
336
337 offset += divisor;
338
339 ++segments_calculation[segments];
340
341 increment = dal_fixed31_32_div_int(
342 dal_fixed31_32_sub(
343 region2,
344 region1),
345 divisor);
346
347 points[index].x = region1;
348
349 round_custom_float_6_12(points + index);
350
351 ++index;
352 ++region_number;
353
354 while ((index < max_number) && (j < divisor - 1)) {
355 region1 = dal_fixed31_32_add(
356 region1,
357 increment);
358
359 points[index].x = region1;
360 points[index].adjusted_x = region1;
361
362 ++index;
363 ++j;
364 }
365
366 ++i;
367 }
368
369 points[index].x = region1;
370
371 round_custom_float_6_12(points + index);
372
373 *number_of_points = index;
374
375 result = true;
376 }
377
378 curve_points[0].x = points[0].adjusted_x;
379 curve_points[0].offset = dal_fixed31_32_zero;
380
381 curve_points[1].x = points[index - 1].adjusted_x;
382 curve_points[1].offset = dal_fixed31_32_zero;
383
384 curve_points[2].x = points[index].adjusted_x;
385 curve_points[2].offset = dal_fixed31_32_zero;
386
387 return result;
388}
389
390static bool setup_distribution_points(
391 struct gamma_curve *arr_curve_points,
392 struct curve_points *arr_points,
393 uint32_t *hw_points_num,
394 struct hw_x_point *coordinates_x)
395{
396 struct curve_config cfg;
397
398 cfg.offset = 0;
399 cfg.segments[0] = 3;
400 cfg.segments[1] = 4;
401 cfg.segments[2] = 4;
402 cfg.segments[3] = 4;
403 cfg.segments[4] = 4;
404 cfg.segments[5] = 4;
405 cfg.segments[6] = 4;
406 cfg.segments[7] = 4;
407 cfg.segments[8] = 5;
408 cfg.segments[9] = 5;
409 cfg.segments[10] = 0;
410 cfg.segments[11] = -1;
411 cfg.segments[12] = -1;
412 cfg.segments[13] = -1;
413 cfg.segments[14] = -1;
414 cfg.segments[15] = -1;
415
416 cfg.begin = -10;
417
418 if (!build_hw_curve_configuration(
419 &cfg, arr_curve_points,
420 arr_points,
421 coordinates_x, hw_points_num)) {
422 ASSERT_CRITICAL(false);
423 return false;
424 }
425 return true;
426}
427
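/*
 * Editorial note, not part of the original patch: the configuration above
 * carves the x axis into power-of-two regions starting at
 * 2^begin == 2^-10; each region [2^i, 2^(i+1)) is subdivided into
 * 2^segments[k] evenly spaced hardware points, and -1 terminates the
 * segment list.  The resulting point count is returned through
 * hw_points_num.
 */
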
428struct dividers {
429 struct fixed31_32 divider1;
430 struct fixed31_32 divider2;
431 struct fixed31_32 divider3;
432};
433
434static void build_regamma_coefficients(struct gamma_coefficients *coefficients)
435{
436 /* sRGB should apply 2.4 */
437 static const int32_t numerator01[3] = { 31308, 31308, 31308 };
438 static const int32_t numerator02[3] = { 12920, 12920, 12920 };
439 static const int32_t numerator03[3] = { 55, 55, 55 };
440 static const int32_t numerator04[3] = { 55, 55, 55 };
441 static const int32_t numerator05[3] = { 2400, 2400, 2400 };
442
443 const int32_t *numerator1;
444 const int32_t *numerator2;
445 const int32_t *numerator3;
446 const int32_t *numerator4;
447 const int32_t *numerator5;
448
449 uint32_t i = 0;
450
451 numerator1 = numerator01;
452 numerator2 = numerator02;
453 numerator3 = numerator03;
454 numerator4 = numerator04;
455 numerator5 = numerator05;
456
457 do {
458 coefficients->a0[i] = dal_fixed31_32_from_fraction(
459 numerator1[i], 10000000);
460 coefficients->a1[i] = dal_fixed31_32_from_fraction(
461 numerator2[i], 1000);
462 coefficients->a2[i] = dal_fixed31_32_from_fraction(
463 numerator3[i], 1000);
464 coefficients->a3[i] = dal_fixed31_32_from_fraction(
465 numerator4[i], 1000);
466 coefficients->user_gamma[i] = dal_fixed31_32_from_fraction(
467 numerator5[i], 1000);
468
469 ++i;
470 } while (i != ARRAY_SIZE(coefficients->a0));
471}
472
473static struct fixed31_32 translate_from_linear_space(
474 struct fixed31_32 arg,
475 struct fixed31_32 a0,
476 struct fixed31_32 a1,
477 struct fixed31_32 a2,
478 struct fixed31_32 a3,
479 struct fixed31_32 gamma)
480{
481 const struct fixed31_32 one = dal_fixed31_32_from_int(1);
482
483 if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
484 return dal_fixed31_32_sub(
485 a2,
486 dal_fixed31_32_mul(
487 dal_fixed31_32_add(
488 one,
489 a3),
490 dal_fixed31_32_pow(
491 dal_fixed31_32_neg(arg),
492 dal_fixed31_32_recip(gamma))));
493 else if (dal_fixed31_32_le(a0, arg))
494 return dal_fixed31_32_sub(
495 dal_fixed31_32_mul(
496 dal_fixed31_32_add(
497 one,
498 a3),
499 dal_fixed31_32_pow(
500 arg,
501 dal_fixed31_32_recip(gamma))),
502 a2);
503 else
504 return dal_fixed31_32_mul(
505 arg,
506 a1);
507}
508
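/*
 * Editorial note, not part of the original patch: with the coefficients
 * built above (a0 = 0.0031308, a1 = 12.92, a2 = a3 = 0.055, gamma = 2.4)
 * this is the standard sRGB encoding function
 *
 *   y = 12.92 * x                    for |x| < 0.0031308
 *   y = 1.055 * x^(1/2.4) - 0.055    for x >= 0.0031308
 *
 * mirrored around zero for negative input.
 */
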
509static inline struct fixed31_32 translate_from_linear_space_ex(
510 struct fixed31_32 arg,
511 struct gamma_coefficients *coeff,
512 uint32_t color_index)
513{
514 return translate_from_linear_space(
515 arg,
516 coeff->a0[color_index],
517 coeff->a1[color_index],
518 coeff->a2[color_index],
519 coeff->a3[color_index],
520 coeff->user_gamma[color_index]);
521}
522
523static bool find_software_points(
524 const struct gamma_pixel *axis_x_256,
525 struct fixed31_32 hw_point,
526 enum channel_name channel,
527 uint32_t *index_to_start,
528 uint32_t *index_left,
529 uint32_t *index_right,
530 enum hw_point_position *pos)
531{
532 const uint32_t max_number = RGB_256X3X16 + 3;
533
534 struct fixed31_32 left, right;
535
536 uint32_t i = *index_to_start;
537
538 while (i < max_number) {
539 if (channel == CHANNEL_NAME_RED) {
540 left = axis_x_256[i].r;
541
542 if (i < max_number - 1)
543 right = axis_x_256[i + 1].r;
544 else
545 right = axis_x_256[max_number - 1].r;
546 } else if (channel == CHANNEL_NAME_GREEN) {
547 left = axis_x_256[i].g;
548
549 if (i < max_number - 1)
550 right = axis_x_256[i + 1].g;
551 else
552 right = axis_x_256[max_number - 1].g;
553 } else {
554 left = axis_x_256[i].b;
555
556 if (i < max_number - 1)
557 right = axis_x_256[i + 1].b;
558 else
559 right = axis_x_256[max_number - 1].b;
560 }
561
562 if (dal_fixed31_32_le(left, hw_point) &&
563 dal_fixed31_32_le(hw_point, right)) {
564 *index_to_start = i;
565 *index_left = i;
566
567 if (i < max_number - 1)
568 *index_right = i + 1;
569 else
570 *index_right = max_number - 1;
571
572 *pos = HW_POINT_POSITION_MIDDLE;
573
574 return true;
575 } else if ((i == *index_to_start) &&
576 dal_fixed31_32_le(hw_point, left)) {
577 *index_to_start = i;
578 *index_left = i;
579 *index_right = i;
580
581 *pos = HW_POINT_POSITION_LEFT;
582
583 return true;
584 } else if ((i == max_number - 1) &&
585 dal_fixed31_32_le(right, hw_point)) {
586 *index_to_start = i;
587 *index_left = i;
588 *index_right = i;
589
590 *pos = HW_POINT_POSITION_RIGHT;
591
592 return true;
593 }
594
595 ++i;
596 }
597
598 return false;
599}
600
601static bool build_custom_gamma_mapping_coefficients_worker(
602 struct pixel_gamma_point *coeff,
603 const struct hw_x_point *coordinates_x,
604 const struct gamma_pixel *axis_x_256,
605 enum channel_name channel,
606 uint32_t number_of_points,
607 enum surface_pixel_format pixel_format)
608{
609 uint32_t i = 0;
610
611 while (i <= number_of_points) {
612 struct fixed31_32 coord_x;
613
614 uint32_t index_to_start = 0;
615 uint32_t index_left = 0;
616 uint32_t index_right = 0;
617
618 enum hw_point_position hw_pos;
619
620 struct gamma_point *point;
621
622 struct fixed31_32 left_pos;
623 struct fixed31_32 right_pos;
624
625 /*
626 * TODO: confirm enum in surface_pixel_format
627 * if (pixel_format == PIXEL_FORMAT_FP16)
628		 *	coord_x = coordinates_x[i].adjusted_x;
629		 * else
630 */
631 if (channel == CHANNEL_NAME_RED)
632 coord_x = coordinates_x[i].regamma_y_red;
633 else if (channel == CHANNEL_NAME_GREEN)
634 coord_x = coordinates_x[i].regamma_y_green;
635 else
636 coord_x = coordinates_x[i].regamma_y_blue;
637
638 if (!find_software_points(
639 axis_x_256, coord_x, channel,
640 &index_to_start, &index_left, &index_right, &hw_pos)) {
641 BREAK_TO_DEBUGGER();
642 return false;
643 }
644
645 if (index_left >= RGB_256X3X16 + 3) {
646 BREAK_TO_DEBUGGER();
647 return false;
648 }
649
650 if (index_right >= RGB_256X3X16 + 3) {
651 BREAK_TO_DEBUGGER();
652 return false;
653 }
654
655 if (channel == CHANNEL_NAME_RED) {
656 point = &coeff[i].r;
657
658 left_pos = axis_x_256[index_left].r;
659 right_pos = axis_x_256[index_right].r;
660 } else if (channel == CHANNEL_NAME_GREEN) {
661 point = &coeff[i].g;
662
663 left_pos = axis_x_256[index_left].g;
664 right_pos = axis_x_256[index_right].g;
665 } else {
666 point = &coeff[i].b;
667
668 left_pos = axis_x_256[index_left].b;
669 right_pos = axis_x_256[index_right].b;
670 }
671
672 if (hw_pos == HW_POINT_POSITION_MIDDLE)
673 point->coeff = dal_fixed31_32_div(
674 dal_fixed31_32_sub(
675 coord_x,
676 left_pos),
677 dal_fixed31_32_sub(
678 right_pos,
679 left_pos));
680 else if (hw_pos == HW_POINT_POSITION_LEFT)
681 point->coeff = dal_fixed31_32_zero;
682 else if (hw_pos == HW_POINT_POSITION_RIGHT)
683 point->coeff = dal_fixed31_32_from_int(2);
684 else {
685 BREAK_TO_DEBUGGER();
686 return false;
687 }
688
689 point->left_index = index_left;
690 point->right_index = index_right;
691 point->pos = hw_pos;
692
693 ++i;
694 }
695
696 return true;
697}
698
699static inline bool build_oem_custom_gamma_mapping_coefficients(
700 struct pixel_gamma_point *coeff128_oem,
701 const struct hw_x_point *coordinates_x,
702 const struct gamma_pixel *axis_x_256,
703 uint32_t number_of_points,
704 enum surface_pixel_format pixel_format)
705{
706 int i;
707
708 for (i = 0; i < 3; i++) {
709 if (!build_custom_gamma_mapping_coefficients_worker(
710 coeff128_oem, coordinates_x, axis_x_256, i,
711 number_of_points, pixel_format))
712 return false;
713 }
714 return true;
715}
716
717static struct fixed31_32 calculate_mapped_value(
718 struct pwl_float_data *rgb,
719 const struct pixel_gamma_point *coeff,
720 enum channel_name channel,
721 uint32_t max_index)
722{
723 const struct gamma_point *point;
724
725 struct fixed31_32 result;
726
727 if (channel == CHANNEL_NAME_RED)
728 point = &coeff->r;
729 else if (channel == CHANNEL_NAME_GREEN)
730 point = &coeff->g;
731 else
732 point = &coeff->b;
733
734 if ((point->left_index < 0) || (point->left_index > max_index)) {
735 BREAK_TO_DEBUGGER();
736 return dal_fixed31_32_zero;
737 }
738
739 if ((point->right_index < 0) || (point->right_index > max_index)) {
740 BREAK_TO_DEBUGGER();
741 return dal_fixed31_32_zero;
742 }
743
744 if (point->pos == HW_POINT_POSITION_MIDDLE)
745 if (channel == CHANNEL_NAME_RED)
746 result = dal_fixed31_32_add(
747 dal_fixed31_32_mul(
748 point->coeff,
749 dal_fixed31_32_sub(
750 rgb[point->right_index].r,
751 rgb[point->left_index].r)),
752 rgb[point->left_index].r);
753 else if (channel == CHANNEL_NAME_GREEN)
754 result = dal_fixed31_32_add(
755 dal_fixed31_32_mul(
756 point->coeff,
757 dal_fixed31_32_sub(
758 rgb[point->right_index].g,
759 rgb[point->left_index].g)),
760 rgb[point->left_index].g);
761 else
762 result = dal_fixed31_32_add(
763 dal_fixed31_32_mul(
764 point->coeff,
765 dal_fixed31_32_sub(
766 rgb[point->right_index].b,
767 rgb[point->left_index].b)),
768 rgb[point->left_index].b);
769 else if (point->pos == HW_POINT_POSITION_LEFT) {
770 BREAK_TO_DEBUGGER();
771 result = dal_fixed31_32_zero;
772 } else {
773 BREAK_TO_DEBUGGER();
774 result = dal_fixed31_32_one;
775 }
776
777 return result;
778}
779
780static inline struct fixed31_32 calculate_oem_mapped_value(
781 struct pwl_float_data *rgb_oem,
782 const struct pixel_gamma_point *coeff,
783 uint32_t index,
784 enum channel_name channel,
785 uint32_t max_index)
786{
787 return calculate_mapped_value(
788 rgb_oem,
789 coeff + index,
790 channel,
791 max_index);
792}
793
794static void build_regamma_curve(struct pwl_float_data_ex *rgb_regamma,
795 struct pwl_float_data *rgb_oem,
796 struct pixel_gamma_point *coeff128_oem,
797 const struct core_gamma *ramp,
798 const struct core_surface *surface,
799 uint32_t hw_points_num,
800 const struct hw_x_point *coordinate_x,
801 const struct gamma_pixel *axis_x,
802 struct dividers dividers)
803{
804 uint32_t i;
805
806 struct gamma_coefficients coeff;
807 struct pwl_float_data_ex *rgb = rgb_regamma;
808 const struct hw_x_point *coord_x = coordinate_x;
809
810 build_regamma_coefficients(&coeff);
811
812	/* Use opp110->regamma.coordinates_x to retrieve
813	 * coordinates chosen based on the given user curve (future task).
814	 * The x values are exponentially distributed and currently
815	 * hard-coded; the user curve shape is ignored.
816	 * The future task is to recalculate opp110->
817	 * regamma.coordinates_x based on the input/user curve, i.e. a
818	 * translation from 256/1025 points to 128 PWL points.
819	 */
820
821 i = 0;
822
823 while (i != hw_points_num + 1) {
824 rgb->r = translate_from_linear_space_ex(
825 coord_x->adjusted_x, &coeff, 0);
826 rgb->g = translate_from_linear_space_ex(
827 coord_x->adjusted_x, &coeff, 1);
828 rgb->b = translate_from_linear_space_ex(
829 coord_x->adjusted_x, &coeff, 2);
830
831 ++coord_x;
832 ++rgb;
833 ++i;
834 }
835}
836
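/*
 * Editorial note, not part of the original patch: the function below
 * normalizes the 256-entry user ramp against 0xFF00 (the OS maximum)
 * unless any channel exceeds that value, in which case the full 16-bit
 * range 0xFFFF is used as the scaler; the three extra entries extrapolate
 * the last point using the supplied dividers.
 */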
837static bool scale_gamma(struct pwl_float_data *pwl_rgb,
838 const struct core_gamma *ramp,
839 struct dividers dividers)
840{
841 const struct dc_gamma_ramp_rgb256x3x16 *gamma;
842 const uint16_t max_driver = 0xFFFF;
843 const uint16_t max_os = 0xFF00;
844 uint16_t scaler = max_os;
845 uint32_t i;
846 struct pwl_float_data *rgb = pwl_rgb;
847 struct pwl_float_data *rgb_last = rgb + RGB_256X3X16 - 1;
848
849 if (ramp->public.type == GAMMA_RAMP_RBG256X3X16)
850 gamma = &ramp->public.gamma_ramp_rgb256x3x16;
851 else
852 return false; /* invalid option */
853
854 i = 0;
855
856 do {
857 if ((gamma->red[i] > max_os) ||
858 (gamma->green[i] > max_os) ||
859 (gamma->blue[i] > max_os)) {
860 scaler = max_driver;
861 break;
862 }
863 ++i;
864 } while (i != RGB_256X3X16);
865
866 i = 0;
867
868 do {
869 rgb->r = dal_fixed31_32_from_fraction(
870 gamma->red[i], scaler);
871 rgb->g = dal_fixed31_32_from_fraction(
872 gamma->green[i], scaler);
873 rgb->b = dal_fixed31_32_from_fraction(
874 gamma->blue[i], scaler);
875
876 ++rgb;
877 ++i;
878 } while (i != RGB_256X3X16);
879
880 rgb->r = dal_fixed31_32_mul(rgb_last->r,
881 dividers.divider1);
882 rgb->g = dal_fixed31_32_mul(rgb_last->g,
883 dividers.divider1);
884 rgb->b = dal_fixed31_32_mul(rgb_last->b,
885 dividers.divider1);
886
887 ++rgb;
888
889 rgb->r = dal_fixed31_32_mul(rgb_last->r,
890 dividers.divider2);
891 rgb->g = dal_fixed31_32_mul(rgb_last->g,
892 dividers.divider2);
893 rgb->b = dal_fixed31_32_mul(rgb_last->b,
894 dividers.divider2);
895
896 ++rgb;
897
898 rgb->r = dal_fixed31_32_mul(rgb_last->r,
899 dividers.divider3);
900 rgb->g = dal_fixed31_32_mul(rgb_last->g,
901 dividers.divider3);
902 rgb->b = dal_fixed31_32_mul(rgb_last->b,
903 dividers.divider3);
904
905 return true;
906}
907
908static void build_evenly_distributed_points(
909 struct gamma_pixel *points,
910 uint32_t numberof_points,
911 struct fixed31_32 max_value,
912 struct dividers dividers)
913{
914 struct gamma_pixel *p = points;
915 struct gamma_pixel *p_last = p + numberof_points - 1;
916
917 uint32_t i = 0;
918
919 do {
920 struct fixed31_32 value = dal_fixed31_32_div_int(
921 dal_fixed31_32_mul_int(max_value, i),
922 numberof_points - 1);
923
924 p->r = value;
925 p->g = value;
926 p->b = value;
927
928 ++p;
929 ++i;
930 } while (i != numberof_points);
931
932 p->r = dal_fixed31_32_div(p_last->r, dividers.divider1);
933 p->g = dal_fixed31_32_div(p_last->g, dividers.divider1);
934 p->b = dal_fixed31_32_div(p_last->b, dividers.divider1);
935
936 ++p;
937
938 p->r = dal_fixed31_32_div(p_last->r, dividers.divider2);
939 p->g = dal_fixed31_32_div(p_last->g, dividers.divider2);
940 p->b = dal_fixed31_32_div(p_last->b, dividers.divider2);
941
942 ++p;
943
944 p->r = dal_fixed31_32_div(p_last->r, dividers.divider3);
945 p->g = dal_fixed31_32_div(p_last->g, dividers.divider3);
946 p->b = dal_fixed31_32_div(p_last->b, dividers.divider3);
947}
948
949static inline void copy_rgb_regamma_to_coordinates_x(
950 struct hw_x_point *coordinates_x,
951 uint32_t hw_points_num,
952 const struct pwl_float_data_ex *rgb_ex)
953{
954 struct hw_x_point *coords = coordinates_x;
955 uint32_t i = 0;
956 const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
957
958 while (i <= hw_points_num) {
959 coords->regamma_y_red = rgb_regamma->r;
960 coords->regamma_y_green = rgb_regamma->g;
961 coords->regamma_y_blue = rgb_regamma->b;
962
963 ++coords;
964 ++rgb_regamma;
965 ++i;
966 }
967}
968
969static bool calculate_interpolated_hardware_curve(
970 struct pwl_result_data *rgb,
971 struct pixel_gamma_point *coeff128,
972 struct pwl_float_data *rgb_user,
973 const struct hw_x_point *coordinates_x,
974 const struct gamma_pixel *axis_x_256,
975 uint32_t number_of_points,
976 enum surface_pixel_format pixel_format)
977{
978
979 const struct pixel_gamma_point *coeff;
980 struct pixel_gamma_point *coeff_128 = coeff128;
981 uint32_t max_entries = 3 - 1;
982 struct pwl_result_data *rgb_resulted = rgb;
983
984 uint32_t i = 0;
985
986 if (!build_oem_custom_gamma_mapping_coefficients(
987 coeff_128, coordinates_x, axis_x_256,
988 number_of_points,
989 pixel_format))
990 return false;
991
992 coeff = coeff128;
993 max_entries += RGB_256X3X16;
994
995 /* TODO: float point case */
996
997 while (i <= number_of_points) {
998 rgb_resulted->red = calculate_mapped_value(
999 rgb_user, coeff, CHANNEL_NAME_RED, max_entries);
1000 rgb_resulted->green = calculate_mapped_value(
1001 rgb_user, coeff, CHANNEL_NAME_GREEN, max_entries);
1002 rgb_resulted->blue = calculate_mapped_value(
1003 rgb_user, coeff, CHANNEL_NAME_BLUE, max_entries);
1004
1005 ++coeff;
1006 ++rgb_resulted;
1007 ++i;
1008 }
1009
1010 return true;
1011}
1012
1013static bool map_regamma_hw_to_x_user(
1014 struct pixel_gamma_point *coeff128,
1015 struct pwl_float_data *rgb_oem,
1016 struct pwl_result_data *rgb_resulted,
1017 struct pwl_float_data *rgb_user,
1018 struct hw_x_point *coords_x,
1019 const struct gamma_pixel *axis_x,
1020 const struct dc_gamma *gamma,
1021 const struct pwl_float_data_ex *rgb_regamma,
1022 struct dividers dividers,
1023 uint32_t hw_points_num,
1024 const struct core_surface *surface)
1025{
1026 /* setup to spare calculated ideal regamma values */
1027
1028 struct pixel_gamma_point *coeff = coeff128;
1029
1030 struct hw_x_point *coords = coords_x;
1031
1032 copy_rgb_regamma_to_coordinates_x(coords, hw_points_num, rgb_regamma);
1033
1034 return calculate_interpolated_hardware_curve(
1035 rgb_resulted, coeff, rgb_user, coords, axis_x,
1036 hw_points_num, surface->public.format);
1037}
1038
1039static void build_new_custom_resulted_curve(
1040 struct pwl_result_data *rgb_resulted,
1041 uint32_t hw_points_num)
1042{
1043 struct pwl_result_data *rgb = rgb_resulted;
1044 struct pwl_result_data *rgb_plus_1 = rgb + 1;
1045
1046 uint32_t i;
1047
1048 i = 0;
1049
1050 while (i != hw_points_num + 1) {
1051 rgb->red = dal_fixed31_32_clamp(
1052 rgb->red, dal_fixed31_32_zero,
1053 dal_fixed31_32_one);
1054 rgb->green = dal_fixed31_32_clamp(
1055 rgb->green, dal_fixed31_32_zero,
1056 dal_fixed31_32_one);
1057 rgb->blue = dal_fixed31_32_clamp(
1058 rgb->blue, dal_fixed31_32_zero,
1059 dal_fixed31_32_one);
1060
1061 ++rgb;
1062 ++i;
1063 }
1064
1065 rgb = rgb_resulted;
1066
1067 i = 1;
1068
1069 while (i != hw_points_num + 1) {
1070 if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
1071 rgb_plus_1->red = rgb->red;
1072 if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
1073 rgb_plus_1->green = rgb->green;
1074 if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
1075 rgb_plus_1->blue = rgb->blue;
1076
1077 rgb->delta_red = dal_fixed31_32_sub(
1078 rgb_plus_1->red,
1079 rgb->red);
1080 rgb->delta_green = dal_fixed31_32_sub(
1081 rgb_plus_1->green,
1082 rgb->green);
1083 rgb->delta_blue = dal_fixed31_32_sub(
1084 rgb_plus_1->blue,
1085 rgb->blue);
1086
1087 ++rgb_plus_1;
1088 ++rgb;
1089 ++i;
1090 }
1091}
1092
1093static void rebuild_curve_configuration_magic(
1094 struct curve_points *arr_points,
1095 struct pwl_result_data *rgb_resulted,
1096 const struct hw_x_point *coordinates_x,
1097 uint32_t hw_points_num)
1098{
1099 const struct fixed31_32 magic_number =
1100 dal_fixed31_32_from_fraction(249, 1000);
1101
1102 struct fixed31_32 y_r;
1103 struct fixed31_32 y_g;
1104 struct fixed31_32 y_b;
1105
1106 struct fixed31_32 y1_min;
1107 struct fixed31_32 y2_max;
1108 struct fixed31_32 y3_max;
1109
1110 y_r = rgb_resulted[0].red;
1111 y_g = rgb_resulted[0].green;
1112 y_b = rgb_resulted[0].blue;
1113
1114 y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
1115
1116 arr_points[0].x = coordinates_x[0].adjusted_x;
1117 arr_points[0].y = y1_min;
1118 arr_points[0].slope = dal_fixed31_32_div(
1119 arr_points[0].y,
1120 arr_points[0].x);
1121
1122 arr_points[1].x = dal_fixed31_32_add(
1123 coordinates_x[hw_points_num - 1].adjusted_x,
1124 magic_number);
1125
1126 arr_points[2].x = arr_points[1].x;
1127
1128 y_r = rgb_resulted[hw_points_num - 1].red;
1129 y_g = rgb_resulted[hw_points_num - 1].green;
1130 y_b = rgb_resulted[hw_points_num - 1].blue;
1131
1132 y2_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
1133
1134 arr_points[1].y = y2_max;
1135
1136 y_r = rgb_resulted[hw_points_num].red;
1137 y_g = rgb_resulted[hw_points_num].green;
1138 y_b = rgb_resulted[hw_points_num].blue;
1139
1140 y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
1141
1142 arr_points[2].y = y3_max;
1143
1144 arr_points[2].slope = dal_fixed31_32_one;
1145}
1146
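/*
 * Editorial note, not part of the original patch: the three curve points
 * built above appear to bound the piecewise-linear region handed to the
 * hardware: point 0 pairs the first x coordinate with the minimum of the
 * first RGB values and a slope through the origin, while points 1 and 2
 * share an x coordinate just past the last hardware point (offset by the
 * 0.249 "magic number") and carry the maxima of the last two RGB entries,
 * with the end slope forced to one.
 */
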
1147static bool convert_to_custom_float_format(
1148 struct fixed31_32 value,
1149 const struct custom_float_format *format,
1150 uint32_t *result)
1151{
1152 uint32_t mantissa;
1153 uint32_t exponenta;
1154 bool negative;
1155
1156 return build_custom_float(
1157 value, format, &negative, &mantissa, &exponenta) &&
1158 setup_custom_float(
1159 format, negative, mantissa, exponenta, result);
1160}
1161
1162static bool convert_to_custom_float(
1163 struct pwl_result_data *rgb_resulted,
1164 struct curve_points *arr_points,
1165 uint32_t hw_points_num)
1166{
1167 struct custom_float_format fmt;
1168
1169 struct pwl_result_data *rgb = rgb_resulted;
1170
1171 uint32_t i = 0;
1172
1173 fmt.exponenta_bits = 6;
1174 fmt.mantissa_bits = 12;
1175 fmt.sign = true;
1176
1177 if (!convert_to_custom_float_format(
1178 arr_points[0].x,
1179 &fmt,
1180 &arr_points[0].custom_float_x)) {
1181 BREAK_TO_DEBUGGER();
1182 return false;
1183 }
1184
1185 if (!convert_to_custom_float_format(
1186 arr_points[0].offset,
1187 &fmt,
1188 &arr_points[0].custom_float_offset)) {
1189 BREAK_TO_DEBUGGER();
1190 return false;
1191 }
1192
1193 if (!convert_to_custom_float_format(
1194 arr_points[0].slope,
1195 &fmt,
1196 &arr_points[0].custom_float_slope)) {
1197 BREAK_TO_DEBUGGER();
1198 return false;
1199 }
1200
1201 fmt.mantissa_bits = 10;
1202 fmt.sign = false;
1203
1204 if (!convert_to_custom_float_format(
1205 arr_points[1].x,
1206 &fmt,
1207 &arr_points[1].custom_float_x)) {
1208 BREAK_TO_DEBUGGER();
1209 return false;
1210 }
1211
1212 if (!convert_to_custom_float_format(
1213 arr_points[1].y,
1214 &fmt,
1215 &arr_points[1].custom_float_y)) {
1216 BREAK_TO_DEBUGGER();
1217 return false;
1218 }
1219
1220 if (!convert_to_custom_float_format(
1221 arr_points[2].slope,
1222 &fmt,
1223 &arr_points[2].custom_float_slope)) {
1224 BREAK_TO_DEBUGGER();
1225 return false;
1226 }
1227
1228 fmt.mantissa_bits = 12;
1229 fmt.sign = true;
1230
1231 while (i != hw_points_num) {
1232 if (!convert_to_custom_float_format(
1233 rgb->red,
1234 &fmt,
1235 &rgb->red_reg)) {
1236 BREAK_TO_DEBUGGER();
1237 return false;
1238 }
1239
1240 if (!convert_to_custom_float_format(
1241 rgb->green,
1242 &fmt,
1243 &rgb->green_reg)) {
1244 BREAK_TO_DEBUGGER();
1245 return false;
1246 }
1247
1248 if (!convert_to_custom_float_format(
1249 rgb->blue,
1250 &fmt,
1251 &rgb->blue_reg)) {
1252 BREAK_TO_DEBUGGER();
1253 return false;
1254 }
1255
1256 if (!convert_to_custom_float_format(
1257 rgb->delta_red,
1258 &fmt,
1259 &rgb->delta_red_reg)) {
1260 BREAK_TO_DEBUGGER();
1261 return false;
1262 }
1263
1264 if (!convert_to_custom_float_format(
1265 rgb->delta_green,
1266 &fmt,
1267 &rgb->delta_green_reg)) {
1268 BREAK_TO_DEBUGGER();
1269 return false;
1270 }
1271
1272 if (!convert_to_custom_float_format(
1273 rgb->delta_blue,
1274 &fmt,
1275 &rgb->delta_blue_reg)) {
1276 BREAK_TO_DEBUGGER();
1277 return false;
1278 }
1279
1280 ++rgb;
1281 ++i;
1282 }
1283
1284 return true;
1285}
1286
1287bool calculate_regamma_params(struct pwl_params *params,
1288 const struct core_gamma *ramp,
1289 const struct core_surface *surface)
1290{
1291 struct gamma_curve *arr_curve_points = params->arr_curve_points;
1292 struct curve_points *arr_points = params->arr_points;
1293 struct pwl_result_data *rgb_resulted = params->rgb_resulted;
1294 struct dividers dividers;
1295
1296 struct hw_x_point *coordinates_x = NULL;
1297	struct pwl_float_data *rgb_user = NULL;
1298 struct pwl_float_data_ex *rgb_regamma = NULL;
1299 struct pwl_float_data *rgb_oem = NULL;
1300 struct gamma_pixel *axix_x_256 = NULL;
1301 struct pixel_gamma_point *coeff128_oem = NULL;
1302 struct pixel_gamma_point *coeff128 = NULL;
1303
1304
1305 bool ret = false;
1306
1307 coordinates_x = dm_alloc(sizeof(*coordinates_x)*(256 + 3));
1308 if (!coordinates_x)
1309 goto coordinates_x_alloc_fail;
1310 rgb_user = dm_alloc(sizeof(*rgb_user) * (FLOAT_GAMMA_RAMP_MAX + 3));
1311 if (!rgb_user)
1312 goto rgb_user_alloc_fail;
1313 rgb_regamma = dm_alloc(sizeof(*rgb_regamma) * (256 + 3));
1314 if (!rgb_regamma)
1315 goto rgb_regamma_alloc_fail;
1316 rgb_oem = dm_alloc(sizeof(*rgb_oem) * (FLOAT_GAMMA_RAMP_MAX + 3));
1317 if (!rgb_oem)
1318 goto rgb_oem_alloc_fail;
1319 axix_x_256 = dm_alloc(sizeof(*axix_x_256) * (256 + 3));
1320 if (!axix_x_256)
1321 goto axix_x_256_alloc_fail;
1322 coeff128_oem = dm_alloc(sizeof(*coeff128_oem) * (256 + 3));
1323 if (!coeff128_oem)
1324 goto coeff128_oem_alloc_fail;
1325 coeff128 = dm_alloc(sizeof(*coeff128) * (256 + 3));
1326 if (!coeff128)
1327 goto coeff128_alloc_fail;
1328
1329 dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
1330 dividers.divider2 = dal_fixed31_32_from_int(2);
1331 dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
1332
1333 build_evenly_distributed_points(
1334 axix_x_256,
1335 256,
1336 dal_fixed31_32_one,
1337 dividers);
1338
1339 scale_gamma(rgb_user, ramp, dividers);
1340
1341 setup_distribution_points(arr_curve_points, arr_points,
1342 &params->hw_points_num, coordinates_x);
1343
1344 build_regamma_curve(rgb_regamma, rgb_oem, coeff128_oem,
1345 ramp, surface, params->hw_points_num,
1346 coordinates_x, axix_x_256, dividers);
1347
1348 map_regamma_hw_to_x_user(coeff128, rgb_oem, rgb_resulted, rgb_user,
1349 coordinates_x, axix_x_256, &ramp->public, rgb_regamma,
1350 dividers, params->hw_points_num, surface);
1351
1352 build_new_custom_resulted_curve(rgb_resulted, params->hw_points_num);
1353
1354 rebuild_curve_configuration_magic(
1355 arr_points,
1356 rgb_resulted,
1357 coordinates_x,
1358 params->hw_points_num);
1359
1360 convert_to_custom_float(rgb_resulted, arr_points,
1361 params->hw_points_num);
1362
1363 ret = true;
1364
1365 dm_free(coeff128);
1366coeff128_alloc_fail:
1367 dm_free(coeff128_oem);
1368coeff128_oem_alloc_fail:
1369 dm_free(axix_x_256);
1370axix_x_256_alloc_fail:
1371 dm_free(rgb_oem);
1372rgb_oem_alloc_fail:
1373 dm_free(rgb_regamma);
1374rgb_regamma_alloc_fail:
1375 dm_free(rgb_user);
1376rgb_user_alloc_fail:
1377 dm_free(coordinates_x);
1378coordinates_x_alloc_fail:
1379 return ret;
1380
1381}
1382
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
new file mode 100644
index 000000000000..f7638f84421b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -0,0 +1,1846 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25#include "dm_services.h"
26
27#include "dc.h"
28
29#include "core_status.h"
30#include "core_types.h"
31#include "hw_sequencer.h"
32
33#include "resource.h"
34
35#include "clock_source.h"
36#include "dc_bios_types.h"
37
38#include "bandwidth_calcs.h"
39#include "bios_parser_interface.h"
40#include "include/irq_service_interface.h"
41#include "transform.h"
42#include "timing_generator.h"
43#include "virtual/virtual_link_encoder.h"
44
45#include "link_hwss.h"
46#include "link_encoder.h"
47
48#include "dc_link_ddc.h"
49#include "dm_helpers.h"
50#include "mem_input.h"
51
52/*******************************************************************************
53 * Private structures
54 ******************************************************************************/
55
56struct dc_target_sync_report {
57 uint32_t h_count;
58 uint32_t v_count;
59};
60
61/*******************************************************************************
62 * Private functions
63 ******************************************************************************/
64static void destroy_links(struct core_dc *dc)
65{
66 uint32_t i;
67
68 for (i = 0; i < dc->link_count; i++) {
69		if (dc->links[i])
70 link_destroy(&dc->links[i]);
71 }
72}
73
74static bool create_links(
75 struct core_dc *dc,
76 uint32_t num_virtual_links)
77{
78 int i;
79 int connectors_num;
80 struct dc_bios *bios = dc->ctx->dc_bios;
81
82 dc->link_count = 0;
83
84 connectors_num = bios->funcs->get_connectors_number(bios);
85
86 if (connectors_num > ENUM_ID_COUNT) {
87 dm_error(
88 "DC: Number of connectors %d exceeds maximum of %d!\n",
89 connectors_num,
90 ENUM_ID_COUNT);
91 return false;
92 }
93
94 if (connectors_num == 0 && num_virtual_links == 0) {
95 dm_error("DC: Number of connectors is zero!\n");
96 }
97
98 dm_output_to_console(
99 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
100 __func__,
101 connectors_num,
102 num_virtual_links);
103
104 for (i = 0; i < connectors_num; i++) {
105 struct link_init_data link_init_params = {0};
106 struct core_link *link;
107
108 link_init_params.ctx = dc->ctx;
109 link_init_params.connector_index = i;
110 link_init_params.link_index = dc->link_count;
111 link_init_params.dc = dc;
112 link = link_create(&link_init_params);
113
114 if (link) {
115 dc->links[dc->link_count] = link;
116 link->dc = dc;
117 ++dc->link_count;
118 } else {
119 dm_error("DC: failed to create link!\n");
120 }
121 }
122
123 for (i = 0; i < num_virtual_links; i++) {
124 struct core_link *link = dm_alloc(sizeof(*link));
125 struct encoder_init_data enc_init = {0};
126
127 if (link == NULL) {
128 BREAK_TO_DEBUGGER();
129 goto failed_alloc;
130 }
131
132 link->ctx = dc->ctx;
133 link->dc = dc;
134 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
135 link->link_id.type = OBJECT_TYPE_CONNECTOR;
136 link->link_id.id = CONNECTOR_ID_VIRTUAL;
137 link->link_id.enum_id = ENUM_ID_1;
138 link->link_enc = dm_alloc(sizeof(*link->link_enc));
139
140 enc_init.ctx = dc->ctx;
141 enc_init.channel = CHANNEL_ID_UNKNOWN;
142 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
143 enc_init.transmitter = TRANSMITTER_UNKNOWN;
144 enc_init.connector = link->link_id;
145 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
146 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
147 enc_init.encoder.enum_id = ENUM_ID_1;
148 virtual_link_encoder_construct(link->link_enc, &enc_init);
149
150 link->public.link_index = dc->link_count;
151 dc->links[dc->link_count] = link;
152 dc->link_count++;
153 }
154
155 return true;
156
157failed_alloc:
158 return false;
159}
160
161static bool stream_adjust_vmin_vmax(struct dc *dc,
162 const struct dc_stream **stream, int num_streams,
163 int vmin, int vmax)
164{
165 /* TODO: Support multiple streams */
166 struct core_dc *core_dc = DC_TO_CORE(dc);
167 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
168 int i = 0;
169 bool ret = false;
170 struct pipe_ctx *pipes;
171 unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
172
173 for (i = 0; i < MAX_PIPES; i++) {
174 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == core_stream
175 && i != underlay_idx) {
176
177 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
178 core_dc->hwss.set_drr(&pipes, 1, vmin, vmax);
179
180 /* build and update the info frame */
181 resource_build_info_frame(pipes);
182 core_dc->hwss.update_info_frame(pipes);
183
184 ret = true;
185 }
186 }
187
188 return ret;
189}
190
191
192static bool set_gamut_remap(struct dc *dc,
193 const struct dc_stream **stream, int num_streams)
194{
195 struct core_dc *core_dc = DC_TO_CORE(dc);
196 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
197 int i = 0;
198 bool ret = false;
199 struct pipe_ctx *pipes;
200
201 for (i = 0; i < MAX_PIPES; i++) {
202 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
203 == core_stream) {
204
205 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
206 core_dc->hwss.set_plane_config(core_dc, pipes,
207 &core_dc->current_context->res_ctx);
208 ret = true;
209 }
210 }
211
212 return ret;
213}
214
215/* This function is not expected to fail; proper implementation of
216 * validation will prevent it from ever being called for unsupported
217 * configurations.
218 */
219static void stream_update_scaling(
220 const struct dc *dc,
221 const struct dc_stream *dc_stream,
222 const struct rect *src,
223 const struct rect *dst)
224{
225 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
226 struct core_dc *core_dc = DC_TO_CORE(dc);
227 struct validate_context *cur_ctx = core_dc->current_context;
228 int i, j;
229
230 if (src)
231 stream->public.src = *src;
232
233 if (dst)
234 stream->public.dst = *dst;
235
236 for (i = 0; i < cur_ctx->target_count; i++) {
237 struct core_target *target = cur_ctx->targets[i];
238 struct dc_target_status *status = &cur_ctx->target_status[i];
239
240 for (j = 0; j < target->public.stream_count; j++) {
241 if (target->public.streams[j] != dc_stream)
242 continue;
243
244 if (status->surface_count)
245 if (!dc_commit_surfaces_to_target(
246 &core_dc->public,
247 status->surfaces,
248 status->surface_count,
249 &target->public))
250 /* Need to debug validation */
251 BREAK_TO_DEBUGGER();
252
253 return;
254 }
255 }
256}
257
258static bool set_backlight(struct dc *dc, unsigned int backlight_level,
259 unsigned int frame_ramp, const struct dc_stream *stream)
260{
261 struct core_dc *core_dc = DC_TO_CORE(dc);
262 int i;
263
264 if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
265 for (i = 0; i < core_dc->link_count; i++)
266 dc_link_set_backlight_level(&core_dc->links[i]->public,
267 backlight_level, frame_ramp, stream);
268 }
269
270 return true;
271
272}
273
274static bool init_dmcu_backlight_settings(struct dc *dc)
275{
276 struct core_dc *core_dc = DC_TO_CORE(dc);
277 int i;
278
279 for (i = 0; i < core_dc->link_count; i++)
280 dc_link_init_dmcu_backlight_settings
281 (&core_dc->links[i]->public);
282
283 return true;
284}
285
286
287static bool set_abm_level(struct dc *dc, unsigned int abm_level)
288{
289 struct core_dc *core_dc = DC_TO_CORE(dc);
290 int i;
291
292 for (i = 0; i < core_dc->link_count; i++)
293 dc_link_set_abm_level(&core_dc->links[i]->public,
294 abm_level);
295
296 return true;
297}
298
299static bool set_psr_enable(struct dc *dc, bool enable)
300{
301 struct core_dc *core_dc = DC_TO_CORE(dc);
302 int i;
303
304 for (i = 0; i < core_dc->link_count; i++)
305 dc_link_set_psr_enable(&core_dc->links[i]->public,
306 enable);
307
308 return true;
309}
310
311
312static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
313{
314 struct core_dc *core_dc = DC_TO_CORE(dc);
315 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
316 struct pipe_ctx *pipes;
317 int i;
318 unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
319
320 for (i = 0; i < core_dc->link_count; i++) {
321 if (core_stream->sink->link == core_dc->links[i])
322 dc_link_setup_psr(&core_dc->links[i]->public,
323 stream);
324 }
325
326 for (i = 0; i < MAX_PIPES; i++) {
327 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
328 == core_stream && i != underlay_idx) {
329 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
330 core_dc->hwss.set_static_screen_control(&pipes, 1,
331 0x182);
332 }
333 }
334
335 return true;
336}
337
338static void set_drive_settings(struct dc *dc,
339 struct link_training_settings *lt_settings)
340{
341 struct core_dc *core_dc = DC_TO_CORE(dc);
342 int i;
343
344 for (i = 0; i < core_dc->link_count; i++)
345 dc_link_dp_set_drive_settings(&core_dc->links[i]->public,
346 lt_settings);
347}
348
349static void perform_link_training(struct dc *dc,
350 struct dc_link_settings *link_setting,
351 bool skip_video_pattern)
352{
353 struct core_dc *core_dc = DC_TO_CORE(dc);
354 int i;
355
356 for (i = 0; i < core_dc->link_count; i++)
357 dc_link_dp_perform_link_training(
358 &core_dc->links[i]->public,
359 link_setting,
360 skip_video_pattern);
361}
362
363static void set_preferred_link_settings(struct dc *dc,
364 struct dc_link_settings *link_setting)
365{
366 struct core_dc *core_dc = DC_TO_CORE(dc);
367 int i;
368
369 for (i = 0; i < core_dc->link_count; i++) {
370 core_dc->links[i]->public.verified_link_cap.lane_count =
371 link_setting->lane_count;
372 core_dc->links[i]->public.verified_link_cap.link_rate =
373 link_setting->link_rate;
374 }
375}
376
377static void enable_hpd(const struct dc_link *link)
378{
379 dc_link_dp_enable_hpd(link);
380}
381
382static void disable_hpd(const struct dc_link *link)
383{
384 dc_link_dp_disable_hpd(link);
385}
386
387
388static void set_test_pattern(
389 const struct dc_link *link,
390 enum dp_test_pattern test_pattern,
391 const struct link_training_settings *p_link_settings,
392 const unsigned char *p_custom_pattern,
393 unsigned int cust_pattern_size)
394{
395 if (link != NULL)
396 dc_link_dp_set_test_pattern(
397 link,
398 test_pattern,
399 p_link_settings,
400 p_custom_pattern,
401 cust_pattern_size);
402}
403
404static void allocate_dc_stream_funcs(struct core_dc *core_dc)
405{
406 core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
407 if (core_dc->hwss.set_drr != NULL) {
408 core_dc->public.stream_funcs.adjust_vmin_vmax =
409 stream_adjust_vmin_vmax;
410 }
411
412 core_dc->public.stream_funcs.set_gamut_remap =
413 set_gamut_remap;
414
415 core_dc->public.stream_funcs.set_backlight =
416 set_backlight;
417
418 core_dc->public.stream_funcs.init_dmcu_backlight_settings =
419 init_dmcu_backlight_settings;
420
421 core_dc->public.stream_funcs.set_abm_level =
422 set_abm_level;
423
424 core_dc->public.stream_funcs.set_psr_enable =
425 set_psr_enable;
426
427 core_dc->public.stream_funcs.setup_psr =
428 setup_psr;
429
430 core_dc->public.link_funcs.set_drive_settings =
431 set_drive_settings;
432
433 core_dc->public.link_funcs.perform_link_training =
434 perform_link_training;
435
436 core_dc->public.link_funcs.set_preferred_link_settings =
437 set_preferred_link_settings;
438
439 core_dc->public.link_funcs.enable_hpd =
440 enable_hpd;
441
442 core_dc->public.link_funcs.disable_hpd =
443 disable_hpd;
444
445 core_dc->public.link_funcs.set_test_pattern =
446 set_test_pattern;
447}
448
449static void destruct(struct core_dc *dc)
450{
451 resource_validate_ctx_destruct(dc->current_context);
452
453 dm_free(dc->temp_flip_context);
454 dc->temp_flip_context = NULL;
455
456 destroy_links(dc);
457
458 dc_destroy_resource_pool(dc);
459
460 if (dc->ctx->gpio_service)
461 dal_gpio_service_destroy(&dc->ctx->gpio_service);
462
463 if (dc->ctx->i2caux)
464 dal_i2caux_destroy(&dc->ctx->i2caux);
465
466 if (dc->ctx->created_bios)
467 dal_bios_parser_destroy(&dc->ctx->dc_bios);
468
469 if (dc->ctx->logger)
470 dal_logger_destroy(&dc->ctx->logger);
471
472 dm_free(dc->current_context);
473 dc->current_context = NULL;
474
475 dm_free(dc->ctx);
476 dc->ctx = NULL;
477}
478
479static bool construct(struct core_dc *dc,
480 const struct dc_init_data *init_params)
481{
482 struct dal_logger *logger;
483 struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
484 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
485
486 if (!dc_ctx) {
487 dm_error("%s: failed to create ctx\n", __func__);
488 goto ctx_fail;
489 }
490
491 dc->current_context = dm_alloc(sizeof(*dc->current_context));
492 dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
493
494 if (!dc->current_context || !dc->temp_flip_context) {
495 dm_error("%s: failed to create validate ctx\n", __func__);
496 goto val_ctx_fail;
497 }
498
499 dc_ctx->cgs_device = init_params->cgs_device;
500 dc_ctx->driver_context = init_params->driver;
501 dc_ctx->dc = &dc->public;
502 dc_ctx->asic_id = init_params->asic_id;
503
504 /* Create logger */
505 logger = dal_logger_create(dc_ctx);
506
507 if (!logger) {
508		/* cannot use the logger here; call the base driver's error print instead */
509 dm_error("%s: failed to create Logger!\n", __func__);
510 goto logger_fail;
511 }
512 dc_ctx->logger = logger;
513 dc->ctx = dc_ctx;
514 dc->ctx->dce_environment = init_params->dce_environment;
515
516 dc_version = resource_parse_asic_id(init_params->asic_id);
517 dc->ctx->dce_version = dc_version;
518
519 /* Resource should construct all asic specific resources.
520 * This should be the only place where we need to parse the asic id
521 */
522 if (init_params->vbios_override)
523 dc_ctx->dc_bios = init_params->vbios_override;
524 else {
525 /* Create BIOS parser */
526 struct bp_init_data bp_init_data;
527 bp_init_data.ctx = dc_ctx;
528 bp_init_data.bios = init_params->asic_id.atombios_base_address;
529
530 dc_ctx->dc_bios = dal_bios_parser_create(
531 &bp_init_data, dc_version);
532
533 if (!dc_ctx->dc_bios) {
534 ASSERT_CRITICAL(false);
535 goto bios_fail;
536 }
537
538 dc_ctx->created_bios = true;
539 }
540
541 /* Create I2C AUX */
542 dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
543
544 if (!dc_ctx->i2caux) {
545 ASSERT_CRITICAL(false);
546 goto failed_to_create_i2caux;
547 }
548
549 /* Create GPIO service */
550 dc_ctx->gpio_service = dal_gpio_service_create(
551 dc_version,
552 dc_ctx->dce_environment,
553 dc_ctx);
554
555 if (!dc_ctx->gpio_service) {
556 ASSERT_CRITICAL(false);
557 goto gpio_fail;
558 }
559
560 dc->res_pool = dc_create_resource_pool(
561 dc,
562 init_params->num_virtual_links,
563 dc_version,
564 init_params->asic_id);
565 if (!dc->res_pool)
566 goto create_resource_fail;
567
568 if (!create_links(dc, init_params->num_virtual_links))
569 goto create_links_fail;
570
571 allocate_dc_stream_funcs(dc);
572
573 return true;
574
575 /**** error handling here ****/
576create_links_fail:
577create_resource_fail:
578gpio_fail:
579failed_to_create_i2caux:
580bios_fail:
581logger_fail:
582val_ctx_fail:
583ctx_fail:
584 destruct(dc);
585 return false;
586}
587
588/*
589void ProgramPixelDurationV(unsigned int pixelClockInKHz )
590{
591 fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
592 unsigned int pixDurationInPico = round(pixel_duration);
593
594 DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
595
596 arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
597 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
598 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
599
600 arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
601 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
602 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
603
604 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
605 WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
606
607 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
608 WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
609}
610*/
611
612/*******************************************************************************
613 * Public functions
614 ******************************************************************************/
615
616struct dc *dc_create(const struct dc_init_data *init_params)
617 {
618 struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
619 unsigned int full_pipe_count;
620
621	if (!core_dc)
622 goto alloc_fail;
623
624	if (!construct(core_dc, init_params))
625 goto construct_fail;
626
627 /*TODO: separate HW and SW initialization*/
628 core_dc->hwss.init_hw(core_dc);
629
630 full_pipe_count = core_dc->res_pool->pipe_count;
631 if (core_dc->res_pool->underlay_pipe_index >= 0)
632 full_pipe_count--;
633 core_dc->public.caps.max_targets = dm_min(
634 full_pipe_count,
635 core_dc->res_pool->stream_enc_count);
636
637 core_dc->public.caps.max_links = core_dc->link_count;
638 core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
639
640 core_dc->public.config = init_params->flags;
641
642 dm_logger_write(core_dc->ctx->logger, LOG_DC,
643 "Display Core initialized\n");
644
645
646 /* TODO: missing feature to be enabled */
647 core_dc->public.debug.disable_dfs_bypass = true;
648
649 return &core_dc->public;
650
651construct_fail:
652 dm_free(core_dc);
653
654alloc_fail:
655 return NULL;
656}
657
658void dc_destroy(struct dc **dc)
659{
660 struct core_dc *core_dc = DC_TO_CORE(*dc);
661 destruct(core_dc);
662 dm_free(core_dc);
663 *dc = NULL;
664}
665
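/*
 * Illustrative usage sketch (not taken from this patch): how a display
 * manager might bring a DC instance up and tear it down.  Only the
 * dc_init_data fields consumed by construct() above are shown; the
 * platform handles (dm_ctx, cgs_dev, asic_id) are placeholders.
 *
 *	struct dc_init_data init = { 0 };
 *	struct dc *dc;
 *
 *	init.driver = dm_ctx;
 *	init.cgs_device = cgs_dev;
 *	init.asic_id = asic_id;
 *	init.num_virtual_links = 0;
 *
 *	dc = dc_create(&init);
 *	if (!dc)
 *		return false;
 *	...
 *	dc_destroy(&dc);
 */
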
666static bool is_validation_required(
667 const struct core_dc *dc,
668 const struct dc_validation_set set[],
669 int set_count)
670{
671 const struct validate_context *context = dc->current_context;
672 int i, j;
673
674 if (context->target_count != set_count)
675 return true;
676
677 for (i = 0; i < set_count; i++) {
678
679 if (set[i].surface_count != context->target_status[i].surface_count)
680 return true;
681 if (!is_target_unchanged(DC_TARGET_TO_CORE(set[i].target), context->targets[i]))
682 return true;
683
684 for (j = 0; j < set[i].surface_count; j++) {
685 struct dc_surface temp_surf = { 0 };
686
687 temp_surf = *context->target_status[i].surfaces[j];
688 temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
689 temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
690 temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
691
692 if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
693 return true;
694 }
695 }
696
697 return false;
698}
699
700bool dc_validate_resources(
701 const struct dc *dc,
702 const struct dc_validation_set set[],
703 uint8_t set_count)
704{
705 struct core_dc *core_dc = DC_TO_CORE(dc);
706 enum dc_status result = DC_ERROR_UNEXPECTED;
707 struct validate_context *context;
708
709 if (!is_validation_required(core_dc, set, set_count))
710 return true;
711
712 context = dm_alloc(sizeof(struct validate_context));
713	if (context == NULL)
714 goto context_alloc_fail;
715
716 result = core_dc->res_pool->funcs->validate_with_context(
717 core_dc, set, set_count, context);
718
719 resource_validate_ctx_destruct(context);
720 dm_free(context);
721
722context_alloc_fail:
723 if (result != DC_OK) {
724 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
725 "%s:resource validation failed, dc_status:%d\n",
726 __func__,
727 result);
728 }
729
730 return (result == DC_OK);
731
732}
733
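/*
 * Minimal sketch of a dc_validate_resources() call, assuming 'target' was
 * created by the display manager beforehand.  Only the dc_validation_set
 * fields referenced in is_validation_required() above are used.
 *
 *	struct dc_validation_set set = { 0 };
 *
 *	set.target = target;
 *	set.surface_count = 0;
 *
 *	if (!dc_validate_resources(dc, &set, 1))
 *		return false;   (the requested configuration cannot be supported)
 */
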
734bool dc_validate_guaranteed(
735 const struct dc *dc,
736 const struct dc_target *dc_target)
737{
738 struct core_dc *core_dc = DC_TO_CORE(dc);
739 enum dc_status result = DC_ERROR_UNEXPECTED;
740 struct validate_context *context;
741
742 context = dm_alloc(sizeof(struct validate_context));
743 if (context == NULL)
744 goto context_alloc_fail;
745
746 result = core_dc->res_pool->funcs->validate_guaranteed(
747 core_dc, dc_target, context);
748
749 resource_validate_ctx_destruct(context);
750 dm_free(context);
751
752context_alloc_fail:
753 if (result != DC_OK) {
754 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
755 "%s:guaranteed validation failed, dc_status:%d\n",
756 __func__,
757 result);
758 }
759
760 return (result == DC_OK);
761}
762
763static void program_timing_sync(
764 struct core_dc *core_dc,
765 struct validate_context *ctx)
766{
767 int i, j;
768 int group_index = 0;
769 int pipe_count = ctx->res_ctx.pool->pipe_count;
770 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
771
772 for (i = 0; i < pipe_count; i++) {
773 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
774 continue;
775
776 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
777 }
778
779 for (i = 0; i < pipe_count; i++) {
780 int group_size = 1;
781 struct pipe_ctx *pipe_set[MAX_PIPES];
782
783 if (!unsynced_pipes[i])
784 continue;
785
786 pipe_set[0] = unsynced_pipes[i];
787 unsynced_pipes[i] = NULL;
788
789 /* Add tg to the set, search rest of the tg's for ones with
790 * same timing, add all tgs with same timing to the group
791 */
792 for (j = i + 1; j < pipe_count; j++) {
793 if (!unsynced_pipes[j])
794 continue;
795
796 if (resource_are_streams_timing_synchronizable(
797 unsynced_pipes[j]->stream,
798 pipe_set[0]->stream)) {
799 pipe_set[group_size] = unsynced_pipes[j];
800 unsynced_pipes[j] = NULL;
801 group_size++;
802 }
803 }
804
805 /* set first unblanked pipe as master */
806 for (j = 0; j < group_size; j++) {
807 struct pipe_ctx *temp;
808
809 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
810 if (j == 0)
811 break;
812
813 temp = pipe_set[0];
814 pipe_set[0] = pipe_set[j];
815 pipe_set[j] = temp;
816 break;
817 }
818 }
819
820 /* remove any other unblanked pipes as they have already been synced */
821 for (j = j + 1; j < group_size; j++) {
822 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
823 group_size--;
824 pipe_set[j] = pipe_set[group_size];
825 j--;
826 }
827 }
828
829 if (group_size > 1) {
830 core_dc->hwss.enable_timing_synchronization(
831 core_dc, group_index, group_size, pipe_set);
832 group_index++;
833 }
834 }
835}
836
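/*
 * Worked example for program_timing_sync(): with three active pipes where
 * pipes 0 and 2 drive streams whose timings are reported as synchronizable
 * by resource_are_streams_timing_synchronizable() and pipe 1 drives an
 * unrelated timing, pipes {0, 2} form one group and are passed to
 * enable_timing_synchronization() (with the unblanked pipe, if any, moved
 * to index 0 as master); pipe 1 ends up in a group of size 1 and nothing
 * is programmed for it.
 */
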
837static bool targets_changed(
838 struct core_dc *dc,
839 struct dc_target *targets[],
840 uint8_t target_count)
841{
842 uint8_t i;
843
844 if (target_count != dc->current_context->target_count)
845 return true;
846
847 for (i = 0; i < dc->current_context->target_count; i++) {
848 if (&dc->current_context->targets[i]->public != targets[i])
849 return true;
850 }
851
852 return false;
853}
854
855static void fill_display_configs(
856 const struct validate_context *context,
857 struct dm_pp_display_configuration *pp_display_cfg)
858{
859 uint8_t i, j, k;
860 uint8_t num_cfgs = 0;
861
862 for (i = 0; i < context->target_count; i++) {
863 const struct core_target *target = context->targets[i];
864
865 for (j = 0; j < target->public.stream_count; j++) {
866 const struct core_stream *stream =
867 DC_STREAM_TO_CORE(target->public.streams[j]);
868 struct dm_pp_single_disp_config *cfg =
869 &pp_display_cfg->disp_configs[num_cfgs];
870 const struct pipe_ctx *pipe_ctx = NULL;
871
872 for (k = 0; k < MAX_PIPES; k++)
873 if (stream ==
874 context->res_ctx.pipe_ctx[k].stream) {
875 pipe_ctx = &context->res_ctx.pipe_ctx[k];
876 break;
877 }
878
879 ASSERT(pipe_ctx != NULL);
880
881 num_cfgs++;
882 cfg->signal = pipe_ctx->stream->signal;
883 cfg->pipe_idx = pipe_ctx->pipe_idx;
884 cfg->src_height = stream->public.src.height;
885 cfg->src_width = stream->public.src.width;
886 cfg->ddi_channel_mapping =
887 stream->sink->link->ddi_channel_mapping.raw;
888 cfg->transmitter =
889 stream->sink->link->link_enc->transmitter;
890 cfg->link_settings.lane_count = stream->sink->link->public.cur_link_settings.lane_count;
891 cfg->link_settings.link_rate = stream->sink->link->public.cur_link_settings.link_rate;
892 cfg->link_settings.link_spread = stream->sink->link->public.cur_link_settings.link_spread;
893 cfg->sym_clock = stream->phy_pix_clk;
894			/* Round v_refresh */
895 cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000;
896 cfg->v_refresh /= stream->public.timing.h_total;
897 cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2)
898 / stream->public.timing.v_total;
899 }
900 }
901 pp_display_cfg->display_count = num_cfgs;
902}
903
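/*
 * Worked example for the v_refresh rounding above, using the standard CEA
 * 1080p60 timing as an assumed input: pix_clk_khz = 148500, h_total = 2200,
 * v_total = 1125.  Then
 *	148500 * 1000 / 2200      = 67500   (line rate in Hz)
 *	(67500 + 1125 / 2) / 1125 = 60      (refresh rate in Hz)
 * so cfg->v_refresh ends up as 60.
 */
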
904static uint32_t get_min_vblank_time_us(const struct validate_context *context)
905{
906 uint8_t i, j;
907 uint32_t min_vertical_blank_time = -1;
908
909 for (i = 0; i < context->target_count; i++) {
910 const struct core_target *target = context->targets[i];
911
912 for (j = 0; j < target->public.stream_count; j++) {
913 const struct dc_stream *stream =
914 target->public.streams[j];
915 uint32_t vertical_blank_in_pixels = 0;
916 uint32_t vertical_blank_time = 0;
917
918 vertical_blank_in_pixels = stream->timing.h_total *
919 (stream->timing.v_total
920 - stream->timing.v_addressable);
921 vertical_blank_time = vertical_blank_in_pixels
922 * 1000 / stream->timing.pix_clk_khz;
923 if (min_vertical_blank_time > vertical_blank_time)
924 min_vertical_blank_time = vertical_blank_time;
925 }
926 }
927 return min_vertical_blank_time;
928}
929
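/*
 * Worked example for get_min_vblank_time_us(), again assuming the CEA
 * 1080p60 timing (h_total = 2200, v_total = 1125, v_addressable = 1080,
 * pix_clk_khz = 148500):
 *	vertical_blank_in_pixels = 2200 * (1125 - 1080)  = 99000
 *	vertical_blank_time      = 99000 * 1000 / 148500 = 666 us
 * The minimum over all streams is what pplib gets as the available mclk
 * switch window.
 */
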
930static int determine_sclk_from_bounding_box(
931 const struct core_dc *dc,
932 int required_sclk)
933{
934 int i;
935
936 /*
937 * Some asics do not give us sclk levels, so we just report the actual
938 * required sclk
939 */
940 if (dc->sclk_lvls.num_levels == 0)
941 return required_sclk;
942
943 for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
944 if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
945 return dc->sclk_lvls.clocks_in_khz[i];
946 }
947 /*
948	 * Even the maximum level could not satisfy the requirement; this
949	 * is unexpected at this stage and should have been caught at
950	 * validation time.
951 */
952 ASSERT(0);
953 return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
954}
955
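/*
 * Example of the bounding-box lookup above, with an assumed level table:
 * if sclk_lvls.clocks_in_khz = { 300000, 600000, 800000 } and
 * required_sclk = 450000, the first level that satisfies the request is
 * 600000, which is what determine_sclk_from_bounding_box() returns.
 */
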
956void pplib_apply_display_requirements(
957 struct core_dc *dc,
958 const struct validate_context *context,
959 struct dm_pp_display_configuration *pp_display_cfg)
960{
961 pp_display_cfg->all_displays_in_sync =
962 context->bw_results.all_displays_in_sync;
963 pp_display_cfg->nb_pstate_switch_disable =
964 context->bw_results.nbp_state_change_enable == false;
965 pp_display_cfg->cpu_cc6_disable =
966 context->bw_results.cpuc_state_change_enable == false;
967 pp_display_cfg->cpu_pstate_disable =
968 context->bw_results.cpup_state_change_enable == false;
969 pp_display_cfg->cpu_pstate_separation_time =
970 context->bw_results.blackout_recovery_time_us;
971
972 pp_display_cfg->min_memory_clock_khz = context->bw_results.required_yclk
973 / MEMORY_TYPE_MULTIPLIER;
974
975 pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
976 dc,
977 context->bw_results.required_sclk);
978
979 pp_display_cfg->min_engine_clock_deep_sleep_khz
980 = context->bw_results.required_sclk_deep_sleep;
981
982 pp_display_cfg->avail_mclk_switch_time_us =
983 get_min_vblank_time_us(context);
984 /* TODO: dce11.2*/
985 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
986
987 pp_display_cfg->disp_clk_khz = context->bw_results.dispclk_khz;
988
989 fill_display_configs(context, pp_display_cfg);
990
991 /* TODO: is this still applicable?*/
992 if (pp_display_cfg->display_count == 1) {
993 const struct dc_crtc_timing *timing =
994 &context->targets[0]->public.streams[0]->timing;
995
996 pp_display_cfg->crtc_index =
997 pp_display_cfg->disp_configs[0].pipe_idx;
998 pp_display_cfg->line_time_in_us = timing->h_total * 1000
999 / timing->pix_clk_khz;
1000 }
1001
1002 if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
1003 struct dm_pp_display_configuration)) != 0)
1004 dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
1005
1006 dc->prev_display_config = *pp_display_cfg;
1007
1008}
1009
1010bool dc_commit_targets(
1011 struct dc *dc,
1012 struct dc_target *targets[],
1013 uint8_t target_count)
1014{
1015 struct core_dc *core_dc = DC_TO_CORE(dc);
1016 struct dc_bios *dcb = core_dc->ctx->dc_bios;
1017 enum dc_status result = DC_ERROR_UNEXPECTED;
1018 struct validate_context *context;
1019 struct dc_validation_set set[MAX_TARGETS];
1020 int i, j, k;
1021
1022	if (!targets_changed(core_dc, targets, target_count))
1023		return true;
1024
1025 dm_logger_write(core_dc->ctx->logger, LOG_DC,
1026 "%s: %d targets\n",
1027 __func__,
1028 target_count);
1029
1030 for (i = 0; i < target_count; i++) {
1031 struct dc_target *target = targets[i];
1032
1033 dc_target_log(target,
1034 core_dc->ctx->logger,
1035 LOG_DC);
1036
1037 set[i].target = targets[i];
1038 set[i].surface_count = 0;
1039
1040 }
1041
1042 context = dm_alloc(sizeof(struct validate_context));
1043 if (context == NULL)
1044 goto context_alloc_fail;
1045
1046 result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, target_count, context);
1047	if (result != DC_OK) {
1048 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
1049 "%s: Context validation failed! dc_status:%d\n",
1050 __func__,
1051 result);
1052 BREAK_TO_DEBUGGER();
1053 resource_validate_ctx_destruct(context);
1054 goto fail;
1055 }
1056
1057 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1058 core_dc->hwss.enable_accelerated_mode(core_dc);
1059 }
1060
1061 if (result == DC_OK) {
1062 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
1063 }
1064
1065 program_timing_sync(core_dc, context);
1066
1067 for (i = 0; i < context->target_count; i++) {
1068 struct dc_target *dc_target = &context->targets[i]->public;
1069 struct core_sink *sink = DC_SINK_TO_CORE(dc_target->streams[0]->sink);
1070
1071 for (j = 0; j < context->target_status[i].surface_count; j++) {
1072 const struct dc_surface *dc_surface =
1073 context->target_status[i].surfaces[j];
1074
1075 for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
1076 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];
1077
1078 if (dc_surface != &pipe->surface->public
1079 || !dc_surface->visible)
1080 continue;
1081
1082 pipe->tg->funcs->set_blank(pipe->tg, false);
1083 }
1084 }
1085
1086 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
1087 dc_target->streams[0]->timing.h_addressable,
1088 dc_target->streams[0]->timing.v_addressable,
1089 dc_target->streams[0]->timing.h_total,
1090 dc_target->streams[0]->timing.v_total,
1091 dc_target->streams[0]->timing.pix_clk_khz);
1092 }
1093
1094 pplib_apply_display_requirements(core_dc,
1095 context, &context->pp_display_cfg);
1096
1097 resource_validate_ctx_destruct(core_dc->current_context);
1098
1099 dm_free(core_dc->current_context);
1100 core_dc->current_context = context;
1101
1102 return (result == DC_OK);
1103
1104fail:
1105 dm_free(context);
1106
1107context_alloc_fail:
1108 return (result == DC_OK);
1109}
1110
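/*
 * Minimal caller sketch for dc_commit_targets(), assuming 'target' was
 * created by the display manager beforehand:
 *
 *	struct dc_target *targets[1] = { target };
 *
 *	if (!dc_commit_targets(dc, targets, 1))
 *		return false;   (validation or HW programming failed)
 *
 * Committing with (NULL, 0) releases all targets; dc_set_power_state()
 * below relies on exactly that when powering down.
 */
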
1111bool dc_pre_update_surfaces_to_target(
1112 struct dc *dc,
1113 const struct dc_surface *const *new_surfaces,
1114 uint8_t new_surface_count,
1115 struct dc_target *dc_target)
1116{
1117 int i, j;
1118 struct core_dc *core_dc = DC_TO_CORE(dc);
1119 uint32_t prev_disp_clk = core_dc->current_context->bw_results.dispclk_khz;
1120 struct core_target *target = DC_TARGET_TO_CORE(dc_target);
1121 struct dc_target_status *target_status = NULL;
1122 struct validate_context *context;
1123 struct validate_context *temp_context;
1124 bool ret = true;
1125
1126 pre_surface_trace(dc, new_surfaces, new_surface_count);
1127
1128 if (core_dc->current_context->target_count == 0)
1129 return false;
1130
1131	/* Cannot commit surface to a target that is not committed */
1132 for (i = 0; i < core_dc->current_context->target_count; i++)
1133 if (target == core_dc->current_context->targets[i])
1134 break;
1135
1136 if (i == core_dc->current_context->target_count)
1137 return false;
1138
1139 target_status = &core_dc->current_context->target_status[i];
1140
1141 if (new_surface_count == target_status->surface_count) {
1142 bool skip_pre = true;
1143
1144 for (i = 0; i < target_status->surface_count; i++) {
1145 struct dc_surface temp_surf = { 0 };
1146
1147 temp_surf = *target_status->surfaces[i];
1148 temp_surf.clip_rect = new_surfaces[i]->clip_rect;
1149 temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
1150 temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
1151
1152 if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
1153 skip_pre = false;
1154 break;
1155 }
1156 }
1157
1158 if (skip_pre)
1159 return true;
1160 }
1161
1162 context = dm_alloc(sizeof(struct validate_context));
1163
1164 if (!context) {
1165 dm_error("%s: failed to create validate ctx\n", __func__);
1166 ret = false;
1167 goto val_ctx_fail;
1168 }
1169
1170 resource_validate_ctx_copy_construct(core_dc->current_context, context);
1171
1172 dm_logger_write(core_dc->ctx->logger, LOG_DC,
1173		"%s: commit %d surfaces to target %p\n",
1174 __func__,
1175 new_surface_count,
1176 dc_target);
1177
1178 if (!resource_attach_surfaces_to_context(
1179 new_surfaces, new_surface_count, dc_target, context)) {
1180 BREAK_TO_DEBUGGER();
1181 ret = false;
1182 goto unexpected_fail;
1183 }
1184
1185 for (i = 0; i < new_surface_count; i++)
1186 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1187 if (context->res_ctx.pipe_ctx[j].surface !=
1188 DC_SURFACE_TO_CORE(new_surfaces[i]))
1189 continue;
1190
1191 resource_build_scaling_params(
1192 new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
1193
1194 if (dc->debug.surface_visual_confirm) {
1195 context->res_ctx.pipe_ctx[j].scl_data.recout.height -= 2;
1196 context->res_ctx.pipe_ctx[j].scl_data.recout.width -= 2;
1197 }
1198 }
1199
1200 if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, context) != DC_OK) {
1201 BREAK_TO_DEBUGGER();
1202 ret = false;
1203 goto unexpected_fail;
1204 }
1205
1206 if (core_dc->res_pool->funcs->apply_clk_constraints) {
1207 temp_context = core_dc->res_pool->funcs->apply_clk_constraints(
1208 core_dc,
1209 context);
1210 if (!temp_context) {
1211			dm_error("%s: failed to apply clk constraints\n", __func__);
1212 ret = false;
1213 goto unexpected_fail;
1214 }
1215 resource_validate_ctx_destruct(context);
1216 dm_free(context);
1217 context = temp_context;
1218 }
1219
1220 if (prev_disp_clk < context->bw_results.dispclk_khz) {
1221 pplib_apply_display_requirements(core_dc, context,
1222 &context->pp_display_cfg);
1223 core_dc->hwss.set_display_clock(context);
1224 core_dc->current_context->bw_results.dispclk_khz =
1225 context->bw_results.dispclk_khz;
1226 }
1227
1228 for (i = 0; i < new_surface_count; i++)
1229 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1230 if (context->res_ctx.pipe_ctx[j].surface !=
1231 DC_SURFACE_TO_CORE(new_surfaces[i]))
1232 continue;
1233
1234 core_dc->hwss.prepare_pipe_for_context(
1235 core_dc,
1236 &context->res_ctx.pipe_ctx[j],
1237 context);
1238
1239 if (!new_surfaces[i]->visible)
1240 context->res_ctx.pipe_ctx[j].tg->funcs->set_blank(
1241 context->res_ctx.pipe_ctx[j].tg, true);
1242 }
1243
1244unexpected_fail:
1245 resource_validate_ctx_destruct(context);
1246 dm_free(context);
1247val_ctx_fail:
1248
1249 return ret;
1250}
1251
1252bool dc_post_update_surfaces_to_target(struct dc *dc)
1253{
1254 struct core_dc *core_dc = DC_TO_CORE(dc);
1255 int i;
1256
1257 post_surface_trace(dc);
1258
1259 for (i = 0; i < core_dc->current_context->res_ctx.pool->pipe_count; i++)
1260 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == NULL)
1261 core_dc->hwss.power_down_front_end(
1262 core_dc, &core_dc->current_context->res_ctx.pipe_ctx[i]);
1263
1264 if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, core_dc->current_context)
1265 != DC_OK) {
1266 BREAK_TO_DEBUGGER();
1267 return false;
1268 }
1269
1270 core_dc->hwss.set_bandwidth(core_dc);
1271
1272 pplib_apply_display_requirements(
1273 core_dc, core_dc->current_context, &core_dc->current_context->pp_display_cfg);
1274
1275 return true;
1276}
1277
1278bool dc_commit_surfaces_to_target(
1279 struct dc *dc,
1280 const struct dc_surface **new_surfaces,
1281 uint8_t new_surface_count,
1282 struct dc_target *dc_target)
1283{
1284 struct dc_surface_update updates[MAX_SURFACES] = { 0 };
1285 struct dc_flip_addrs flip_addr[MAX_SURFACES] = { 0 };
1286 struct dc_plane_info plane_info[MAX_SURFACES] = { 0 };
1287 struct dc_scaling_info scaling_info[MAX_SURFACES] = { 0 };
1288 int i;
1289
1290 if (!dc_pre_update_surfaces_to_target(
1291 dc, new_surfaces, new_surface_count, dc_target))
1292 return false;
1293
1294 for (i = 0; i < new_surface_count; i++) {
1295 updates[i].surface = new_surfaces[i];
1296 updates[i].gamma = (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1297
1298 flip_addr[i].address = new_surfaces[i]->address;
1299 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1300 plane_info[i].color_space = new_surfaces[i]->color_space;
1301 plane_info[i].format = new_surfaces[i]->format;
1302 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1303 plane_info[i].rotation = new_surfaces[i]->rotation;
1304 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1305 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1306 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1307 plane_info[i].visible = new_surfaces[i]->visible;
1308 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1309 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1310 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1311 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1312
1313 updates[i].flip_addr = &flip_addr[i];
1314 updates[i].plane_info = &plane_info[i];
1315 updates[i].scaling_info = &scaling_info[i];
1316 }
1317 dc_update_surfaces_for_target(dc, updates, new_surface_count, dc_target);
1318
1319 return dc_post_update_surfaces_to_target(dc);
1320}
1321
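/*
 * Minimal caller sketch for dc_commit_surfaces_to_target(), assuming
 * 'surface' was created by the display manager and 'target' is already
 * committed:
 *
 *	const struct dc_surface *surfaces[1] = { surface };
 *
 *	if (!dc_commit_surfaces_to_target(dc, surfaces, 1, target))
 *		return false;
 *
 * Internally this is the pre-update / update / post-update sequence built
 * from the three functions above.
 */
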
1322void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *updates,
1323 int surface_count, struct dc_target *dc_target)
1324{
1325 struct core_dc *core_dc = DC_TO_CORE(dc);
1326 struct validate_context *context = core_dc->temp_flip_context;
1327 int i, j;
1328 bool is_new_pipe_surface[MAX_SURFACES];
1329 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1330
1331 update_surface_trace(dc, updates, surface_count);
1332
1333 *context = *core_dc->current_context;
1334
1335 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
1336 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
1337
1338 if (cur_pipe->top_pipe)
1339 cur_pipe->top_pipe =
1340 &context->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1341
1342 if (cur_pipe->bottom_pipe)
1343 cur_pipe->bottom_pipe =
1344 &context->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1345 }
1346
1347 for (j = 0; j < MAX_SURFACES; j++)
1348 is_new_pipe_surface[j] = true;
1349
1350 for (i = 0 ; i < surface_count; i++) {
1351 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1352
1353 new_surfaces[i] = updates[i].surface;
1354 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1355 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1356
1357 if (surface == pipe_ctx->surface)
1358				is_new_pipe_surface[j] = false;
1359 }
1360 }
1361
1362 if (dc_target) {
1363 struct core_target *target = DC_TARGET_TO_CORE(dc_target);
1364
1365 if (core_dc->current_context->target_count == 0)
1366 return;
1367
1368		/* Cannot commit surface to a target that is not committed */
1369 for (i = 0; i < core_dc->current_context->target_count; i++)
1370 if (target == core_dc->current_context->targets[i])
1371 break;
1372 if (i == core_dc->current_context->target_count)
1373 return;
1374
1375 if (!resource_attach_surfaces_to_context(
1376 new_surfaces, surface_count, dc_target, context)) {
1377 BREAK_TO_DEBUGGER();
1378 return;
1379 }
1380 }
1381
1382 for (i = 0; i < surface_count; i++) {
1383 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1384
1385 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1386 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1387
1388 if (pipe_ctx->surface != surface)
1389 continue;
1390
1391 if (updates[i].flip_addr) {
1392 surface->public.address = updates[i].flip_addr->address;
1393 surface->public.flip_immediate =
1394 updates[i].flip_addr->flip_immediate;
1395 }
1396
1397 if (updates[i].plane_info || updates[i].scaling_info
1398 || is_new_pipe_surface[j]) {
1399
1400 if (updates[i].plane_info) {
1401 surface->public.color_space =
1402 updates[i].plane_info->color_space;
1403 surface->public.format =
1404 updates[i].plane_info->format;
1405 surface->public.plane_size =
1406 updates[i].plane_info->plane_size;
1407 surface->public.rotation =
1408 updates[i].plane_info->rotation;
1409 surface->public.horizontal_mirror =
1410 updates[i].plane_info->horizontal_mirror;
1411 surface->public.stereo_format =
1412 updates[i].plane_info->stereo_format;
1413 surface->public.tiling_info =
1414 updates[i].plane_info->tiling_info;
1415 surface->public.visible =
1416 updates[i].plane_info->visible;
1417 }
1418
1419 if (updates[i].scaling_info) {
1420 surface->public.scaling_quality =
1421 updates[i].scaling_info->scaling_quality;
1422 surface->public.dst_rect =
1423 updates[i].scaling_info->dst_rect;
1424 surface->public.src_rect =
1425 updates[i].scaling_info->src_rect;
1426 surface->public.clip_rect =
1427 updates[i].scaling_info->clip_rect;
1428 }
1429
1430 resource_build_scaling_params(updates[i].surface, pipe_ctx);
1431 if (dc->debug.surface_visual_confirm) {
1432 pipe_ctx->scl_data.recout.height -= 2;
1433 pipe_ctx->scl_data.recout.width -= 2;
1434 }
1435 }
1436 }
1437 }
1438
1439 for (i = 0; i < surface_count; i++) {
1440 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1441 bool apply_ctx = false;
1442
1443 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1444 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1445
1446 if (pipe_ctx->surface != surface)
1447 continue;
1448
1449 if (updates[i].flip_addr) {
1450 core_dc->hwss.pipe_control_lock(
1451 core_dc->hwseq,
1452 pipe_ctx->pipe_idx,
1453 PIPE_LOCK_CONTROL_SURFACE,
1454 true);
1455 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1456 }
1457
1458 if (updates[i].plane_info || updates[i].scaling_info
1459 || is_new_pipe_surface[j]) {
1460
1461 apply_ctx = true;
1462
1463 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1464 core_dc->hwss.pipe_control_lock(
1465 core_dc->hwseq,
1466 pipe_ctx->pipe_idx,
1467 PIPE_LOCK_CONTROL_SURFACE |
1468 PIPE_LOCK_CONTROL_GRAPHICS |
1469 PIPE_LOCK_CONTROL_SCL |
1470 PIPE_LOCK_CONTROL_BLENDER |
1471 PIPE_LOCK_CONTROL_MODE,
1472 true);
1473 }
1474 }
1475
1476 if (updates[i].gamma)
1477 core_dc->hwss.prepare_pipe_for_context(
1478 core_dc, pipe_ctx, context);
1479 }
1480 if (apply_ctx)
1481 core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
1482 }
1483
1484 for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
1485 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1486
1487 for (j = 0; j < surface_count; j++) {
1488 if (updates[j].surface == &pipe_ctx->surface->public) {
1489 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1490 core_dc->hwss.pipe_control_lock(
1491 core_dc->hwseq,
1492 pipe_ctx->pipe_idx,
1493 PIPE_LOCK_CONTROL_GRAPHICS |
1494 PIPE_LOCK_CONTROL_SCL |
1495 PIPE_LOCK_CONTROL_BLENDER |
1496 PIPE_LOCK_CONTROL_SURFACE,
1497 false);
1498 }
1499 break;
1500 }
1501 }
1502 }
1503
1504 core_dc->temp_flip_context = core_dc->current_context;
1505 core_dc->current_context = context;
1506}
1507
1508uint8_t dc_get_current_target_count(const struct dc *dc)
1509{
1510 struct core_dc *core_dc = DC_TO_CORE(dc);
1511 return core_dc->current_context->target_count;
1512}
1513
1514struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i)
1515{
1516 struct core_dc *core_dc = DC_TO_CORE(dc);
1517 if (i < core_dc->current_context->target_count)
1518 return &(core_dc->current_context->targets[i]->public);
1519 return NULL;
1520}
1521
1522const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1523{
1524 struct core_dc *core_dc = DC_TO_CORE(dc);
1525 return &core_dc->links[link_index]->public;
1526}
1527
1528const struct graphics_object_id dc_get_link_id_at_index(
1529 struct dc *dc, uint32_t link_index)
1530{
1531 struct core_dc *core_dc = DC_TO_CORE(dc);
1532 return core_dc->links[link_index]->link_id;
1533}
1534
1535const struct ddc_service *dc_get_ddc_at_index(
1536 struct dc *dc, uint32_t link_index)
1537{
1538 struct core_dc *core_dc = DC_TO_CORE(dc);
1539 return core_dc->links[link_index]->ddc;
1540}
1541
1542enum dc_irq_source dc_get_hpd_irq_source_at_index(
1543 struct dc *dc, uint32_t link_index)
1544{
1545 struct core_dc *core_dc = DC_TO_CORE(dc);
1546 return core_dc->links[link_index]->public.irq_source_hpd;
1547}
1548
1549const struct audio **dc_get_audios(struct dc *dc)
1550{
1551 struct core_dc *core_dc = DC_TO_CORE(dc);
1552 return (const struct audio **)core_dc->res_pool->audios;
1553}
1554
1555void dc_flip_surface_addrs(
1556 struct dc *dc,
1557 const struct dc_surface *const surfaces[],
1558 struct dc_flip_addrs flip_addrs[],
1559 uint32_t count)
1560{
1561 struct core_dc *core_dc = DC_TO_CORE(dc);
1562 int i, j;
1563
1564 for (i = 0; i < count; i++) {
1565 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1566
1567 surface->public.address = flip_addrs[i].address;
1568 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1569
1570 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1571 struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1572
1573 if (pipe_ctx->surface != surface)
1574 continue;
1575
1576 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1577 }
1578 }
1579}
1580
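/*
 * Minimal flip sketch, assuming 'surface' is a committed surface and
 * 'new_address' holds the next frame buffer address:
 *
 *	const struct dc_surface *surfaces[1] = { surface };
 *	struct dc_flip_addrs flip[1] = { { 0 } };
 *
 *	flip[0].address = new_address;
 *	flip[0].flip_immediate = false;
 *
 *	dc_flip_surface_addrs(dc, surfaces, flip, 1);
 */
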
1581enum dc_irq_source dc_interrupt_to_irq_source(
1582 struct dc *dc,
1583 uint32_t src_id,
1584 uint32_t ext_id)
1585{
1586 struct core_dc *core_dc = DC_TO_CORE(dc);
1587 return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1588}
1589
1590void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1591{
1592 struct core_dc *core_dc = DC_TO_CORE(dc);
1593 dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1594}
1595
1596void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1597{
1598 struct core_dc *core_dc = DC_TO_CORE(dc);
1599 dal_irq_service_ack(core_dc->res_pool->irqs, src);
1600}
1601
1602void dc_set_power_state(
1603 struct dc *dc,
1604 enum dc_acpi_cm_power_state power_state,
1605 enum dc_video_power_state video_power_state)
1606{
1607 struct core_dc *core_dc = DC_TO_CORE(dc);
1608
1609 core_dc->previous_power_state = core_dc->current_power_state;
1610 core_dc->current_power_state = video_power_state;
1611
1612 switch (power_state) {
1613 case DC_ACPI_CM_POWER_STATE_D0:
1614 core_dc->hwss.init_hw(core_dc);
1615 break;
1616 default:
1617 /* NULL means "reset/release all DC targets" */
1618 dc_commit_targets(dc, NULL, 0);
1619
1620 core_dc->hwss.power_down(core_dc);
1621
1622 /* Zero out the current context so that on resume we start with
1623 * clean state, and dc hw programming optimizations will not
1624 * cause any trouble.
1625 */
1626 memset(core_dc->current_context, 0,
1627 sizeof(*core_dc->current_context));
1628
1629 core_dc->current_context->res_ctx.pool = core_dc->res_pool;
1630
1631 break;
1632 }
1633
1634}
1635
1636void dc_resume(const struct dc *dc)
1637{
1638 struct core_dc *core_dc = DC_TO_CORE(dc);
1639
1640 uint32_t i;
1641
1642 for (i = 0; i < core_dc->link_count; i++)
1643 core_link_resume(core_dc->links[i]);
1644}
1645
1646bool dc_read_dpcd(
1647 struct dc *dc,
1648 uint32_t link_index,
1649 uint32_t address,
1650 uint8_t *data,
1651 uint32_t size)
1652{
1653 struct core_dc *core_dc = DC_TO_CORE(dc);
1654
1655 struct core_link *link = core_dc->links[link_index];
1656 enum ddc_result r = dal_ddc_service_read_dpcd_data(
1657 link->ddc,
1658 address,
1659 data,
1660 size);
1661 return r == DDC_RESULT_SUCESSFULL;
1662}
1663
1664bool dc_write_dpcd(
1665 struct dc *dc,
1666 uint32_t link_index,
1667 uint32_t address,
1668 const uint8_t *data,
1669 uint32_t size)
1670{
1671 struct core_dc *core_dc = DC_TO_CORE(dc);
1672
1673 struct core_link *link = core_dc->links[link_index];
1674
1675 enum ddc_result r = dal_ddc_service_write_dpcd_data(
1676 link->ddc,
1677 address,
1678 data,
1679 size);
1680 return r == DDC_RESULT_SUCESSFULL;
1681}
1682
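/*
 * Illustrative DPCD access, assuming 'link_index' refers to a connected DP
 * link.  Address 0x00000 is the standard DPCD_REV register:
 *
 *	uint8_t dpcd_rev = 0;
 *
 *	if (dc_read_dpcd(dc, link_index, 0x00000, &dpcd_rev, 1))
 *		... dpcd_rev now holds the sink's DPCD revision ...
 *
 * dc_write_dpcd() follows the same pattern with a const data buffer.
 */
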
1683bool dc_submit_i2c(
1684 struct dc *dc,
1685 uint32_t link_index,
1686 struct i2c_command *cmd)
1687{
1688 struct core_dc *core_dc = DC_TO_CORE(dc);
1689
1690 struct core_link *link = core_dc->links[link_index];
1691 struct ddc_service *ddc = link->ddc;
1692
1693 return dal_i2caux_submit_i2c_command(
1694 ddc->ctx->i2caux,
1695 ddc->ddc_pin,
1696 cmd);
1697}
1698
1699static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1700{
1701 struct dc_link *dc_link = &core_link->public;
1702
1703 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1704 BREAK_TO_DEBUGGER();
1705 return false;
1706 }
1707
1708 dc_sink_retain(sink);
1709
1710 dc_link->remote_sinks[dc_link->sink_count] = sink;
1711 dc_link->sink_count++;
1712
1713 return true;
1714}
1715
1716struct dc_sink *dc_link_add_remote_sink(
1717 const struct dc_link *link,
1718 const uint8_t *edid,
1719 int len,
1720 struct dc_sink_init_data *init_data)
1721{
1722 struct dc_sink *dc_sink;
1723 enum dc_edid_status edid_status;
1724 struct core_link *core_link = DC_LINK_TO_LINK(link);
1725
1726 if (len > MAX_EDID_BUFFER_SIZE) {
1727 dm_error("Max EDID buffer size breached!\n");
1728 return NULL;
1729 }
1730
1731 if (!init_data) {
1732 BREAK_TO_DEBUGGER();
1733 return NULL;
1734 }
1735
1736 if (!init_data->link) {
1737 BREAK_TO_DEBUGGER();
1738 return NULL;
1739 }
1740
1741 dc_sink = dc_sink_create(init_data);
1742
1743 if (!dc_sink)
1744 return NULL;
1745
1746 memmove(dc_sink->dc_edid.raw_edid, edid, len);
1747 dc_sink->dc_edid.length = len;
1748
1749 if (!link_add_remote_sink_helper(
1750 core_link,
1751 dc_sink))
1752 goto fail_add_sink;
1753
1754 edid_status = dm_helpers_parse_edid_caps(
1755 core_link->ctx,
1756 &dc_sink->dc_edid,
1757 &dc_sink->edid_caps);
1758
1759 if (edid_status != EDID_OK)
1760 goto fail;
1761
1762 return dc_sink;
1763fail:
1764 dc_link_remove_remote_sink(link, dc_sink);
1765fail_add_sink:
1766 dc_sink_release(dc_sink);
1767 return NULL;
1768}
1769
1770void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1771{
1772 struct core_link *core_link = DC_LINK_TO_LINK(link);
1773 struct dc_link *dc_link = &core_link->public;
1774
1775 dc_link->local_sink = sink;
1776
1777 if (sink == NULL) {
1778 dc_link->type = dc_connection_none;
1779 } else {
1780 dc_link->type = dc_connection_single;
1781 }
1782}
1783
1784void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1785{
1786 int i;
1787 struct core_link *core_link = DC_LINK_TO_LINK(link);
1788 struct dc_link *dc_link = &core_link->public;
1789
1790 if (!link->sink_count) {
1791 BREAK_TO_DEBUGGER();
1792 return;
1793 }
1794
1795 for (i = 0; i < dc_link->sink_count; i++) {
1796 if (dc_link->remote_sinks[i] == sink) {
1797 dc_sink_release(sink);
1798 dc_link->remote_sinks[i] = NULL;
1799
1800			/* shrink array to remove the empty slot */
1801 while (i < dc_link->sink_count - 1) {
1802 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1803 i++;
1804 }
1805
1806 dc_link->sink_count--;
1807 return;
1808 }
1809 }
1810}
1811
1812const struct dc_stream_status *dc_stream_get_status(
1813 const struct dc_stream *dc_stream)
1814{
1815 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1816
1817 return &stream->status;
1818}
1819
1820bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
1821{
1822 int i;
1823 struct core_dc *core_dc = DC_TO_CORE(dc);
1824 struct mem_input *mi = NULL;
1825
1826 for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
1827 if (core_dc->res_pool->mis[i] != NULL) {
1828 mi = core_dc->res_pool->mis[i];
1829 break;
1830 }
1831 }
1832 if (mi == NULL) {
1833 dm_error("no mem_input!\n");
1834 return false;
1835 }
1836
1837 if (mi->funcs->mem_input_update_dchub)
1838 mi->funcs->mem_input_update_dchub(mi, dh_data);
1839 else
1840 ASSERT(mi->funcs->mem_input_update_dchub);
1841
1842
1843 return true;
1844
1845}
1846
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
new file mode 100644
index 000000000000..8ca0f1e0369a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -0,0 +1,270 @@
1/*
2 * dc_debug.c
3 *
4 * Created on: Nov 3, 2016
5 * Author: yonsun
6 */
7
8#include "dm_services.h"
9
10#include "dc.h"
11
12#include "core_status.h"
13#include "core_types.h"
14#include "hw_sequencer.h"
15
16#include "resource.h"
17
18#define SURFACE_TRACE(...) do {\
19 if (dc->debug.surface_trace) \
20 dm_logger_write(logger, \
21 LOG_IF_TRACE, \
22 ##__VA_ARGS__); \
23} while (0)
24
25void pre_surface_trace(
26 const struct dc *dc,
27 const struct dc_surface *const *surfaces,
28 int surface_count)
29{
30 int i;
31 struct core_dc *core_dc = DC_TO_CORE(dc);
32 struct dal_logger *logger = core_dc->ctx->logger;
33
34 for (i = 0; i < surface_count; i++) {
35 const struct dc_surface *surface = surfaces[i];
36
37 SURFACE_TRACE("Surface %d:\n", i);
38
39 SURFACE_TRACE(
40 "surface->visible = %d;\n"
41 "surface->flip_immediate = %d;\n"
42 "surface->address.type = %d;\n"
43 "surface->address.grph.addr.quad_part = 0x%X;\n"
44 "surface->address.grph.meta_addr.quad_part = 0x%X;\n"
45 "surface->scaling_quality.h_taps = %d;\n"
46 "surface->scaling_quality.v_taps = %d;\n"
47 "surface->scaling_quality.h_taps_c = %d;\n"
48 "surface->scaling_quality.v_taps_c = %d;\n",
49 surface->visible,
50 surface->flip_immediate,
51 surface->address.type,
52 surface->address.grph.addr.quad_part,
53 surface->address.grph.meta_addr.quad_part,
54 surface->scaling_quality.h_taps,
55 surface->scaling_quality.v_taps,
56 surface->scaling_quality.h_taps_c,
57 surface->scaling_quality.v_taps_c);
58
59 SURFACE_TRACE(
60 "surface->src_rect.x = %d;\n"
61 "surface->src_rect.y = %d;\n"
62 "surface->src_rect.width = %d;\n"
63 "surface->src_rect.height = %d;\n"
64 "surface->dst_rect.x = %d;\n"
65 "surface->dst_rect.y = %d;\n"
66 "surface->dst_rect.width = %d;\n"
67 "surface->dst_rect.height = %d;\n"
68 "surface->clip_rect.x = %d;\n"
69 "surface->clip_rect.y = %d;\n"
70 "surface->clip_rect.width = %d;\n"
71 "surface->clip_rect.height = %d;\n",
72 surface->src_rect.x,
73 surface->src_rect.y,
74 surface->src_rect.width,
75 surface->src_rect.height,
76 surface->dst_rect.x,
77 surface->dst_rect.y,
78 surface->dst_rect.width,
79 surface->dst_rect.height,
80 surface->clip_rect.x,
81 surface->clip_rect.y,
82 surface->clip_rect.width,
83 surface->clip_rect.height);
84
85 SURFACE_TRACE(
86 "surface->plane_size.grph.surface_size.x = %d;\n"
87 "surface->plane_size.grph.surface_size.y = %d;\n"
88 "surface->plane_size.grph.surface_size.width = %d;\n"
89 "surface->plane_size.grph.surface_size.height = %d;\n"
90 "surface->plane_size.grph.surface_pitch = %d;\n"
91 "surface->plane_size.grph.meta_pitch = %d;\n",
92 surface->plane_size.grph.surface_size.x,
93 surface->plane_size.grph.surface_size.y,
94 surface->plane_size.grph.surface_size.width,
95 surface->plane_size.grph.surface_size.height,
96 surface->plane_size.grph.surface_pitch,
97 surface->plane_size.grph.meta_pitch);
98
99
100 SURFACE_TRACE(
101 "surface->tiling_info.gfx8.num_banks = %d;\n"
102 "surface->tiling_info.gfx8.bank_width = %d;\n"
103 "surface->tiling_info.gfx8.bank_width_c = %d;\n"
104 "surface->tiling_info.gfx8.bank_height = %d;\n"
105 "surface->tiling_info.gfx8.bank_height_c = %d;\n"
106 "surface->tiling_info.gfx8.tile_aspect = %d;\n"
107 "surface->tiling_info.gfx8.tile_aspect_c = %d;\n"
108 "surface->tiling_info.gfx8.tile_split = %d;\n"
109 "surface->tiling_info.gfx8.tile_split_c = %d;\n"
110 "surface->tiling_info.gfx8.tile_mode = %d;\n"
111 "surface->tiling_info.gfx8.tile_mode_c = %d;\n",
112 surface->tiling_info.gfx8.num_banks,
113 surface->tiling_info.gfx8.bank_width,
114 surface->tiling_info.gfx8.bank_width_c,
115 surface->tiling_info.gfx8.bank_height,
116 surface->tiling_info.gfx8.bank_height_c,
117 surface->tiling_info.gfx8.tile_aspect,
118 surface->tiling_info.gfx8.tile_aspect_c,
119 surface->tiling_info.gfx8.tile_split,
120 surface->tiling_info.gfx8.tile_split_c,
121 surface->tiling_info.gfx8.tile_mode,
122 surface->tiling_info.gfx8.tile_mode_c);
123
124 SURFACE_TRACE(
125 "surface->tiling_info.gfx8.pipe_config = %d;\n"
126 "surface->tiling_info.gfx8.array_mode = %d;\n"
127 "surface->color_space = %d;\n"
128 "surface->dcc.enable = %d;\n"
129 "surface->format = %d;\n"
130 "surface->rotation = %d;\n"
131 "surface->stereo_format = %d;\n",
132 surface->tiling_info.gfx8.pipe_config,
133 surface->tiling_info.gfx8.array_mode,
134 surface->color_space,
135 surface->dcc.enable,
136 surface->format,
137 surface->rotation,
138 surface->stereo_format);
139 SURFACE_TRACE("\n");
140 }
141 SURFACE_TRACE("\n");
142}
143
144void update_surface_trace(
145 const struct dc *dc,
146 const struct dc_surface_update *updates,
147 int surface_count)
148{
149 int i;
150 struct core_dc *core_dc = DC_TO_CORE(dc);
151 struct dal_logger *logger = core_dc->ctx->logger;
152
153 for (i = 0; i < surface_count; i++) {
154 const struct dc_surface_update *update = &updates[i];
155
156 SURFACE_TRACE("Update %d\n", i);
157 if (update->flip_addr) {
158 SURFACE_TRACE("flip_addr->address.type = %d;\n"
159 "flip_addr->address.grph.addr.quad_part = 0x%X;\n"
160 "flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
161 "flip_addr->flip_immediate = %d;\n",
162 update->flip_addr->address.type,
163 update->flip_addr->address.grph.addr.quad_part,
164 update->flip_addr->address.grph.meta_addr.quad_part,
165 update->flip_addr->flip_immediate);
166 }
167
168 if (update->plane_info) {
169 SURFACE_TRACE(
170 "plane_info->color_space = %d;\n"
171 "plane_info->format = %d;\n"
172 "plane_info->plane_size.grph.meta_pitch = %d;\n"
173 "plane_info->plane_size.grph.surface_pitch = %d;\n"
174 "plane_info->plane_size.grph.surface_size.height = %d;\n"
175 "plane_info->plane_size.grph.surface_size.width = %d;\n"
176 "plane_info->plane_size.grph.surface_size.x = %d;\n"
177 "plane_info->plane_size.grph.surface_size.y = %d;\n"
178				"plane_info->rotation = %d;\n"
				"plane_info->stereo_format = %d;\n",
179 update->plane_info->color_space,
180 update->plane_info->format,
181 update->plane_info->plane_size.grph.meta_pitch,
182 update->plane_info->plane_size.grph.surface_pitch,
183 update->plane_info->plane_size.grph.surface_size.height,
184 update->plane_info->plane_size.grph.surface_size.width,
185 update->plane_info->plane_size.grph.surface_size.x,
186 update->plane_info->plane_size.grph.surface_size.y,
187 update->plane_info->rotation,
188 update->plane_info->stereo_format);
189
190 SURFACE_TRACE(
191 "plane_info->tiling_info.gfx8.num_banks = %d;\n"
192 "plane_info->tiling_info.gfx8.bank_width = %d;\n"
193 "plane_info->tiling_info.gfx8.bank_width_c = %d;\n"
194 "plane_info->tiling_info.gfx8.bank_height = %d;\n"
195 "plane_info->tiling_info.gfx8.bank_height_c = %d;\n"
196 "plane_info->tiling_info.gfx8.tile_aspect = %d;\n"
197 "plane_info->tiling_info.gfx8.tile_aspect_c = %d;\n"
198 "plane_info->tiling_info.gfx8.tile_split = %d;\n"
199 "plane_info->tiling_info.gfx8.tile_split_c = %d;\n"
200 "plane_info->tiling_info.gfx8.tile_mode = %d;\n"
201 "plane_info->tiling_info.gfx8.tile_mode_c = %d;\n",
202 update->plane_info->tiling_info.gfx8.num_banks,
203 update->plane_info->tiling_info.gfx8.bank_width,
204 update->plane_info->tiling_info.gfx8.bank_width_c,
205 update->plane_info->tiling_info.gfx8.bank_height,
206 update->plane_info->tiling_info.gfx8.bank_height_c,
207 update->plane_info->tiling_info.gfx8.tile_aspect,
208 update->plane_info->tiling_info.gfx8.tile_aspect_c,
209 update->plane_info->tiling_info.gfx8.tile_split,
210 update->plane_info->tiling_info.gfx8.tile_split_c,
211 update->plane_info->tiling_info.gfx8.tile_mode,
212 update->plane_info->tiling_info.gfx8.tile_mode_c);
213
214 SURFACE_TRACE(
215 "plane_info->tiling_info.gfx8.pipe_config = %d;\n"
216 "plane_info->tiling_info.gfx8.array_mode = %d;\n"
217 "plane_info->visible = %d;\n",
218 update->plane_info->tiling_info.gfx8.pipe_config,
219 update->plane_info->tiling_info.gfx8.array_mode,
220 update->plane_info->visible);
221 }
222
223 if (update->scaling_info) {
224 SURFACE_TRACE(
225 "scaling_info->src_rect.x = %d;\n"
226 "scaling_info->src_rect.y = %d;\n"
227 "scaling_info->src_rect.width = %d;\n"
228 "scaling_info->src_rect.height = %d;\n"
229 "scaling_info->dst_rect.x = %d;\n"
230 "scaling_info->dst_rect.y = %d;\n"
231 "scaling_info->dst_rect.width = %d;\n"
232 "scaling_info->dst_rect.height = %d;\n"
233 "scaling_info->clip_rect.x = %d;\n"
234 "scaling_info->clip_rect.y = %d;\n"
235 "scaling_info->clip_rect.width = %d;\n"
236 "scaling_info->clip_rect.height = %d;\n"
237 "scaling_info->scaling_quality.h_taps = %d;\n"
238 "scaling_info->scaling_quality.v_taps = %d;\n"
239 "scaling_info->scaling_quality.h_taps_c = %d;\n"
240 "scaling_info->scaling_quality.v_taps_c = %d;\n",
241 update->scaling_info->src_rect.x,
242 update->scaling_info->src_rect.y,
243 update->scaling_info->src_rect.width,
244 update->scaling_info->src_rect.height,
245 update->scaling_info->dst_rect.x,
246 update->scaling_info->dst_rect.y,
247 update->scaling_info->dst_rect.width,
248 update->scaling_info->dst_rect.height,
249 update->scaling_info->clip_rect.x,
250 update->scaling_info->clip_rect.y,
251 update->scaling_info->clip_rect.width,
252 update->scaling_info->clip_rect.height,
253 update->scaling_info->scaling_quality.h_taps,
254 update->scaling_info->scaling_quality.v_taps,
255 update->scaling_info->scaling_quality.h_taps_c,
256 update->scaling_info->scaling_quality.v_taps_c);
257 }
258 SURFACE_TRACE("\n");
259 }
260 SURFACE_TRACE("\n");
261}
262
263void post_surface_trace(const struct dc *dc)
264{
265 struct core_dc *core_dc = DC_TO_CORE(dc);
266 struct dal_logger *logger = core_dc->ctx->logger;
267
268 SURFACE_TRACE("post surface process.\n");
269
270}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
new file mode 100644
index 000000000000..d5cffa51ca96
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "core_types.h"
28#include "core_dc.h"
29#include "timing_generator.h"
30#include "hw_sequencer.h"
31
32/* used as index in array of black_color_format */
33enum black_color_format {
34 BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
35 BLACK_COLOR_FORMAT_RGB_LIMITED,
36 BLACK_COLOR_FORMAT_YUV_TV,
37 BLACK_COLOR_FORMAT_YUV_CV,
38 BLACK_COLOR_FORMAT_YUV_SUPER_AA,
39 BLACK_COLOR_FORMAT_DEBUG,
40};
41
42static const struct tg_color black_color_format[] = {
43 /* BlackColorFormat_RGB_FullRange */
44 {0, 0, 0},
45 /* BlackColorFormat_RGB_Limited */
46 {0x40, 0x40, 0x40},
47 /* BlackColorFormat_YUV_TV */
48 {0x200, 0x40, 0x200},
49 /* BlackColorFormat_YUV_CV */
50 {0x1f4, 0x40, 0x1f4},
51 /* BlackColorFormat_YUV_SuperAA */
52 {0x1a2, 0x20, 0x1a2},
53 /* visual confirm debug */
54 {0xff, 0xff, 0},
55};
56
57void color_space_to_black_color(
58 const struct core_dc *dc,
59 enum dc_color_space colorspace,
60 struct tg_color *black_color)
61{
62 if (dc->public.debug.surface_visual_confirm) {
63 *black_color =
64 black_color_format[BLACK_COLOR_FORMAT_DEBUG];
65 return;
66 }
67
68 switch (colorspace) {
69 case COLOR_SPACE_YPBPR601:
70 *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_TV];
71 break;
72
73 case COLOR_SPACE_YPBPR709:
74 case COLOR_SPACE_YCBCR601:
75 case COLOR_SPACE_YCBCR709:
76 case COLOR_SPACE_YCBCR601_LIMITED:
77 case COLOR_SPACE_YCBCR709_LIMITED:
78 *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
79 break;
80
81 case COLOR_SPACE_SRGB_LIMITED:
82 *black_color =
83 black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
84 break;
85
86 default:
87			/* default is sRGB black (full range). */
88 *black_color =
89 black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
90 /* default is sRGB black 0. */
91 break;
92 }
93}
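
/*
 * Example: for COLOR_SPACE_YCBCR709 the call below fills 'black' with the
 * BLACK_COLOR_FORMAT_YUV_CV entry {0x1f4, 0x40, 0x1f4}, unless
 * debug.surface_visual_confirm is set, in which case the debug color
 * {0xff, 0xff, 0} is returned instead.
 *
 *	struct tg_color black;
 *
 *	color_space_to_black_color(dc, COLOR_SPACE_YCBCR709, &black);
 */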
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
new file mode 100644
index 000000000000..70a25546de1e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -0,0 +1,1899 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dm_helpers.h"
28#include "dc.h"
29#include "core_dc.h"
30#include "grph_object_id.h"
31#include "gpio_service_interface.h"
32#include "core_status.h"
33#include "dc_link_dp.h"
34#include "dc_link_ddc.h"
35#include "link_hwss.h"
36#include "stream_encoder.h"
37#include "link_encoder.h"
38#include "hw_sequencer.h"
39#include "resource.h"
40#include "fixed31_32.h"
41#include "include/asic_capability_interface.h"
42
43#include "dce/dce_11_0_d.h"
44#include "dce/dce_11_0_enum.h"
45#include "dce/dce_11_0_sh_mask.h"
46
47#ifndef mmDMCU_STATUS__UC_IN_RESET__SHIFT
48#define mmDMCU_STATUS__UC_IN_RESET__SHIFT 0x0
49#endif
50
51#ifndef mmDMCU_STATUS__UC_IN_RESET_MASK
52#define mmDMCU_STATUS__UC_IN_RESET_MASK 0x00000001L
53#endif
54
55#define LINK_INFO(...) \
56 dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
57 __VA_ARGS__)
58
59/*******************************************************************************
60 * Private structures
61 ******************************************************************************/
62
63enum {
64 LINK_RATE_REF_FREQ_IN_MHZ = 27,
65 PEAK_FACTOR_X1000 = 1006
66};
67
68/*******************************************************************************
69 * Private functions
70 ******************************************************************************/
71static void destruct(struct core_link *link)
72{
73 int i;
74
75 if (link->ddc)
76 dal_ddc_service_destroy(&link->ddc);
77
78	if (link->link_enc)
79 link->link_enc->funcs->destroy(&link->link_enc);
80
81 if (link->public.local_sink)
82 dc_sink_release(link->public.local_sink);
83
84 for (i = 0; i < link->public.sink_count; ++i)
85 dc_sink_release(link->public.remote_sinks[i]);
86}
87
88static struct gpio *get_hpd_gpio(const struct core_link *link)
89{
90 enum bp_result bp_result;
91 struct dc_bios *dcb = link->ctx->dc_bios;
92 struct graphics_object_hpd_info hpd_info;
93 struct gpio_pin_info pin_info;
94
95 if (dcb->funcs->get_hpd_info(dcb, link->link_id, &hpd_info) != BP_RESULT_OK)
96 return NULL;
97
98 bp_result = dcb->funcs->get_gpio_pin_info(dcb,
99 hpd_info.hpd_int_gpio_uid, &pin_info);
100
101 if (bp_result != BP_RESULT_OK) {
102 ASSERT(bp_result == BP_RESULT_NORECORD);
103 return NULL;
104 }
105
106 return dal_gpio_service_create_irq(
107 link->ctx->gpio_service,
108 pin_info.offset,
109 pin_info.mask);
110}
111
112/*
113 * Function: program_hpd_filter
114 *
115 * @brief
116 * Programs HPD filter on associated HPD line
117 *
118 * @param [in] link: link whose HPD line is programmed; the connect and
119 *             disconnect filter timeouts are chosen internally per signal type
120 *
121 * @return
122 * true on success, false otherwise
123 */
124static bool program_hpd_filter(
125 const struct core_link *link)
126{
127 bool result = false;
128
129 struct gpio *hpd;
130
131 int delay_on_connect_in_ms = 0;
132 int delay_on_disconnect_in_ms = 0;
133
134 /* Verify feature is supported */
135 switch (link->public.connector_signal) {
136 case SIGNAL_TYPE_DVI_SINGLE_LINK:
137 case SIGNAL_TYPE_DVI_DUAL_LINK:
138 case SIGNAL_TYPE_HDMI_TYPE_A:
139 /* Program hpd filter */
140 delay_on_connect_in_ms = 500;
141 delay_on_disconnect_in_ms = 100;
142 break;
143 case SIGNAL_TYPE_DISPLAY_PORT:
144 case SIGNAL_TYPE_DISPLAY_PORT_MST:
145 /* Program hpd filter to allow DP signal to settle */
146 /* 500: not able to detect MST <-> SST switch as HPD is low for
147 * only 100ms on DELL U2413
148		 * 0: some passive dongles still show aux mode instead of i2c
149		 * 20-50: not enough to hide bouncing HPD with a passive dongle;
150		 *        intermittent i2c read issues are also seen.
151 */
152 delay_on_connect_in_ms = 80;
153 delay_on_disconnect_in_ms = 0;
154 break;
155 case SIGNAL_TYPE_LVDS:
156 case SIGNAL_TYPE_EDP:
157 default:
158 /* Don't program hpd filter */
159 return false;
160 }
161
162 /* Obtain HPD handle */
163 hpd = get_hpd_gpio(link);
164
165 if (!hpd)
166 return result;
167
168 /* Setup HPD filtering */
169 if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
170 struct gpio_hpd_config config;
171
172 config.delay_on_connect = delay_on_connect_in_ms;
173 config.delay_on_disconnect = delay_on_disconnect_in_ms;
174
175 dal_irq_setup_hpd_filter(hpd, &config);
176
177 dal_gpio_close(hpd);
178
179 result = true;
180 } else {
181 ASSERT_CRITICAL(false);
182 }
183
184 /* Release HPD handle */
185 dal_gpio_destroy_irq(&hpd);
186
187 return result;
188}
189
190static bool detect_sink(struct core_link *link, enum dc_connection_type *type)
191{
192 uint32_t is_hpd_high = 0;
193 struct gpio *hpd_pin;
194
195 /* todo: may need to lock gpio access */
196 hpd_pin = get_hpd_gpio(link);
197 if (hpd_pin == NULL)
198 goto hpd_gpio_failure;
199
200 dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
201 dal_gpio_get_value(hpd_pin, &is_hpd_high);
202 dal_gpio_close(hpd_pin);
203 dal_gpio_destroy_irq(&hpd_pin);
204
205 if (is_hpd_high) {
206 *type = dc_connection_single;
207 /* TODO: need to do the actual detection */
208 } else {
209 *type = dc_connection_none;
210 }
211
212 return true;
213
214hpd_gpio_failure:
215 return false;
216}
217
218enum ddc_transaction_type get_ddc_transaction_type(
219 enum signal_type sink_signal)
220{
221 enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
222
223 switch (sink_signal) {
224 case SIGNAL_TYPE_DVI_SINGLE_LINK:
225 case SIGNAL_TYPE_DVI_DUAL_LINK:
226 case SIGNAL_TYPE_HDMI_TYPE_A:
227 case SIGNAL_TYPE_LVDS:
228 case SIGNAL_TYPE_RGB:
229 transaction_type = DDC_TRANSACTION_TYPE_I2C;
230 break;
231
232 case SIGNAL_TYPE_DISPLAY_PORT:
233 case SIGNAL_TYPE_EDP:
234 transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
235 break;
236
237 case SIGNAL_TYPE_DISPLAY_PORT_MST:
 238 /* MST does not use I2C-over-AUX, but there is the
 239 * special use case of "immediate downstream device
 240 * access" (EPR#370830). */
241 transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
242 break;
243
244 default:
245 break;
246 }
247
248 return transaction_type;
249}
250
251static enum signal_type get_basic_signal_type(
252 struct graphics_object_id encoder,
253 struct graphics_object_id downstream)
254{
255 if (downstream.type == OBJECT_TYPE_CONNECTOR) {
256 switch (downstream.id) {
257 case CONNECTOR_ID_SINGLE_LINK_DVII:
258 switch (encoder.id) {
259 case ENCODER_ID_INTERNAL_DAC1:
260 case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
261 case ENCODER_ID_INTERNAL_DAC2:
262 case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
263 return SIGNAL_TYPE_RGB;
264 default:
265 return SIGNAL_TYPE_DVI_SINGLE_LINK;
266 }
267 break;
268 case CONNECTOR_ID_DUAL_LINK_DVII:
269 {
270 switch (encoder.id) {
271 case ENCODER_ID_INTERNAL_DAC1:
272 case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
273 case ENCODER_ID_INTERNAL_DAC2:
274 case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
275 return SIGNAL_TYPE_RGB;
276 default:
277 return SIGNAL_TYPE_DVI_DUAL_LINK;
278 }
279 }
280 break;
281 case CONNECTOR_ID_SINGLE_LINK_DVID:
282 return SIGNAL_TYPE_DVI_SINGLE_LINK;
283 case CONNECTOR_ID_DUAL_LINK_DVID:
284 return SIGNAL_TYPE_DVI_DUAL_LINK;
285 case CONNECTOR_ID_VGA:
286 return SIGNAL_TYPE_RGB;
287 case CONNECTOR_ID_HDMI_TYPE_A:
288 return SIGNAL_TYPE_HDMI_TYPE_A;
289 case CONNECTOR_ID_LVDS:
290 return SIGNAL_TYPE_LVDS;
291 case CONNECTOR_ID_DISPLAY_PORT:
292 return SIGNAL_TYPE_DISPLAY_PORT;
293 case CONNECTOR_ID_EDP:
294 return SIGNAL_TYPE_EDP;
295 default:
296 return SIGNAL_TYPE_NONE;
297 }
298 } else if (downstream.type == OBJECT_TYPE_ENCODER) {
299 switch (downstream.id) {
300 case ENCODER_ID_EXTERNAL_NUTMEG:
301 case ENCODER_ID_EXTERNAL_TRAVIS:
302 return SIGNAL_TYPE_DISPLAY_PORT;
303 default:
304 return SIGNAL_TYPE_NONE;
305 }
306 }
307
308 return SIGNAL_TYPE_NONE;
309}
310
311/*
312 * @brief
313 * Check whether there is a dongle on DP connector
314 */
315static bool is_dp_sink_present(struct core_link *link)
316{
317 enum gpio_result gpio_result;
318 uint32_t clock_pin = 0;
319 uint32_t data_pin = 0;
320
321 struct ddc *ddc;
322
323 enum connector_id connector_id =
324 dal_graphics_object_id_get_connector_id(link->link_id);
325
326 bool present =
327 ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
328 (connector_id == CONNECTOR_ID_EDP));
329
330 ddc = dal_ddc_service_get_ddc_pin(link->ddc);
331
332 if (!ddc) {
333 BREAK_TO_DEBUGGER();
334 return present;
335 }
336
337 /* Open GPIO and set it to I2C mode */
338 /* Note: this GpioMode_Input will be converted
339 * to GpioConfigType_I2cAuxDualMode in GPIO component,
340 * which indicates we need additional delay */
341
342 if (GPIO_RESULT_OK != dal_ddc_open(
343 ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
344 dal_gpio_destroy_ddc(&ddc);
345
346 return present;
347 }
348
349 /* Read GPIO: DP sink is present if both clock and data pins are zero */
350 /* [anaumov] in DAL2, there was no check for GPIO failure */
351
352 gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
353 ASSERT(gpio_result == GPIO_RESULT_OK);
354
355 if (gpio_result == GPIO_RESULT_OK)
356 if (link->link_enc->features.flags.bits.
357 DP_SINK_DETECT_POLL_DATA_PIN)
358 gpio_result = dal_gpio_get_value(ddc->pin_data, &data_pin);
359
360 present = (gpio_result == GPIO_RESULT_OK) && !(clock_pin || data_pin);
361
362 dal_ddc_close(ddc);
363
364 return present;
365}
366
367/*
368 * @brief
369 * Detect output sink type
370 */
371static enum signal_type link_detect_sink(struct core_link *link)
372{
373 enum signal_type result = get_basic_signal_type(
374 link->link_enc->id, link->link_id);
375
376 /* Internal digital encoder will detect only dongles
377 * that require digital signal */
378
379 /* Detection mechanism is different
380 * for different native connectors.
381 * LVDS connector supports only LVDS signal;
382 * PCIE is a bus slot, the actual connector needs to be detected first;
383 * eDP connector supports only eDP signal;
384 * HDMI should check straps for audio */
385
386 /* PCIE detects the actual connector on add-on board */
387
388 if (link->link_id.id == CONNECTOR_ID_PCIE) {
389 /* ZAZTODO implement PCIE add-on card detection */
390 }
391
392 switch (link->link_id.id) {
393 case CONNECTOR_ID_HDMI_TYPE_A: {
394 /* check audio support:
395 * if native HDMI is not supported, switch to DVI */
396 struct audio_support *aud_support = &link->dc->res_pool->audio_support;
397
398 if (!aud_support->hdmi_audio_native)
399 if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
400 result = SIGNAL_TYPE_DVI_SINGLE_LINK;
401 }
402 break;
403 case CONNECTOR_ID_DISPLAY_PORT: {
404
405 /* Check whether DP signal detected: if not -
406 * we assume signal is DVI; it could be corrected
407 * to HDMI after dongle detection */
408 if (!is_dp_sink_present(link))
409 result = SIGNAL_TYPE_DVI_SINGLE_LINK;
410 }
411 break;
412 default:
413 break;
414 }
415
416 return result;
417}
418
419static enum signal_type decide_signal_from_strap_and_dongle_type(
420 enum display_dongle_type dongle_type,
421 struct audio_support *audio_support)
422{
423 enum signal_type signal = SIGNAL_TYPE_NONE;
424
425 switch (dongle_type) {
426 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
427 if (audio_support->hdmi_audio_on_dongle)
428 signal = SIGNAL_TYPE_HDMI_TYPE_A;
429 else
430 signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
431 break;
432 case DISPLAY_DONGLE_DP_DVI_DONGLE:
433 signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
434 break;
435 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
436 if (audio_support->hdmi_audio_native)
437 signal = SIGNAL_TYPE_HDMI_TYPE_A;
438 else
439 signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
440 break;
441 default:
442 signal = SIGNAL_TYPE_NONE;
443 break;
444 }
445
446 return signal;
447}
448
449static enum signal_type dp_passive_dongle_detection(
450 struct ddc_service *ddc,
451 struct display_sink_capability *sink_cap,
452 struct audio_support *audio_support)
453{
454 dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
455 ddc, sink_cap);
456 return decide_signal_from_strap_and_dongle_type(
457 sink_cap->dongle_type,
458 audio_support);
459}
460
461static void link_disconnect_sink(struct core_link *link)
462{
463 if (link->public.local_sink) {
464 dc_sink_release(link->public.local_sink);
465 link->public.local_sink = NULL;
466 }
467
468 link->dpcd_sink_count = 0;
469}
470
471static enum dc_edid_status read_edid(
472 struct core_link *link,
473 struct core_sink *sink)
474{
475 uint32_t edid_retry = 3;
476 enum dc_edid_status edid_status;
477
 478 /* Some dongles read the EDID incorrectly the first time;
 479 * verify the checksum and retry to make sure we read a correct EDID.
 480 */
481 do {
482 sink->public.dc_edid.length =
483 dal_ddc_service_edid_query(link->ddc);
484
485 if (0 == sink->public.dc_edid.length)
486 return EDID_NO_RESPONSE;
487
488 dal_ddc_service_get_edid_buf(link->ddc,
489 sink->public.dc_edid.raw_edid);
490 edid_status = dm_helpers_parse_edid_caps(
491 sink->ctx,
492 &sink->public.dc_edid,
493 &sink->public.edid_caps);
494 --edid_retry;
495 if (edid_status == EDID_BAD_CHECKSUM)
496 dm_logger_write(link->ctx->logger, LOG_WARNING,
497 "Bad EDID checksum, retry remain: %d\n",
498 edid_retry);
499 } while (edid_status == EDID_BAD_CHECKSUM && edid_retry > 0);
500
501 return edid_status;
502}
503
504static void detect_dp(
505 struct core_link *link,
506 struct display_sink_capability *sink_caps,
507 bool *converter_disable_audio,
508 struct audio_support *audio_support,
509 bool boot)
510{
511 sink_caps->signal = link_detect_sink(link);
512 sink_caps->transaction_type =
513 get_ddc_transaction_type(sink_caps->signal);
514
515 if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
516 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
517 detect_dp_sink_caps(link);
518
519 /* DP active dongles */
520 if (is_dp_active_dongle(link)) {
521 link->public.type = dc_connection_active_dongle;
522 if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
523 /*
524 * active dongle unplug processing for short irq
525 */
526 link_disconnect_sink(link);
527 return;
528 }
529
530 if (link->dpcd_caps.dongle_type !=
531 DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
532 *converter_disable_audio = true;
533 }
534 }
535 if (is_mst_supported(link)) {
536 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
537
 538 /*
 539 * This call initiates MST topology discovery, which
 540 * detects MST ports and adds new DRM connectors to the
 541 * DRM framework, then reads the EDID via remote i2c over
 542 * aux. Finally, it notifies DRM of the detect result and
 543 * saves the EDID into the DRM framework.
 544 *
 545 * .detect is called by .fill_modes.
 546 * .fill_modes is called by the user mode ioctl
 547 * DRM_IOCTL_MODE_GETCONNECTOR.
 548 *
 549 * .get_modes is called by .fill_modes.
 550 *
 551 * When .get_modes is called, the AMDGPU DM implementation
 552 * creates a new dc_sink and adds it to the dc_link. For
 553 * long HPD plug in/out, MST has its own handling.
 554 *
 555 * Therefore, just after dc_create, link->sink is not
 556 * created for MST until the user mode app calls
 557 * DRM_IOCTL_MODE_GETCONNECTOR.
 558 *
 559 * Need to check ->sink usages in case ->sink == NULL.
 560 * TODO: s3 resume check
 561 */
562
563 if (dm_helpers_dp_mst_start_top_mgr(
564 link->ctx,
565 &link->public, boot)) {
566 link->public.type = dc_connection_mst_branch;
567 } else {
568 /* MST not supported */
569 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
570 }
571 }
572 } else {
573 /* DP passive dongles */
574 sink_caps->signal = dp_passive_dongle_detection(link->ddc,
575 sink_caps,
576 audio_support);
577 }
578}
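
For readers unfamiliar with the DRM side referenced by the comment inside detect_dp() above, the call chain it assumes can be summarized as follows. This is an illustrative outline only; the arrows describe the flow, not exact DRM/DM symbols.

/*
 * Illustrative outline of the flow described in detect_dp():
 *
 *   userspace ioctl DRM_IOCTL_MODE_GETCONNECTOR
 *     -> drm connector .fill_modes
 *          -> .detect     (MST topology discovery starts via
 *                          dm_helpers_dp_mst_start_top_mgr)
 *          -> .get_modes  (remote EDID read over I2C-over-AUX;
 *                          DM creates a dc_sink and attaches it
 *                          to the dc_link)
 *
 * Hence link->sink for an MST branch stays NULL until userspace
 * issues the GETCONNECTOR ioctl after dc_create().
 */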
579
580bool dc_link_detect(const struct dc_link *dc_link, bool boot)
581{
582 struct core_link *link = DC_LINK_TO_LINK(dc_link);
583 struct dc_sink_init_data sink_init_data = { 0 };
584 struct display_sink_capability sink_caps = { 0 };
585 uint8_t i;
586 bool converter_disable_audio = false;
587 struct audio_support *aud_support = &link->dc->res_pool->audio_support;
588 enum dc_edid_status edid_status;
589 struct dc_context *dc_ctx = link->ctx;
590 struct dc_sink *dc_sink;
591 struct core_sink *sink = NULL;
592 enum dc_connection_type new_connection_type = dc_connection_none;
593
594 if (link->public.connector_signal == SIGNAL_TYPE_VIRTUAL)
595 return false;
596
597 if (false == detect_sink(link, &new_connection_type)) {
598 BREAK_TO_DEBUGGER();
599 return false;
600 }
601
602 if (link->public.connector_signal == SIGNAL_TYPE_EDP &&
603 link->public.local_sink)
604 return true;
605
606 link_disconnect_sink(link);
607
608 if (new_connection_type != dc_connection_none) {
609 link->public.type = new_connection_type;
610
611 /* From Disconnected-to-Connected. */
612 switch (link->public.connector_signal) {
613 case SIGNAL_TYPE_HDMI_TYPE_A: {
614 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
615 if (aud_support->hdmi_audio_native)
616 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
617 else
618 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
619 break;
620 }
621
622 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
623 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
624 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
625 break;
626 }
627
628 case SIGNAL_TYPE_DVI_DUAL_LINK: {
629 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
630 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
631 break;
632 }
633
634 case SIGNAL_TYPE_EDP: {
635 detect_dp_sink_caps(link);
636 sink_caps.transaction_type =
637 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
638 sink_caps.signal = SIGNAL_TYPE_EDP;
639 break;
640 }
641
642 case SIGNAL_TYPE_DISPLAY_PORT: {
643 detect_dp(
644 link,
645 &sink_caps,
646 &converter_disable_audio,
647 aud_support, boot);
648
649 /* Active dongle downstream unplug */
650 if (link->public.type == dc_connection_active_dongle
651 && link->dpcd_caps.sink_count.
652 bits.SINK_COUNT == 0)
653 return true;
654
655 if (link->public.type == dc_connection_mst_branch) {
656 LINK_INFO("link=%d, mst branch is now Connected\n",
657 link->public.link_index);
658 return false;
659 }
660
661 break;
662 }
663
664 default:
665 DC_ERROR("Invalid connector type! signal:%d\n",
666 link->public.connector_signal);
667 return false;
668 } /* switch() */
669
670 if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
671 link->dpcd_sink_count = link->dpcd_caps.sink_count.
672 bits.SINK_COUNT;
673 else
674 link->dpcd_sink_count = 1;
675
676 dal_ddc_service_set_transaction_type(
677 link->ddc,
678 sink_caps.transaction_type);
679
680 sink_init_data.link = &link->public;
681 sink_init_data.sink_signal = sink_caps.signal;
682 sink_init_data.dongle_max_pix_clk =
683 sink_caps.max_hdmi_pixel_clock;
684 sink_init_data.converter_disable_audio =
685 converter_disable_audio;
686
687 dc_sink = dc_sink_create(&sink_init_data);
688 if (!dc_sink) {
689 DC_ERROR("Failed to create sink!\n");
690 return false;
691 }
692
693 sink = DC_SINK_TO_CORE(dc_sink);
694 link->public.local_sink = &sink->public;
695
696 edid_status = read_edid(link, sink);
697
698 switch (edid_status) {
699 case EDID_BAD_CHECKSUM:
700 dm_logger_write(link->ctx->logger, LOG_ERROR,
701 "EDID checksum invalid.\n");
702 break;
703 case EDID_NO_RESPONSE:
704 dm_logger_write(link->ctx->logger, LOG_ERROR,
705 "No EDID read.\n");
706 return false;
707
708 default:
709 break;
710 }
711
712 /* HDMI-DVI Dongle */
713 if (dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
714 !dc_sink->edid_caps.edid_hdmi)
715 dc_sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
716
717 /* Connectivity log: detection */
718 for (i = 0; i < sink->public.dc_edid.length / EDID_BLOCK_SIZE; i++) {
719 CONN_DATA_DETECT(link,
720 &sink->public.dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
721 EDID_BLOCK_SIZE,
722 "%s: [Block %d] ", sink->public.edid_caps.display_name, i);
723 }
724
725 dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
726 "%s: "
727 "manufacturer_id = %X, "
728 "product_id = %X, "
729 "serial_number = %X, "
730 "manufacture_week = %d, "
731 "manufacture_year = %d, "
732 "display_name = %s, "
733 "speaker_flag = %d, "
734 "audio_mode_count = %d\n",
735 __func__,
736 sink->public.edid_caps.manufacturer_id,
737 sink->public.edid_caps.product_id,
738 sink->public.edid_caps.serial_number,
739 sink->public.edid_caps.manufacture_week,
740 sink->public.edid_caps.manufacture_year,
741 sink->public.edid_caps.display_name,
742 sink->public.edid_caps.speaker_flags,
743 sink->public.edid_caps.audio_mode_count);
744
745 for (i = 0; i < sink->public.edid_caps.audio_mode_count; i++) {
746 dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
747 "%s: mode number = %d, "
748 "format_code = %d, "
749 "channel_count = %d, "
750 "sample_rate = %d, "
751 "sample_size = %d\n",
752 __func__,
753 i,
754 sink->public.edid_caps.audio_modes[i].format_code,
755 sink->public.edid_caps.audio_modes[i].channel_count,
756 sink->public.edid_caps.audio_modes[i].sample_rate,
757 sink->public.edid_caps.audio_modes[i].sample_size);
758 }
759
760 } else {
761 /* From Connected-to-Disconnected. */
762 if (link->public.type == dc_connection_mst_branch) {
763 LINK_INFO("link=%d, mst branch is now Disconnected\n",
764 link->public.link_index);
765 dm_helpers_dp_mst_stop_top_mgr(link->ctx, &link->public);
766
767 link->mst_stream_alloc_table.stream_count = 0;
768 memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
769 }
770
771 link->public.type = dc_connection_none;
772 sink_caps.signal = SIGNAL_TYPE_NONE;
773 }
774
775 LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
776 link->public.link_index, &sink->public,
777 (sink_caps.signal == SIGNAL_TYPE_NONE ?
778 "Disconnected":"Connected"));
779
780 return true;
781}
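
A minimal usage sketch of dc_link_detect() as it might be called from a hot-plug handler on the DM side; only dc_link_detect() and its prototype are from dc, everything else here is hypothetical and for illustration.

#include <stdbool.h>

struct dc_link;
bool dc_link_detect(const struct dc_link *dc_link, bool boot);

static void example_handle_hpd_irq(const struct dc_link *dc_link)
{
	/* 'false': this is a runtime hot-plug, not the boot-time path. */
	if (dc_link_detect(dc_link, false)) {
		/* Detection ran and the link state was updated; the DM side
		 * would now refresh the DRM connector and its modes. */
	} else {
		/* Nothing to update here: virtual signal, HPD GPIO failure,
		 * or an MST branch whose sinks are handled by the MST
		 * topology manager instead. */
	}
}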
782
783static enum hpd_source_id get_hpd_line(
784 struct core_link *link)
785{
786 struct gpio *hpd;
787 enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
788
789 hpd = get_hpd_gpio(link);
790
791 if (hpd) {
792 switch (dal_irq_get_source(hpd)) {
793 case DC_IRQ_SOURCE_HPD1:
794 hpd_id = HPD_SOURCEID1;
795 break;
796 case DC_IRQ_SOURCE_HPD2:
797 hpd_id = HPD_SOURCEID2;
798 break;
799 case DC_IRQ_SOURCE_HPD3:
800 hpd_id = HPD_SOURCEID3;
801 break;
802 case DC_IRQ_SOURCE_HPD4:
803 hpd_id = HPD_SOURCEID4;
804 break;
805 case DC_IRQ_SOURCE_HPD5:
806 hpd_id = HPD_SOURCEID5;
807 break;
808 case DC_IRQ_SOURCE_HPD6:
809 hpd_id = HPD_SOURCEID6;
810 break;
811 default:
812 BREAK_TO_DEBUGGER();
813 break;
814 }
815
816 dal_gpio_destroy_irq(&hpd);
817 }
818
819 return hpd_id;
820}
821
822static enum channel_id get_ddc_line(struct core_link *link)
823{
824 struct ddc *ddc;
825 enum channel_id channel = CHANNEL_ID_UNKNOWN;
826
827 ddc = dal_ddc_service_get_ddc_pin(link->ddc);
828
829 if (ddc) {
830 switch (dal_ddc_get_line(ddc)) {
831 case GPIO_DDC_LINE_DDC1:
832 channel = CHANNEL_ID_DDC1;
833 break;
834 case GPIO_DDC_LINE_DDC2:
835 channel = CHANNEL_ID_DDC2;
836 break;
837 case GPIO_DDC_LINE_DDC3:
838 channel = CHANNEL_ID_DDC3;
839 break;
840 case GPIO_DDC_LINE_DDC4:
841 channel = CHANNEL_ID_DDC4;
842 break;
843 case GPIO_DDC_LINE_DDC5:
844 channel = CHANNEL_ID_DDC5;
845 break;
846 case GPIO_DDC_LINE_DDC6:
847 channel = CHANNEL_ID_DDC6;
848 break;
849 case GPIO_DDC_LINE_DDC_VGA:
850 channel = CHANNEL_ID_DDC_VGA;
851 break;
852 case GPIO_DDC_LINE_I2C_PAD:
853 channel = CHANNEL_ID_I2C_PAD;
854 break;
855 default:
856 BREAK_TO_DEBUGGER();
857 break;
858 }
859 }
860
861 return channel;
862}
863
864static enum transmitter translate_encoder_to_transmitter(
865 struct graphics_object_id encoder)
866{
867 switch (encoder.id) {
868 case ENCODER_ID_INTERNAL_UNIPHY:
869 switch (encoder.enum_id) {
870 case ENUM_ID_1:
871 return TRANSMITTER_UNIPHY_A;
872 case ENUM_ID_2:
873 return TRANSMITTER_UNIPHY_B;
874 default:
875 return TRANSMITTER_UNKNOWN;
876 }
877 break;
878 case ENCODER_ID_INTERNAL_UNIPHY1:
879 switch (encoder.enum_id) {
880 case ENUM_ID_1:
881 return TRANSMITTER_UNIPHY_C;
882 case ENUM_ID_2:
883 return TRANSMITTER_UNIPHY_D;
884 default:
885 return TRANSMITTER_UNKNOWN;
886 }
887 break;
888 case ENCODER_ID_INTERNAL_UNIPHY2:
889 switch (encoder.enum_id) {
890 case ENUM_ID_1:
891 return TRANSMITTER_UNIPHY_E;
892 case ENUM_ID_2:
893 return TRANSMITTER_UNIPHY_F;
894 default:
895 return TRANSMITTER_UNKNOWN;
896 }
897 break;
898 case ENCODER_ID_INTERNAL_UNIPHY3:
899 switch (encoder.enum_id) {
900 case ENUM_ID_1:
901 return TRANSMITTER_UNIPHY_G;
902 default:
903 return TRANSMITTER_UNKNOWN;
904 }
905 break;
906 case ENCODER_ID_EXTERNAL_NUTMEG:
907 switch (encoder.enum_id) {
908 case ENUM_ID_1:
909 return TRANSMITTER_NUTMEG_CRT;
910 default:
911 return TRANSMITTER_UNKNOWN;
912 }
913 break;
914 case ENCODER_ID_EXTERNAL_TRAVIS:
915 switch (encoder.enum_id) {
916 case ENUM_ID_1:
917 return TRANSMITTER_TRAVIS_CRT;
918 case ENUM_ID_2:
919 return TRANSMITTER_TRAVIS_LCD;
920 default:
921 return TRANSMITTER_UNKNOWN;
922 }
923 break;
924 default:
925 return TRANSMITTER_UNKNOWN;
926 }
927}
928
929static bool construct(
930 struct core_link *link,
931 const struct link_init_data *init_params)
932{
933 uint8_t i;
934 struct gpio *hpd_gpio = NULL;
935 struct ddc_service_init_data ddc_service_init_data = { 0 };
936 struct dc_context *dc_ctx = init_params->ctx;
937 struct encoder_init_data enc_init_data = { 0 };
938 struct integrated_info info = {{{ 0 }}};
939 struct dc_bios *bios = init_params->dc->ctx->dc_bios;
940 const struct dc_vbios_funcs *bp_funcs = bios->funcs;
941
942 link->public.irq_source_hpd = DC_IRQ_SOURCE_INVALID;
943 link->public.irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
944
945 link->link_status.dpcd_caps = &link->dpcd_caps;
946
947 link->dc = init_params->dc;
948 link->ctx = dc_ctx;
949 link->public.link_index = init_params->link_index;
950
951 link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
952
953 if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
954 dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
955 __func__, init_params->connector_index);
956 goto create_fail;
957 }
958
959 hpd_gpio = get_hpd_gpio(link);
960
961 if (hpd_gpio != NULL)
962 link->public.irq_source_hpd = dal_irq_get_source(hpd_gpio);
963
964 switch (link->link_id.id) {
965 case CONNECTOR_ID_HDMI_TYPE_A:
966 link->public.connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
967
968 break;
969 case CONNECTOR_ID_SINGLE_LINK_DVID:
970 case CONNECTOR_ID_SINGLE_LINK_DVII:
971 link->public.connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
972 break;
973 case CONNECTOR_ID_DUAL_LINK_DVID:
974 case CONNECTOR_ID_DUAL_LINK_DVII:
975 link->public.connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
976 break;
977 case CONNECTOR_ID_DISPLAY_PORT:
978 link->public.connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
979
980 if (hpd_gpio != NULL)
981 link->public.irq_source_hpd_rx =
982 dal_irq_get_rx_source(hpd_gpio);
983
984 break;
985 case CONNECTOR_ID_EDP:
986 link->public.connector_signal = SIGNAL_TYPE_EDP;
987
988 if (hpd_gpio != NULL) {
989 link->public.irq_source_hpd = DC_IRQ_SOURCE_INVALID;
990 link->public.irq_source_hpd_rx =
991 dal_irq_get_rx_source(hpd_gpio);
992 }
993 break;
994 default:
995 dm_logger_write(dc_ctx->logger, LOG_WARNING,
996 "Unsupported Connector type:%d!\n", link->link_id.id);
997 goto create_fail;
998 }
999
1000 if (hpd_gpio != NULL) {
1001 dal_gpio_destroy_irq(&hpd_gpio);
1002 hpd_gpio = NULL;
1003 }
1004
1005 /* TODO: #DAL3 Implement id to str function.*/
 1006 LINK_INFO("Connector[%d] description: "
1007 "signal %d\n",
1008 init_params->connector_index,
1009 link->public.connector_signal);
1010
1011 ddc_service_init_data.ctx = link->ctx;
1012 ddc_service_init_data.id = link->link_id;
1013 ddc_service_init_data.link = link;
1014 link->ddc = dal_ddc_service_create(&ddc_service_init_data);
1015
1016 if (NULL == link->ddc) {
1017 DC_ERROR("Failed to create ddc_service!\n");
1018 goto ddc_create_fail;
1019 }
1020
1021 link->public.ddc_hw_inst =
1022 dal_ddc_get_line(
1023 dal_ddc_service_get_ddc_pin(link->ddc));
1024
1025 enc_init_data.ctx = dc_ctx;
1026 bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
1027 enc_init_data.connector = link->link_id;
1028 enc_init_data.channel = get_ddc_line(link);
1029 enc_init_data.hpd_source = get_hpd_line(link);
1030 enc_init_data.transmitter =
1031 translate_encoder_to_transmitter(enc_init_data.encoder);
1032 link->link_enc = link->dc->res_pool->funcs->link_enc_create(
1033 &enc_init_data);
1034
 1035 if (link->link_enc == NULL) {
1036 DC_ERROR("Failed to create link encoder!\n");
1037 goto link_enc_create_fail;
1038 }
1039
1040 link->public.link_enc_hw_inst = link->link_enc->transmitter;
1041
1042 for (i = 0; i < 4; i++) {
1043 if (BP_RESULT_OK !=
1044 bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
1045 DC_ERROR("Failed to find device tag!\n");
1046 goto device_tag_fail;
1047 }
1048
 1049 /* Look for a device tag that matches the connector signal:
 1050 * CRT for RGB, LCD for the other supported signal types.
 1051 */
1052 if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
1053 continue;
1054 if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
1055 && link->public.connector_signal != SIGNAL_TYPE_RGB)
1056 continue;
1057 if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
1058 && link->public.connector_signal == SIGNAL_TYPE_RGB)
1059 continue;
1060 if (link->device_tag.dev_id.device_type == DEVICE_TYPE_WIRELESS
1061 && link->public.connector_signal != SIGNAL_TYPE_WIRELESS)
1062 continue;
1063 break;
1064 }
1065
1066 if (bios->integrated_info)
1067 info = *bios->integrated_info;
1068
1069 /* Look for channel mapping corresponding to connector and device tag */
1070 for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
1071 struct external_display_path *path =
1072 &info.ext_disp_conn_info.path[i];
1073 if (path->device_connector_id.enum_id == link->link_id.enum_id
1074 && path->device_connector_id.id == link->link_id.id
1075 && path->device_connector_id.type == link->link_id.type
1076 && path->device_acpi_enum
1077 == link->device_tag.acpi_device) {
1078 link->ddi_channel_mapping = path->channel_mapping;
1079 break;
1080 }
1081 }
1082
1083 /*
1084 * TODO check if GPIO programmed correctly
1085 *
1086 * If GPIO isn't programmed correctly HPD might not rise or drain
1087 * fast enough, leading to bounces.
1088 */
1089 program_hpd_filter(link);
1090
1091 return true;
1092device_tag_fail:
1093 link->link_enc->funcs->destroy(&link->link_enc);
1094link_enc_create_fail:
1095 dal_ddc_service_destroy(&link->ddc);
1096ddc_create_fail:
1097create_fail:
1098
1099 if (hpd_gpio != NULL) {
1100 dal_gpio_destroy_irq(&hpd_gpio);
1101 }
1102
1103 return false;
1104}
1105
1106/*******************************************************************************
1107 * Public functions
1108 ******************************************************************************/
1109struct core_link *link_create(const struct link_init_data *init_params)
1110{
1111 struct core_link *link =
1112 dm_alloc(sizeof(*link));
1113
1114 if (NULL == link)
1115 goto alloc_fail;
1116
1117 if (false == construct(link, init_params))
1118 goto construct_fail;
1119
1120 return link;
1121
1122construct_fail:
1123 dm_free(link);
1124
1125alloc_fail:
1126 return NULL;
1127}
1128
1129void link_destroy(struct core_link **link)
1130{
1131 destruct(*link);
1132 dm_free(*link);
1133 *link = NULL;
1134}
1135
1136static void dpcd_configure_panel_mode(
1137 struct core_link *link,
1138 enum dp_panel_mode panel_mode)
1139{
1140 union dpcd_edp_config edp_config_set;
1141 bool panel_mode_edp = false;
1142
1143 memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
1144
1145 if (DP_PANEL_MODE_DEFAULT != panel_mode) {
1146
1147 switch (panel_mode) {
1148 case DP_PANEL_MODE_EDP:
1149 case DP_PANEL_MODE_SPECIAL:
1150 panel_mode_edp = true;
1151 break;
1152
1153 default:
1154 break;
1155 }
1156
1157 /*set edp panel mode in receiver*/
1158 core_link_read_dpcd(
1159 link,
1160 DPCD_ADDRESS_EDP_CONFIG_SET,
1161 &edp_config_set.raw,
1162 sizeof(edp_config_set.raw));
1163
1164 if (edp_config_set.bits.PANEL_MODE_EDP
1165 != panel_mode_edp) {
1166 enum ddc_result result = DDC_RESULT_UNKNOWN;
1167
1168 edp_config_set.bits.PANEL_MODE_EDP =
1169 panel_mode_edp;
1170 result = core_link_write_dpcd(
1171 link,
1172 DPCD_ADDRESS_EDP_CONFIG_SET,
1173 &edp_config_set.raw,
1174 sizeof(edp_config_set.raw));
1175
1176 ASSERT(result == DDC_RESULT_SUCESSFULL);
1177 }
1178 }
1179 dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
1180 "Link: %d eDP panel mode supported: %d "
1181 "eDP panel mode enabled: %d \n",
1182 link->public.link_index,
1183 link->dpcd_caps.panel_mode_edp,
1184 panel_mode_edp);
1185}
1186
1187static void enable_stream_features(struct pipe_ctx *pipe_ctx)
1188{
1189 struct core_stream *stream = pipe_ctx->stream;
1190 struct core_link *link = stream->sink->link;
1191 union down_spread_ctrl downspread;
1192
1193 core_link_read_dpcd(link, DPCD_ADDRESS_DOWNSPREAD_CNTL,
1194 &downspread.raw, sizeof(downspread));
1195
1196 downspread.bits.IGNORE_MSA_TIMING_PARAM =
1197 (stream->public.ignore_msa_timing_param) ? 1 : 0;
1198
1199 core_link_write_dpcd(link, DPCD_ADDRESS_DOWNSPREAD_CNTL,
1200 &downspread.raw, sizeof(downspread));
1201}
1202
1203static enum dc_status enable_link_dp(struct pipe_ctx *pipe_ctx)
1204{
1205 struct core_stream *stream = pipe_ctx->stream;
1206 enum dc_status status;
1207 bool skip_video_pattern;
1208 struct core_link *link = stream->sink->link;
1209 struct dc_link_settings link_settings = {0};
1210 enum dp_panel_mode panel_mode;
1211 enum clocks_state cur_min_clock_state;
1212 enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
1213
1214 /* get link settings for video mode timing */
1215 decide_link_settings(stream, &link_settings);
1216
 1217 /* Raise clock state for HBR3 if required. Confirmed with HW:
 1218 * DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail.
 1219 */
1220 if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
1221 max_link_rate = LINK_RATE_HIGH3;
1222
1223 if (link_settings.link_rate == max_link_rate) {
1224 cur_min_clock_state = CLOCKS_STATE_INVALID;
1225
1226 if (dal_display_clock_get_min_clocks_state(
1227 pipe_ctx->dis_clk, &cur_min_clock_state)) {
1228 if (cur_min_clock_state < CLOCKS_STATE_NOMINAL)
1229 dal_display_clock_set_min_clocks_state(
1230 pipe_ctx->dis_clk,
1231 CLOCKS_STATE_NOMINAL);
1232 } else {
1233 }
1234 }
1235
1236 dp_enable_link_phy(
1237 link,
1238 pipe_ctx->stream->signal,
1239 pipe_ctx->clock_source->id,
1240 &link_settings);
1241
1242 panel_mode = dp_get_panel_mode(link);
1243 dpcd_configure_panel_mode(link, panel_mode);
1244
1245 skip_video_pattern = true;
1246
1247 if (link_settings.link_rate == LINK_RATE_LOW)
1248 skip_video_pattern = false;
1249
1250 if (perform_link_training_with_retries(
1251 link,
1252 &link_settings,
1253 skip_video_pattern,
1254 LINK_TRAINING_ATTEMPTS)) {
1255 link->public.cur_link_settings = link_settings;
1256 status = DC_OK;
1257 }
1258 else
1259 status = DC_ERROR_UNEXPECTED;
1260
1261 enable_stream_features(pipe_ctx);
1262
1263 return status;
1264}
1265
1266static enum dc_status enable_link_dp_mst(struct pipe_ctx *pipe_ctx)
1267{
1268 struct core_link *link = pipe_ctx->stream->sink->link;
1269
 1270 /* The sink signal type behind an MST branch is MST. Multiple MST sinks
 1271 * share one link; the link's DP PHY is enabled and trained only once.
 1272 */
1273 if (link->public.cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
1274 return DC_OK;
1275
1276 return enable_link_dp(pipe_ctx);
1277}
1278
1279static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1280{
1281 struct core_stream *stream = pipe_ctx->stream;
1282 struct core_link *link = stream->sink->link;
1283
1284 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
1285 dal_ddc_service_write_scdc_data(
1286 stream->sink->link->ddc,
1287 stream->phy_pix_clk,
1288 stream->public.timing.flags.LTE_340MCSC_SCRAMBLE);
1289
1290 memset(&stream->sink->link->public.cur_link_settings, 0,
1291 sizeof(struct dc_link_settings));
1292
1293 link->link_enc->funcs->enable_tmds_output(
1294 link->link_enc,
1295 pipe_ctx->clock_source->id,
1296 stream->public.timing.display_color_depth,
1297 pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
1298 pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
1299 stream->phy_pix_clk);
1300
1301 if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
1302 dal_ddc_service_read_scdc_data(link->ddc);
1303}
1304
1305/****************************enable_link***********************************/
1306static enum dc_status enable_link(struct pipe_ctx *pipe_ctx)
1307{
1308 enum dc_status status = DC_ERROR_UNEXPECTED;
1309 switch (pipe_ctx->stream->signal) {
1310 case SIGNAL_TYPE_DISPLAY_PORT:
1311 case SIGNAL_TYPE_EDP:
1312 status = enable_link_dp(pipe_ctx);
1313 break;
1314 case SIGNAL_TYPE_DISPLAY_PORT_MST:
1315 status = enable_link_dp_mst(pipe_ctx);
1316 msleep(200);
1317 break;
1318 case SIGNAL_TYPE_DVI_SINGLE_LINK:
1319 case SIGNAL_TYPE_DVI_DUAL_LINK:
1320 case SIGNAL_TYPE_HDMI_TYPE_A:
1321 enable_link_hdmi(pipe_ctx);
1322 status = DC_OK;
1323 break;
1324 case SIGNAL_TYPE_VIRTUAL:
1325 status = DC_OK;
1326 break;
1327 default:
1328 break;
1329 }
1330
1331 if (pipe_ctx->audio && status == DC_OK) {
1332 /* notify audio driver for audio modes of monitor */
1333 pipe_ctx->audio->funcs->az_enable(pipe_ctx->audio);
1334
1335 /* un-mute audio */
1336 /* TODO: audio should be per stream rather than per link */
1337 pipe_ctx->stream_enc->funcs->audio_mute_control(
1338 pipe_ctx->stream_enc, false);
1339 }
1340
1341 return status;
1342}
1343
1344static void disable_link(struct core_link *link, enum signal_type signal)
1345{
1346 /*
1347 * TODO: implement call for dp_set_hw_test_pattern
1348 * it is needed for compliance testing
1349 */
1350
 1351 /* Here we need to specify that encoder output settings
 1352 * are to be calculated as for the set mode; this leads to
 1353 * querying dynamic link capabilities, which should be done
 1354 * before enabling output. */
1355
1356 if (dc_is_dp_signal(signal)) {
1357 /* SST DP, eDP */
1358 if (dc_is_dp_sst_signal(signal))
1359 dp_disable_link_phy(link, signal);
1360 else
1361 dp_disable_link_phy_mst(link, signal);
1362 } else
1363 link->link_enc->funcs->disable_output(link->link_enc, signal);
1364}
1365
1366enum dc_status dc_link_validate_mode_timing(
1367 const struct core_stream *stream,
1368 struct core_link *link,
1369 const struct dc_crtc_timing *timing)
1370{
1371 uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
1372
 1373 /* A hack to avoid failing any modes for the EDID override feature on
 1374 * topology changes, such as a lower-quality DP cable or a different dongle.
 1375 */
1376 if (link->public.remote_sinks[0])
1377 return DC_OK;
1378
1379 if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
1380 return DC_EXCEED_DONGLE_MAX_CLK;
1381
1382 switch (stream->signal) {
1383 case SIGNAL_TYPE_EDP:
1384 case SIGNAL_TYPE_DISPLAY_PORT:
1385 if (!dp_validate_mode_timing(
1386 link,
1387 timing))
1388 return DC_NO_DP_LINK_BANDWIDTH;
1389 break;
1390
1391 default:
1392 break;
1393 }
1394
1395 return DC_OK;
1396}
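
As a concrete illustration of the dongle clock check above (the timings are examples, not values from this driver): a passive dual-mode adaptor reporting a 165,000 kHz maximum TMDS clock would reject a 4K60 timing at roughly 594,000 kHz with DC_EXCEED_DONGLE_MAX_CLK, while a 1080p60 timing at 148,500 kHz would pass and fall through to the per-signal validation.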
1397
1398
1399bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
1400 uint32_t frame_ramp, const struct dc_stream *stream)
1401{
1402 struct core_link *link = DC_LINK_TO_CORE(dc_link);
1403 struct dc_context *ctx = link->ctx;
1404 struct core_dc *core_dc = DC_TO_CORE(ctx->dc);
1405 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
1406 unsigned int controller_id = 0;
1407 int i;
1408 uint32_t dmcu_status;
1409
1410 dm_logger_write(ctx->logger, LOG_BACKLIGHT,
1411 "New Backlight level: %d (0x%X)\n", level, level);
1412
1413 dmcu_status = dm_read_reg(ctx, mmDMCU_STATUS);
1414
1415 /* If DMCU is in reset state, DMCU is uninitialized */
1416 if (get_reg_field_value(dmcu_status, mmDMCU_STATUS, UC_IN_RESET)) {
1417 link->link_enc->funcs->set_lcd_backlight_level(link->link_enc,
1418 level);
1419 } else {
1420 for (i = 0; i < MAX_PIPES; i++) {
1421 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
1422 == core_stream)
 1423 /* DMCU subtracts 1 from all controller id values,
 1424 * therefore +1 here
 1425 */
1426 controller_id = core_dc->current_context->res_ctx.
1427 pipe_ctx[i].tg->inst + 1;
1428 }
1429
1430 link->link_enc->funcs->set_dmcu_backlight_level
1431 (link->link_enc, level,
1432 frame_ramp, controller_id);
1433 }
1434 return true;
1435}
1436
1437
1438bool dc_link_init_dmcu_backlight_settings(const struct dc_link *dc_link)
1439{
1440 struct core_link *link = DC_LINK_TO_CORE(dc_link);
1441
1442 if (link->link_enc->funcs->init_dmcu_backlight_settings != NULL)
1443 link->link_enc->funcs->
1444 init_dmcu_backlight_settings(link->link_enc);
1445
1446 return true;
1447}
1448
1449bool dc_link_set_abm_level(const struct dc_link *dc_link, uint32_t level)
1450{
1451 struct core_link *link = DC_LINK_TO_CORE(dc_link);
1452 struct dc_context *ctx = link->ctx;
1453
1454 dm_logger_write(ctx->logger, LOG_BACKLIGHT,
1455 "New abm level: %d (0x%X)\n", level, level);
1456
1457 link->link_enc->funcs->set_dmcu_abm_level(link->link_enc, level);
1458 return true;
1459}
1460
1461bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable)
1462{
1463 struct core_link *link = DC_LINK_TO_CORE(dc_link);
1464
1465 if (dc_link != NULL && dc_link->psr_caps.psr_version > 0)
1466 link->link_enc->funcs->set_dmcu_psr_enable(link->link_enc,
1467 enable);
1468 return true;
1469}
1470
1471bool dc_link_setup_psr(const struct dc_link *dc_link,
1472 const struct dc_stream *stream)
1473{
1474
1475 struct core_link *link = DC_LINK_TO_CORE(dc_link);
1476 struct dc_context *ctx = link->ctx;
1477 struct core_dc *core_dc = DC_TO_CORE(ctx->dc);
1478 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
1479 struct psr_dmcu_context psr_context = {0};
1480 int i;
1481
1482 psr_context.controllerId = CONTROLLER_ID_UNDEFINED;
1483
1484
1485 if (dc_link != NULL && dc_link->psr_caps.psr_version > 0) {
1486 /* updateSinkPsrDpcdConfig*/
1487 union dpcd_psr_configuration psr_configuration;
1488
1489 memset(&psr_configuration, 0, sizeof(psr_configuration));
1490
1491 psr_configuration.bits.ENABLE = 1;
1492 psr_configuration.bits.CRC_VERIFICATION = 1;
1493 psr_configuration.bits.FRAME_CAPTURE_INDICATION =
1494 dc_link->psr_caps.psr_frame_capture_indication_req;
1495
1496 /* Check for PSR v2*/
1497 if (dc_link->psr_caps.psr_version == 0x2) {
1498 /* For PSR v2 selective update.
1499 * Indicates whether sink should start capturing
1500 * immediately following active scan line,
1501 * or starting with the 2nd active scan line.
1502 */
1503 psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
1504 /*For PSR v2, determines whether Sink should generate
1505 * IRQ_HPD when CRC mismatch is detected.
1506 */
1507 psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
1508 }
1509 dal_ddc_service_write_dpcd_data(
1510 link->ddc,
1511 368,
1512 &psr_configuration.raw,
1513 sizeof(psr_configuration.raw));
1514
1515 psr_context.channel = link->ddc->ddc_pin->hw_info.ddc_channel;
1516 if (psr_context.channel == 0)
1517 psr_context.channel = 1;
1518 psr_context.transmitterId = link->link_enc->transmitter;
1519 psr_context.engineId = link->link_enc->preferred_engine;
1520
1521 for (i = 0; i < MAX_PIPES; i++) {
1522 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
1523 == core_stream) {
 1524 /* DMCU subtracts 1 from all controller id values,
 1525 * therefore +1 here
 1526 */
1527 psr_context.controllerId =
1528 core_dc->current_context->res_ctx.
1529 pipe_ctx[i].tg->inst + 1;
1530 break;
1531 }
1532 }
1533
1534 /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
1535 psr_context.phyType = PHY_TYPE_UNIPHY;
1536 /*PhyId is associated with the transmitter id*/
1537 psr_context.smuPhyId = link->link_enc->transmitter;
1538
1539 psr_context.crtcTimingVerticalTotal = stream->timing.v_total;
1540 psr_context.vsyncRateHz = div64_u64(div64_u64((stream->
1541 timing.pix_clk_khz * 1000),
1542 stream->timing.v_total),
1543 stream->timing.h_total);
1544
1545 psr_context.psrSupportedDisplayConfig =
1546 (dc_link->psr_caps.psr_version > 0) ? true : false;
1547 psr_context.psrExitLinkTrainingRequired =
1548 dc_link->psr_caps.psr_exit_link_training_required;
1549 psr_context.sdpTransmitLineNumDeadline =
1550 dc_link->psr_caps.psr_sdp_transmit_line_num_deadline;
1551 psr_context.psrFrameCaptureIndicationReq =
1552 dc_link->psr_caps.psr_frame_capture_indication_req;
1553
1554 psr_context.skipPsrWaitForPllLock = 0; /* only = 1 in KV */
1555
1556 psr_context.numberOfControllers =
1557 link->dc->res_pool->res_cap->num_timing_generator;
1558
1559 psr_context.rfb_update_auto_en = true;
1560
1561 /* 2 frames before enter PSR. */
1562 psr_context.timehyst_frames = 2;
1563 /* half a frame
1564 * (units in 100 lines, i.e. a value of 1 represents 100 lines)
1565 */
1566 psr_context.hyst_lines = stream->timing.v_total / 2 / 100;
1567 psr_context.aux_repeats = 10;
1568
1569 psr_context.psr_level.u32all = 0;
1570
1571 /* SMU will perform additional powerdown sequence.
1572 * For unsupported ASICs, set psr_level flag to skip PSR
1573 * static screen notification to SMU.
1574 * (Always set for DAL2, did not check ASIC)
1575 */
1576 psr_context.psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
1577
1578 /* Controls additional delay after remote frame capture before
1579 * continuing power down, default = 0
1580 */
1581 psr_context.frame_delay = 0;
1582
1583 link->link_enc->funcs->setup_dmcu_psr
1584 (link->link_enc, &psr_context);
1585 return true;
1586 } else
1587 return false;
1588
1589}
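
To make the vsyncRateHz and hyst_lines arithmetic in dc_link_setup_psr() above concrete, here is a standalone sketch using a common 1080p CEA-861 timing (148,500 kHz pixel clock, 2200 x 1125 total). The timing values are assumptions chosen for illustration, not taken from the driver.

#include <stdio.h>

/* Mirrors the vsyncRateHz and hyst_lines integer math in
 * dc_link_setup_psr() for one example timing. */
int main(void)
{
	unsigned long long pix_clk_khz = 148500; /* assumed 1080p60 timing */
	unsigned long long h_total = 2200;
	unsigned long long v_total = 1125;

	/* vsyncRateHz = pix_clk_khz * 1000 / v_total / h_total */
	unsigned long long vsync_rate_hz =
		pix_clk_khz * 1000 / v_total / h_total;

	/* hyst_lines: half a frame, in units of 100 lines */
	unsigned long long hyst_lines = v_total / 2 / 100;

	printf("vsyncRateHz = %llu\n", vsync_rate_hz); /* 60 */
	printf("hyst_lines  = %llu\n", hyst_lines);    /* 5 */
	return 0;
}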
1590
1591const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link)
1592{
1593 struct core_link *link = DC_LINK_TO_CORE(dc_link);
1594
1595 return &link->link_status;
1596}
1597
1598void core_link_resume(struct core_link *link)
1599{
1600 if (link->public.connector_signal != SIGNAL_TYPE_VIRTUAL)
1601 program_hpd_filter(link);
1602}
1603
1604static struct fixed31_32 get_pbn_per_slot(struct core_stream *stream)
1605{
1606 struct dc_link_settings *link_settings =
1607 &stream->sink->link->public.cur_link_settings;
1608 uint32_t link_rate_in_mbps =
1609 link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
1610 struct fixed31_32 mbps = dal_fixed31_32_from_int(
1611 link_rate_in_mbps * link_settings->lane_count);
1612
1613 return dal_fixed31_32_div_int(mbps, 54);
1614}
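
A worked example of get_pbn_per_slot() above, assuming the usual DC conventions that the link_rate enum holds the DPCD bandwidth code (0x14 for HBR2, 5.4 Gbps) and that LINK_RATE_REF_FREQ_IN_MHZ is 27; those constants are assumptions stated here, not definitions taken from this file. With them, HBR2 x4 yields 40 PBN per time slot.

#include <stdio.h>

/* Standalone mirror of the math in get_pbn_per_slot(). */
int main(void)
{
	int link_rate_code = 0x14;	/* assumed HBR2 (5.4 Gbps) code */
	int ref_freq_mhz = 27;		/* assumed LINK_RATE_REF_FREQ_IN_MHZ */
	int lane_count = 4;

	/* 20 * 27 = 540 MBytes/sec per lane after 8b/10b; x4 lanes = 2160 */
	int mbps = link_rate_code * ref_freq_mhz * lane_count;

	/* 2160 / 54 = 40 PBN per MTP time slot (64 slots per MTP) */
	printf("pbn_per_slot = %d\n", mbps / 54);
	return 0;
}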
1615
1616static int get_color_depth(enum dc_color_depth color_depth)
1617{
1618 switch (color_depth) {
1619 case COLOR_DEPTH_666: return 6;
1620 case COLOR_DEPTH_888: return 8;
1621 case COLOR_DEPTH_101010: return 10;
1622 case COLOR_DEPTH_121212: return 12;
1623 case COLOR_DEPTH_141414: return 14;
1624 case COLOR_DEPTH_161616: return 16;
1625 default: return 0;
1626 }
1627}
1628
1629static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
1630{
1631 uint32_t bpc;
1632 uint64_t kbps;
1633 struct fixed31_32 peak_kbps;
1634 uint32_t numerator;
1635 uint32_t denominator;
1636
1637 bpc = get_color_depth(pipe_ctx->pix_clk_params.color_depth);
1638 kbps = pipe_ctx->pix_clk_params.requested_pix_clk * bpc * 3;
1639
1640 /*
 1641 * Margin 5300 ppm + 300 ppm ~ 0.6% as per spec; the factor is 1.006.
 1642 * The unit of 54/64 MBytes/sec is an arbitrary unit chosen as a
 1643 * common multiplier to render an integer PBN for all link rate/lane
 1644 * count combinations.
 1645 * Calculation:
 1646 * peak_kbps *= (1006/1000)
 1647 * peak_kbps *= (64/54)
 1648 * peak_kbps /= 8 to convert to bytes
1649 */
1650
1651 numerator = 64 * PEAK_FACTOR_X1000;
1652 denominator = 54 * 8 * 1000 * 1000;
1653 kbps *= numerator;
1654 peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
1655
1656 return peak_kbps;
1657}
1658
1659static void update_mst_stream_alloc_table(
1660 struct core_link *link,
1661 struct stream_encoder *stream_enc,
1662 const struct dp_mst_stream_allocation_table *proposed_table)
1663{
1664 struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
1665 { 0 } };
1666 struct link_mst_stream_allocation *dc_alloc;
1667
1668 int i;
1669 int j;
1670
 1671 /* The DRM proposed_table should not add more than one new payload */
1672 ASSERT(proposed_table->stream_count -
1673 link->mst_stream_alloc_table.stream_count < 2);
1674
1675 /* copy proposed_table to core_link, add stream encoder */
1676 for (i = 0; i < proposed_table->stream_count; i++) {
1677
1678 for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
1679 dc_alloc =
1680 &link->mst_stream_alloc_table.stream_allocations[j];
1681
1682 if (dc_alloc->vcp_id ==
1683 proposed_table->stream_allocations[i].vcp_id) {
1684
1685 work_table[i] = *dc_alloc;
1686 break; /* exit j loop */
1687 }
1688 }
1689
1690 /* new vcp_id */
1691 if (j == link->mst_stream_alloc_table.stream_count) {
1692 work_table[i].vcp_id =
1693 proposed_table->stream_allocations[i].vcp_id;
1694 work_table[i].slot_count =
1695 proposed_table->stream_allocations[i].slot_count;
1696 work_table[i].stream_enc = stream_enc;
1697 }
1698 }
1699
1700 /* update link->mst_stream_alloc_table with work_table */
1701 link->mst_stream_alloc_table.stream_count =
1702 proposed_table->stream_count;
1703 for (i = 0; i < MAX_CONTROLLER_NUM; i++)
1704 link->mst_stream_alloc_table.stream_allocations[i] =
1705 work_table[i];
1706}
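
A small hypothetical example of the merge above: if the link table currently holds { vcp_id 1, 13 slots, enc A } and DRM proposes { vcp_id 1, 13 slots } plus a new { vcp_id 2, 13 slots }, the inner loop copies the existing vcp_id 1 entry (preserving its stream encoder) into work_table[0], while the new-vcp_id branch fills work_table[1] with vcp_id 2, its slot count, and the stream encoder passed in; the link table is then overwritten with the two-entry work table.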
1707
1708/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
1709 * because stream_encoder is not exposed to dm
1710 */
1711static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
1712{
1713 struct core_stream *stream = pipe_ctx->stream;
1714 struct core_link *link = stream->sink->link;
1715 struct link_encoder *link_encoder = link->link_enc;
1716 struct stream_encoder *stream_encoder = pipe_ctx->stream_enc;
1717 struct dp_mst_stream_allocation_table proposed_table = {0};
1718 struct fixed31_32 avg_time_slots_per_mtp;
1719 struct fixed31_32 pbn;
1720 struct fixed31_32 pbn_per_slot;
1721 uint8_t i;
1722
 1723 /* enable_link_dp_mst has already checked link->enabled_stream_count
 1724 * and that the stream is in link->stream[]. This is called during set
 1725 * mode, so stream_enc is available.
 1726 */
1727
 1728 /* calculate the VC payload for the stream: stream_alloc */
1729 if (dm_helpers_dp_mst_write_payload_allocation_table(
1730 stream->ctx,
1731 &stream->public,
1732 &proposed_table,
1733 true)) {
1734 update_mst_stream_alloc_table(
1735 link, pipe_ctx->stream_enc, &proposed_table);
1736 }
1737 else
1738 dm_logger_write(link->ctx->logger, LOG_WARNING,
1739 "Failed to update"
1740 "MST allocation table for"
1741 "pipe idx:%d\n",
1742 pipe_ctx->pipe_idx);
1743
1744 dm_logger_write(link->ctx->logger, LOG_MST,
1745 "%s "
1746 "stream_count: %d: \n ",
1747 __func__,
1748 link->mst_stream_alloc_table.stream_count);
1749
1750 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
1751 dm_logger_write(link->ctx->logger, LOG_MST,
1752 "stream_enc[%d]: 0x%x "
1753 "stream[%d].vcp_id: %d "
1754 "stream[%d].slot_count: %d\n",
1755 i,
1756 link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
1757 i,
1758 link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
1759 i,
1760 link->mst_stream_alloc_table.stream_allocations[i].slot_count);
1761 }
1762
1763 ASSERT(proposed_table.stream_count > 0);
1764
1765 /* program DP source TX for payload */
1766 link_encoder->funcs->update_mst_stream_allocation_table(
1767 link_encoder,
1768 &link->mst_stream_alloc_table);
1769
1770 /* send down message */
1771 dm_helpers_dp_mst_poll_for_allocation_change_trigger(
1772 stream->ctx,
1773 &stream->public);
1774
1775 dm_helpers_dp_mst_send_payload_allocation(
1776 stream->ctx,
1777 &stream->public,
1778 true);
1779
1780 /* slot X.Y for only current stream */
1781 pbn_per_slot = get_pbn_per_slot(stream);
1782 pbn = get_pbn_from_timing(pipe_ctx);
1783 avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
1784
1785 stream_encoder->funcs->set_mst_bandwidth(
1786 stream_encoder,
1787 avg_time_slots_per_mtp);
1788
1789 return DC_OK;
1790
1791}
1792
1793static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
1794{
1795 struct core_stream *stream = pipe_ctx->stream;
1796 struct core_link *link = stream->sink->link;
1797 struct link_encoder *link_encoder = link->link_enc;
1798 struct stream_encoder *stream_encoder = pipe_ctx->stream_enc;
1799 struct dp_mst_stream_allocation_table proposed_table = {0};
1800 struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
1801 uint8_t i;
1802 bool mst_mode = (link->public.type == dc_connection_mst_branch);
1803
 1804 /* deallocate_mst_payload is called before disabling the link. On a mode
 1805 * change or monitor disable/enable, a new stream is created which is not
 1806 * in link->stream[] yet; its payload is not allocated yet, so it must not
 1807 * be de-allocated. For a new mode set, map_resources will get an engine
 1808 * for the new stream, so stream_enc->id should be valid by this point.
 1809 */
1810
1811 /* slot X.Y */
1812 stream_encoder->funcs->set_mst_bandwidth(
1813 stream_encoder,
1814 avg_time_slots_per_mtp);
1815
1816 /* TODO: which component is responsible for remove payload table? */
1817 if (mst_mode) {
1818 if (dm_helpers_dp_mst_write_payload_allocation_table(
1819 stream->ctx,
1820 &stream->public,
1821 &proposed_table,
1822 false)) {
1823
1824 update_mst_stream_alloc_table(
1825 link, pipe_ctx->stream_enc, &proposed_table);
1826 }
1827 else {
1828 dm_logger_write(link->ctx->logger, LOG_WARNING,
1829 "Failed to update"
1830 "MST allocation table for"
1831 "pipe idx:%d\n",
1832 pipe_ctx->pipe_idx);
1833 }
1834 }
1835
1836 dm_logger_write(link->ctx->logger, LOG_MST,
1837 "%s"
1838 "stream_count: %d: ",
1839 __func__,
1840 link->mst_stream_alloc_table.stream_count);
1841
1842 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
1843 dm_logger_write(link->ctx->logger, LOG_MST,
1844 "stream_enc[%d]: 0x%x "
1845 "stream[%d].vcp_id: %d "
1846 "stream[%d].slot_count: %d\n",
1847 i,
1848 link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
1849 i,
1850 link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
1851 i,
1852 link->mst_stream_alloc_table.stream_allocations[i].slot_count);
1853 }
1854
1855 link_encoder->funcs->update_mst_stream_allocation_table(
1856 link_encoder,
1857 &link->mst_stream_alloc_table);
1858
1859 if (mst_mode) {
1860 dm_helpers_dp_mst_poll_for_allocation_change_trigger(
1861 stream->ctx,
1862 &stream->public);
1863
1864 dm_helpers_dp_mst_send_payload_allocation(
1865 stream->ctx,
1866 &stream->public,
1867 false);
1868 }
1869
1870 return DC_OK;
1871}
1872
1873void core_link_enable_stream(struct pipe_ctx *pipe_ctx)
1874{
1875 struct core_dc *core_dc = DC_TO_CORE(pipe_ctx->stream->ctx->dc);
1876
1877 if (DC_OK != enable_link(pipe_ctx)) {
1878 BREAK_TO_DEBUGGER();
1879 return;
1880 }
1881
1882 core_dc->hwss.enable_stream(pipe_ctx);
1883
1884 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
1885 allocate_mst_payload(pipe_ctx);
1886}
1887
1888void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
1889{
1890 struct core_dc *core_dc = DC_TO_CORE(pipe_ctx->stream->ctx->dc);
1891
1892 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
1893 deallocate_mst_payload(pipe_ctx);
1894
1895 core_dc->hwss.disable_stream(pipe_ctx);
1896
1897 disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
1898}
1899
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
new file mode 100644
index 000000000000..6379ccfdb06e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -0,0 +1,1098 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dm_helpers.h"
28#include "gpio_service_interface.h"
29#include "include/ddc_service_types.h"
30#include "include/grph_object_id.h"
31#include "include/dpcd_defs.h"
32#include "include/logger_interface.h"
33#include "include/vector.h"
34#include "core_types.h"
35#include "dc_link_ddc.h"
36
37#define AUX_POWER_UP_WA_DELAY 500
38#define I2C_OVER_AUX_DEFER_WA_DELAY 70
39
40/* CV smart dongle slave address for retrieving supported HDTV modes*/
41#define CV_SMART_DONGLE_ADDRESS 0x20
42/* DVI-HDMI dongle slave address for retrieving dongle signature*/
43#define DVI_HDMI_DONGLE_ADDRESS 0x68
44static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
45struct dvi_hdmi_dongle_signature_data {
46 int8_t vendor[3];/* "AMD" */
47 uint8_t version[2];
48 uint8_t size;
49 int8_t id[11];/* "6140063500G"*/
50};
51/* DP-HDMI dongle slave address for retrieving dongle signature*/
52#define DP_HDMI_DONGLE_ADDRESS 0x40
53static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
54#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
55
56struct dp_hdmi_dongle_signature_data {
57 int8_t id[15];/* "DP-HDMI ADAPTOR"*/
 58 uint8_t eot;/* end of transmission '\x4' */
59};
60
61/* Address range from 0x00 to 0x1F.*/
62#define DP_ADAPTOR_TYPE2_SIZE 0x20
63#define DP_ADAPTOR_TYPE2_REG_ID 0x10
64#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
65/* Identifies adaptor as Dual-mode adaptor */
66#define DP_ADAPTOR_TYPE2_ID 0xA0
67/* MHz*/
68#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
69/* MHz*/
70#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
71/* kHZ*/
72#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
73/* kHZ*/
74#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
75
76#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
77
78enum edid_read_result {
79 EDID_READ_RESULT_EDID_MATCH = 0,
80 EDID_READ_RESULT_EDID_MISMATCH,
81 EDID_READ_RESULT_CHECKSUM_READ_ERR,
82 EDID_READ_RESULT_VENDOR_READ_ERR
83};
84
85/* SCDC Address defines (HDMI 2.0)*/
86#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
87#define HDMI_SCDC_ADDRESS 0x54
88#define HDMI_SCDC_SINK_VERSION 0x01
89#define HDMI_SCDC_SOURCE_VERSION 0x02
90#define HDMI_SCDC_UPDATE_0 0x10
91#define HDMI_SCDC_TMDS_CONFIG 0x20
92#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
93#define HDMI_SCDC_CONFIG_0 0x30
94#define HDMI_SCDC_STATUS_FLAGS 0x40
95#define HDMI_SCDC_ERR_DETECT 0x50
96#define HDMI_SCDC_TEST_CONFIG 0xC0
97
98union hdmi_scdc_update_read_data {
99 uint8_t byte[2];
100 struct {
101 uint8_t STATUS_UPDATE:1;
102 uint8_t CED_UPDATE:1;
103 uint8_t RR_TEST:1;
104 uint8_t RESERVED:5;
105 uint8_t RESERVED2:8;
106 } fields;
107};
108
109union hdmi_scdc_status_flags_data {
110 uint8_t byte[2];
111 struct {
112 uint8_t CLOCK_DETECTED:1;
113 uint8_t CH0_LOCKED:1;
114 uint8_t CH1_LOCKED:1;
115 uint8_t CH2_LOCKED:1;
116 uint8_t RESERVED:4;
117 uint8_t RESERVED2:8;
118 } fields;
119};
120
121union hdmi_scdc_ced_data {
122 uint8_t byte[7];
123 struct {
124 uint8_t CH0_8LOW:8;
125 uint8_t CH0_7HIGH:7;
126 uint8_t CH0_VALID:1;
127 uint8_t CH1_8LOW:8;
128 uint8_t CH1_7HIGH:7;
129 uint8_t CH1_VALID:1;
130 uint8_t CH2_8LOW:8;
131 uint8_t CH2_7HIGH:7;
132 uint8_t CH2_VALID:1;
133 uint8_t CHECKSUM:8;
134 } fields;
135};
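
The character-error-detection (CED) counters above split each 15-bit count across a low byte and a 7-bit high field plus a valid bit. A minimal sketch of reassembling channel 0's count from the raw bytes, following the bitfield layout of union hdmi_scdc_ced_data (byte 0 = low 8 bits, byte 1 = high 7 bits plus the valid flag); the helper name is illustrative.

#include <stdbool.h>
#include <stdint.h>

/* Reassemble the channel-0 error counter from the first two CED bytes. */
static uint16_t scdc_ch0_err_count(const uint8_t ced[7], bool *valid)
{
	*valid = (ced[1] & 0x80) != 0;                          /* CH0_VALID */
	return (uint16_t)(((ced[1] & 0x7f) << 8) | ced[0]);     /* CH0 count */
}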
136
137union hdmi_scdc_test_config_Data {
138 uint8_t byte;
139 struct {
140 uint8_t TEST_READ_REQUEST_DELAY:7;
141 uint8_t TEST_READ_REQUEST: 1;
142 } fields;
143};
144
145struct i2c_payloads {
146 struct vector payloads;
147};
148
149struct aux_payloads {
150 struct vector payloads;
151};
152
153struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
154{
155 struct i2c_payloads *payloads;
156
157 payloads = dm_alloc(sizeof(struct i2c_payloads));
158
159 if (!payloads)
160 return NULL;
161
162 if (dal_vector_construct(
163 &payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
164 return payloads;
165
166 dm_free(payloads);
167 return NULL;
168
169}
170
171struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
172{
173 return (struct i2c_payload *)p->payloads.container;
174}
175
176uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
177{
178 return p->payloads.count;
179}
180
181void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
182{
183 if (!p || !*p)
184 return;
185 dal_vector_destruct(&(*p)->payloads);
186 dm_free(*p);
187 *p = NULL;
188
189}
190
191struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
192{
193 struct aux_payloads *payloads;
194
195 payloads = dm_alloc(sizeof(struct aux_payloads));
196
197 if (!payloads)
198 return NULL;
199
200 if (dal_vector_construct(
201 &payloads->payloads, ctx, count, sizeof(struct aux_payloads)))
202 return payloads;
203
204 dm_free(payloads);
205 return NULL;
206}
207
208struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
209{
210 return (struct aux_payload *)p->payloads.container;
211}
212
213uint32_t dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
214{
215 return p->payloads.count;
216}
217
218void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
219{
220 if (!p || !*p)
221 return;
222
223 dal_vector_destruct(&(*p)->payloads);
224 dm_free(*p);
225 *p = NULL;
226}
227
228#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
229
230void dal_ddc_i2c_payloads_add(
231 struct i2c_payloads *payloads,
232 uint32_t address,
233 uint32_t len,
234 uint8_t *data,
235 bool write)
236{
237 uint32_t payload_size = EDID_SEGMENT_SIZE;
238 uint32_t pos;
239
240 for (pos = 0; pos < len; pos += payload_size) {
241 struct i2c_payload payload = {
242 .write = write,
243 .address = address,
244 .length = DDC_MIN(payload_size, len - pos),
245 .data = data + pos };
246 dal_vector_append(&payloads->payloads, &payload);
247 }
248
249}
250
251void dal_ddc_aux_payloads_add(
252 struct aux_payloads *payloads,
253 uint32_t address,
254 uint32_t len,
255 uint8_t *data,
256 bool write)
257{
258 uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
259 uint32_t pos;
260
261 for (pos = 0; pos < len; pos += payload_size) {
262 struct aux_payload payload = {
263 .i2c_over_aux = true,
264 .write = write,
265 .address = address,
266 .length = DDC_MIN(payload_size, len - pos),
267 .data = data + pos };
268 dal_vector_append(&payloads->payloads, &payload);
269 }
270}
271
272static bool construct(
273 struct ddc_service *ddc_service,
274 struct ddc_service_init_data *init_data)
275{
276 enum connector_id connector_id =
277 dal_graphics_object_id_get_connector_id(init_data->id);
278
279 struct gpio_service *gpio_service = init_data->ctx->gpio_service;
280 struct graphics_object_i2c_info i2c_info;
281 struct gpio_ddc_hw_info hw_info;
282 struct dc_bios *dcb = init_data->ctx->dc_bios;
283
284 ddc_service->link = init_data->link;
285 ddc_service->ctx = init_data->ctx;
286
287 if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
288 ddc_service->ddc_pin = NULL;
289 } else {
290 hw_info.ddc_channel = i2c_info.i2c_line;
291 hw_info.hw_supported = i2c_info.i2c_hw_assist;
292
293 ddc_service->ddc_pin = dal_gpio_create_ddc(
294 gpio_service,
295 i2c_info.gpio_info.clk_a_register_index,
296 1 << i2c_info.gpio_info.clk_a_shift,
297 &hw_info);
298 }
299
300 ddc_service->flags.EDID_QUERY_DONE_ONCE = false;
301 ddc_service->flags.FORCE_READ_REPEATED_START = false;
302 ddc_service->flags.EDID_STRESS_READ = false;
303
304 ddc_service->flags.IS_INTERNAL_DISPLAY =
305 connector_id == CONNECTOR_ID_EDP ||
306 connector_id == CONNECTOR_ID_LVDS;
307
308 ddc_service->wa.raw = 0;
309 return true;
310}
311
312struct ddc_service *dal_ddc_service_create(
313 struct ddc_service_init_data *init_data)
314{
315 struct ddc_service *ddc_service;
316
317 ddc_service = dm_alloc(sizeof(struct ddc_service));
318
319 if (!ddc_service)
320 return NULL;
321
322 if (construct(ddc_service, init_data))
323 return ddc_service;
324
325 dm_free(ddc_service);
326 return NULL;
327}
328
329static void destruct(struct ddc_service *ddc)
330{
331 if (ddc->ddc_pin)
332 dal_gpio_destroy_ddc(&ddc->ddc_pin);
333}
334
335void dal_ddc_service_destroy(struct ddc_service **ddc)
336{
337 if (!ddc || !*ddc) {
338 BREAK_TO_DEBUGGER();
339 return;
340 }
341 destruct(*ddc);
342 dm_free(*ddc);
343 *ddc = NULL;
344}
345
346enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
347{
348 return DDC_SERVICE_TYPE_CONNECTOR;
349}
350
351void dal_ddc_service_set_transaction_type(
352 struct ddc_service *ddc,
353 enum ddc_transaction_type type)
354{
355 ddc->transaction_type = type;
356}
357
358bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
359{
360 switch (ddc->transaction_type) {
361 case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
362 case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
363 case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER:
364 return true;
365 default:
366 break;
367 }
368 return false;
369}
370
371void ddc_service_set_dongle_type(struct ddc_service *ddc,
372 enum display_dongle_type dongle_type)
373{
374 ddc->dongle_type = dongle_type;
375}
376
377static uint32_t defer_delay_converter_wa(
378 struct ddc_service *ddc,
379 uint32_t defer_delay)
380{
381 struct core_link *link = ddc->link;
382
383 if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
384 !memcmp(link->dpcd_caps.branch_dev_name,
385 DP_DVI_CONVERTER_ID_4,
386 sizeof(link->dpcd_caps.branch_dev_name)))
387 return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ?
388 defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY;
389
390 return defer_delay;
391}
392
393#define DP_TRANSLATOR_DELAY 5
394
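/*
 * Example of the defer-delay selection below: with a DP-to-HDMI,
 * DP-to-DVI or DP-to-VGA active converter on a plain I2C-over-AUX
 * link, the delay starts at DP_TRANSLATOR_DELAY (5) and may be raised
 * to I2C_OVER_AUX_DEFER_WA_DELAY by defer_delay_converter_wa() for the
 * specific branch device it checks for; non-converter sinks on that
 * transaction type get no delay.
 */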
395static uint32_t get_defer_delay(struct ddc_service *ddc)
396{
397 uint32_t defer_delay = 0;
398
399 switch (ddc->transaction_type) {
400 case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
401 if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) ||
402 (DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) ||
403 (DISPLAY_DONGLE_DP_HDMI_CONVERTER ==
404 ddc->dongle_type)) {
405
406 defer_delay = DP_TRANSLATOR_DELAY;
407
408 defer_delay =
409 defer_delay_converter_wa(ddc, defer_delay);
410
411 } else /*sink has a delay different from an Active Converter*/
412 defer_delay = 0;
413 break;
414 case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
415 defer_delay = DP_TRANSLATOR_DELAY;
416 break;
417 default:
418 break;
419 }
420 return defer_delay;
421}
422
423static bool i2c_read(
424 struct ddc_service *ddc,
425 uint32_t address,
426 uint8_t *buffer,
427 uint32_t len)
428{
429 uint8_t offs_data = 0;
430 struct i2c_payload payloads[2] = {
431 {
432 .write = true,
433 .address = address,
434 .length = 1,
435 .data = &offs_data },
436 {
437 .write = false,
438 .address = address,
439 .length = len,
440 .data = buffer } };
441
442 struct i2c_command command = {
443 .payloads = payloads,
444 .number_of_payloads = 2,
445 .engine = DDC_I2C_COMMAND_ENGINE,
446 .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
447
448 return dm_helpers_submit_i2c(
449 ddc->ctx,
450 &ddc->link->public,
451 &command);
452}
453
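/*
 * EDID blocks are addressed by (segment, offset) below. Assuming
 * DDC_EDID_BLOCKS_PER_SEGMENT is 2 and DDC_EDID_BLOCK_SIZE is 128,
 * block index 3 maps to segment 1 with a base offset of 128, and the
 * block is then fetched in DEFAULT_AUX_MAX_DATA_SIZE-byte chunks.
 */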
454static uint8_t aux_read_edid_block(
455 struct ddc_service *ddc,
456 uint8_t address,
457 uint8_t index,
458 uint8_t *buf)
459{
460 struct aux_command cmd = {
461 .payloads = NULL,
462 .number_of_payloads = 0,
463 .defer_delay = get_defer_delay(ddc),
464 .max_defer_write_retry = 0 };
465
466 uint8_t retrieved = 0;
467 uint8_t base_offset =
468 (index % DDC_EDID_BLOCKS_PER_SEGMENT) * DDC_EDID_BLOCK_SIZE;
469 uint8_t segment = index / DDC_EDID_BLOCKS_PER_SEGMENT;
470
471 for (retrieved = 0; retrieved < DDC_EDID_BLOCK_SIZE;
472 retrieved += DEFAULT_AUX_MAX_DATA_SIZE) {
473
474 uint8_t offset = base_offset + retrieved;
475
476 struct aux_payload payloads[3] = {
477 {
478 .i2c_over_aux = true,
479 .write = true,
480 .address = DDC_EDID_SEGMENT_ADDRESS,
481 .length = 1,
482 .data = &segment },
483 {
484 .i2c_over_aux = true,
485 .write = true,
486 .address = address,
487 .length = 1,
488 .data = &offset },
489 {
490 .i2c_over_aux = true,
491 .write = false,
492 .address = address,
493 .length = DEFAULT_AUX_MAX_DATA_SIZE,
494 .data = &buf[retrieved] } };
495
496 if (segment == 0) {
497 cmd.payloads = &payloads[1];
498 cmd.number_of_payloads = 2;
499 } else {
500 cmd.payloads = payloads;
501 cmd.number_of_payloads = 3;
502 }
503
504 if (!dal_i2caux_submit_aux_command(
505 ddc->ctx->i2caux,
506 ddc->ddc_pin,
507 &cmd))
508 /* cannot read, break*/
509 break;
510 }
511
512 /* Reset segment to 0. Needed by some panels */
513 if (0 != segment) {
514 struct aux_payload payloads[1] = { {
515 .i2c_over_aux = true,
516 .write = true,
517 .address = DDC_EDID_SEGMENT_ADDRESS,
518 .length = 1,
519 .data = &segment } };
520 bool result = false;
521
522 segment = 0;
523
524 cmd.number_of_payloads = ARRAY_SIZE(payloads);
525 cmd.payloads = payloads;
526
527 result = dal_i2caux_submit_aux_command(
528 ddc->ctx->i2caux,
529 ddc->ddc_pin,
530 &cmd);
531
532 if (false == result)
533 dm_logger_write(
534 ddc->ctx->logger, LOG_ERROR,
535 "%s: Writing of EDID Segment (0x30) failed!\n",
536 __func__);
537 }
538
539 return retrieved;
540}
541
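/*
 * Two addressing schemes are used below: plain DDC2B (write the byte
 * offset, then read the block) for the base block, and E-DDC, which
 * first writes a segment pointer to DDC_EDID_SEGMENT_ADDRESS so that
 * blocks beyond the first 256 bytes can be reached.
 */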
542static uint8_t i2c_read_edid_block(
543 struct ddc_service *ddc,
544 uint8_t address,
545 uint8_t index,
546 uint8_t *buf)
547{
548 bool ret = false;
549 uint8_t offset = (index % DDC_EDID_BLOCKS_PER_SEGMENT) *
550 DDC_EDID_BLOCK_SIZE;
551 uint8_t segment = index / DDC_EDID_BLOCKS_PER_SEGMENT;
552
553 struct i2c_command cmd = {
554 .payloads = NULL,
555 .number_of_payloads = 0,
556 .engine = DDC_I2C_COMMAND_ENGINE,
557 .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
558
559 struct i2c_payload payloads[3] = {
560 {
561 .write = true,
562 .address = DDC_EDID_SEGMENT_ADDRESS,
563 .length = 1,
564 .data = &segment },
565 {
566 .write = true,
567 .address = address,
568 .length = 1,
569 .data = &offset },
570 {
571 .write = false,
572 .address = address,
573 .length = DDC_EDID_BLOCK_SIZE,
574 .data = buf } };
575/*
576 * Some I2C engines don't handle stop/start between write-offset and read-data
577 * commands properly. For those displays, we have to force the newer E-DDC
578 * behavior of repeated-start which can be enabled by runtime parameter. */
579/* Originally implemented for OnLive using NXP receiver chip */
580
581 if (index == 0 && !ddc->flags.FORCE_READ_REPEATED_START) {
582		/* base block, use DDC2B, submit as 2 commands */
583 cmd.payloads = &payloads[1];
584 cmd.number_of_payloads = 1;
585
586 if (dm_helpers_submit_i2c(
587 ddc->ctx,
588 &ddc->link->public,
589 &cmd)) {
590
591 cmd.payloads = &payloads[2];
592 cmd.number_of_payloads = 1;
593
594 ret = dm_helpers_submit_i2c(
595 ddc->ctx,
596 &ddc->link->public,
597 &cmd);
598 }
599
600 } else {
601 /*
602 * extension block use E-DDC, submit as 1 command
603 * or if repeated-start is forced by runtime parameter
604 */
605 if (segment != 0) {
606 /* include segment offset in command*/
607 cmd.payloads = payloads;
608 cmd.number_of_payloads = 3;
609 } else {
610 /* we are reading first segment,
611 * segment offset is not required */
612 cmd.payloads = &payloads[1];
613 cmd.number_of_payloads = 2;
614 }
615
616 ret = dm_helpers_submit_i2c(
617 ddc->ctx,
618 &ddc->link->public,
619 &cmd);
620 }
621
622 return ret ? DDC_EDID_BLOCK_SIZE : 0;
623}
624
625static uint32_t query_edid_block(
626 struct ddc_service *ddc,
627 uint8_t address,
628 uint8_t index,
629 uint8_t *buf,
630 uint32_t size)
631{
632 uint32_t size_retrieved = 0;
633
634 if (size < DDC_EDID_BLOCK_SIZE)
635 return 0;
636
637 if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
638 size_retrieved =
639 aux_read_edid_block(ddc, address, index, buf);
640 } else {
641 size_retrieved =
642 i2c_read_edid_block(ddc, address, index, buf);
643 }
644
645 return size_retrieved;
646}
647
648#define DDC_DPCD_EDID_CHECKSUM_WRITE_ADDRESS 0x261
649#define DDC_TEST_ACK_ADDRESS 0x260
650#define DDC_DPCD_EDID_TEST_ACK 0x04
651#define DDC_DPCD_EDID_TEST_MASK 0x04
652#define DDC_DPCD_TEST_REQUEST_ADDRESS 0x218
653
654/* AG TODO: go through the DM callback here like for DPCD */
655
656static void write_dp_edid_checksum(
657 struct ddc_service *ddc,
658 uint8_t checksum)
659{
660 uint8_t dpcd_data;
661
662 dal_ddc_service_read_dpcd_data(
663 ddc,
664 DDC_DPCD_TEST_REQUEST_ADDRESS,
665 &dpcd_data,
666 1);
667
668 if (dpcd_data & DDC_DPCD_EDID_TEST_MASK) {
669
670 dal_ddc_service_write_dpcd_data(
671 ddc,
672 DDC_DPCD_EDID_CHECKSUM_WRITE_ADDRESS,
673 &checksum,
674 1);
675
676 dpcd_data = DDC_DPCD_EDID_TEST_ACK;
677
678 dal_ddc_service_write_dpcd_data(
679 ddc,
680 DDC_TEST_ACK_ADDRESS,
681 &dpcd_data,
682 1);
683 }
684}
685
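/*
 * Rough walk-through of the query below: for an EDID with one
 * extension block, the base-block read returns DDC_EDID_BLOCK_SIZE
 * bytes, ext_cnt becomes 1, one more block is appended, and the
 * function returns 2 * DDC_EDID_BLOCK_SIZE (256 bytes if the block
 * size is the usual 128).
 */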
686uint32_t dal_ddc_service_edid_query(struct ddc_service *ddc)
687{
688 uint32_t bytes_read = 0;
689 uint32_t ext_cnt = 0;
690
691 uint8_t address;
692 uint32_t i;
693
694 for (address = DDC_EDID_ADDRESS_START;
695 address <= DDC_EDID_ADDRESS_END; ++address) {
696
697 bytes_read = query_edid_block(
698 ddc,
699 address,
700 0,
701 ddc->edid_buf,
702 sizeof(ddc->edid_buf) - bytes_read);
703
704 if (bytes_read != DDC_EDID_BLOCK_SIZE)
705 continue;
706
707 /* get the number of ext blocks*/
708 ext_cnt = ddc->edid_buf[DDC_EDID_EXT_COUNT_OFFSET];
709
710		/* EDID 2.0: need to read 1 more block because an EDID 2.0
711		 * structure is 256 bytes in size */
712 if (ddc->edid_buf[DDC_EDID_20_SIGNATURE_OFFSET] ==
713 DDC_EDID_20_SIGNATURE)
714 ext_cnt = 1;
715
716 for (i = 0; i < ext_cnt; i++) {
717 /* read additional ext blocks accordingly */
718 bytes_read += query_edid_block(
719 ddc,
720 address,
721 i+1,
722 &ddc->edid_buf[bytes_read],
723 sizeof(ddc->edid_buf) - bytes_read);
724 }
725
726		/* this is a special code path for DP compliance */
727 if (DDC_TRANSACTION_TYPE_I2C_OVER_AUX == ddc->transaction_type)
728 write_dp_edid_checksum(
729 ddc,
730 ddc->edid_buf[(ext_cnt * DDC_EDID_BLOCK_SIZE) +
731 DDC_EDID1X_CHECKSUM_OFFSET]);
732
733		/* remember the address we fetched the EDID from
734		 * for later signature-check use */
735 ddc->address = address;
736
737 break;/* already read edid, done*/
738 }
739
740 ddc->edid_buf_len = bytes_read;
741 return bytes_read;
742}
743
744uint32_t dal_ddc_service_get_edid_buf_len(struct ddc_service *ddc)
745{
746 return ddc->edid_buf_len;
747}
748
749void dal_ddc_service_get_edid_buf(struct ddc_service *ddc, uint8_t *edid_buf)
750{
751 memmove(edid_buf,
752 ddc->edid_buf, ddc->edid_buf_len);
753}
754
755void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
756 struct ddc_service *ddc,
757 struct display_sink_capability *sink_cap)
758{
759 uint8_t i;
760 bool is_valid_hdmi_signature;
761 enum display_dongle_type *dongle = &sink_cap->dongle_type;
762 uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
763 bool is_type2_dongle = false;
764 struct dp_hdmi_dongle_signature_data *dongle_signature;
765
766 /* Assume we have no valid DP passive dongle connected */
767 *dongle = DISPLAY_DONGLE_NONE;
768 sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
769
770 /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
771 if (!i2c_read(
772 ddc,
773 DP_HDMI_DONGLE_ADDRESS,
774 type2_dongle_buf,
775 sizeof(type2_dongle_buf))) {
776 *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
777 sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
778
779 CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
780 "DP-DVI passive dongle %dMhz: ",
781 DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
782 return;
783 }
784
785 /* Check if Type 2 dongle.*/
786 if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
787 is_type2_dongle = true;
788
789 dongle_signature =
790 (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
791
792 is_valid_hdmi_signature = true;
793
794 /* Check EOT */
795 if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
796 is_valid_hdmi_signature = false;
797 }
798
799 /* Check signature */
800 for (i = 0; i < sizeof(dongle_signature->id); ++i) {
801			/* If it's not the right signature,
802			 * ignore a mismatch in the subversion byte. */
803 if (dongle_signature->id[i] !=
804 dp_hdmi_dongle_signature_str[i] && i != 3) {
805
806 if (is_type2_dongle) {
807 is_valid_hdmi_signature = false;
808 break;
809 }
810
811 }
812 }
813
814 if (is_type2_dongle) {
815 uint32_t max_tmds_clk =
816 type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
817
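		/*
		 * The Type 2 max TMDS clock register is presumably in
		 * 2.5 MHz units; multiplying by 2.5 (x2 plus x0.5 below)
		 * converts it to MHz.
		 */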
818 max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
819
820 if (0 == max_tmds_clk ||
821 max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
822 max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
823 *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
824
825 CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
826 sizeof(type2_dongle_buf),
827 "DP-DVI passive dongle %dMhz: ",
828 DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
829 } else {
830 if (is_valid_hdmi_signature == true) {
831 *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
832
833 CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
834 sizeof(type2_dongle_buf),
835 "Type 2 DP-HDMI passive dongle %dMhz: ",
836 max_tmds_clk);
837 } else {
838 *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
839
840 CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
841 sizeof(type2_dongle_buf),
842 "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
843 max_tmds_clk);
844
845 }
846
847 /* Multiply by 1000 to convert to kHz. */
848 sink_cap->max_hdmi_pixel_clock =
849 max_tmds_clk * 1000;
850 }
851
852 } else {
853 if (is_valid_hdmi_signature == true) {
854 *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
855
856 CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
857 sizeof(type2_dongle_buf),
858 "Type 1 DP-HDMI passive dongle %dMhz: ",
859 sink_cap->max_hdmi_pixel_clock / 1000);
860 } else {
861 *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
862
863 CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
864 sizeof(type2_dongle_buf),
865 "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
866 sink_cap->max_hdmi_pixel_clock / 1000);
867 }
868 }
869
870 return;
871}
872
873enum {
874 DP_SINK_CAP_SIZE =
875 DPCD_ADDRESS_EDP_CONFIG_CAP - DPCD_ADDRESS_DPCD_REV + 1
876};
877
878bool dal_ddc_service_query_ddc_data(
879 struct ddc_service *ddc,
880 uint32_t address,
881 uint8_t *write_buf,
882 uint32_t write_size,
883 uint8_t *read_buf,
884 uint32_t read_size)
885{
886 bool ret;
887 uint32_t payload_size =
888 dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
889 DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
890
891 uint32_t write_payloads =
892 (write_size + payload_size - 1) / payload_size;
893
894 uint32_t read_payloads =
895 (read_size + payload_size - 1) / payload_size;
896
897 uint32_t payloads_num = write_payloads + read_payloads;
898
899 if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
900 return false;
901
902	/* TODO: the payload length for i2c and aux is only a uint8_t,
903	 * but we may want to read 256 bytes over i2c */
904 if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
905
906 struct aux_payloads *payloads =
907 dal_ddc_aux_payloads_create(ddc->ctx, payloads_num);
908
909 struct aux_command command = {
910 .payloads = dal_ddc_aux_payloads_get(payloads),
911 .number_of_payloads = 0,
912 .defer_delay = get_defer_delay(ddc),
913 .max_defer_write_retry = 0 };
914
915 dal_ddc_aux_payloads_add(
916 payloads, address, write_size, write_buf, true);
917
918 dal_ddc_aux_payloads_add(
919 payloads, address, read_size, read_buf, false);
920
921 command.number_of_payloads =
922 dal_ddc_aux_payloads_get_count(payloads);
923
924 ret = dal_i2caux_submit_aux_command(
925 ddc->ctx->i2caux,
926 ddc->ddc_pin,
927 &command);
928
929 dal_ddc_aux_payloads_destroy(&payloads);
930
931 } else {
932 struct i2c_payloads *payloads =
933 dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
934
935 struct i2c_command command = {
936 .payloads = dal_ddc_i2c_payloads_get(payloads),
937 .number_of_payloads = 0,
938 .engine = DDC_I2C_COMMAND_ENGINE,
939 .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
940
941 dal_ddc_i2c_payloads_add(
942 payloads, address, write_size, write_buf, true);
943
944 dal_ddc_i2c_payloads_add(
945 payloads, address, read_size, read_buf, false);
946
947 command.number_of_payloads =
948 dal_ddc_i2c_payloads_get_count(payloads);
949
950 ret = dm_helpers_submit_i2c(
951 ddc->ctx,
952 &ddc->link->public,
953 &command);
954
955 dal_ddc_i2c_payloads_destroy(&payloads);
956 }
957
958 return ret;
959}
960
961enum ddc_result dal_ddc_service_read_dpcd_data(
962 struct ddc_service *ddc,
963 uint32_t address,
964 uint8_t *data,
965 uint32_t len)
966{
967 struct aux_payload read_payload = {
968 .i2c_over_aux = false,
969 .write = false,
970 .address = address,
971 .length = len,
972 .data = data,
973 };
974 struct aux_command command = {
975 .payloads = &read_payload,
976 .number_of_payloads = 1,
977 .defer_delay = 0,
978 .max_defer_write_retry = 0,
979 };
980
981 if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
982 BREAK_TO_DEBUGGER();
983 return DDC_RESULT_FAILED_INVALID_OPERATION;
984 }
985
986 if (dal_i2caux_submit_aux_command(
987 ddc->ctx->i2caux,
988 ddc->ddc_pin,
989 &command))
990 return DDC_RESULT_SUCESSFULL;
991
992 return DDC_RESULT_FAILED_OPERATION;
993}
994
995enum ddc_result dal_ddc_service_write_dpcd_data(
996 struct ddc_service *ddc,
997 uint32_t address,
998 const uint8_t *data,
999 uint32_t len)
1000{
1001 struct aux_payload write_payload = {
1002 .i2c_over_aux = false,
1003 .write = true,
1004 .address = address,
1005 .length = len,
1006 .data = (uint8_t *)data,
1007 };
1008 struct aux_command command = {
1009 .payloads = &write_payload,
1010 .number_of_payloads = 1,
1011 .defer_delay = 0,
1012 .max_defer_write_retry = 0,
1013 };
1014
1015 if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
1016 BREAK_TO_DEBUGGER();
1017 return DDC_RESULT_FAILED_INVALID_OPERATION;
1018 }
1019
1020 if (dal_i2caux_submit_aux_command(
1021 ddc->ctx->i2caux,
1022 ddc->ddc_pin,
1023 &command))
1024 return DDC_RESULT_SUCESSFULL;
1025
1026 return DDC_RESULT_FAILED_OPERATION;
1027}
1028
1029/*test only function*/
1030void dal_ddc_service_set_ddc_pin(
1031 struct ddc_service *ddc_service,
1032 struct ddc *ddc)
1033{
1034 ddc_service->ddc_pin = ddc;
1035}
1036
1037struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
1038{
1039 return ddc_service->ddc_pin;
1040}
1041
1042void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
1043 uint32_t pix_clk,
1044 bool lte_340_scramble)
1045{
1046	bool over_340_mhz = pix_clk > 340000;
1047 uint8_t slave_address = HDMI_SCDC_ADDRESS;
1048 uint8_t offset = HDMI_SCDC_SINK_VERSION;
1049 uint8_t sink_version = 0;
1050 uint8_t write_buffer[2] = {0};
1051	/* lte_340_scramble: scrambling below 340 MHz, from the sink's SCDC caps */
1052
1053 dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
1054 sizeof(offset), &sink_version, sizeof(sink_version));
1055 if (sink_version == 1) {
1056 /*Source Version = 1*/
1057 write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
1058 write_buffer[1] = 1;
1059 dal_ddc_service_query_ddc_data(ddc_service, slave_address,
1060 write_buffer, sizeof(write_buffer), NULL, 0);
1061 /*Read Request from SCDC caps*/
1062 }
1063 write_buffer[0] = HDMI_SCDC_TMDS_CONFIG;
1064
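	/*
	 * TMDS_Config values written below, assuming HDMI 2.0 SCDC
	 * semantics: bit 0 enables scrambling, bit 1 selects the 1/40
	 * TMDS bit clock ratio; so 3 = scrambling with 1/40 ratio
	 * (above 340 MHz), 1 = scrambling with 1/10 ratio, 0 = off.
	 */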
1065 if (over_340_mhz) {
1066 write_buffer[1] = 3;
1067 } else if (lte_340_scramble) {
1068 write_buffer[1] = 1;
1069 } else {
1070 write_buffer[1] = 0;
1071 }
1072 dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
1073 sizeof(write_buffer), NULL, 0);
1074}
1075
1076void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
1077{
1078 uint8_t slave_address = HDMI_SCDC_ADDRESS;
1079 uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
1080 uint8_t tmds_config = 0;
1081
1082 dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
1083 sizeof(offset), &tmds_config, sizeof(tmds_config));
1084 if (tmds_config & 0x1) {
1085 union hdmi_scdc_status_flags_data status_data = { {0} };
1086 uint8_t scramble_status = 0;
1087
1088 offset = HDMI_SCDC_SCRAMBLER_STATUS;
1089 dal_ddc_service_query_ddc_data(ddc_service, slave_address,
1090 &offset, sizeof(offset), &scramble_status,
1091 sizeof(scramble_status));
1092 offset = HDMI_SCDC_STATUS_FLAGS;
1093 dal_ddc_service_query_ddc_data(ddc_service, slave_address,
1094 &offset, sizeof(offset), status_data.byte,
1095 sizeof(status_data.byte));
1096 }
1097}
1098
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
new file mode 100644
index 000000000000..2585ec332e58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -0,0 +1,2462 @@
1/* Copyright 2015 Advanced Micro Devices, Inc. */
2#include "dm_services.h"
3#include "dc.h"
4#include "dc_link_dp.h"
5#include "dm_helpers.h"
6
7#include "inc/core_types.h"
8#include "link_hwss.h"
9#include "dc_link_ddc.h"
10#include "core_status.h"
11#include "dpcd_defs.h"
12
13#include "core_dc.h"
14
15/* maximum pre emphasis level allowed for each voltage swing level*/
16static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
17 PRE_EMPHASIS_LEVEL3,
18 PRE_EMPHASIS_LEVEL2,
19 PRE_EMPHASIS_LEVEL1,
20 PRE_EMPHASIS_DISABLED };
21
22enum {
23 POST_LT_ADJ_REQ_LIMIT = 6,
24 POST_LT_ADJ_REQ_TIMEOUT = 200
25};
26
27enum {
28 LINK_TRAINING_MAX_RETRY_COUNT = 5,
29	/* to avoid an infinite loop wherein the receiver
30	 * switches between different VS levels
31	 */
32 LINK_TRAINING_MAX_CR_RETRY = 100
33};
34
35static const struct dc_link_settings link_training_fallback_table[] = {
36/* 4320 Mbytes/sec*/
37{ LANE_COUNT_FOUR, LINK_RATE_HIGH3, LINK_SPREAD_DISABLED },
38/* 2160 Mbytes/sec*/
39{ LANE_COUNT_FOUR, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
40/* 1080 Mbytes/sec*/
41{ LANE_COUNT_FOUR, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
42/* 648 Mbytes/sec*/
43{ LANE_COUNT_FOUR, LINK_RATE_LOW, LINK_SPREAD_DISABLED },
44/* 2160 Mbytes/sec*/
45{ LANE_COUNT_TWO, LINK_RATE_HIGH3, LINK_SPREAD_DISABLED },
46/* 1080 Mbytes/sec*/
47{ LANE_COUNT_TWO, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
48/* 540 Mbytes/sec*/
49{ LANE_COUNT_TWO, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
50/* 324 Mbytes/sec*/
51{ LANE_COUNT_TWO, LINK_RATE_LOW, LINK_SPREAD_DISABLED },
52/* 1080 Mbytes/sec*/
53{ LANE_COUNT_ONE, LINK_RATE_HIGH3, LINK_SPREAD_DISABLED },
54/* 540 Mbytes/sec*/
55{ LANE_COUNT_ONE, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
56/* 270 Mbytes/sec*/
57{ LANE_COUNT_ONE, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
58/* 162 Mbytes/sec*/
59{ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED } };
60
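/*
 * The wait below defaults to the caller-supplied interval; for DPCD
 * rev 1.2+ sinks a non-zero TRAINING_AUX_RD_INTERVAL overrides it at
 * 4 ms per unit (e.g. a register value of 2 gives an 8000 us wait).
 */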
61static void wait_for_training_aux_rd_interval(
62 struct core_link* link,
63 uint32_t default_wait_in_micro_secs)
64{
65 union training_aux_rd_interval training_rd_interval;
66
67 /* overwrite the delay if rev > 1.1*/
68 if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
69		/* DP 1.2 or later - retrieve delay through the
70		 * DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL register */
71 core_link_read_dpcd(
72 link,
73 DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL,
74 (uint8_t *)&training_rd_interval,
75 sizeof(training_rd_interval));
76
77 if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
78 default_wait_in_micro_secs =
79 training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
80 }
81
82 udelay(default_wait_in_micro_secs);
83
84 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
85 "%s:\n wait = %d\n",
86 __func__,
87 default_wait_in_micro_secs);
88}
89
90static void dpcd_set_training_pattern(
91 struct core_link* link,
92 union dpcd_training_pattern dpcd_pattern)
93{
94 core_link_write_dpcd(
95 link,
96 DPCD_ADDRESS_TRAINING_PATTERN_SET,
97 &dpcd_pattern.raw,
98 1);
99
100 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
101 "%s\n %x pattern = %x\n",
102 __func__,
103 DPCD_ADDRESS_TRAINING_PATTERN_SET,
104 dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
105}
106
107static void dpcd_set_link_settings(
108 struct core_link* link,
109 const struct link_training_settings *lt_settings)
110{
111 uint8_t rate = (uint8_t)
112 (lt_settings->link_settings.link_rate);
113
114 union down_spread_ctrl downspread = {{0}};
115 union lane_count_set lane_count_set = {{0}};
116 uint8_t link_set_buffer[2];
117
118 downspread.raw = (uint8_t)
119 (lt_settings->link_settings.link_spread);
120
121 lane_count_set.bits.LANE_COUNT_SET =
122 lt_settings->link_settings.lane_count;
123
124 lane_count_set.bits.ENHANCED_FRAMING = 1;
125
126 lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
127 link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
128
129 link_set_buffer[0] = rate;
130 link_set_buffer[1] = lane_count_set.raw;
131
132 core_link_write_dpcd(link, DPCD_ADDRESS_LINK_BW_SET,
133 link_set_buffer, 2);
134 core_link_write_dpcd(link, DPCD_ADDRESS_DOWNSPREAD_CNTL,
135 &downspread.raw, sizeof(downspread));
136
137 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
138 "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
139 __func__,
140 DPCD_ADDRESS_LINK_BW_SET,
141 lt_settings->link_settings.link_rate,
142 DPCD_ADDRESS_LANE_COUNT_SET,
143 lt_settings->link_settings.lane_count,
144 DPCD_ADDRESS_DOWNSPREAD_CNTL,
145 lt_settings->link_settings.link_spread);
146
147}
148
149static enum dpcd_training_patterns
150 hw_training_pattern_to_dpcd_training_pattern(
151 struct core_link* link,
152 enum hw_dp_training_pattern pattern)
153{
154 enum dpcd_training_patterns dpcd_tr_pattern =
155 DPCD_TRAINING_PATTERN_VIDEOIDLE;
156
157 switch (pattern) {
158 case HW_DP_TRAINING_PATTERN_1:
159 dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
160 break;
161 case HW_DP_TRAINING_PATTERN_2:
162 dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
163 break;
164 case HW_DP_TRAINING_PATTERN_3:
165 dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
166 break;
167 case HW_DP_TRAINING_PATTERN_4:
168 dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
169 break;
170 default:
171 ASSERT(0);
172 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
173 "%s: Invalid HW Training pattern: %d\n",
174 __func__, pattern);
175 break;
176 }
177
178 return dpcd_tr_pattern;
179
180}
181
182static void dpcd_set_lt_pattern_and_lane_settings(
183 struct core_link* link,
184 const struct link_training_settings *lt_settings,
185 enum hw_dp_training_pattern pattern)
186{
187 union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
188 const uint32_t dpcd_base_lt_offset =
189 DPCD_ADDRESS_TRAINING_PATTERN_SET;
190 uint8_t dpcd_lt_buffer[5] = {0};
191 union dpcd_training_pattern dpcd_pattern = {{0}};
192 uint32_t lane;
193 uint32_t size_in_bytes;
194 bool edp_workaround = false; /* TODO link_prop.INTERNAL */
195
196 /*****************************************************************
197 * DpcdAddress_TrainingPatternSet
198 *****************************************************************/
199 dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
200 hw_training_pattern_to_dpcd_training_pattern(link, pattern);
201
202 dpcd_lt_buffer[DPCD_ADDRESS_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
203 = dpcd_pattern.raw;
204
205 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
206 "%s\n %x pattern = %x\n",
207 __func__,
208 DPCD_ADDRESS_TRAINING_PATTERN_SET,
209 dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
210
211 /*****************************************************************
212 * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
213 *****************************************************************/
214 for (lane = 0; lane <
215 (uint32_t)(lt_settings->link_settings.lane_count); lane++) {
216
217 dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
218 (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
219 dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
220 (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
221
222 dpcd_lane[lane].bits.MAX_SWING_REACHED =
223 (lt_settings->lane_settings[lane].VOLTAGE_SWING ==
224 VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
225 dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
226 (lt_settings->lane_settings[lane].PRE_EMPHASIS ==
227 PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
228 }
229
230	/* concatenate everything into one buffer */
231
232 size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
233
234 // 0x00103 - 0x00102
235 memmove(
236 &dpcd_lt_buffer[DPCD_ADDRESS_LANE0_SET - dpcd_base_lt_offset],
237 dpcd_lane,
238 size_in_bytes);
239
240 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
241		"%s:\n %x VS set = %x PE set = %x "
242		"max VS Reached = %x max PE Reached = %x\n",
243 __func__,
244 DPCD_ADDRESS_LANE0_SET,
245 dpcd_lane[0].bits.VOLTAGE_SWING_SET,
246 dpcd_lane[0].bits.PRE_EMPHASIS_SET,
247 dpcd_lane[0].bits.MAX_SWING_REACHED,
248 dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
249
250 if (edp_workaround) {
251 /* for eDP write in 2 parts because the 5-byte burst is
252 * causing issues on some eDP panels (EPR#366724)
253 */
254 core_link_write_dpcd(
255 link,
256 DPCD_ADDRESS_TRAINING_PATTERN_SET,
257 &dpcd_pattern.raw,
258 sizeof(dpcd_pattern.raw) );
259
260 core_link_write_dpcd(
261 link,
262 DPCD_ADDRESS_LANE0_SET,
263 (uint8_t *)(dpcd_lane),
264 size_in_bytes);
265
266 } else
267 /* write it all in (1 + number-of-lanes)-byte burst*/
268 core_link_write_dpcd(
269 link,
270 dpcd_base_lt_offset,
271 dpcd_lt_buffer,
272 size_in_bytes + sizeof(dpcd_pattern.raw) );
273
274 link->public.cur_lane_setting = lt_settings->lane_settings[0];
275}
276
277static bool is_cr_done(enum dc_lane_count ln_count,
278 union lane_status *dpcd_lane_status)
279{
280 bool done = true;
281 uint32_t lane;
282 /*LANEx_CR_DONE bits All 1's?*/
283 for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
284 if (!dpcd_lane_status[lane].bits.CR_DONE_0)
285 done = false;
286 }
287 return done;
288
289}
290
291static bool is_ch_eq_done(enum dc_lane_count ln_count,
292 union lane_status *dpcd_lane_status,
293 union lane_align_status_updated *lane_status_updated)
294{
295 bool done = true;
296 uint32_t lane;
297 if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
298 done = false;
299 else {
300 for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
301 if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
302 !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
303 done = false;
304 }
305 }
306 return done;
307
308}
309
310static void update_drive_settings(
311 struct link_training_settings *dest,
312 struct link_training_settings src)
313{
314 uint32_t lane;
315 for (lane = 0; lane < src.link_settings.lane_count; lane++) {
316 dest->lane_settings[lane].VOLTAGE_SWING =
317 src.lane_settings[lane].VOLTAGE_SWING;
318 dest->lane_settings[lane].PRE_EMPHASIS =
319 src.lane_settings[lane].PRE_EMPHASIS;
320 dest->lane_settings[lane].POST_CURSOR2 =
321 src.lane_settings[lane].POST_CURSOR2;
322 }
323}
324
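/*
 * Lane status and adjust-request fields arrive packed two per byte,
 * low nibble first. For example, with buf = {0x21, 0x43}, indices
 * 0..3 return 0x1, 0x2, 0x3 and 0x4 respectively.
 */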
325static uint8_t get_nibble_at_index(const uint8_t *buf,
326 uint32_t index)
327{
328 uint8_t nibble;
329 nibble = buf[index / 2];
330
331 if (index % 2)
332 nibble >>= 4;
333 else
334 nibble &= 0x0F;
335
336 return nibble;
337}
338
339static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
340 enum dc_voltage_swing voltage)
341{
342 enum dc_pre_emphasis pre_emphasis;
343 pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
344
345 if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
346 pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
347
348 return pre_emphasis;
349
350}
351
352static void find_max_drive_settings(
353 const struct link_training_settings *link_training_setting,
354 struct link_training_settings *max_lt_setting)
355{
356 uint32_t lane;
357 struct dc_lane_settings max_requested;
358
359 max_requested.VOLTAGE_SWING =
360 link_training_setting->
361 lane_settings[0].VOLTAGE_SWING;
362 max_requested.PRE_EMPHASIS =
363 link_training_setting->
364 lane_settings[0].PRE_EMPHASIS;
365 /*max_requested.postCursor2 =
366 * link_training_setting->laneSettings[0].postCursor2;*/
367
368 /* Determine what the maximum of the requested settings are*/
369 for (lane = 1; lane < link_training_setting->link_settings.lane_count;
370 lane++) {
371 if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
372 max_requested.VOLTAGE_SWING)
373
374 max_requested.VOLTAGE_SWING =
375 link_training_setting->
376 lane_settings[lane].VOLTAGE_SWING;
377
378 if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
379 max_requested.PRE_EMPHASIS)
380 max_requested.PRE_EMPHASIS =
381 link_training_setting->
382 lane_settings[lane].PRE_EMPHASIS;
383
384 /*
385 if (link_training_setting->laneSettings[lane].postCursor2 >
386 max_requested.postCursor2)
387 {
388 max_requested.postCursor2 =
389 link_training_setting->laneSettings[lane].postCursor2;
390 }
391 */
392 }
393
394 /* make sure the requested settings are
395 * not higher than maximum settings*/
396 if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
397 max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
398
399 if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
400 max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
401 /*
402 if (max_requested.postCursor2 > PostCursor2_MaxLevel)
403 max_requested.postCursor2 = PostCursor2_MaxLevel;
404 */
405
406 /* make sure the pre-emphasis matches the voltage swing*/
407 if (max_requested.PRE_EMPHASIS >
408 get_max_pre_emphasis_for_voltage_swing(
409 max_requested.VOLTAGE_SWING))
410 max_requested.PRE_EMPHASIS =
411 get_max_pre_emphasis_for_voltage_swing(
412 max_requested.VOLTAGE_SWING);
413
414 /*
415 * Post Cursor2 levels are completely independent from
416 * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
417 * can only be applied to each allowable combination of voltage
418 * swing and pre-emphasis levels */
419 /* if ( max_requested.postCursor2 >
420 * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
421 * max_requested.postCursor2 =
422 * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
423 */
424
425 max_lt_setting->link_settings.link_rate =
426 link_training_setting->link_settings.link_rate;
427 max_lt_setting->link_settings.lane_count =
428 link_training_setting->link_settings.lane_count;
429 max_lt_setting->link_settings.link_spread =
430 link_training_setting->link_settings.link_spread;
431
432 for (lane = 0; lane <
433 link_training_setting->link_settings.lane_count;
434 lane++) {
435 max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
436 max_requested.VOLTAGE_SWING;
437 max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
438 max_requested.PRE_EMPHASIS;
439 /*max_lt_setting->laneSettings[lane].postCursor2 =
440 * max_requested.postCursor2;
441 */
442 }
443
444}
445
446static void get_lane_status_and_drive_settings(
447 struct core_link* link,
448 const struct link_training_settings *link_training_setting,
449 union lane_status *ln_status,
450 union lane_align_status_updated *ln_status_updated,
451 struct link_training_settings *req_settings)
452{
453 uint8_t dpcd_buf[6] = {0};
454 union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
455 struct link_training_settings request_settings = {{0}};
456 uint32_t lane;
457
458 memset(req_settings, '\0', sizeof(struct link_training_settings));
459
460 core_link_read_dpcd(
461 link,
462 DPCD_ADDRESS_LANE_01_STATUS,
463 (uint8_t *)(dpcd_buf),
464 sizeof(dpcd_buf));
465
466 for (lane = 0; lane <
467 (uint32_t)(link_training_setting->link_settings.lane_count);
468 lane++) {
469
470 ln_status[lane].raw =
471 get_nibble_at_index(&dpcd_buf[0], lane);
472 dpcd_lane_adjust[lane].raw =
473 get_nibble_at_index(&dpcd_buf[4], lane);
474 }
475
476 ln_status_updated->raw = dpcd_buf[2];
477
478 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
479 "%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
480 __func__,
481 DPCD_ADDRESS_LANE_01_STATUS, dpcd_buf[0],
482 DPCD_ADDRESS_LANE_23_STATUS, dpcd_buf[1]);
483
484 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
485 "%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
486 __func__,
487 DPCD_ADDRESS_ADJUST_REQUEST_LANE0_1,
488 dpcd_buf[4],
489 DPCD_ADDRESS_ADJUST_REQUEST_LANE2_3,
490 dpcd_buf[5]);
491
492 /*copy to req_settings*/
493 request_settings.link_settings.lane_count =
494 link_training_setting->link_settings.lane_count;
495 request_settings.link_settings.link_rate =
496 link_training_setting->link_settings.link_rate;
497 request_settings.link_settings.link_spread =
498 link_training_setting->link_settings.link_spread;
499
500 for (lane = 0; lane <
501 (uint32_t)(link_training_setting->link_settings.lane_count);
502 lane++) {
503
504 request_settings.lane_settings[lane].VOLTAGE_SWING =
505 (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
506 VOLTAGE_SWING_LANE);
507 request_settings.lane_settings[lane].PRE_EMPHASIS =
508 (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
509 PRE_EMPHASIS_LANE);
510 }
511
512 /*Note: for postcursor2, read adjusted
513 * postcursor2 settings from*/
514 /*DpcdAddress_AdjustRequestPostCursor2 =
515 *0x020C (not implemented yet)*/
516
517 /* we find the maximum of the requested settings across all lanes*/
518 /* and set this maximum for all lanes*/
519 find_max_drive_settings(&request_settings, req_settings);
520
521 /* if post cursor 2 is needed in the future,
522 * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
523 */
524
525}
526
527static void dpcd_set_lane_settings(
528 struct core_link* link,
529 const struct link_training_settings *link_training_setting)
530{
531 union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
532 uint32_t lane;
533
534 for (lane = 0; lane <
535 (uint32_t)(link_training_setting->
536 link_settings.lane_count);
537 lane++) {
538 dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
539 (uint8_t)(link_training_setting->
540 lane_settings[lane].VOLTAGE_SWING);
541 dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
542 (uint8_t)(link_training_setting->
543 lane_settings[lane].PRE_EMPHASIS);
544 dpcd_lane[lane].bits.MAX_SWING_REACHED =
545 (link_training_setting->
546 lane_settings[lane].VOLTAGE_SWING ==
547 VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
548 dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
549 (link_training_setting->
550 lane_settings[lane].PRE_EMPHASIS ==
551 PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
552 }
553
554 core_link_write_dpcd(link,
555 DPCD_ADDRESS_LANE0_SET,
556 (uint8_t *)(dpcd_lane),
557 link_training_setting->link_settings.lane_count);
558
559 /*
560 if (LTSettings.link.rate == LinkRate_High2)
561 {
562 DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
563 for ( uint32_t lane = 0;
564 lane < lane_count_DPMax; lane++)
565 {
566 dpcd_lane2[lane].bits.post_cursor2_set =
567 static_cast<unsigned char>(
568 LTSettings.laneSettings[lane].postCursor2);
569 dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
570 }
571 m_pDpcdAccessSrv->WriteDpcdData(
572 DpcdAddress_Lane0Set2,
573 reinterpret_cast<unsigned char*>(dpcd_lane2),
574 LTSettings.link.lanes);
575 }
576 */
577
578 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
579		"%s\n %x VS set = %x PE set = %x "
580		"max VS Reached = %x max PE Reached = %x\n",
581 __func__,
582 DPCD_ADDRESS_LANE0_SET,
583 dpcd_lane[0].bits.VOLTAGE_SWING_SET,
584 dpcd_lane[0].bits.PRE_EMPHASIS_SET,
585 dpcd_lane[0].bits.MAX_SWING_REACHED,
586 dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
587
588 link->public.cur_lane_setting = link_training_setting->lane_settings[0];
589
590}
591
592static bool is_max_vs_reached(
593 const struct link_training_settings *lt_settings)
594{
595 uint32_t lane;
596 for (lane = 0; lane <
597 (uint32_t)(lt_settings->link_settings.lane_count);
598 lane++) {
599 if (lt_settings->lane_settings[lane].VOLTAGE_SWING
600 == VOLTAGE_SWING_MAX_LEVEL)
601 return true;
602 }
603 return false;
604
605}
606
607void dc_link_dp_set_drive_settings(
608 struct dc_link *link,
609 struct link_training_settings *lt_settings)
610{
611 struct core_link *core_link = DC_LINK_TO_CORE(link);
612 /* program ASIC PHY settings*/
613 dp_set_hw_lane_settings(core_link, lt_settings);
614
615 /* Notify DP sink the PHY settings from source */
616 dpcd_set_lane_settings(core_link, lt_settings);
617}
618
619static bool perform_post_lt_adj_req_sequence(
620 struct core_link *link,
621 struct link_training_settings *lt_settings)
622{
623 enum dc_lane_count lane_count =
624 lt_settings->link_settings.lane_count;
625
626 uint32_t adj_req_count;
627 uint32_t adj_req_timer;
628 bool req_drv_setting_changed;
629 uint32_t lane;
630
631 req_drv_setting_changed = false;
632 for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
633 adj_req_count++) {
634
635 req_drv_setting_changed = false;
636
637 for (adj_req_timer = 0;
638 adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
639 adj_req_timer++) {
640
641 struct link_training_settings req_settings;
642 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
643 union lane_align_status_updated
644 dpcd_lane_status_updated;
645
646 get_lane_status_and_drive_settings(
647 link,
648 lt_settings,
649 dpcd_lane_status,
650 &dpcd_lane_status_updated,
651 &req_settings);
652
653 if (dpcd_lane_status_updated.bits.
654 POST_LT_ADJ_REQ_IN_PROGRESS == 0)
655 return true;
656
657 if (!is_cr_done(lane_count, dpcd_lane_status))
658 return false;
659
660 if (!is_ch_eq_done(
661 lane_count,
662 dpcd_lane_status,
663 &dpcd_lane_status_updated))
664 return false;
665
666 for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
667
668 if (lt_settings->
669 lane_settings[lane].VOLTAGE_SWING !=
670 req_settings.lane_settings[lane].
671 VOLTAGE_SWING ||
672 lt_settings->lane_settings[lane].PRE_EMPHASIS !=
673 req_settings.lane_settings[lane].PRE_EMPHASIS) {
674
675 req_drv_setting_changed = true;
676 break;
677 }
678 }
679
680 if (req_drv_setting_changed) {
681 update_drive_settings(
682					lt_settings, req_settings);
683
684 dc_link_dp_set_drive_settings(&link->public,
685 lt_settings);
686 break;
687 }
688
689 msleep(1);
690 }
691
692 if (!req_drv_setting_changed) {
693 dm_logger_write(link->ctx->logger, LOG_WARNING,
694 "%s: Post Link Training Adjust Request Timed out\n",
695 __func__);
696
697 ASSERT(0);
698 return true;
699 }
700 }
701 dm_logger_write(link->ctx->logger, LOG_WARNING,
702 "%s: Post Link Training Adjust Request limit reached\n",
703 __func__);
704
705 ASSERT(0);
706 return true;
707
708}
709
710static enum hw_dp_training_pattern get_supported_tp(struct core_link *link)
711{
712 enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
713 struct encoder_feature_support *features = &link->link_enc->features;
714 struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
715
716 if (features->flags.bits.IS_TPS3_CAPABLE)
717 highest_tp = HW_DP_TRAINING_PATTERN_3;
718
719 if (features->flags.bits.IS_TPS4_CAPABLE)
720 highest_tp = HW_DP_TRAINING_PATTERN_4;
721
722 if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
723 highest_tp >= HW_DP_TRAINING_PATTERN_4)
724 return HW_DP_TRAINING_PATTERN_4;
725
726 if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
727 highest_tp >= HW_DP_TRAINING_PATTERN_3)
728 return HW_DP_TRAINING_PATTERN_3;
729
730 return HW_DP_TRAINING_PATTERN_2;
731}
732
733static bool perform_channel_equalization_sequence(
734 struct core_link *link,
735 struct link_training_settings *lt_settings)
736{
737 struct link_training_settings req_settings;
738 enum hw_dp_training_pattern hw_tr_pattern;
739 uint32_t retries_ch_eq;
740 enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
741 union lane_align_status_updated dpcd_lane_status_updated = {{0}};
742	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
743
744 hw_tr_pattern = get_supported_tp(link);
745
746 dp_set_hw_training_pattern(link, hw_tr_pattern);
747
748 for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
749 retries_ch_eq++) {
750
751 dp_set_hw_lane_settings(link, lt_settings);
752
753 /* 2. update DPCD*/
754 if (!retries_ch_eq)
755 /* EPR #361076 - write as a 5-byte burst,
756 * but only for the 1-st iteration*/
757 dpcd_set_lt_pattern_and_lane_settings(
758 link,
759 lt_settings,
760 hw_tr_pattern);
761 else
762 dpcd_set_lane_settings(link, lt_settings);
763
764 /* 3. wait for receiver to lock-on*/
765 wait_for_training_aux_rd_interval(link, 400);
766
767 /* 4. Read lane status and requested
768 * drive settings as set by the sink*/
769
770 get_lane_status_and_drive_settings(
771 link,
772 lt_settings,
773 dpcd_lane_status,
774 &dpcd_lane_status_updated,
775 &req_settings);
776
777 /* 5. check CR done*/
778 if (!is_cr_done(lane_count, dpcd_lane_status))
779 return false;
780
781 /* 6. check CHEQ done*/
782 if (is_ch_eq_done(lane_count,
783 dpcd_lane_status,
784 &dpcd_lane_status_updated))
785 return true;
786
787 /* 7. update VS/PE/PC2 in lt_settings*/
788 update_drive_settings(lt_settings, req_settings);
789 }
790
791 return false;
792
793}
794
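/*
 * Clock recovery below: drive settings start at VS0/PE0, TPS1 is set,
 * and the loop repeatedly re-reads the sink's lane status, bailing out
 * early if the maximum voltage swing is reached or if the sink keeps
 * requesting the same swing for LINK_TRAINING_MAX_RETRY_COUNT
 * consecutive iterations.
 */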
795static bool perform_clock_recovery_sequence(
796 struct core_link *link,
797 struct link_training_settings *lt_settings)
798{
799 uint32_t retries_cr;
800 uint32_t retry_count;
801 uint32_t lane;
802 struct link_training_settings req_settings;
803 enum dc_lane_count lane_count =
804 lt_settings->link_settings.lane_count;
805 enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
806 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
807 union lane_align_status_updated dpcd_lane_status_updated;
808
809 retries_cr = 0;
810 retry_count = 0;
811 /* initial drive setting (VS/PE/PC2)*/
812 for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
813 lt_settings->lane_settings[lane].VOLTAGE_SWING =
814 VOLTAGE_SWING_LEVEL0;
815 lt_settings->lane_settings[lane].PRE_EMPHASIS =
816 PRE_EMPHASIS_DISABLED;
817 lt_settings->lane_settings[lane].POST_CURSOR2 =
818 POST_CURSOR2_DISABLED;
819 }
820
821 dp_set_hw_training_pattern(link, hw_tr_pattern);
822
823	/* najeeb - the Synaptics MST hub can put link training into an
824	 * infinite loop by switching the VS between level 0 and level 1
825	 * continuously; here we try for CR lock for up to
826	 * LINK_TRAINING_MAX_CR_RETRY attempts.
827	 */
828 while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
829 (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
830
831 memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
832 memset(&dpcd_lane_status_updated, '\0',
833 sizeof(dpcd_lane_status_updated));
834
835 /* 1. call HWSS to set lane settings*/
836 dp_set_hw_lane_settings(
837 link,
838 lt_settings);
839
840 /* 2. update DPCD of the receiver*/
841 if (!retries_cr)
842 /* EPR #361076 - write as a 5-byte burst,
843 * but only for the 1-st iteration.*/
844 dpcd_set_lt_pattern_and_lane_settings(
845 link,
846 lt_settings,
847 hw_tr_pattern);
848 else
849 dpcd_set_lane_settings(
850 link,
851 lt_settings);
852
853 /* 3. wait receiver to lock-on*/
854 wait_for_training_aux_rd_interval(
855 link,
856 100);
857
858 /* 4. Read lane status and requested drive
859 * settings as set by the sink
860 */
861 get_lane_status_and_drive_settings(
862 link,
863 lt_settings,
864 dpcd_lane_status,
865 &dpcd_lane_status_updated,
866 &req_settings);
867
868 /* 5. check CR done*/
869 if (is_cr_done(lane_count, dpcd_lane_status))
870 return true;
871
872 /* 6. max VS reached*/
873 if (is_max_vs_reached(lt_settings))
874 return false;
875
876 /* 7. same voltage*/
877 /* Note: VS same for all lanes,
878 * so comparing first lane is sufficient*/
879 if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
880 req_settings.lane_settings[0].VOLTAGE_SWING)
881 retries_cr++;
882 else
883 retries_cr = 0;
884
885 /* 8. update VS/PE/PC2 in lt_settings*/
886 update_drive_settings(lt_settings, req_settings);
887
888 retry_count++;
889 }
890
891 if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
892 ASSERT(0);
893 dm_logger_write(link->ctx->logger, LOG_ERROR,
894			"%s: Link Training Error, could not "
895			"get CR after %d tries. "
896			"Possibly voltage swing issue", __func__,
897 LINK_TRAINING_MAX_CR_RETRY);
898
899 }
900
901 return false;
902}
903
904static inline bool perform_link_training_int(
905 struct core_link *link,
906 struct link_training_settings *lt_settings,
907 bool status)
908{
909 union lane_count_set lane_count_set = { {0} };
910 union dpcd_training_pattern dpcd_pattern = { {0} };
911
912 /* 3. set training not in progress*/
913 dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
914 dpcd_set_training_pattern(link, dpcd_pattern);
915
916 /* 4. mainlink output idle pattern*/
917 dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
918
919 /*
920 * 5. post training adjust if required
921 * If the upstream DPTX and downstream DPRX both support TPS4,
922 * TPS4 must be used instead of POST_LT_ADJ_REQ.
923 */
924 if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 &&
925 get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
926 return status;
927
928 if (status &&
929 perform_post_lt_adj_req_sequence(link, lt_settings) == false)
930 status = false;
931
932 lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
933 lane_count_set.bits.ENHANCED_FRAMING = 1;
934 lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
935
936 core_link_write_dpcd(
937 link,
938 DPCD_ADDRESS_LANE_COUNT_SET,
939 &lane_count_set.raw,
940 sizeof(lane_count_set));
941
942 return status;
943}
944
945bool dc_link_dp_perform_link_training(
946 struct dc_link *link,
947 const struct dc_link_settings *link_setting,
948 bool skip_video_pattern)
949{
950 struct core_link *core_link = DC_LINK_TO_CORE(link);
951 bool status;
952
953 char *link_rate = "Unknown";
954 struct link_training_settings lt_settings;
955
956 status = false;
957 memset(&lt_settings, '\0', sizeof(lt_settings));
958
959 lt_settings.link_settings.link_rate = link_setting->link_rate;
960 lt_settings.link_settings.lane_count = link_setting->lane_count;
961
962 /*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
963
964 /* TODO hard coded to SS for now
965 * lt_settings.link_settings.link_spread =
966 * dal_display_path_is_ss_supported(
967 * path_mode->display_path) ?
968 * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
969 * LINK_SPREAD_DISABLED;
970 */
971 lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
972
973 /* 1. set link rate, lane count and spread*/
974 dpcd_set_link_settings(core_link, &lt_settings);
975
976 /* 2. perform link training (set link training done
977 * to false is done as well)*/
978 if (perform_clock_recovery_sequence(core_link, &lt_settings)) {
979
980 if (perform_channel_equalization_sequence(core_link,
981 &lt_settings))
982 status = true;
983 }
984
985 if (status || !skip_video_pattern)
986 status = perform_link_training_int(core_link,
987 &lt_settings, status);
988
989 /* 6. print status message*/
990 switch (lt_settings.link_settings.link_rate) {
991
992 case LINK_RATE_LOW:
993 link_rate = "RBR";
994 break;
995 case LINK_RATE_HIGH:
996 link_rate = "HBR";
997 break;
998 case LINK_RATE_HIGH2:
999 link_rate = "HBR2";
1000 break;
1001 case LINK_RATE_RBR2:
1002 link_rate = "RBR2";
1003 break;
1004 case LINK_RATE_HIGH3:
1005 link_rate = "HBR3";
1006 break;
1007 default:
1008 break;
1009 }
1010
1011 /* Connectivity log: link training */
1012 CONN_MSG_LT(core_link, "%sx%d %s VS=%d, PE=%d",
1013 link_rate,
1014 lt_settings.link_settings.lane_count,
1015 status ? "pass" : "fail",
1016 lt_settings.lane_settings[0].VOLTAGE_SWING,
1017 lt_settings.lane_settings[0].PRE_EMPHASIS);
1018
1019 return status;
1020}
1021
1022
1023bool perform_link_training_with_retries(
1024 struct core_link *link,
1025 const struct dc_link_settings *link_setting,
1026 bool skip_video_pattern,
1027 int attempts)
1028{
1029 uint8_t j;
1030 uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
1031
1032 for (j = 0; j < attempts; ++j) {
1033
1034 if (dc_link_dp_perform_link_training(
1035 &link->public,
1036 link_setting,
1037 skip_video_pattern))
1038 return true;
1039
1040 msleep(delay_between_attempts);
1041 delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
1042 }
1043
1044 return false;
1045}
1046
1047/*TODO add more check to see if link support request link configuration */
1048static bool is_link_setting_supported(
1049 const struct dc_link_settings *link_setting,
1050 const struct dc_link_settings *max_link_setting)
1051{
1052 if (link_setting->lane_count > max_link_setting->lane_count ||
1053 link_setting->link_rate > max_link_setting->link_rate)
1054 return false;
1055 return true;
1056}
1057
1058static uint32_t get_link_training_fallback_table_len(
1059 struct core_link *link)
1060{
1061 return ARRAY_SIZE(link_training_fallback_table);
1062}
1063
1064static const struct dc_link_settings *get_link_training_fallback_table(
1065 struct core_link *link, uint32_t i)
1066{
1067 return &link_training_fallback_table[i];
1068}
1069
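/*
 * The comparison below uses lane_count * link_rate as a bandwidth
 * proxy. Assuming the usual DPCD encodings (e.g. HBR = 0x0a,
 * HBR2 = 0x14), 4 lanes of HBR and 2 lanes of HBR2 compare as equal.
 */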
1070static bool exceeded_limit_link_setting(
1071 const struct dc_link_settings *link_setting,
1072 const struct dc_link_settings *limit_link_setting)
1073{
1074 return (link_setting->lane_count * link_setting->link_rate
1075 > limit_link_setting->lane_count * limit_link_setting->link_rate ?
1076 true : false);
1077}
1078
1079static struct dc_link_settings get_max_link_cap(struct core_link *link)
1080{
1081 /* Set Default link settings */
1082 struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
1083 LINK_SPREAD_05_DOWNSPREAD_30KHZ};
1084
1085 /* Higher link settings based on feature supported */
1086 if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
1087 max_link_cap.link_rate = LINK_RATE_HIGH2;
1088
1089 if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
1090 max_link_cap.link_rate = LINK_RATE_HIGH3;
1091
1092 /* Lower link settings based on sink's link cap */
1093 if (link->public.reported_link_cap.lane_count < max_link_cap.lane_count)
1094 max_link_cap.lane_count =
1095 link->public.reported_link_cap.lane_count;
1096 if (link->public.reported_link_cap.link_rate < max_link_cap.link_rate)
1097 max_link_cap.link_rate =
1098 link->public.reported_link_cap.link_rate;
1099 if (link->public.reported_link_cap.link_spread <
1100 max_link_cap.link_spread)
1101 max_link_cap.link_spread =
1102 link->public.reported_link_cap.link_spread;
1103 return max_link_cap;
1104}
1105
1106bool dp_hbr_verify_link_cap(
1107 struct core_link *link,
1108 struct dc_link_settings *known_limit_link_setting)
1109{
1110 struct dc_link_settings max_link_cap = {0};
1111 bool success;
1112 bool skip_link_training;
1113 const struct dc_link_settings *cur;
1114 bool skip_video_pattern;
1115 uint32_t i;
1116 struct clock_source *dp_cs;
1117 enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
1118
1119 success = false;
1120 skip_link_training = false;
1121
1122 max_link_cap = get_max_link_cap(link);
1123
1124 /* TODO implement override and monitor patch later */
1125
1126 /* try to train the link from high to low to
1127 * find the physical link capability
1128 */
1129 /* disable PHY done possible by BIOS, will be done by driver itself */
1130 dp_disable_link_phy(link, link->public.connector_signal);
1131
1132 dp_cs = link->dc->res_pool->dp_clock_source;
1133
1134 if (dp_cs)
1135 dp_cs_id = dp_cs->id;
1136 else {
1137 /*
1138 * dp clock source is not initialized for some reason.
1139 * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
1140 */
1141 ASSERT(dp_cs);
1142 }
1143
1144 for (i = 0; i < get_link_training_fallback_table_len(link) &&
1145 !success; i++) {
1146 cur = get_link_training_fallback_table(link, i);
1147
1148 if (known_limit_link_setting->lane_count != LANE_COUNT_UNKNOWN &&
1149 exceeded_limit_link_setting(cur,
1150 known_limit_link_setting))
1151 continue;
1152
1153 if (!is_link_setting_supported(cur, &max_link_cap))
1154 continue;
1155
1156 skip_video_pattern = true;
1157 if (cur->link_rate == LINK_RATE_LOW)
1158 skip_video_pattern = false;
1159
1160 dp_enable_link_phy(
1161 link,
1162 link->public.connector_signal,
1163 dp_cs_id,
1164 cur);
1165
1166 if (skip_link_training)
1167 success = true;
1168 else {
1169 success = dc_link_dp_perform_link_training(
1170 &link->public,
1171 cur,
1172 skip_video_pattern);
1173 }
1174
1175 if (success)
1176 link->public.verified_link_cap = *cur;
1177
1178 /* always disable the link before trying another
1179		 * setting or before returning; we'll enable it later
1180 * based on the actual mode we're driving
1181 */
1182 dp_disable_link_phy(link, link->public.connector_signal);
1183 }
1184
1185 /* Link Training failed for all Link Settings
1186 * (Lane Count is still unknown)
1187 */
1188 if (!success) {
1189		/* If LT fails for all settings,
1190		 * set verified to the fail-safe setting (1 lane, low rate)
1191		 */
1192 link->public.verified_link_cap.lane_count = LANE_COUNT_ONE;
1193 link->public.verified_link_cap.link_rate = LINK_RATE_LOW;
1194
1195 link->public.verified_link_cap.link_spread =
1196 LINK_SPREAD_DISABLED;
1197 }
1198
1199 link->public.max_link_setting = link->public.verified_link_cap;
1200
1201 return success;
1202}
1203
1204static uint32_t bandwidth_in_kbps_from_timing(
1205 const struct dc_crtc_timing *timing)
1206{
1207 uint32_t bits_per_channel = 0;
1208 uint32_t kbps;
1209 switch (timing->display_color_depth) {
1210
1211 case COLOR_DEPTH_666:
1212 bits_per_channel = 6;
1213 break;
1214 case COLOR_DEPTH_888:
1215 bits_per_channel = 8;
1216 break;
1217 case COLOR_DEPTH_101010:
1218 bits_per_channel = 10;
1219 break;
1220 case COLOR_DEPTH_121212:
1221 bits_per_channel = 12;
1222 break;
1223 case COLOR_DEPTH_141414:
1224 bits_per_channel = 14;
1225 break;
1226 case COLOR_DEPTH_161616:
1227 bits_per_channel = 16;
1228 break;
1229 default:
1230 break;
1231 }
1232 ASSERT(bits_per_channel != 0);
1233
1234 kbps = timing->pix_clk_khz;
1235 kbps *= bits_per_channel;
1236
1237 if (timing->flags.Y_ONLY != 1)
1238		/* only Y-Only needs 1/3 of the RGB bandwidth (one component instead of three) */
1239 kbps *= 3;
1240
1241 return kbps;
1242
1243}
1244
1245static uint32_t bandwidth_in_kbps_from_link_settings(
1246 const struct dc_link_settings *link_setting)
1247{
1248 uint32_t link_rate_in_kbps = link_setting->link_rate *
1249 LINK_RATE_REF_FREQ_IN_KHZ;
1250
1251 uint32_t lane_count = link_setting->lane_count;
1252 uint32_t kbps = link_rate_in_kbps;
1253 kbps *= lane_count;
1254 kbps *= 8; /* 8 bits per byte*/
1255
1256 return kbps;
1257
1258}
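
For illustration, a minimal standalone sketch of the same arithmetic as the two bandwidth helpers above, comparing a 3840x2160@60 RGB 8bpc timing against an HBR2 x4 link. It assumes LINK_RATE_REF_FREQ_IN_KHZ is 27000 and the DPCD encoding where HBR2 (5.4 Gbps per lane) is 0x14; the sketch_* names are local to the example, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only; assumes a 27000 kHz link-rate reference and
 * the DPCD encoding where HBR2 is 0x14. */
static uint32_t sketch_timing_kbps(uint32_t pix_clk_khz, uint32_t bits_per_channel)
{
	return pix_clk_khz * bits_per_channel * 3; /* three components (non Y-Only) */
}

static uint32_t sketch_link_kbps(uint32_t link_rate, uint32_t lane_count)
{
	/* symbol rate per lane * lanes * 8 data bits per 8b/10b symbol */
	return link_rate * 27000 * lane_count * 8;
}

int main(void)
{
	uint32_t req_bw = sketch_timing_kbps(594000, 8); /* 3840x2160@60 RGB 8bpc */
	uint32_t max_bw = sketch_link_kbps(0x14, 4);     /* HBR2, 4 lanes */

	/* prints req=14256000 max=17280000 fits=1 */
	printf("req=%u max=%u fits=%d\n", req_bw, max_bw, req_bw <= max_bw);
	return 0;
}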
1259
1260bool dp_validate_mode_timing(
1261 struct core_link *link,
1262 const struct dc_crtc_timing *timing)
1263{
1264 uint32_t req_bw;
1265 uint32_t max_bw;
1266
1267 const struct dc_link_settings *link_setting;
1268
1269	/* 640x480 at 25.175 MHz is the DP fail-safe mode; always allow it */
1270 if (timing->pix_clk_khz == (uint32_t)25175 &&
1271 timing->h_addressable == (uint32_t)640 &&
1272 timing->v_addressable == (uint32_t)480)
1273 return true;
1274
1275 /* We always use verified link settings */
1276 link_setting = &link->public.verified_link_cap;
1277
1278 /* TODO: DYNAMIC_VALIDATION needs to be implemented */
1279 /*if (flags.DYNAMIC_VALIDATION == 1 &&
1280 link->public.verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
1281 link_setting = &link->public.verified_link_cap;
1282 */
1283
1284 req_bw = bandwidth_in_kbps_from_timing(timing);
1285 max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
1286
1287 if (req_bw <= max_bw) {
1288		/* remember the biggest mode here: during
1289		 * initial link training (to get
1290		 * verified_link_cap), LS sends an event to the
1291		 * upper layer when it cannot train at the
1292		 * reported cap, and the upper layer will re-enumerate modes.
1293		 * This is not necessary if the lower
1294		 * verified_link_cap is enough to drive
1295		 * all the modes */
1296
1297 /* TODO: DYNAMIC_VALIDATION needs to be implemented */
1298 /* if (flags.DYNAMIC_VALIDATION == 1)
1299 dpsst->max_req_bw_for_verified_linkcap = dal_max(
1300 dpsst->max_req_bw_for_verified_linkcap, req_bw); */
1301 return true;
1302 } else
1303 return false;
1304}
1305
1306void decide_link_settings(struct core_stream *stream,
1307 struct dc_link_settings *link_setting)
1308{
1309
1310 const struct dc_link_settings *cur_ls;
1311 struct core_link* link;
1312 uint32_t req_bw;
1313 uint32_t link_bw;
1314 uint32_t i;
1315
1316 req_bw = bandwidth_in_kbps_from_timing(
1317 &stream->public.timing);
1318
1319	/* if preferred is specified through AMDDP, use it if it's enough
1320 * to drive the mode
1321 */
1322 link = stream->sink->link;
1323
1324 if ((link->public.reported_link_cap.lane_count != LANE_COUNT_UNKNOWN) &&
1325 (link->public.reported_link_cap.link_rate <=
1326 link->public.verified_link_cap.link_rate)) {
1327
1328 link_bw = bandwidth_in_kbps_from_link_settings(
1329 &link->public.reported_link_cap);
1330
1331 if (req_bw < link_bw) {
1332 *link_setting = link->public.reported_link_cap;
1333 return;
1334 }
1335 }
1336
1337 /* search for first suitable setting for the requested
1338 * bandwidth
1339 */
1340 for (i = 0; i < get_link_training_fallback_table_len(link); i++) {
1341
1342 cur_ls = get_link_training_fallback_table(link, i);
1343
1344 link_bw =
1345 bandwidth_in_kbps_from_link_settings(
1346 cur_ls);
1347
1348 if (req_bw < link_bw) {
1349 if (is_link_setting_supported(
1350 cur_ls,
1351 &link->public.max_link_setting)) {
1352 *link_setting = *cur_ls;
1353 return;
1354 }
1355 }
1356 }
1357
1358 BREAK_TO_DEBUGGER();
1359 ASSERT(link->public.verified_link_cap.lane_count !=
1360 LANE_COUNT_UNKNOWN);
1361
1362 *link_setting = link->public.verified_link_cap;
1363}
1364
1365/*************************Short Pulse IRQ***************************/
1366
1367static bool hpd_rx_irq_check_link_loss_status(
1368 struct core_link *link,
1369 union hpd_irq_data *hpd_irq_dpcd_data)
1370{
1371 uint8_t irq_reg_rx_power_state;
1372 enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
1373 union lane_status lane_status;
1374 uint32_t lane;
1375 bool sink_status_changed;
1376 bool return_code;
1377
1378 sink_status_changed = false;
1379 return_code = false;
1380
1381 if (link->public.cur_link_settings.lane_count == 0)
1382 return return_code;
1383 /*1. Check that we can handle interrupt: Not in FS DOS,
1384 * Not in "Display Timeout" state, Link is trained.
1385 */
1386
1387 dpcd_result = core_link_read_dpcd(link,
1388 DPCD_ADDRESS_POWER_STATE,
1389 &irq_reg_rx_power_state,
1390 sizeof(irq_reg_rx_power_state));
1391
1392 if (dpcd_result != DC_OK) {
1393 irq_reg_rx_power_state = DP_PWR_STATE_D0;
1394 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
1395 "%s: DPCD read failed to obtain power state.\n",
1396 __func__);
1397 }
1398
1399 if (irq_reg_rx_power_state == DP_PWR_STATE_D0) {
1400
1401 /*2. Check that Link Status changed, before re-training.*/
1402
1403 /*parse lane status*/
1404 for (lane = 0;
1405 lane < link->public.cur_link_settings.lane_count;
1406 lane++) {
1407
1408			/* check per-lane status; lanes 0,1 come from
1409			 * DpcdAddress_Lane01Status (0x202), lanes 2,3 from the following byte */
1410 lane_status.raw = get_nibble_at_index(
1411 &hpd_irq_dpcd_data->bytes.lane01_status.raw,
1412 lane);
1413
1414 if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
1415 !lane_status.bits.CR_DONE_0 ||
1416 !lane_status.bits.SYMBOL_LOCKED_0) {
1417				/* if channel equalization, clock
1418				 * recovery or symbol lock is dropped,
1419				 * consider the link as dropped:
1420				 * the DP sink status has changed */
1421 sink_status_changed = true;
1422 break;
1423 }
1424
1425 }
1426
1427 /* Check interlane align.*/
1428 if (sink_status_changed ||
1429 !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
1430 INTERLANE_ALIGN_DONE) {
1431
1432 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
1433 "%s: Link Status changed.\n",
1434 __func__);
1435
1436 return_code = true;
1437 }
1438 }
1439
1440 return return_code;
1441}
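
The lane loop above pulls one 4-bit status field per lane out of the packed lane01_status bytes with get_nibble_at_index(). As a hedged illustration of that packing, a local sketch under the assumption that lane N occupies nibble N of the buffer (this is not the driver's implementation, just an example):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: assumes lane N's 4-bit status sits in nibble N of the
 * buffer read from the lane status registers (0x202 for lanes 0/1,
 * the following byte for lanes 2/3). */
static uint8_t sketch_get_nibble_at_index(const uint8_t *buf, uint32_t index)
{
	return (buf[index / 2] >> ((index % 2) * 4)) & 0x0F;
}

int main(void)
{
	/* lane0 = 0x7 (CR, EQ and symbol lock done), lane1 = 0x3,
	 * lane2 = 0x7, lane3 = 0x1 (only CR done) */
	const uint8_t lane_status_bytes[2] = { 0x37, 0x17 };
	uint32_t lane;

	for (lane = 0; lane < 4; lane++)
		printf("lane %u status nibble: 0x%x\n", lane,
		       sketch_get_nibble_at_index(lane_status_bytes, lane));
	return 0;
}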
1442
1443static enum dc_status read_hpd_rx_irq_data(
1444 struct core_link *link,
1445 union hpd_irq_data *irq_data)
1446{
1447 /* The HW reads 16 bytes from 200h on HPD,
1448 * but if we get an AUX_DEFER, the HW cannot retry
1449 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
1450	 * fail, so we now explicitly read 6 bytes, which is
1451	 * what the above-mentioned test cases require.
1452 */
1453 return core_link_read_dpcd(
1454 link,
1455 DPCD_ADDRESS_SINK_COUNT,
1456 irq_data->raw,
1457 sizeof(union hpd_irq_data));
1458}
1459
1460static bool allow_hpd_rx_irq(const struct core_link *link)
1461{
1462 /*
1463 * Don't handle RX IRQ unless one of following is met:
1464 * 1) The link is established (cur_link_settings != unknown)
1465 * 2) We kicked off MST detection
1466 * 3) We know we're dealing with an active dongle
1467 */
1468
1469 if ((link->public.cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1470 (link->public.type == dc_connection_mst_branch) ||
1471 is_dp_active_dongle(link))
1472 return true;
1473
1474 return false;
1475}
1476
1477static bool handle_hpd_irq_psr_sink(const struct core_link *link)
1478{
1479 union dpcd_psr_configuration psr_configuration;
1480
1481 if (link->public.psr_caps.psr_version == 0)
1482 return false;
1483
1484 dal_ddc_service_read_dpcd_data(
1485 link->ddc,
1486 368 /*DpcdAddress_PSR_Enable_Cfg*/,
1487 &psr_configuration.raw,
1488 sizeof(psr_configuration.raw));
1489
1490 if (psr_configuration.bits.ENABLE) {
1491 unsigned char dpcdbuf[3] = {0};
1492 union psr_error_status psr_error_status;
1493 union psr_sink_psr_status psr_sink_psr_status;
1494
1495 dal_ddc_service_read_dpcd_data(
1496 link->ddc,
1497 0x2006 /*DpcdAddress_PSR_Error_Status*/,
1498 (unsigned char *) dpcdbuf,
1499 sizeof(dpcdbuf));
1500
1501 /*DPCD 2006h ERROR STATUS*/
1502 psr_error_status.raw = dpcdbuf[0];
1503 /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
1504 psr_sink_psr_status.raw = dpcdbuf[2];
1505
1506 if (psr_error_status.bits.LINK_CRC_ERROR ||
1507 psr_error_status.bits.RFB_STORAGE_ERROR) {
1508 /* Acknowledge and clear error bits */
1509 dal_ddc_service_write_dpcd_data(
1510 link->ddc,
1511 8198 /*DpcdAddress_PSR_Error_Status*/,
1512 &psr_error_status.raw,
1513 sizeof(psr_error_status.raw));
1514
1515 /* PSR error, disable and re-enable PSR */
1516 dc_link_set_psr_enable(&link->public, false);
1517 dc_link_set_psr_enable(&link->public, true);
1518
1519 return true;
1520 } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
1521 PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
1522			/* No error is detected, PSR is active.
1523 * We should return with IRQ_HPD handled without
1524 * checking for loss of sync since PSR would have
1525 * powered down main link.
1526 */
1527 return true;
1528 }
1529 }
1530 return false;
1531}
1532
1533static void dp_test_send_link_training(struct core_link *link)
1534{
1535 struct dc_link_settings link_settings;
1536
1537 core_link_read_dpcd(
1538 link,
1539 DPCD_ADDRESS_TEST_LANE_COUNT,
1540 (unsigned char *)(&link_settings.lane_count),
1541 1);
1542 core_link_read_dpcd(
1543 link,
1544 DPCD_ADDRESS_TEST_LINK_RATE,
1545 (unsigned char *)(&link_settings.link_rate),
1546 1);
1547
1548 /* Set preferred link settings */
1549 link->public.verified_link_cap.lane_count = link_settings.lane_count;
1550 link->public.verified_link_cap.link_rate = link_settings.link_rate;
1551
1552 dp_retrain_link(link);
1553}
1554
1555static void dp_test_send_phy_test_pattern(struct core_link *link)
1556{
1557 union phy_test_pattern dpcd_test_pattern;
1558 union lane_adjust dpcd_lane_adjustment[2];
1559 unsigned char dpcd_post_cursor_2_adjustment = 0;
1560 unsigned char test_80_bit_pattern[
1561 (DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_79_72 -
1562 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
1563 enum dp_test_pattern test_pattern;
1564 struct dc_link_training_settings link_settings;
1565 union lane_adjust dpcd_lane_adjust;
1566 unsigned int lane;
1567 struct link_training_settings link_training_settings;
1568 int i = 0;
1569
1570 dpcd_test_pattern.raw = 0;
1571 memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
1572 memset(&link_settings, 0, sizeof(link_settings));
1573
1574 /* get phy test pattern and pattern parameters from DP receiver */
1575 core_link_read_dpcd(
1576 link,
1577 DPCD_ADDRESS_TEST_PHY_PATTERN,
1578 &dpcd_test_pattern.raw,
1579 sizeof(dpcd_test_pattern));
1580 core_link_read_dpcd(
1581 link,
1582 DPCD_ADDRESS_ADJUST_REQUEST_LANE0_1,
1583 &dpcd_lane_adjustment[0].raw,
1584 sizeof(dpcd_lane_adjustment));
1585
1586	/* get post cursor 2 parameters
1587	 * For DP 1.1a or earlier, this DPCD register's value is 0
1588 * For DP 1.2 or later:
1589 * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
1590 * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
1591 */
1592 core_link_read_dpcd(
1593 link,
1594 DPCD_ADDRESS_ADJUST_REQUEST_POST_CURSOR2,
1595 &dpcd_post_cursor_2_adjustment,
1596 sizeof(dpcd_post_cursor_2_adjustment));
1597
1598 /* translate request */
1599 switch (dpcd_test_pattern.bits.PATTERN) {
1600 case PHY_TEST_PATTERN_D10_2:
1601 test_pattern = DP_TEST_PATTERN_D102;
1602 break;
1603 case PHY_TEST_PATTERN_SYMBOL_ERROR:
1604 test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
1605 break;
1606 case PHY_TEST_PATTERN_PRBS7:
1607 test_pattern = DP_TEST_PATTERN_PRBS7;
1608 break;
1609 case PHY_TEST_PATTERN_80BIT_CUSTOM:
1610 test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
1611 break;
1612 case PHY_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
1613 test_pattern = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
1614 break;
1615 default:
1616 test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
1617 break;
1618 }
1619
1620 if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
1621 core_link_read_dpcd(
1622 link,
1623 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_7_0,
1624 test_80_bit_pattern,
1625 sizeof(test_80_bit_pattern));
1626
1627 /* prepare link training settings */
1628 link_settings.link = link->public.cur_link_settings;
1629
1630 for (lane = 0; lane <
1631 (unsigned int)(link->public.cur_link_settings.lane_count);
1632 lane++) {
1633 dpcd_lane_adjust.raw =
1634 get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
1635 link_settings.lane_settings[lane].VOLTAGE_SWING =
1636 (enum dc_voltage_swing)
1637 (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
1638 link_settings.lane_settings[lane].PRE_EMPHASIS =
1639 (enum dc_pre_emphasis)
1640 (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
1641 link_settings.lane_settings[lane].POST_CURSOR2 =
1642 (enum dc_post_cursor2)
1643 ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
1644 }
1645
1646 for (i = 0; i < 4; i++)
1647 link_training_settings.lane_settings[i] =
1648 link_settings.lane_settings[i];
1649 link_training_settings.link_settings = link_settings.link;
1650 link_training_settings.allow_invalid_msa_timing_param = false;
1651	/* Usage: measure the DP physical lane signal
1652	 * with DP SI test equipment automatically.
1653	 * The PHY test pattern request is generated by the equipment via HPD interrupt.
1654	 * HPD therefore needs to be active all the time;
1655	 * do not touch it.
1656	 * Forward the request to the DS.
1657	 */
1658 dc_link_dp_set_test_pattern(
1659 &link->public,
1660 test_pattern,
1661 &link_training_settings,
1662 test_80_bit_pattern,
1663 (DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_79_72 -
1664 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
1665}
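
The POST_CURSOR2 request byte decoded in the loop above packs one 2-bit value per lane (bits 1:0 for lane 0 up to bits 7:6 for lane 3). A small illustrative sketch of that unpacking with a made-up sample byte:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Sample ADJUST_REQUEST_POST_CURSOR2 byte: 0b11_10_00_01, i.e.
	 * lane0 = 1, lane1 = 0, lane2 = 2, lane3 = 3 (values are made up). */
	const uint8_t dpcd_post_cursor_2_adjustment = 0xE1;
	unsigned int lane;

	for (lane = 0; lane < 4; lane++)
		printf("lane %u POST_CURSOR2 = %u\n", lane,
		       (dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
	return 0;
}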
1666
1667static void dp_test_send_link_test_pattern(struct core_link *link)
1668{
1669 union link_test_pattern dpcd_test_pattern;
1670 union test_misc dpcd_test_params;
1671 enum dp_test_pattern test_pattern;
1672
1673 memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
1674 memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
1675
1676 /* get link test pattern and pattern parameters */
1677 core_link_read_dpcd(
1678 link,
1679 DPCD_ADDRESS_TEST_PATTERN,
1680 &dpcd_test_pattern.raw,
1681 sizeof(dpcd_test_pattern));
1682 core_link_read_dpcd(
1683 link,
1684 DPCD_ADDRESS_TEST_MISC1,
1685 &dpcd_test_params.raw,
1686 sizeof(dpcd_test_params));
1687
1688 switch (dpcd_test_pattern.bits.PATTERN) {
1689 case LINK_TEST_PATTERN_COLOR_RAMP:
1690 test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
1691 break;
1692 case LINK_TEST_PATTERN_VERTICAL_BARS:
1693 test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
1694 break; /* black and white */
1695 case LINK_TEST_PATTERN_COLOR_SQUARES:
1696 test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
1697 TEST_DYN_RANGE_VESA ?
1698 DP_TEST_PATTERN_COLOR_SQUARES :
1699 DP_TEST_PATTERN_COLOR_SQUARES_CEA);
1700 break;
1701 default:
1702 test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
1703 break;
1704 }
1705
1706 dc_link_dp_set_test_pattern(
1707 &link->public,
1708 test_pattern,
1709 NULL,
1710 NULL,
1711 0);
1712}
1713
1714static void handle_automated_test(struct core_link *link)
1715{
1716 union test_request test_request;
1717 union test_response test_response;
1718
1719 memset(&test_request, 0, sizeof(test_request));
1720 memset(&test_response, 0, sizeof(test_response));
1721
1722 core_link_read_dpcd(
1723 link,
1724 DPCD_ADDRESS_TEST_REQUEST,
1725 &test_request.raw,
1726 sizeof(union test_request));
1727 if (test_request.bits.LINK_TRAINING) {
1728 /* ACK first to let DP RX test box monitor LT sequence */
1729 test_response.bits.ACK = 1;
1730 core_link_write_dpcd(
1731 link,
1732 DPCD_ADDRESS_TEST_RESPONSE,
1733 &test_response.raw,
1734 sizeof(test_response));
1735 dp_test_send_link_training(link);
1736 /* no acknowledge request is needed again */
1737 test_response.bits.ACK = 0;
1738 }
1739 if (test_request.bits.LINK_TEST_PATTRN) {
1740 dp_test_send_link_test_pattern(link);
1741 link->public.compliance_test_state.bits.
1742 SET_TEST_PATTERN_PENDING = 1;
1743 }
1744 if (test_request.bits.PHY_TEST_PATTERN) {
1745 dp_test_send_phy_test_pattern(link);
1746 test_response.bits.ACK = 1;
1747 }
1748 if (!test_request.raw)
1749 /* no requests, revert all test signals
1750 * TODO: revert all test signals
1751 */
1752 test_response.bits.ACK = 1;
1753 /* send request acknowledgment */
1754 if (test_response.bits.ACK)
1755 core_link_write_dpcd(
1756 link,
1757 DPCD_ADDRESS_TEST_RESPONSE,
1758 &test_response.raw,
1759 sizeof(test_response));
1760}
1761
1762bool dc_link_handle_hpd_rx_irq(const struct dc_link *dc_link)
1763{
1764 struct core_link *link = DC_LINK_TO_LINK(dc_link);
1765 union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
1766 union device_service_irq device_service_clear = {0};
1767 enum dc_status result = DDC_RESULT_UNKNOWN;
1768 bool status = false;
1769	/* For use cases related to downstream connection status change,
1770 * PSR and device auto test, refer to function handle_sst_hpd_irq
1771 * in DAL2.1*/
1772
1773 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
1774 "%s: Got short pulse HPD on link %d\n",
1775 __func__, link->public.link_index);
1776
1777 /* All the "handle_hpd_irq_xxx()" methods
1778 * should be called only after
1779 * dal_dpsst_ls_read_hpd_irq_data
1780 * Order of calls is important too
1781 */
1782 result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
1783
1784 if (result != DC_OK) {
1785 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
1786 "%s: DPCD read failed to obtain irq data\n",
1787 __func__);
1788 return false;
1789 }
1790
1791 if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
1792 device_service_clear.bits.AUTOMATED_TEST = 1;
1793 core_link_write_dpcd(
1794 link,
1795 DPCD_ADDRESS_DEVICE_SERVICE_IRQ_VECTOR,
1796 &device_service_clear.raw,
1797 sizeof(device_service_clear.raw));
1798 device_service_clear.raw = 0;
1799 handle_automated_test(link);
1800 return false;
1801 }
1802
1803 if (!allow_hpd_rx_irq(link)) {
1804 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
1805 "%s: skipping HPD handling on %d\n",
1806 __func__, link->public.link_index);
1807 return false;
1808 }
1809
1810 if (handle_hpd_irq_psr_sink(link))
1811 /* PSR-related error was detected and handled */
1812 return true;
1813
1814	/* If a PSR-related error was handled, the main link may be off,
1815	 * so do not handle this as a normal sink status change interrupt.
1816 */
1817
1818 /* check if we have MST msg and return since we poll for it */
1819 if (hpd_irq_dpcd_data.bytes.device_service_irq.
1820 bits.DOWN_REP_MSG_RDY ||
1821 hpd_irq_dpcd_data.bytes.device_service_irq.
1822 bits.UP_REQ_MSG_RDY)
1823 return false;
1824
1825	/* For now we only handle the 'Downstream port status' case.
1826	 * If the sink count changed, it means
1827	 * the downstream port status changed,
1828	 * and DM should call DC to do the detection. */
1829 if (hpd_rx_irq_check_link_loss_status(
1830 link,
1831 &hpd_irq_dpcd_data)) {
1832 /* Connectivity log: link loss */
1833 CONN_DATA_LINK_LOSS(link,
1834 hpd_irq_dpcd_data.raw,
1835 sizeof(hpd_irq_dpcd_data),
1836 "Status: ");
1837
1838 perform_link_training_with_retries(link,
1839 &link->public.cur_link_settings,
1840 true, LINK_TRAINING_ATTEMPTS);
1841
1842 status = false;
1843 }
1844
1845 if (link->public.type == dc_connection_active_dongle &&
1846 hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
1847 != link->dpcd_sink_count)
1848 status = true;
1849
1850 /* reasons for HPD RX:
1851 * 1. Link Loss - ie Re-train the Link
1852 * 2. MST sideband message
1853 * 3. Automated Test - ie. Internal Commit
1854 * 4. CP (copy protection) - (not interesting for DM???)
1855 * 5. DRR
1856 * 6. Downstream Port status changed
1857	 * -ie. Detect - this is the only one
1858 * which is interesting for DM because
1859 * it must call dc_link_detect.
1860 */
1861 return status;
1862}
1863
1864/*query dpcd for version and mst cap addresses*/
1865bool is_mst_supported(struct core_link *link)
1866{
1867 bool mst = false;
1868 enum dc_status st = DC_OK;
1869 union dpcd_rev rev;
1870 union mstm_cap cap;
1871
1872 rev.raw = 0;
1873 cap.raw = 0;
1874
1875 st = core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV, &rev.raw,
1876 sizeof(rev));
1877
1878 if (st == DC_OK && rev.raw >= DPCD_REV_12) {
1879
1880 st = core_link_read_dpcd(link, DPCD_ADDRESS_MSTM_CAP,
1881 &cap.raw, sizeof(cap));
1882 if (st == DC_OK && cap.bits.MST_CAP == 1)
1883 mst = true;
1884 }
1885 return mst;
1886
1887}
1888
1889bool is_dp_active_dongle(const struct core_link *link)
1890{
1891 enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
1892
1893 return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
1894 (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
1895 (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
1896}
1897
1898static void get_active_converter_info(
1899 uint8_t data, struct core_link *link)
1900{
1901 union dp_downstream_port_present ds_port = { .byte = data };
1902
1903 /* decode converter info*/
1904 if (!ds_port.fields.PORT_PRESENT) {
1905 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
1906 ddc_service_set_dongle_type(link->ddc,
1907 link->dpcd_caps.dongle_type);
1908 return;
1909 }
1910
1911 switch (ds_port.fields.PORT_TYPE) {
1912 case DOWNSTREAM_VGA:
1913 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
1914 break;
1915 case DOWNSTREAM_DVI_HDMI:
1916		/* At this point we don't know whether it is DVI or HDMI,
1917		 * so assume DVI. */
1918 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
1919 break;
1920 default:
1921 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
1922 break;
1923 }
1924
1925 if (link->dpcd_caps.dpcd_rev.raw >= DCS_DPCD_REV_11) {
1926 uint8_t det_caps[4];
1927 union dwnstream_port_caps_byte0 *port_caps =
1928 (union dwnstream_port_caps_byte0 *)det_caps;
1929 core_link_read_dpcd(link, DPCD_ADDRESS_DWN_STRM_PORT0_CAPS,
1930 det_caps, sizeof(det_caps));
1931
1932 switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
1933 case DOWN_STREAM_DETAILED_VGA:
1934 link->dpcd_caps.dongle_type =
1935 DISPLAY_DONGLE_DP_VGA_CONVERTER;
1936 break;
1937 case DOWN_STREAM_DETAILED_DVI:
1938 link->dpcd_caps.dongle_type =
1939 DISPLAY_DONGLE_DP_DVI_CONVERTER;
1940 break;
1941 case DOWN_STREAM_DETAILED_HDMI:
1942 link->dpcd_caps.dongle_type =
1943 DISPLAY_DONGLE_DP_HDMI_CONVERTER;
1944
1945 if (ds_port.fields.DETAILED_CAPS) {
1946
1947 union dwnstream_port_caps_byte3_hdmi
1948 hdmi_caps = {.raw = det_caps[3] };
1949
1950 link->dpcd_caps.is_dp_hdmi_s3d_converter =
1951 hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
1952 }
1953 break;
1954 }
1955 }
1956
1957 ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
1958
1959 {
1960 struct dp_device_vendor_id dp_id;
1961
1962 /* read IEEE branch device id */
1963 core_link_read_dpcd(
1964 link,
1965 DPCD_ADDRESS_BRANCH_DEVICE_ID_START,
1966 (uint8_t *)&dp_id,
1967 sizeof(dp_id));
1968
1969 link->dpcd_caps.branch_dev_id =
1970 (dp_id.ieee_oui[0] << 16) +
1971 (dp_id.ieee_oui[1] << 8) +
1972 dp_id.ieee_oui[2];
1973
1974 memmove(
1975 link->dpcd_caps.branch_dev_name,
1976 dp_id.ieee_device_id,
1977 sizeof(dp_id.ieee_device_id));
1978 }
1979
1980 {
1981 struct dp_sink_hw_fw_revision dp_hw_fw_revision;
1982
1983 core_link_read_dpcd(
1984 link,
1985 DPCD_ADDRESS_BRANCH_REVISION_START,
1986 (uint8_t *)&dp_hw_fw_revision,
1987 sizeof(dp_hw_fw_revision));
1988
1989 link->dpcd_caps.branch_hw_revision =
1990 dp_hw_fw_revision.ieee_hw_rev;
1991 }
1992}
1993
1994static void dp_wa_power_up_0010FA(struct core_link *link, uint8_t *dpcd_data,
1995 int length)
1996{
1997 int retry = 0;
1998 union dp_downstream_port_present ds_port = { 0 };
1999
2000 if (!link->dpcd_caps.dpcd_rev.raw) {
2001 do {
2002 dp_receiver_power_ctrl(link, true);
2003 core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV,
2004 dpcd_data, length);
2005 link->dpcd_caps.dpcd_rev.raw = dpcd_data[
2006 DPCD_ADDRESS_DPCD_REV -
2007 DPCD_ADDRESS_DPCD_REV];
2008 } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
2009 }
2010
2011 ds_port.byte = dpcd_data[DPCD_ADDRESS_DOWNSTREAM_PORT_PRESENT -
2012 DPCD_ADDRESS_DPCD_REV];
2013
2014 if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
2015 switch (link->dpcd_caps.branch_dev_id) {
2016 /* Some active dongles (DP-VGA, DP-DLDVI converters) power down
2017		 * all internal circuits, including AUX communication, preventing
2018		 * reading of the DPCD table and EDID (a spec violation).
2019		 * The encoder will skip DP RX power down on disable_output to
2020		 * keep the receiver powered all the time. */
2021 case DP_BRANCH_DEVICE_ID_1:
2022 case DP_BRANCH_DEVICE_ID_4:
2023 link->wa_flags.dp_keep_receiver_powered = true;
2024 break;
2025
2026 /* TODO: May need work around for other dongles. */
2027 default:
2028 link->wa_flags.dp_keep_receiver_powered = false;
2029 break;
2030 }
2031 } else
2032 link->wa_flags.dp_keep_receiver_powered = false;
2033}
2034
2035static void retrieve_psr_link_cap(struct core_link *link,
2036 enum edp_revision edp_revision)
2037{
2038 if (edp_revision >= EDP_REVISION_13) {
2039 core_link_read_dpcd(link,
2040 DPCD_ADDRESS_PSR_SUPPORT_VER,
2041 (uint8_t *)(&link->public.psr_caps),
2042 sizeof(link->public.psr_caps));
2043 if (link->public.psr_caps.psr_version != 0) {
2044 unsigned char psr_capability = 0;
2045
2046 core_link_read_dpcd(link,
2047 DPCD_ADDRESS_PSR_CAPABILITY,
2048 &psr_capability,
2049 sizeof(psr_capability));
2050 /* Bit 0 determines whether fast link training is
2051 * required on PSR exit. If set to 0, link training
2052 * is required. If set to 1, sink must lock within
2053 * five Idle Patterns after Main Link is turned on.
2054 */
2055 link->public.psr_caps.psr_exit_link_training_required
2056 = !(psr_capability & 0x1);
2057
2058 psr_capability = (psr_capability >> 1) & 0x7;
2059 link->public.psr_caps.psr_rfb_setup_time =
2060 55 * (6 - psr_capability);
2061 }
2062 }
2063}
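
To make the capability decode above concrete: bit 0 of the PSR capability byte is inverted into psr_exit_link_training_required, and bits 3:1 feed the 55 * (6 - value) RFB setup time. A minimal sketch with a sample byte (the variable names are local to this example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Sample PSR capability byte 0b0000_0101: bit 0 set means no link
	 * training is required on PSR exit; bits 3:1 hold the value 2. */
	const uint8_t psr_capability = 0x05;

	bool exit_lt_required = !(psr_capability & 0x1);
	uint8_t setup_field = (psr_capability >> 1) & 0x7;
	unsigned int rfb_setup_time = 55 * (6 - setup_field);

	/* prints: exit LT required: 0, RFB setup time: 220 */
	printf("exit LT required: %d, RFB setup time: %u\n",
	       exit_lt_required, rfb_setup_time);
	return 0;
}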
2064
2065static void retrieve_link_cap(struct core_link *link)
2066{
2067 uint8_t dpcd_data[DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL - DPCD_ADDRESS_DPCD_REV + 1];
2068
2069 union down_stream_port_count down_strm_port_count;
2070 union edp_configuration_cap edp_config_cap;
2071 union dp_downstream_port_present ds_port = { 0 };
2072
2073 memset(dpcd_data, '\0', sizeof(dpcd_data));
2074 memset(&down_strm_port_count,
2075 '\0', sizeof(union down_stream_port_count));
2076 memset(&edp_config_cap, '\0',
2077 sizeof(union edp_configuration_cap));
2078
2079 core_link_read_dpcd(
2080 link,
2081 DPCD_ADDRESS_DPCD_REV,
2082 dpcd_data,
2083 sizeof(dpcd_data));
2084
2085 link->dpcd_caps.dpcd_rev.raw =
2086 dpcd_data[DPCD_ADDRESS_DPCD_REV - DPCD_ADDRESS_DPCD_REV];
2087
2088 {
2089 union training_aux_rd_interval aux_rd_interval;
2090
2091 aux_rd_interval.raw =
2092 dpcd_data[DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL];
2093
2094 if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
2095 core_link_read_dpcd(
2096 link,
2097 DPCD_ADDRESS_DP13_DPCD_REV,
2098 dpcd_data,
2099 sizeof(dpcd_data));
2100 }
2101 }
2102
2103 ds_port.byte = dpcd_data[DPCD_ADDRESS_DOWNSTREAM_PORT_PRESENT -
2104 DPCD_ADDRESS_DPCD_REV];
2105
2106 get_active_converter_info(ds_port.byte, link);
2107
2108 dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
2109
2110 link->dpcd_caps.allow_invalid_MSA_timing_param =
2111 down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
2112
2113 link->dpcd_caps.max_ln_count.raw = dpcd_data[
2114 DPCD_ADDRESS_MAX_LANE_COUNT - DPCD_ADDRESS_DPCD_REV];
2115
2116 link->dpcd_caps.max_down_spread.raw = dpcd_data[
2117 DPCD_ADDRESS_MAX_DOWNSPREAD - DPCD_ADDRESS_DPCD_REV];
2118
2119 link->public.reported_link_cap.lane_count =
2120 link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
2121 link->public.reported_link_cap.link_rate = dpcd_data[
2122 DPCD_ADDRESS_MAX_LINK_RATE - DPCD_ADDRESS_DPCD_REV];
2123 link->public.reported_link_cap.link_spread =
2124 link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
2125 LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
2126
2127 edp_config_cap.raw = dpcd_data[
2128 DPCD_ADDRESS_EDP_CONFIG_CAP - DPCD_ADDRESS_DPCD_REV];
2129 link->dpcd_caps.panel_mode_edp =
2130 edp_config_cap.bits.ALT_SCRAMBLER_RESET;
2131
2132 link->edp_revision = DPCD_EDP_REVISION_EDP_UNKNOWN;
2133
2134 link->public.test_pattern_enabled = false;
2135 link->public.compliance_test_state.raw = 0;
2136
2137 link->public.psr_caps.psr_exit_link_training_required = false;
2138 link->public.psr_caps.psr_frame_capture_indication_req = false;
2139 link->public.psr_caps.psr_rfb_setup_time = 0;
2140 link->public.psr_caps.psr_sdp_transmit_line_num_deadline = 0;
2141 link->public.psr_caps.psr_version = 0;
2142
2143 /* read sink count */
2144 core_link_read_dpcd(link,
2145 DPCD_ADDRESS_SINK_COUNT,
2146 &link->dpcd_caps.sink_count.raw,
2147 sizeof(link->dpcd_caps.sink_count.raw));
2148
2149 /* Display control registers starting at DPCD 700h are only valid and
2150 * enabled if this eDP config cap bit is set. */
2151 if (edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE) {
2152 /* Read the Panel's eDP revision at DPCD 700h. */
2153 core_link_read_dpcd(link,
2154 DPCD_ADDRESS_EDP_REV,
2155 (uint8_t *)(&link->edp_revision),
2156 sizeof(link->edp_revision));
2157 }
2158
2159 /* Connectivity log: detection */
2160 CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
2161
2162 /* TODO: Confirm if need retrieve_psr_link_cap */
2163 retrieve_psr_link_cap(link, link->edp_revision);
2164}
2165
2166void detect_dp_sink_caps(struct core_link *link)
2167{
2168 retrieve_link_cap(link);
2169
2170 /* dc init_hw has power encoder using default
2171 * signal for connector. For native DP, no
2172 * need to power up encoder again. If not native
2173 * DP, hw_init may need check signal or power up
2174 * encoder here.
2175 */
2176
2177 if (is_mst_supported(link)) {
2178 link->public.verified_link_cap = link->public.reported_link_cap;
2179 } else {
2180 dp_hbr_verify_link_cap(link,
2181 &link->public.reported_link_cap);
2182 }
2183 /* TODO save sink caps in link->sink */
2184}
2185
2186void dc_link_dp_enable_hpd(const struct dc_link *link)
2187{
2188 struct core_link *core_link = DC_LINK_TO_CORE(link);
2189 struct link_encoder *encoder = core_link->link_enc;
2190
2191 if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
2192 encoder->funcs->enable_hpd(encoder);
2193}
2194
2195void dc_link_dp_disable_hpd(const struct dc_link *link)
2196{
2197 struct core_link *core_link = DC_LINK_TO_CORE(link);
2198 struct link_encoder *encoder = core_link->link_enc;
2199
2200	if (encoder != NULL && encoder->funcs->disable_hpd != NULL)
2201 encoder->funcs->disable_hpd(encoder);
2202}
2203
2204static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
2205{
2206 if (test_pattern == DP_TEST_PATTERN_D102 ||
2207 test_pattern == DP_TEST_PATTERN_SYMBOL_ERROR ||
2208 test_pattern == DP_TEST_PATTERN_PRBS7 ||
2209 test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
2210 test_pattern == DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE ||
2211 test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN1 ||
2212 test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN2 ||
2213 test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN3 ||
2214 test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN4 ||
2215 test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
2216 return true;
2217 else
2218 return false;
2219}
2220
2221static void set_crtc_test_pattern(struct core_link *link,
2222 struct pipe_ctx *pipe_ctx,
2223 enum dp_test_pattern test_pattern)
2224{
2225 enum controller_dp_test_pattern controller_test_pattern;
2226 enum dc_color_depth color_depth = pipe_ctx->
2227 stream->public.timing.display_color_depth;
2228 struct bit_depth_reduction_params params;
2229
2230 memset(&params, 0, sizeof(params));
2231
2232 switch (test_pattern) {
2233 case DP_TEST_PATTERN_COLOR_SQUARES:
2234 controller_test_pattern =
2235 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
2236 break;
2237 case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
2238 controller_test_pattern =
2239 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
2240 break;
2241 case DP_TEST_PATTERN_VERTICAL_BARS:
2242 controller_test_pattern =
2243 CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
2244 break;
2245 case DP_TEST_PATTERN_HORIZONTAL_BARS:
2246 controller_test_pattern =
2247 CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
2248 break;
2249 case DP_TEST_PATTERN_COLOR_RAMP:
2250 controller_test_pattern =
2251 CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
2252 break;
2253 default:
2254 controller_test_pattern =
2255 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
2256 break;
2257 }
2258
2259 switch (test_pattern) {
2260 case DP_TEST_PATTERN_COLOR_SQUARES:
2261 case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
2262 case DP_TEST_PATTERN_VERTICAL_BARS:
2263 case DP_TEST_PATTERN_HORIZONTAL_BARS:
2264 case DP_TEST_PATTERN_COLOR_RAMP:
2265 {
2266 /* disable bit depth reduction */
2267 pipe_ctx->stream->bit_depth_params = params;
2268 pipe_ctx->opp->funcs->
2269 opp_program_bit_depth_reduction(pipe_ctx->opp, &params);
2270
2271 pipe_ctx->tg->funcs->set_test_pattern(pipe_ctx->tg,
2272 controller_test_pattern, color_depth);
2273 }
2274 break;
2275 case DP_TEST_PATTERN_VIDEO_MODE:
2276 {
2277 /* restore bitdepth reduction */
2278 link->dc->current_context->res_ctx.pool->funcs->
2279 build_bit_depth_reduction_params(pipe_ctx->stream,
2280 &params);
2281 pipe_ctx->stream->bit_depth_params = params;
2282 pipe_ctx->opp->funcs->
2283 opp_program_bit_depth_reduction(pipe_ctx->opp, &params);
2284
2285 pipe_ctx->tg->funcs->set_test_pattern(pipe_ctx->tg,
2286 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
2287 color_depth);
2288 }
2289 break;
2290
2291 default:
2292 break;
2293 }
2294}
2295
2296bool dc_link_dp_set_test_pattern(
2297 const struct dc_link *link,
2298 enum dp_test_pattern test_pattern,
2299 const struct link_training_settings *p_link_settings,
2300 const unsigned char *p_custom_pattern,
2301 unsigned int cust_pattern_size)
2302{
2303 struct core_link *core_link = DC_LINK_TO_CORE(link);
2304 struct pipe_ctx *pipes =
2305 core_link->dc->current_context->res_ctx.pipe_ctx;
2306 struct pipe_ctx pipe_ctx = pipes[0];
2307 unsigned int lane;
2308 unsigned int i;
2309 unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
2310 union dpcd_training_pattern training_pattern;
2311 union test_response test_response;
2312 enum dpcd_phy_test_patterns pattern;
2313
2314 memset(&training_pattern, 0, sizeof(training_pattern));
2315 memset(&test_response, 0, sizeof(test_response));
2316
2317 for (i = 0; i < MAX_PIPES; i++) {
2318 if (pipes[i].stream->sink->link == core_link) {
2319 pipe_ctx = pipes[i];
2320 break;
2321 }
2322 }
2323
2324	/* Reset CRTC Test Pattern if it is currently running and the request
2325	 * is VideoMode. Reset DP PHY Test Pattern if it is currently running
2326	 * and the request is VideoMode.
2327 */
2328 if (core_link->public.test_pattern_enabled && test_pattern ==
2329 DP_TEST_PATTERN_VIDEO_MODE) {
2330 /* Set CRTC Test Pattern */
2331 set_crtc_test_pattern(core_link, &pipe_ctx, test_pattern);
2332 dp_set_hw_test_pattern(core_link, test_pattern,
2333 (uint8_t *)p_custom_pattern,
2334 (uint32_t)cust_pattern_size);
2335
2336 /* Unblank Stream */
2337 core_link->dc->hwss.unblank_stream(
2338 &pipe_ctx,
2339 &core_link->public.verified_link_cap);
2340 /* TODO:m_pHwss->MuteAudioEndpoint
2341 * (pPathMode->pDisplayPath, false);
2342 */
2343
2344 /* Reset Test Pattern state */
2345 core_link->public.test_pattern_enabled = false;
2346
2347 return true;
2348 }
2349
2350 /* Check for PHY Test Patterns */
2351 if (is_dp_phy_pattern(test_pattern)) {
2352 /* Set DPCD Lane Settings before running test pattern */
2353 if (p_link_settings != NULL) {
2354 dp_set_hw_lane_settings(core_link, p_link_settings);
2355 dpcd_set_lane_settings(core_link, p_link_settings);
2356 }
2357
2358 /* Blank stream if running test pattern */
2359 if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
2360 /*TODO:
2361 * m_pHwss->
2362 * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
2363 */
2364 /* Blank stream */
2365			pipe_ctx.stream_enc->funcs->dp_blank(pipe_ctx.stream_enc);
2366 }
2367
2368 dp_set_hw_test_pattern(core_link, test_pattern,
2369 (uint8_t *)p_custom_pattern,
2370 (uint32_t)cust_pattern_size);
2371
2372 if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
2373 /* Set Test Pattern state */
2374 core_link->public.test_pattern_enabled = true;
2375 if (p_link_settings != NULL)
2376 dpcd_set_link_settings(core_link,
2377 p_link_settings);
2378 }
2379
2380 switch (test_pattern) {
2381 case DP_TEST_PATTERN_VIDEO_MODE:
2382 pattern = PHY_TEST_PATTERN_NONE;
2383 break;
2384 case DP_TEST_PATTERN_D102:
2385 pattern = PHY_TEST_PATTERN_D10_2;
2386 break;
2387 case DP_TEST_PATTERN_SYMBOL_ERROR:
2388 pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
2389 break;
2390 case DP_TEST_PATTERN_PRBS7:
2391 pattern = PHY_TEST_PATTERN_PRBS7;
2392 break;
2393 case DP_TEST_PATTERN_80BIT_CUSTOM:
2394 pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
2395 break;
2396 case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
2397 pattern = PHY_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
2398 break;
2399 default:
2400 return false;
2401 }
2402
2403 if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
2404 /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
2405 return false;
2406
2407 if (core_link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
2408 /* tell receiver that we are sending qualification
2409 * pattern DP 1.2 or later - DP receiver's link quality
2410 * pattern is set using DPCD LINK_QUAL_LANEx_SET
2411			 * register (0x10B~0x10E)
2412 */
2413 for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
2414 link_qual_pattern[lane] =
2415 (unsigned char)(pattern);
2416
2417 core_link_write_dpcd(core_link,
2418 DPCD_ADDRESS_LINK_QUAL_LANE0_SET,
2419 link_qual_pattern,
2420 sizeof(link_qual_pattern));
2421 } else if (core_link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
2422 core_link->dpcd_caps.dpcd_rev.raw == 0) {
2423 /* tell receiver that we are sending qualification
2424 * pattern DP 1.1a or earlier - DP receiver's link
2425 * quality pattern is set using
2426 * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
2427 * register (0x102). We will use v_1.3 when we are
2428 * setting test pattern for DP 1.1.
2429 */
2430 core_link_read_dpcd(core_link,
2431 DPCD_ADDRESS_TRAINING_PATTERN_SET,
2432 &training_pattern.raw,
2433 sizeof(training_pattern));
2434 training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
2435 core_link_write_dpcd(core_link,
2436 DPCD_ADDRESS_TRAINING_PATTERN_SET,
2437 &training_pattern.raw,
2438 sizeof(training_pattern));
2439 }
2440 } else {
2441 /* CRTC Patterns */
2442 set_crtc_test_pattern(core_link, &pipe_ctx, test_pattern);
2443 /* Set Test Pattern state */
2444 core_link->public.test_pattern_enabled = true;
2445
2446		/* If this is called because of a compliance test request,
2447		 * we respond with an ACK here.
2448 */
2449 if (core_link->public.compliance_test_state.bits.
2450 SET_TEST_PATTERN_PENDING == 1) {
2451 core_link->public.compliance_test_state.bits.
2452 SET_TEST_PATTERN_PENDING = 0;
2453 test_response.bits.ACK = 1;
2454 core_link_write_dpcd(core_link,
2455 DPCD_ADDRESS_TEST_RESPONSE,
2456 &test_response.raw,
2457 sizeof(test_response));
2458 }
2459 }
2460
2461 return true;
2462}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
new file mode 100644
index 000000000000..e89f5f176ec3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -0,0 +1,222 @@
1/* Copyright 2015 Advanced Micro Devices, Inc. */
2
3
4#include "dm_services.h"
5#include "dc.h"
6#include "inc/core_dc.h"
7#include "include/ddc_service_types.h"
8#include "include/i2caux_interface.h"
9#include "link_hwss.h"
10#include "hw_sequencer.h"
11#include "dc_link_dp.h"
12#include "dc_link_ddc.h"
13#include "dm_helpers.h"
14#include "dce/dce_link_encoder.h"
15#include "dce/dce_stream_encoder.h"
16
17enum dc_status core_link_read_dpcd(
18 struct core_link* link,
19 uint32_t address,
20 uint8_t *data,
21 uint32_t size)
22{
23 if (!dm_helpers_dp_read_dpcd(link->ctx,
24 &link->public,
25 address, data, size))
26 return DC_ERROR_UNEXPECTED;
27
28 return DC_OK;
29}
30
31enum dc_status core_link_write_dpcd(
32 struct core_link* link,
33 uint32_t address,
34 const uint8_t *data,
35 uint32_t size)
36{
37 if (!dm_helpers_dp_write_dpcd(link->ctx,
38 &link->public,
39 address, data, size))
40 return DC_ERROR_UNEXPECTED;
41
42 return DC_OK;
43}
44
45void dp_receiver_power_ctrl(struct core_link *link, bool on)
46{
47 uint8_t state;
48
49 state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
50
51 core_link_write_dpcd(link, DPCD_ADDRESS_POWER_STATE, &state,
52 sizeof(state));
53}
54
55void dp_enable_link_phy(
56 struct core_link *link,
57 enum signal_type signal,
58 enum clock_source_id clock_source,
59 const struct dc_link_settings *link_settings)
60{
61 struct link_encoder *link_enc = link->link_enc;
62
63 if (dc_is_dp_sst_signal(signal)) {
64 if (signal == SIGNAL_TYPE_EDP) {
65 link_enc->funcs->power_control(link_enc, true);
66 link_enc->funcs->backlight_control(link_enc, true);
67 }
68
69 link_enc->funcs->enable_dp_output(
70 link_enc,
71 link_settings,
72 clock_source);
73 } else {
74 link_enc->funcs->enable_dp_mst_output(
75 link_enc,
76 link_settings,
77 clock_source);
78 }
79
80 dp_receiver_power_ctrl(link, true);
81}
82
83void dp_disable_link_phy(struct core_link *link, enum signal_type signal)
84{
85 if (!link->wa_flags.dp_keep_receiver_powered)
86 dp_receiver_power_ctrl(link, false);
87
88 if (signal == SIGNAL_TYPE_EDP)
89 link->link_enc->funcs->backlight_control(link->link_enc, false);
90
91 link->link_enc->funcs->disable_output(link->link_enc, signal);
92
93 /* Clear current link setting.*/
94 memset(&link->public.cur_link_settings, 0,
95 sizeof(link->public.cur_link_settings));
96}
97
98void dp_disable_link_phy_mst(struct core_link *link, enum signal_type signal)
99{
100	/* For MST, disable the link only when no stream uses the link */
101 if (link->mst_stream_alloc_table.stream_count > 0)
102 return;
103
104 dp_disable_link_phy(link, signal);
105}
106
107bool dp_set_hw_training_pattern(
108 struct core_link *link,
109 enum hw_dp_training_pattern pattern)
110{
111 enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
112
113 switch (pattern) {
114 case HW_DP_TRAINING_PATTERN_1:
115 test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
116 break;
117 case HW_DP_TRAINING_PATTERN_2:
118 test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
119 break;
120 case HW_DP_TRAINING_PATTERN_3:
121 test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
122 break;
123 case HW_DP_TRAINING_PATTERN_4:
124 test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
125 break;
126 default:
127 break;
128 }
129
130 dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
131
132 return true;
133}
134
135void dp_set_hw_lane_settings(
136 struct core_link *link,
137 const struct link_training_settings *link_settings)
138{
139 struct link_encoder *encoder = link->link_enc;
140
141 /* call Encoder to set lane settings */
142 encoder->funcs->dp_set_lane_settings(encoder, link_settings);
143}
144
145enum dp_panel_mode dp_get_panel_mode(struct core_link *link)
146{
147 /* We need to explicitly check that connector
148	 * is not DP. Some Travis_VGA dongles get reported
149	 * by the video BIOS as DP.
150 */
151 if (link->public.connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
152
153 switch (link->dpcd_caps.branch_dev_id) {
154 case DP_BRANCH_DEVICE_ID_2:
155 if (strncmp(
156 link->dpcd_caps.branch_dev_name,
157 DP_VGA_LVDS_CONVERTER_ID_2,
158 sizeof(
159 link->dpcd_caps.
160 branch_dev_name)) == 0) {
161 return DP_PANEL_MODE_SPECIAL;
162 }
163 break;
164 case DP_BRANCH_DEVICE_ID_3:
165 if (strncmp(link->dpcd_caps.branch_dev_name,
166 DP_VGA_LVDS_CONVERTER_ID_3,
167 sizeof(
168 link->dpcd_caps.
169 branch_dev_name)) == 0) {
170 return DP_PANEL_MODE_SPECIAL;
171 }
172 break;
173 default:
174 break;
175 }
176
177 if (link->dpcd_caps.panel_mode_edp) {
178 return DP_PANEL_MODE_EDP;
179 }
180 }
181
182 return DP_PANEL_MODE_DEFAULT;
183}
184
185void dp_set_hw_test_pattern(
186 struct core_link *link,
187 enum dp_test_pattern test_pattern,
188 uint8_t *custom_pattern,
189 uint32_t custom_pattern_size)
190{
191 struct encoder_set_dp_phy_pattern_param pattern_param = {0};
192 struct link_encoder *encoder = link->link_enc;
193
194 pattern_param.dp_phy_pattern = test_pattern;
195 pattern_param.custom_pattern = custom_pattern;
196 pattern_param.custom_pattern_size = custom_pattern_size;
197 pattern_param.dp_panel_mode = dp_get_panel_mode(link);
198
199 encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
200}
201
202
203void dp_retrain_link(struct core_link *link)
204{
205 struct pipe_ctx *pipes = link->dc->current_context->res_ctx.pipe_ctx;
206 unsigned int i;
207
208 for (i = 0; i < MAX_PIPES; i++) {
209 if (pipes[i].stream_enc != NULL) {
210 dm_delay_in_microseconds(link->ctx, 100);
211			pipes[i].stream_enc->funcs->dp_blank(pipes[i].stream_enc);
212 link->dc->hwss.disable_stream(&pipes[i]);
213 dc_link_dp_perform_link_training(
214 &link->public,
215 &link->public.verified_link_cap,
216 true);
217 link->dc->hwss.enable_stream(&pipes[i]);
218 link->dc->hwss.unblank_stream(&pipes[i],
219 &link->public.verified_link_cap);
220 }
221 }
222}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
new file mode 100644
index 000000000000..bd53d27e5414
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -0,0 +1,1934 @@
1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26
27#include "resource.h"
28#include "include/irq_service_interface.h"
29#include "link_encoder.h"
30#include "stream_encoder.h"
31#include "opp.h"
32#include "timing_generator.h"
33#include "transform.h"
34#include "set_mode_types.h"
35
36#include "virtual/virtual_stream_encoder.h"
37
38#include "dce80/dce80_resource.h"
39#include "dce100/dce100_resource.h"
40#include "dce110/dce110_resource.h"
41#include "dce112/dce112_resource.h"
42
43enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
44{
45 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
46 switch (asic_id.chip_family) {
47
48 case FAMILY_CI:
49 case FAMILY_KV:
50 dc_version = DCE_VERSION_8_0;
51 break;
52 case FAMILY_CZ:
53 dc_version = DCE_VERSION_11_0;
54 break;
55
56 case FAMILY_VI:
57 if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
58 ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
59 dc_version = DCE_VERSION_10_0;
60 break;
61 }
62 if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
63 ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev)) {
64 dc_version = DCE_VERSION_11_2;
65 }
66 break;
67 default:
68 dc_version = DCE_VERSION_UNKNOWN;
69 break;
70 }
71 return dc_version;
72}
73
74struct resource_pool *dc_create_resource_pool(
75 struct core_dc *dc,
76 int num_virtual_links,
77 enum dce_version dc_version,
78 struct hw_asic_id asic_id)
79{
80
81 switch (dc_version) {
82 case DCE_VERSION_8_0:
83 return dce80_create_resource_pool(
84 num_virtual_links, dc);
85 case DCE_VERSION_10_0:
86 return dce100_create_resource_pool(
87 num_virtual_links, dc);
88 case DCE_VERSION_11_0:
89 return dce110_create_resource_pool(
90 num_virtual_links, dc, asic_id);
91 case DCE_VERSION_11_2:
92 return dce112_create_resource_pool(
93 num_virtual_links, dc);
94 default:
95 break;
96 }
97
98	return NULL;
99}
100
101void dc_destroy_resource_pool(struct core_dc *dc)
102{
103 if (dc) {
104 if (dc->res_pool)
105 dc->res_pool->funcs->destroy(&dc->res_pool);
106
107 if (dc->hwseq)
108 dm_free(dc->hwseq);
109 }
110}
111
112static void update_num_audio(
113 const struct resource_straps *straps,
114 unsigned int *num_audio,
115 struct audio_support *aud_support)
116{
117 if (straps->hdmi_disable == 0) {
118 aud_support->hdmi_audio_native = true;
119 aud_support->hdmi_audio_on_dongle = true;
120 aud_support->dp_audio = true;
121 } else {
122 if (straps->dc_pinstraps_audio & 0x2) {
123 aud_support->hdmi_audio_on_dongle = true;
124 aud_support->dp_audio = true;
125 } else {
126 aud_support->dp_audio = true;
127 }
128 }
129
130 switch (straps->audio_stream_number) {
131 case 0: /* multi streams supported */
132 break;
133 case 1: /* multi streams not supported */
134 *num_audio = 1;
135 break;
136 default:
137 DC_ERR("DC: unexpected audio fuse!\n");
138 };
139}
140
141bool resource_construct(
142 unsigned int num_virtual_links,
143 struct core_dc *dc,
144 struct resource_pool *pool,
145 const struct resource_create_funcs *create_funcs)
146{
147 struct dc_context *ctx = dc->ctx;
148 const struct resource_caps *caps = pool->res_cap;
149 int i;
150 unsigned int num_audio = caps->num_audio;
151 struct resource_straps straps = {0};
152
153 if (create_funcs->read_dce_straps)
154 create_funcs->read_dce_straps(dc->ctx, &straps);
155
156 pool->audio_count = 0;
157 if (create_funcs->create_audio) {
158 /* find the total number of streams available via the
159 * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
160 * registers (one for each pin) starting from pin 1
161 * up to the max number of audio pins.
162 * We stop on the first pin where
163 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
164 */
165 update_num_audio(&straps, &num_audio, &pool->audio_support);
166 for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
167 struct audio *aud = create_funcs->create_audio(ctx, i);
168
169 if (aud == NULL) {
170 DC_ERR("DC: failed to create audio!\n");
171 return false;
172 }
173
174 if (!aud->funcs->endpoint_valid(aud)) {
175 aud->funcs->destroy(&aud);
176 break;
177 }
178
179 pool->audios[i] = aud;
180 pool->audio_count++;
181 }
182 }
183
184 pool->stream_enc_count = 0;
185 if (create_funcs->create_stream_encoder) {
186 for (i = 0; i < caps->num_stream_encoder; i++) {
187 pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
188 if (pool->stream_enc[i] == NULL)
189 DC_ERR("DC: failed to create stream_encoder!\n");
190 pool->stream_enc_count++;
191 }
192 }
193
194 for (i = 0; i < num_virtual_links; i++) {
195 pool->stream_enc[pool->stream_enc_count] =
196 virtual_stream_encoder_create(
197 ctx, ctx->dc_bios);
198 if (pool->stream_enc[pool->stream_enc_count] == NULL) {
199 DC_ERR("DC: failed to create stream_encoder!\n");
200 return false;
201 }
202 pool->stream_enc_count++;
203 }
204
205 dc->hwseq = create_funcs->create_hwseq(ctx);
206
207 return true;
208}
209
210
211void resource_unreference_clock_source(
212 struct resource_context *res_ctx,
213 struct clock_source *clock_source)
214{
215 int i;
216 for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
217 if (res_ctx->pool->clock_sources[i] != clock_source)
218 continue;
219
220 res_ctx->clock_source_ref_count[i]--;
221
222 if (res_ctx->clock_source_ref_count[i] == 0)
223 clock_source->funcs->cs_power_down(clock_source);
224
225 break;
226 }
227
228 if (res_ctx->pool->dp_clock_source == clock_source) {
229 res_ctx->dp_clock_source_ref_count--;
230
231 if (res_ctx->dp_clock_source_ref_count == 0)
232 clock_source->funcs->cs_power_down(clock_source);
233 }
234}
235
236void resource_reference_clock_source(
237 struct resource_context *res_ctx,
238 struct clock_source *clock_source)
239{
240 int i;
241 for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
242 if (res_ctx->pool->clock_sources[i] != clock_source)
243 continue;
244
245 res_ctx->clock_source_ref_count[i]++;
246 break;
247 }
248
249 if (res_ctx->pool->dp_clock_source == clock_source)
250 res_ctx->dp_clock_source_ref_count++;
251}
252
253bool resource_are_streams_timing_synchronizable(
254 const struct core_stream *stream1,
255 const struct core_stream *stream2)
256{
257 if (stream1->public.timing.h_total != stream2->public.timing.h_total)
258 return false;
259
260 if (stream1->public.timing.v_total != stream2->public.timing.v_total)
261 return false;
262
263 if (stream1->public.timing.h_addressable
264 != stream2->public.timing.h_addressable)
265 return false;
266
267 if (stream1->public.timing.v_addressable
268 != stream2->public.timing.v_addressable)
269 return false;
270
271 if (stream1->public.timing.pix_clk_khz
272 != stream2->public.timing.pix_clk_khz)
273 return false;
274
275 if (stream1->phy_pix_clk != stream2->phy_pix_clk
276 && !dc_is_dp_signal(stream1->signal)
277 && !dc_is_dp_signal(stream2->signal))
278 return false;
279
280 return true;
281}
282
283static bool is_sharable_clk_src(
284 const struct pipe_ctx *pipe_with_clk_src,
285 const struct pipe_ctx *pipe)
286{
287 if (pipe_with_clk_src->clock_source == NULL)
288 return false;
289
290 if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
291 return false;
292
293 if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
294 return false;
295
296 if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
297 && dc_is_dvi_signal(pipe->stream->signal))
298 return false;
299
300 if (dc_is_hdmi_signal(pipe->stream->signal)
301 && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
302 return false;
303
304 if (!resource_are_streams_timing_synchronizable(
305 pipe_with_clk_src->stream, pipe->stream))
306 return false;
307
308 return true;
309}
310
311struct clock_source *resource_find_used_clk_src_for_sharing(
312 struct resource_context *res_ctx,
313 struct pipe_ctx *pipe_ctx)
314{
315 int i;
316
317 for (i = 0; i < MAX_PIPES; i++) {
318 if (is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx))
319 return res_ctx->pipe_ctx[i].clock_source;
320 }
321
322 return NULL;
323}
324
325static enum pixel_format convert_pixel_format_to_dalsurface(
326 enum surface_pixel_format surface_pixel_format)
327{
328 enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
329
330 switch (surface_pixel_format) {
331 case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
332 dal_pixel_format = PIXEL_FORMAT_INDEX8;
333 break;
334 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
335 dal_pixel_format = PIXEL_FORMAT_RGB565;
336 break;
337 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
338 dal_pixel_format = PIXEL_FORMAT_RGB565;
339 break;
340 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
341 dal_pixel_format = PIXEL_FORMAT_ARGB8888;
342 break;
343 case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
344 dal_pixel_format = PIXEL_FORMAT_ARGB8888;
345 break;
346 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
347 dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
348 break;
349 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
350 dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
351 break;
352 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
353 dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
354 break;
355 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
356 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
357 dal_pixel_format = PIXEL_FORMAT_FP16;
358 break;
359 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
360 dal_pixel_format = PIXEL_FORMAT_420BPP12;
361 break;
362 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
363 dal_pixel_format = PIXEL_FORMAT_420BPP12;
364 break;
365 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
366 default:
367 dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
368 break;
369 }
370 return dal_pixel_format;
371}
372
373static void rect_swap_helper(struct rect *rect)
374{
375 uint32_t temp = 0;
376
377 temp = rect->height;
378 rect->height = rect->width;
379 rect->width = temp;
380
381 temp = rect->x;
382 rect->x = rect->y;
383 rect->y = temp;
384}
385
386static void calculate_viewport(
387 const struct dc_surface *surface,
388 struct pipe_ctx *pipe_ctx)
389{
390 struct rect stream_src = pipe_ctx->stream->public.src;
391 struct rect src = surface->src_rect;
392 struct rect dst = surface->dst_rect;
393 struct rect surface_clip = surface->clip_rect;
394 struct rect clip = {0};
395
396
397 if (surface->rotation == ROTATION_ANGLE_90 ||
398 surface->rotation == ROTATION_ANGLE_270) {
399 rect_swap_helper(&src);
400 rect_swap_helper(&dst);
401 rect_swap_helper(&surface_clip);
402 rect_swap_helper(&stream_src);
403 }
404
405 /* The actual clip is an intersection between stream
406 * source and surface clip
407 */
408 clip.x = stream_src.x > surface_clip.x ?
409 stream_src.x : surface_clip.x;
410
411 clip.width = stream_src.x + stream_src.width <
412 surface_clip.x + surface_clip.width ?
413 stream_src.x + stream_src.width - clip.x :
414 surface_clip.x + surface_clip.width - clip.x ;
415
416 clip.y = stream_src.y > surface_clip.y ?
417 stream_src.y : surface_clip.y;
418
419 clip.height = stream_src.y + stream_src.height <
420 surface_clip.y + surface_clip.height ?
421 stream_src.y + stream_src.height - clip.y :
422 surface_clip.y + surface_clip.height - clip.y ;
423
424 /* offset = src.ofs + (clip.ofs - dst.ofs) * scl_ratio
425 * num_pixels = clip.num_pix * scl_ratio
426 */
427 pipe_ctx->scl_data.viewport.x = src.x + (clip.x - dst.x) *
428 src.width / dst.width;
429 pipe_ctx->scl_data.viewport.width = clip.width *
430 src.width / dst.width;
431
432 pipe_ctx->scl_data.viewport.y = src.y + (clip.y - dst.y) *
433 src.height / dst.height;
434 pipe_ctx->scl_data.viewport.height = clip.height *
435 src.height / dst.height;
436
437 	/* Minimum viewport such that the 420/422 chroma viewport is non-zero */
438 if (pipe_ctx->scl_data.viewport.width < 2)
439 pipe_ctx->scl_data.viewport.width = 2;
440 if (pipe_ctx->scl_data.viewport.height < 2)
441 pipe_ctx->scl_data.viewport.height = 2;
442}
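
Editor's illustration (not part of the patch): a minimal standalone sketch of the viewport math above, with made-up rectangles and a simplified struct in place of the driver's types. It mirrors the clip intersection result and the offset/size scaling on both axes.

	#include <stdio.h>

	struct ex_rect { int x, y, width, height; };

	/* Mirror of: viewport.ofs  = src.ofs + (clip.ofs - dst.ofs) * src/dst,
	 *            viewport.size = clip.size * src/dst (integer math). */
	static struct ex_rect ex_viewport(struct ex_rect src, struct ex_rect dst,
					  struct ex_rect clip)
	{
		struct ex_rect vp;

		vp.x = src.x + (clip.x - dst.x) * src.width / dst.width;
		vp.width = clip.width * src.width / dst.width;
		vp.y = src.y + (clip.y - dst.y) * src.height / dst.height;
		vp.height = clip.height * src.height / dst.height;
		return vp;
	}

	int main(void)
	{
		/* 4k surface scaled into a 1080p destination, clipped to the
		 * right half of the screen: the viewport ends up being the
		 * right half of the 4k source. */
		struct ex_rect src  = { 0, 0, 3840, 2160 };
		struct ex_rect dst  = { 0, 0, 1920, 1080 };
		struct ex_rect clip = { 960, 0, 960, 1080 };
		struct ex_rect vp = ex_viewport(src, dst, clip);

		printf("viewport x=%d y=%d w=%d h=%d\n",
		       vp.x, vp.y, vp.width, vp.height);
		/* prints: viewport x=1920 y=0 w=1920 h=2160 */
		return 0;
	}
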
443
444static void calculate_recout(
445 const struct dc_surface *surface,
446 struct pipe_ctx *pipe_ctx)
447{
448 struct core_stream *stream = pipe_ctx->stream;
449 struct rect clip = surface->clip_rect;
450
451 pipe_ctx->scl_data.recout.x = stream->public.dst.x;
452 if (stream->public.src.x < clip.x)
453 pipe_ctx->scl_data.recout.x += (clip.x
454 - stream->public.src.x) * stream->public.dst.width
455 / stream->public.src.width;
456
457 pipe_ctx->scl_data.recout.width = clip.width *
458 stream->public.dst.width / stream->public.src.width;
459 if (pipe_ctx->scl_data.recout.width + pipe_ctx->scl_data.recout.x >
460 stream->public.dst.x + stream->public.dst.width)
461 pipe_ctx->scl_data.recout.width =
462 stream->public.dst.x + stream->public.dst.width
463 - pipe_ctx->scl_data.recout.x;
464
465 pipe_ctx->scl_data.recout.y = stream->public.dst.y;
466 if (stream->public.src.y < clip.y)
467 pipe_ctx->scl_data.recout.y += (clip.y
468 - stream->public.src.y) * stream->public.dst.height
469 / stream->public.src.height;
470
471 pipe_ctx->scl_data.recout.height = clip.height *
472 stream->public.dst.height / stream->public.src.height;
473 if (pipe_ctx->scl_data.recout.height + pipe_ctx->scl_data.recout.y >
474 stream->public.dst.y + stream->public.dst.height)
475 pipe_ctx->scl_data.recout.height =
476 stream->public.dst.y + stream->public.dst.height
477 - pipe_ctx->scl_data.recout.y;
478}
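
Editor's illustration (not part of the patch): one axis of the recout mapping above, reduced to plain integers with hypothetical stream src/dst and clip values, including the clamp to the stream destination.

	#include <stdio.h>

	int main(void)
	{
		int src_x = 0, src_w = 1920;     /* stream->public.src  */
		int dst_x = 240, dst_w = 1440;   /* stream->public.dst  */
		int clip_x = 480, clip_w = 1920; /* surface->clip_rect  */

		int recout_x = dst_x;
		int recout_w;

		/* Translate the clip origin from source space into
		 * destination space, then scale and clamp the width. */
		if (src_x < clip_x)
			recout_x += (clip_x - src_x) * dst_w / src_w;

		recout_w = clip_w * dst_w / src_w;
		if (recout_w + recout_x > dst_x + dst_w)
			recout_w = dst_x + dst_w - recout_x;

		printf("recout x=%d w=%d\n", recout_x, recout_w);
		/* prints: recout x=600 w=1080 */
		return 0;
	}
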
479
480static void calculate_scaling_ratios(
481 const struct dc_surface *surface,
482 struct pipe_ctx *pipe_ctx)
483{
484 struct core_stream *stream = pipe_ctx->stream;
485 const uint32_t in_w = stream->public.src.width;
486 const uint32_t in_h = stream->public.src.height;
487 const uint32_t out_w = stream->public.dst.width;
488 const uint32_t out_h = stream->public.dst.height;
489
490 pipe_ctx->scl_data.ratios.horz = dal_fixed31_32_from_fraction(
491 surface->src_rect.width,
492 surface->dst_rect.width);
493 pipe_ctx->scl_data.ratios.vert = dal_fixed31_32_from_fraction(
494 surface->src_rect.height,
495 surface->dst_rect.height);
496
497 if (surface->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE)
498 pipe_ctx->scl_data.ratios.horz.value *= 2;
499 else if (surface->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM)
500 pipe_ctx->scl_data.ratios.vert.value *= 2;
501
502 pipe_ctx->scl_data.ratios.vert.value = div64_s64(
503 pipe_ctx->scl_data.ratios.vert.value * in_h, out_h);
504 pipe_ctx->scl_data.ratios.horz.value = div64_s64(
505 pipe_ctx->scl_data.ratios.horz.value * in_w, out_w);
506
507 pipe_ctx->scl_data.ratios.horz_c = pipe_ctx->scl_data.ratios.horz;
508 pipe_ctx->scl_data.ratios.vert_c = pipe_ctx->scl_data.ratios.vert;
509
510 if (pipe_ctx->scl_data.format == PIXEL_FORMAT_420BPP12) {
511 pipe_ctx->scl_data.ratios.horz_c.value /= 2;
512 pipe_ctx->scl_data.ratios.vert_c.value /= 2;
513 }
514}
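
Editor's illustration (not part of the patch): the same ratio arithmetic with plain doubles standing in for dal_fixed31_32 and hypothetical surface/stream sizes; it shows the optional stereo doubling and the 4:2:0 chroma halving on the horizontal axis only.

	#include <stdio.h>
	#include <stdbool.h>

	int main(void)
	{
		double surf_src_w = 3840, surf_dst_w = 1920;     /* surface downscale */
		double stream_src_w = 1920, stream_dst_w = 1280; /* stream downscale  */
		bool side_by_side = false, is_420 = true;

		double horz = surf_src_w / surf_dst_w;           /* 2.0 */
		if (side_by_side)
			horz *= 2.0;
		horz = horz * stream_src_w / stream_dst_w;       /* 3.0 */

		double horz_c = is_420 ? horz / 2.0 : horz;      /* 1.5 */

		printf("horz=%.2f horz_c=%.2f\n", horz, horz_c);
		/* prints: horz=3.00 horz_c=1.50 */
		return 0;
	}
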
515
516bool resource_build_scaling_params(
517 const struct dc_surface *surface,
518 struct pipe_ctx *pipe_ctx)
519{
520 bool res;
521 struct dc_crtc_timing *timing = &pipe_ctx->stream->public.timing;
522 /* Important: scaling ratio calculation requires pixel format,
523 * lb depth calculation requires recout and taps require scaling ratios.
524 */
525 pipe_ctx->scl_data.format = convert_pixel_format_to_dalsurface(surface->format);
526
527 calculate_viewport(surface, pipe_ctx);
528
529 if (pipe_ctx->scl_data.viewport.height < 16 || pipe_ctx->scl_data.viewport.width < 16)
530 return false;
531
532 calculate_scaling_ratios(surface, pipe_ctx);
533
534 calculate_recout(surface, pipe_ctx);
535
536 /**
537 * Setting line buffer pixel depth to 24bpp yields banding
538 * on certain displays, such as the Sharp 4k
539 */
540 pipe_ctx->scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
541
542 pipe_ctx->scl_data.h_active = timing->h_addressable;
543 pipe_ctx->scl_data.v_active = timing->v_addressable;
544
545 /* Taps calculations */
546 res = pipe_ctx->xfm->funcs->transform_get_optimal_number_of_taps(
547 pipe_ctx->xfm, &pipe_ctx->scl_data, &surface->scaling_quality);
548
549 if (!res) {
550 /* Try 24 bpp linebuffer */
551 pipe_ctx->scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
552
553 res = pipe_ctx->xfm->funcs->transform_get_optimal_number_of_taps(
554 pipe_ctx->xfm, &pipe_ctx->scl_data, &surface->scaling_quality);
555 }
556
557 dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
558 "%s: Viewport:\nheight:%d width:%d x:%d "
559 "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
560 "y:%d\n",
561 __func__,
562 pipe_ctx->scl_data.viewport.height,
563 pipe_ctx->scl_data.viewport.width,
564 pipe_ctx->scl_data.viewport.x,
565 pipe_ctx->scl_data.viewport.y,
566 surface->dst_rect.height,
567 surface->dst_rect.width,
568 surface->dst_rect.x,
569 surface->dst_rect.y);
570
571 return res;
572}
573
574
575enum dc_status resource_build_scaling_params_for_context(
576 const struct core_dc *dc,
577 struct validate_context *context)
578{
579 int i;
580
581 for (i = 0; i < MAX_PIPES; i++) {
582 if (context->res_ctx.pipe_ctx[i].surface != NULL &&
583 context->res_ctx.pipe_ctx[i].stream != NULL)
584 if (!resource_build_scaling_params(
585 &context->res_ctx.pipe_ctx[i].surface->public,
586 &context->res_ctx.pipe_ctx[i]))
587 return DC_FAIL_BANDWIDTH_VALIDATE;
588 }
589
590 return DC_OK;
591}
592
593static void detach_surfaces_for_target(
594 struct validate_context *context,
595 const struct dc_target *dc_target)
596{
597 int i;
598 struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
599
600 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
601 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
602 if (cur_pipe->stream == stream) {
603 cur_pipe->surface = NULL;
604 cur_pipe->top_pipe = NULL;
605 cur_pipe->bottom_pipe = NULL;
606 }
607 }
608}
609
610struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx)
611{
612 int i;
613 struct pipe_ctx *secondary_pipe = NULL;
614
615 /*
616 * search backwards for the second pipe to keep pipe
617 * assignment more consistent
618 */
619
620 for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
621 if (res_ctx->pipe_ctx[i].stream == NULL) {
622 secondary_pipe = &res_ctx->pipe_ctx[i];
623 secondary_pipe->pipe_idx = i;
624 break;
625 }
626 }
627
628
629 return secondary_pipe;
630}
631
632struct pipe_ctx *resource_get_head_pipe_for_stream(
633 struct resource_context *res_ctx,
634 const struct core_stream *stream)
635{
636 int i;
637 for (i = 0; i < res_ctx->pool->pipe_count; i++) {
638 if (res_ctx->pipe_ctx[i].stream == stream &&
639 !res_ctx->pipe_ctx[i].top_pipe) {
640 return &res_ctx->pipe_ctx[i];
642 }
643 }
644 return NULL;
645}
646
647/*
648 * A free_pipe for a target is defined here as a pipe with a stream that belongs
649 * to the target but has no surface attached yet
650 */
651static struct pipe_ctx *acquire_free_pipe_for_target(
652 struct resource_context *res_ctx,
653 const struct dc_target *dc_target)
654{
655 int i;
656 struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
657
658 struct pipe_ctx *head_pipe = NULL;
659
660 	/* Find the head pipe, which has the back end set up */
661 
662 	head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
663 
664 	if (!head_pipe) {
665 		ASSERT(0);
		return NULL;
	}
666
667 if (!head_pipe->surface)
668 return head_pipe;
669
670 	/* Re-use a pipe already acquired for this stream, if available */
671 for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
672 if (res_ctx->pipe_ctx[i].stream == stream &&
673 !res_ctx->pipe_ctx[i].surface) {
674 return &res_ctx->pipe_ctx[i];
675 }
676 }
677
678 /*
679 	 * At this point we have no re-usable pipe for this stream and we need
680 	 * to acquire an idle one to satisfy the request
681 	 */
682 
683 	if (!res_ctx->pool->funcs->acquire_idle_pipe_for_layer)
684 return NULL;
685
686 return res_ctx->pool->funcs->acquire_idle_pipe_for_layer(res_ctx, stream);
687
688}
689
690static void release_free_pipes_for_target(
691 struct resource_context *res_ctx,
692 const struct dc_target *dc_target)
693{
694 int i;
695 struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
696
697 for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
698 if (res_ctx->pipe_ctx[i].stream == stream &&
699 !res_ctx->pipe_ctx[i].surface) {
700 res_ctx->pipe_ctx[i].stream = NULL;
701 }
702 }
703}
704
705bool resource_attach_surfaces_to_context(
706 const struct dc_surface * const *surfaces,
707 int surface_count,
708 const struct dc_target *dc_target,
709 struct validate_context *context)
710{
711 int i;
712 struct pipe_ctx *tail_pipe;
713 struct dc_target_status *target_status = NULL;
714
715
716 if (surface_count > MAX_SURFACE_NUM) {
717 		dm_error("Surface: cannot attach %d surfaces! Maximum is: %d\n",
718 surface_count, MAX_SURFACE_NUM);
719 return false;
720 }
721
722 for (i = 0; i < context->target_count; i++)
723 if (&context->targets[i]->public == dc_target) {
724 target_status = &context->target_status[i];
725 break;
726 }
727 if (target_status == NULL) {
728 dm_error("Existing target not found; failed to attach surfaces\n");
729 return false;
730 }
731
732 /* retain new surfaces */
733 for (i = 0; i < surface_count; i++)
734 dc_surface_retain(surfaces[i]);
735
736 detach_surfaces_for_target(context, dc_target);
737
738 /* release existing surfaces*/
739 for (i = 0; i < target_status->surface_count; i++)
740 dc_surface_release(target_status->surfaces[i]);
741
742 for (i = surface_count; i < target_status->surface_count; i++)
743 target_status->surfaces[i] = NULL;
744
745 target_status->surface_count = 0;
746
747 if (surface_count == 0)
748 return true;
749
750 tail_pipe = NULL;
751 for (i = 0; i < surface_count; i++) {
752 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
753 struct pipe_ctx *free_pipe = acquire_free_pipe_for_target(
754 &context->res_ctx, dc_target);
755
756 if (!free_pipe) {
757 target_status->surfaces[i] = NULL;
758 return false;
759 }
760
761 free_pipe->surface = surface;
762
763 if (tail_pipe) {
764 free_pipe->top_pipe = tail_pipe;
765 tail_pipe->bottom_pipe = free_pipe;
766 }
767
768 tail_pipe = free_pipe;
769 }
770
771 release_free_pipes_for_target(&context->res_ctx, dc_target);
772
773 /* assign new surfaces*/
774 for (i = 0; i < surface_count; i++)
775 target_status->surfaces[i] = surfaces[i];
776
777 target_status->surface_count = surface_count;
778
779 return true;
780}
781
782
783static bool is_timing_changed(const struct core_stream *cur_stream,
784 const struct core_stream *new_stream)
785{
786 if (cur_stream == NULL)
787 return true;
788
789 	/* If the sink pointer changed, it means this is a hotplug and we
790 	 * should do a full HW setup.
791 	 */
792 if (cur_stream->sink != new_stream->sink)
793 return true;
794
795 /* If output color space is changed, need to reprogram info frames */
796 if (cur_stream->public.output_color_space !=
797 new_stream->public.output_color_space)
798 return true;
799
800 return memcmp(
801 &cur_stream->public.timing,
802 &new_stream->public.timing,
803 sizeof(struct dc_crtc_timing)) != 0;
804}
805
806static bool are_stream_backends_same(
807 const struct core_stream *stream_a, const struct core_stream *stream_b)
808{
809 if (stream_a == stream_b)
810 return true;
811
812 if (stream_a == NULL || stream_b == NULL)
813 return false;
814
815 if (is_timing_changed(stream_a, stream_b))
816 return false;
817
818 return true;
819}
820
821bool is_target_unchanged(
822 const struct core_target *old_target, const struct core_target *target)
823{
824 int i;
825
826 if (old_target == target)
827 return true;
828 if (old_target->public.stream_count != target->public.stream_count)
829 return false;
830
831 for (i = 0; i < old_target->public.stream_count; i++) {
832 const struct core_stream *old_stream = DC_STREAM_TO_CORE(
833 old_target->public.streams[i]);
834 const struct core_stream *stream = DC_STREAM_TO_CORE(
835 target->public.streams[i]);
836
837 if (!are_stream_backends_same(old_stream, stream))
838 return false;
839 }
840
841 return true;
842}
843
844bool resource_validate_attach_surfaces(
845 const struct dc_validation_set set[],
846 int set_count,
847 const struct validate_context *old_context,
848 struct validate_context *context)
849{
850 int i, j;
851
852 for (i = 0; i < set_count; i++) {
853 for (j = 0; j < old_context->target_count; j++)
854 if (is_target_unchanged(
855 old_context->targets[j],
856 context->targets[i])) {
857 if (!resource_attach_surfaces_to_context(
858 old_context->target_status[j].surfaces,
859 old_context->target_status[j].surface_count,
860 &context->targets[i]->public,
861 context))
862 return false;
863 context->target_status[i] = old_context->target_status[j];
864 }
865 if (set[i].surface_count != 0)
866 if (!resource_attach_surfaces_to_context(
867 set[i].surfaces,
868 set[i].surface_count,
869 &context->targets[i]->public,
870 context))
871 return false;
872
873 }
874
875 return true;
876}
877
878/* Maximum TMDS single link pixel clock 165MHz */
879#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
880
881static void set_stream_engine_in_use(
882 struct resource_context *res_ctx,
883 struct stream_encoder *stream_enc)
884{
885 int i;
886
887 for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
888 if (res_ctx->pool->stream_enc[i] == stream_enc)
889 res_ctx->is_stream_enc_acquired[i] = true;
890 }
891}
892
893/* TODO: release audio object */
894static void set_audio_in_use(
895 struct resource_context *res_ctx,
896 struct audio *audio)
897{
898 int i;
899 for (i = 0; i < res_ctx->pool->audio_count; i++) {
900 if (res_ctx->pool->audios[i] == audio) {
901 res_ctx->is_audio_acquired[i] = true;
902 }
903 }
904}
905
906static int acquire_first_free_pipe(
907 struct resource_context *res_ctx,
908 struct core_stream *stream)
909{
910 int i;
911
912 for (i = 0; i < res_ctx->pool->pipe_count; i++) {
913 if (!res_ctx->pipe_ctx[i].stream) {
914 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
915
916 pipe_ctx->tg = res_ctx->pool->timing_generators[i];
917 pipe_ctx->mi = res_ctx->pool->mis[i];
918 pipe_ctx->ipp = res_ctx->pool->ipps[i];
919 pipe_ctx->xfm = res_ctx->pool->transforms[i];
920 pipe_ctx->opp = res_ctx->pool->opps[i];
921 pipe_ctx->dis_clk = res_ctx->pool->display_clock;
922 pipe_ctx->pipe_idx = i;
923
924 pipe_ctx->stream = stream;
925 return i;
926 }
927 }
928 return -1;
929}
930
931static struct stream_encoder *find_first_free_match_stream_enc_for_link(
932 struct resource_context *res_ctx,
933 struct core_stream *stream)
934{
935 int i;
936 int j = -1;
937 struct core_link *link = stream->sink->link;
938
939 for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
940 if (!res_ctx->is_stream_enc_acquired[i] &&
941 res_ctx->pool->stream_enc[i]) {
942 			/* Store the first available encoder for the second
943 			 * MST display in a daisy-chain use case */
944 j = i;
945 if (res_ctx->pool->stream_enc[i]->id ==
946 link->link_enc->preferred_engine)
947 return res_ctx->pool->stream_enc[i];
948 }
949 }
950
951 	/*
952 	 * The below can happen when the stream encoder is already acquired:
953 	 * 1) by the second MST display in a chain, so the preferred engine is
954 	 * already taken;
955 	 * 2) by another link whose preferred engine was already acquired by an
956 	 * MST configuration.
957 	 *
958 	 * If the signal is DP and the preferred engine was not found, return the last available encoder.
959 	 *
960 	 * TODO - This is just a patch-up; a generic solution is
961 	 * required for non-DP connectors.
962 	 */
963
964 if (j >= 0 && dc_is_dp_signal(stream->signal))
965 return res_ctx->pool->stream_enc[j];
966
967 return NULL;
968}
969
970static struct audio *find_first_free_audio(struct resource_context *res_ctx)
971{
972 int i;
973 for (i = 0; i < res_ctx->pool->audio_count; i++) {
974 if (res_ctx->is_audio_acquired[i] == false) {
975 return res_ctx->pool->audios[i];
976 }
977 }
978
979 	return NULL;
980}
981
982static void update_stream_signal(struct core_stream *stream)
983{
984 const struct dc_sink *dc_sink = stream->public.sink;
985
986 stream->signal = dc_sink->sink_signal;
987 	/* For ASICs that support dual-link DVI, adjust the signal type
988 	 * based on the timing pixel clock. If the pixel clock is above
989 	 * 165 MHz the signal is dual link, otherwise single link.
990 	 */
991 if (dc_sink->sink_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
992 dc_sink->sink_signal == SIGNAL_TYPE_DVI_DUAL_LINK) {
993 if (stream->public.timing.pix_clk_khz >
994 TMDS_MAX_PIXEL_CLOCK_IN_KHZ)
995 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
996 else
997 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
998 }
999}
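
Editor's illustration (not part of the patch): the single/dual-link DVI decision above applied to two example pixel clocks (1080p60 and 2560x1600@60 CVT-RB).

	#include <stdio.h>

	#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000

	int main(void)
	{
		int pix_clk_khz[2] = { 148500, 268500 };
		int i;

		for (i = 0; i < 2; i++)
			printf("%d kHz -> %s-link DVI\n", pix_clk_khz[i],
			       pix_clk_khz[i] > TMDS_MAX_PIXEL_CLOCK_IN_KHZ ?
					"dual" : "single");
		/* prints:
		 * 148500 kHz -> single-link DVI
		 * 268500 kHz -> dual-link DVI
		 */
		return 0;
	}
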
1000
1001bool resource_is_stream_unchanged(
1002 const struct validate_context *old_context, struct core_stream *stream)
1003{
1004 int i, j;
1005
1006 for (i = 0; i < old_context->target_count; i++) {
1007 struct core_target *old_target = old_context->targets[i];
1008
1009 for (j = 0; j < old_target->public.stream_count; j++) {
1010 struct core_stream *old_stream =
1011 DC_STREAM_TO_CORE(old_target->public.streams[j]);
1012
1013 if (are_stream_backends_same(old_stream, stream))
1014 return true;
1015 }
1016 }
1017
1018 return false;
1019}
1020
1021static void copy_pipe_ctx(
1022 const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
1023{
1024 struct core_surface *surface = to_pipe_ctx->surface;
1025 struct core_stream *stream = to_pipe_ctx->stream;
1026
1027 *to_pipe_ctx = *from_pipe_ctx;
1028 to_pipe_ctx->stream = stream;
1029 if (surface != NULL)
1030 to_pipe_ctx->surface = surface;
1031}
1032
1033static struct core_stream *find_pll_sharable_stream(
1034 const struct core_stream *stream_needs_pll,
1035 struct validate_context *context)
1036{
1037 int i, j;
1038
1039 for (i = 0; i < context->target_count; i++) {
1040 struct core_target *target = context->targets[i];
1041
1042 for (j = 0; j < target->public.stream_count; j++) {
1043 struct core_stream *stream_has_pll =
1044 DC_STREAM_TO_CORE(target->public.streams[j]);
1045
1046 			/* We are looking for a non-DP, non-virtual stream */
1047 if (resource_are_streams_timing_synchronizable(
1048 stream_needs_pll, stream_has_pll)
1049 && !dc_is_dp_signal(stream_has_pll->signal)
1050 && stream_has_pll->sink->link->public.connector_signal
1051 != SIGNAL_TYPE_VIRTUAL)
1052 return stream_has_pll;
1053 }
1054 }
1055
1056 return NULL;
1057}
1058
1059static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
1060{
1061 uint32_t pix_clk = timing->pix_clk_khz;
1062 uint32_t normalized_pix_clk = pix_clk;
1063
1064 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
1065 pix_clk /= 2;
1066
1067 switch (timing->display_color_depth) {
1068 case COLOR_DEPTH_888:
1069 normalized_pix_clk = pix_clk;
1070 break;
1071 case COLOR_DEPTH_101010:
1072 normalized_pix_clk = (pix_clk * 30) / 24;
1073 break;
1074 case COLOR_DEPTH_121212:
1075 normalized_pix_clk = (pix_clk * 36) / 24;
1076 break;
1077 case COLOR_DEPTH_161616:
1078 normalized_pix_clk = (pix_clk * 48) / 24;
1079 break;
1080 default:
1081 ASSERT(0);
1082 break;
1083 }
1084
1085 return normalized_pix_clk;
1086}
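
Editor's illustration (not part of the patch): the color-depth scale factors of get_norm_pix_clk() applied to one example pixel clock (594 MHz, a common 4k60 8bpc HDMI value).

	#include <stdio.h>

	int main(void)
	{
		int pix_clk = 594000;	/* kHz; example 4k60 RGB 8bpc clock */

		printf(" 8bpc: %d kHz\n", pix_clk);		/* 594000  */
		printf("10bpc: %d kHz\n", (pix_clk * 30) / 24);	/* 742500  */
		printf("12bpc: %d kHz\n", (pix_clk * 36) / 24);	/* 891000  */
		printf("16bpc: %d kHz\n", (pix_clk * 48) / 24);	/* 1188000 */
		return 0;
	}
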
1087
1088static void calculate_phy_pix_clks(
1089 const struct core_dc *dc,
1090 struct validate_context *context)
1091{
1092 int i, j;
1093
1094 for (i = 0; i < context->target_count; i++) {
1095 struct core_target *target = context->targets[i];
1096
1097 for (j = 0; j < target->public.stream_count; j++) {
1098 struct core_stream *stream =
1099 DC_STREAM_TO_CORE(target->public.streams[j]);
1100
1101 update_stream_signal(stream);
1102
1103 /* update actual pixel clock on all streams */
1104 if (dc_is_hdmi_signal(stream->signal))
1105 stream->phy_pix_clk = get_norm_pix_clk(
1106 &stream->public.timing);
1107 else
1108 stream->phy_pix_clk =
1109 stream->public.timing.pix_clk_khz;
1110 }
1111 }
1112}
1113
1114enum dc_status resource_map_pool_resources(
1115 const struct core_dc *dc,
1116 struct validate_context *context)
1117{
1118 int i, j, k;
1119
1120 calculate_phy_pix_clks(dc, context);
1121
1122 for (i = 0; i < context->target_count; i++) {
1123 struct core_target *target = context->targets[i];
1124
1125 for (j = 0; j < target->public.stream_count; j++) {
1126 struct core_stream *stream =
1127 DC_STREAM_TO_CORE(target->public.streams[j]);
1128
1129 if (!resource_is_stream_unchanged(dc->current_context, stream))
1130 continue;
1131
1132 /* mark resources used for stream that is already active */
1133 for (k = 0; k < MAX_PIPES; k++) {
1134 struct pipe_ctx *pipe_ctx =
1135 &context->res_ctx.pipe_ctx[k];
1136 const struct pipe_ctx *old_pipe_ctx =
1137 &dc->current_context->res_ctx.pipe_ctx[k];
1138
1139 if (!are_stream_backends_same(old_pipe_ctx->stream, stream))
1140 continue;
1141
1142 pipe_ctx->stream = stream;
1143 copy_pipe_ctx(old_pipe_ctx, pipe_ctx);
1144
1145 set_stream_engine_in_use(
1146 &context->res_ctx,
1147 pipe_ctx->stream_enc);
1148
1149 			/* Switch to the DP clock source only if there is
1150 			 * no non-DP stream that shares the same timing
1151 			 * with the DP stream.
1152 			 */
1153 if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
1154 !find_pll_sharable_stream(stream, context))
1155 pipe_ctx->clock_source =
1156 context->res_ctx.pool->dp_clock_source;
1157
1158 resource_reference_clock_source(
1159 &context->res_ctx,
1160 pipe_ctx->clock_source);
1161
1162 set_audio_in_use(&context->res_ctx,
1163 pipe_ctx->audio);
1164 }
1165 }
1166 }
1167
1168 for (i = 0; i < context->target_count; i++) {
1169 struct core_target *target = context->targets[i];
1170
1171 for (j = 0; j < target->public.stream_count; j++) {
1172 struct core_stream *stream =
1173 DC_STREAM_TO_CORE(target->public.streams[j]);
1174 struct pipe_ctx *pipe_ctx = NULL;
1175 int pipe_idx = -1;
1176
1177 if (resource_is_stream_unchanged(dc->current_context, stream))
1178 continue;
1179 /* acquire new resources */
1180 pipe_idx = acquire_first_free_pipe(
1181 &context->res_ctx, stream);
1182 if (pipe_idx < 0)
1183 return DC_NO_CONTROLLER_RESOURCE;
1184
1185
1186 pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
1187
1188 pipe_ctx->stream_enc =
1189 find_first_free_match_stream_enc_for_link(
1190 &context->res_ctx, stream);
1191
1192 if (!pipe_ctx->stream_enc)
1193 return DC_NO_STREAM_ENG_RESOURCE;
1194
1195 set_stream_engine_in_use(
1196 &context->res_ctx,
1197 pipe_ctx->stream_enc);
1198
1199 			/* TODO: Add a check for ASIC audio support and EDID audio */
1200 if (!stream->sink->converter_disable_audio &&
1201 dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
1202 stream->public.audio_info.mode_count) {
1203 pipe_ctx->audio = find_first_free_audio(
1204 &context->res_ctx);
1205
1206 				/*
1207 				 * Audio is assigned first come, first served.
1208 				 * There are ASICs which have fewer audio
1209 				 * resources than pipes.
1210 				 */
1211 if (pipe_ctx->audio)
1212 set_audio_in_use(
1213 &context->res_ctx,
1214 pipe_ctx->audio);
1215 }
1216
1217 if (j == 0) {
1218 context->target_status[i].primary_otg_inst =
1219 pipe_ctx->tg->inst;
1220 }
1221 }
1222 }
1223
1224 return DC_OK;
1225}
1226
1227/* first target in the context is used to populate the rest */
1228void validate_guaranteed_copy_target(
1229 struct validate_context *context,
1230 int max_targets)
1231{
1232 int i;
1233
1234 for (i = 1; i < max_targets; i++) {
1235 context->targets[i] = context->targets[0];
1236
1237 copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
1238 &context->res_ctx.pipe_ctx[i]);
1239 context->res_ctx.pipe_ctx[i].stream =
1240 context->res_ctx.pipe_ctx[0].stream;
1241
1242 dc_target_retain(&context->targets[i]->public);
1243 context->target_count++;
1244 }
1245}
1246
1247static void translate_info_frame(const struct hw_info_frame *hw_info_frame,
1248 struct encoder_info_frame *encoder_info_frame)
1249{
1250 memset(
1251 encoder_info_frame, 0, sizeof(struct encoder_info_frame));
1252
1253 /* For gamut we recalc checksum */
1254 if (hw_info_frame->gamut_packet.valid) {
1255 uint8_t chk_sum = 0;
1256 uint8_t *ptr;
1257 uint8_t i;
1258
1259 memmove(
1260 &encoder_info_frame->gamut,
1261 &hw_info_frame->gamut_packet,
1262 sizeof(struct hw_info_packet));
1263
1264 		/* Start of the Gamut data. */
1265 ptr = &encoder_info_frame->gamut.sb[3];
1266
1267 for (i = 0; i <= encoder_info_frame->gamut.sb[1]; i++)
1268 chk_sum += ptr[i];
1269
1270 encoder_info_frame->gamut.sb[2] = (uint8_t) (0x100 - chk_sum);
1271 }
1272
1273 if (hw_info_frame->avi_info_packet.valid) {
1274 memmove(
1275 &encoder_info_frame->avi,
1276 &hw_info_frame->avi_info_packet,
1277 sizeof(struct hw_info_packet));
1278 }
1279
1280 if (hw_info_frame->vendor_info_packet.valid) {
1281 memmove(
1282 &encoder_info_frame->vendor,
1283 &hw_info_frame->vendor_info_packet,
1284 sizeof(struct hw_info_packet));
1285 }
1286
1287 if (hw_info_frame->spd_packet.valid) {
1288 memmove(
1289 &encoder_info_frame->spd,
1290 &hw_info_frame->spd_packet,
1291 sizeof(struct hw_info_packet));
1292 }
1293
1294 if (hw_info_frame->vsc_packet.valid) {
1295 memmove(
1296 &encoder_info_frame->vsc,
1297 &hw_info_frame->vsc_packet,
1298 sizeof(struct hw_info_packet));
1299 }
1300}
1301
1302static void set_avi_info_frame(
1303 struct hw_info_packet *info_packet,
1304 struct pipe_ctx *pipe_ctx)
1305{
1306 struct core_stream *stream = pipe_ctx->stream;
1307 enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
1308 struct info_frame info_frame = { {0} };
1309 uint32_t pixel_encoding = 0;
1310 enum scanning_type scan_type = SCANNING_TYPE_NODATA;
1311 enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
1312 bool itc = false;
1313 uint8_t cn0_cn1 = 0;
1314 uint8_t *check_sum = NULL;
1315 uint8_t byte_index = 0;
1316
1317 if (info_packet == NULL)
1318 return;
1319
1320 color_space = pipe_ctx->stream->public.output_color_space;
1321
1322 /* Initialize header */
1323 info_frame.avi_info_packet.info_packet_hdmi.bits.header.
1324 info_frame_type = INFO_FRAME_AVI;
1325 /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
1326 * not be used in HDMI 2.0 (Section 10.1) */
1327 info_frame.avi_info_packet.info_packet_hdmi.bits.header.version =
1328 INFO_FRAME_VERSION_2;
1329 info_frame.avi_info_packet.info_packet_hdmi.bits.header.length =
1330 INFO_FRAME_SIZE_AVI;
1331
1332 /*
1333 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
1334 * according to HDMI 2.0 spec (Section 10.1)
1335 */
1336
1337 switch (stream->public.timing.pixel_encoding) {
1338 case PIXEL_ENCODING_YCBCR422:
1339 pixel_encoding = 1;
1340 break;
1341
1342 case PIXEL_ENCODING_YCBCR444:
1343 pixel_encoding = 2;
1344 break;
1345 case PIXEL_ENCODING_YCBCR420:
1346 pixel_encoding = 3;
1347 break;
1348
1349 case PIXEL_ENCODING_RGB:
1350 default:
1351 pixel_encoding = 0;
1352 }
1353
1354 /* Y0_Y1_Y2 : The pixel encoding */
1355 /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
1356 info_frame.avi_info_packet.info_packet_hdmi.bits.Y0_Y1_Y2 =
1357 pixel_encoding;
1358
1359 /* A0 = 1 Active Format Information valid */
1360 info_frame.avi_info_packet.info_packet_hdmi.bits.A0 =
1361 ACTIVE_FORMAT_VALID;
1362
1363 /* B0, B1 = 3; Bar info data is valid */
1364 info_frame.avi_info_packet.info_packet_hdmi.bits.B0_B1 =
1365 BAR_INFO_BOTH_VALID;
1366
1367 info_frame.avi_info_packet.info_packet_hdmi.bits.SC0_SC1 =
1368 PICTURE_SCALING_UNIFORM;
1369
1370 /* S0, S1 : Underscan / Overscan */
1371 /* TODO: un-hardcode scan type */
1372 scan_type = SCANNING_TYPE_UNDERSCAN;
1373 info_frame.avi_info_packet.info_packet_hdmi.bits.S0_S1 = scan_type;
1374
1375 /* C0, C1 : Colorimetry */
1376 if (color_space == COLOR_SPACE_YCBCR709)
1377 info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
1378 COLORIMETRY_ITU709;
1379 else if (color_space == COLOR_SPACE_YCBCR601)
1380 info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
1381 COLORIMETRY_ITU601;
1382 else
1383 info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
1384 COLORIMETRY_NO_DATA;
1385
1386 /* TODO: un-hardcode aspect ratio */
1387 aspect = stream->public.timing.aspect_ratio;
1388
1389 switch (aspect) {
1390 case ASPECT_RATIO_4_3:
1391 case ASPECT_RATIO_16_9:
1392 info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = aspect;
1393 break;
1394
1395 case ASPECT_RATIO_NO_DATA:
1396 case ASPECT_RATIO_64_27:
1397 case ASPECT_RATIO_256_135:
1398 default:
1399 info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = 0;
1400 }
1401
1402 /* Active Format Aspect ratio - same as Picture Aspect Ratio. */
1403 info_frame.avi_info_packet.info_packet_hdmi.bits.R0_R3 =
1404 ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
1405
1406 /* TODO: un-hardcode cn0_cn1 and itc */
1407 cn0_cn1 = 0;
1408 itc = false;
1409
1410 if (itc) {
1411 info_frame.avi_info_packet.info_packet_hdmi.bits.ITC = 1;
1412 info_frame.avi_info_packet.info_packet_hdmi.bits.CN0_CN1 =
1413 cn0_cn1;
1414 }
1415
1416 /* TODO : We should handle YCC quantization */
1417 /* but we do not have matrix calculation */
1418 if (color_space == COLOR_SPACE_SRGB) {
1419 info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
1420 RGB_QUANTIZATION_FULL_RANGE;
1421 info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
1422 YYC_QUANTIZATION_FULL_RANGE;
1423 } else if (color_space == COLOR_SPACE_SRGB_LIMITED) {
1424 info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
1425 RGB_QUANTIZATION_LIMITED_RANGE;
1426 info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
1427 YYC_QUANTIZATION_LIMITED_RANGE;
1428 } else {
1429 info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
1430 RGB_QUANTIZATION_DEFAULT_RANGE;
1431 info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
1432 YYC_QUANTIZATION_LIMITED_RANGE;
1433 }
1434
1435 info_frame.avi_info_packet.info_packet_hdmi.bits.VIC0_VIC7 =
1436 stream->public.timing.vic;
1437
1438 /* pixel repetition
1439 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
1440 * repetition start from 1 */
1441 info_frame.avi_info_packet.info_packet_hdmi.bits.PR0_PR3 = 0;
1442
1443 /* Bar Info
1444 * barTop: Line Number of End of Top Bar.
1445 * barBottom: Line Number of Start of Bottom Bar.
1446 * barLeft: Pixel Number of End of Left Bar.
1447 * barRight: Pixel Number of Start of Right Bar. */
1448 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_top =
1449 stream->public.timing.v_border_top;
1450 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_bottom =
1451 		(stream->public.timing.v_total
1452 - stream->public.timing.v_border_bottom + 1);
1453 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_left =
1454 stream->public.timing.h_border_left;
1455 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_right =
1456 (stream->public.timing.h_total
1457 - stream->public.timing.h_border_right + 1);
1458
1459 /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
1460 check_sum =
1461 &info_frame.
1462 avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
1463 *check_sum = INFO_FRAME_AVI + INFO_FRAME_SIZE_AVI
1464 + INFO_FRAME_VERSION_2;
1465
1466 for (byte_index = 1; byte_index <= INFO_FRAME_SIZE_AVI; byte_index++)
1467 *check_sum += info_frame.avi_info_packet.info_packet_hdmi.
1468 packet_raw_data.sb[byte_index];
1469
1470 /* one byte complement */
1471 *check_sum = (uint8_t) (0x100 - *check_sum);
1472
1473 /* Store in hw_path_mode */
1474 info_packet->hb0 =
1475 info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb0;
1476 info_packet->hb1 =
1477 info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb1;
1478 info_packet->hb2 =
1479 info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb2;
1480
1481 for (byte_index = 0; byte_index < sizeof(info_packet->sb); byte_index++)
1482 info_packet->sb[byte_index] = info_frame.avi_info_packet.
1483 info_packet_hdmi.packet_raw_data.sb[byte_index];
1484
1485 info_packet->valid = true;
1486}
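
Editor's illustration (not part of the patch): the one-byte-complement checksum used above for the AVI packet (and for the gamut packet in translate_info_frame), run over an arbitrary header and payload.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t hb[3] = { 0x82, 0x02, 0x0D };	/* type, version, length */
		uint8_t sb[13] = { 0x50, 0xA8, 0x00 };	/* rest of payload zero  */
		uint8_t sum = hb[0] + hb[1] + hb[2];
		int i;

		for (i = 0; i < 13; i++)
			sum += sb[i];

		/* Checksum byte makes the whole packet sum to 0 (mod 256). */
		printf("checksum byte = 0x%02X\n", (uint8_t)(0x100 - sum));
		/* prints: checksum byte = 0x77 */
		return 0;
	}
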
1487
1488static void set_vendor_info_packet(struct core_stream *stream,
1489 struct hw_info_packet *info_packet)
1490{
1491 uint32_t length = 0;
1492 bool hdmi_vic_mode = false;
1493 uint8_t checksum = 0;
1494 uint32_t i = 0;
1495 enum dc_timing_3d_format format;
1496
1497 ASSERT_CRITICAL(stream != NULL);
1498 ASSERT_CRITICAL(info_packet != NULL);
1499
1500 format = stream->public.timing.timing_3d_format;
1501
1502 /* Can be different depending on packet content */
1503 length = 5;
1504
1505 if (stream->public.timing.hdmi_vic != 0
1506 && stream->public.timing.h_total >= 3840
1507 && stream->public.timing.v_total >= 2160)
1508 hdmi_vic_mode = true;
1509
1510 /* According to HDMI 1.4a CTS, VSIF should be sent
1511 * for both 3D stereo and HDMI VIC modes.
1512 * For all other modes, there is no VSIF sent. */
1513
1514 if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
1515 return;
1516
1517 /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
1518 info_packet->sb[1] = 0x03;
1519 info_packet->sb[2] = 0x0C;
1520 info_packet->sb[3] = 0x00;
1521
1522 	/* PB4: 5 lower bits = 0 (reserved). 3 higher bits = HDMI_Video_Format.
1523 * The value for HDMI_Video_Format are:
1524 * 0x0 (0b000) - No additional HDMI video format is presented in this
1525 * packet
1526 * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
1527 * parameter follows
1528 * 0x2 (0b010) - 3D format indication present. 3D_Structure and
1529 * potentially 3D_Ext_Data follows
1530 * 0x3..0x7 (0b011..0b111) - reserved for future use */
1531 if (format != TIMING_3D_FORMAT_NONE)
1532 info_packet->sb[4] = (2 << 5);
1533 else if (hdmi_vic_mode)
1534 info_packet->sb[4] = (1 << 5);
1535
1536 /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
1537 	 * 4 lower bits = 0 (reserved). 4 higher bits = 3D_Structure.
1538 * The value for 3D_Structure are:
1539 * 0x0 - Frame Packing
1540 * 0x1 - Field Alternative
1541 * 0x2 - Line Alternative
1542 * 0x3 - Side-by-Side (full)
1543 * 0x4 - L + depth
1544 * 0x5 - L + depth + graphics + graphics-depth
1545 * 0x6 - Top-and-Bottom
1546 * 0x7 - Reserved for future use
1547 * 0x8 - Side-by-Side (Half)
1548 * 0x9..0xE - Reserved for future use
1549 * 0xF - Not used */
1550 switch (format) {
1551 case TIMING_3D_FORMAT_HW_FRAME_PACKING:
1552 case TIMING_3D_FORMAT_SW_FRAME_PACKING:
1553 info_packet->sb[5] = (0x0 << 4);
1554 break;
1555
1556 case TIMING_3D_FORMAT_SIDE_BY_SIDE:
1557 case TIMING_3D_FORMAT_SBS_SW_PACKED:
1558 info_packet->sb[5] = (0x8 << 4);
1559 length = 6;
1560 break;
1561
1562 case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
1563 case TIMING_3D_FORMAT_TB_SW_PACKED:
1564 info_packet->sb[5] = (0x6 << 4);
1565 break;
1566
1567 default:
1568 break;
1569 }
1570
1571 	/* PB5: If PB4 is set to 0x1 (extended resolution format)
1572 * fill PB5 with the correct HDMI VIC code */
1573 if (hdmi_vic_mode)
1574 info_packet->sb[5] = stream->public.timing.hdmi_vic;
1575
1576 /* Header */
1577 info_packet->hb0 = 0x81; /* VSIF packet type. */
1578 info_packet->hb1 = 0x01; /* Version */
1579
1580 /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
1581 info_packet->hb2 = (uint8_t) (length);
1582
1583 /* Calculate checksum */
1584 checksum = 0;
1585 checksum += info_packet->hb0;
1586 checksum += info_packet->hb1;
1587 checksum += info_packet->hb2;
1588
1589 for (i = 1; i <= length; i++)
1590 checksum += info_packet->sb[i];
1591
1592 info_packet->sb[0] = (uint8_t) (0x100 - checksum);
1593
1594 info_packet->valid = true;
1595}
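
Editor's illustration (not part of the patch): the VSIF PB4/PB5 encoding described in the comments above, assembled for a hypothetical side-by-side (half) 3D timing with no HDMI VIC.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t sb[7] = { 0 };

		sb[1] = 0x03; sb[2] = 0x0C; sb[3] = 0x00; /* HDMI IEEE OUI, LSB first */
		sb[4] = 2 << 5;                           /* HDMI_Video_Format = 3D    */
		sb[5] = 0x8 << 4;                         /* 3D_Structure = SbS (half) */

		printf("PB4=0x%02X PB5=0x%02X\n", sb[4], sb[5]);
		/* prints: PB4=0x40 PB5=0x80 */
		return 0;
	}
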
1596
1597static void set_spd_info_packet(struct core_stream *stream,
1598 struct hw_info_packet *info_packet)
1599{
1600 /* SPD info packet for FreeSync */
1601
1602 unsigned char checksum = 0;
1603 unsigned int idx, payload_size = 0;
1604
1605 /* Check if Freesync is supported. Return if false. If true,
1606 * set the corresponding bit in the info packet
1607 */
1608 if (stream->public.freesync_ctx.supported == false)
1609 return;
1610
1611 if (dc_is_hdmi_signal(stream->signal)) {
1612
1613 /* HEADER */
1614
1615 /* HB0 = Packet Type = 0x83 (Source Product
1616 * Descriptor InfoFrame)
1617 */
1618 info_packet->hb0 = 0x83;
1619
1620 /* HB1 = Version = 0x01 */
1621 info_packet->hb1 = 0x01;
1622
1623 /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
1624 info_packet->hb2 = 0x08;
1625
1626 payload_size = 0x08;
1627
1628 } else if (dc_is_dp_signal(stream->signal)) {
1629
1630 /* HEADER */
1631
1632 /* HB0 = Secondary-data Packet ID = 0 - Only non-zero
1633 * when used to associate audio related info packets
1634 */
1635 info_packet->hb0 = 0x00;
1636
1637 /* HB1 = Packet Type = 0x83 (Source Product
1638 * Descriptor InfoFrame)
1639 */
1640 info_packet->hb1 = 0x83;
1641
1642 /* HB2 = [Bits 7:0 = Least significant eight bits -
1643 * For INFOFRAME, the value must be 1Bh]
1644 */
1645 info_packet->hb2 = 0x1B;
1646
1647 /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
1648 * [Bits 1:0 = Most significant two bits = 0x00]
1649 */
1650 info_packet->hb3 = 0x04;
1651
1652 payload_size = 0x1B;
1653 }
1654
1655 /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
1656 info_packet->sb[1] = 0x1A;
1657
1658 /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
1659 info_packet->sb[2] = 0x00;
1660
1661 /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
1662 info_packet->sb[3] = 0x00;
1663
1664 /* PB4 = Reserved */
1665 info_packet->sb[4] = 0x00;
1666
1667 /* PB5 = Reserved */
1668 info_packet->sb[5] = 0x00;
1669
1670 /* PB6 = [Bits 7:3 = Reserved] */
1671 info_packet->sb[6] = 0x00;
1672
1673 if (stream->public.freesync_ctx.supported == true)
1674 /* PB6 = [Bit 0 = FreeSync Supported] */
1675 info_packet->sb[6] |= 0x01;
1676
1677 if (stream->public.freesync_ctx.enabled == true)
1678 /* PB6 = [Bit 1 = FreeSync Enabled] */
1679 info_packet->sb[6] |= 0x02;
1680
1681 if (stream->public.freesync_ctx.active == true)
1682 /* PB6 = [Bit 2 = FreeSync Active] */
1683 info_packet->sb[6] |= 0x04;
1684
1685 /* PB7 = FreeSync Minimum refresh rate (Hz) */
1686 info_packet->sb[7] = (unsigned char) (stream->public.freesync_ctx.
1687 min_refresh_in_micro_hz / 1000000);
1688
1689 /* PB8 = FreeSync Maximum refresh rate (Hz)
1690 *
1691 * Note: We do not use the maximum capable refresh rate
1692 * of the panel, because we should never go above the field
1693 * rate of the mode timing set.
1694 */
1695 info_packet->sb[8] = (unsigned char) (stream->public.freesync_ctx.
1696 nominal_refresh_in_micro_hz / 1000000);
1697
1698 /* PB9 - PB27 = Reserved */
1699 for (idx = 9; idx <= 27; idx++)
1700 info_packet->sb[idx] = 0x00;
1701
1702 /* Calculate checksum */
1703 checksum += info_packet->hb0;
1704 checksum += info_packet->hb1;
1705 checksum += info_packet->hb2;
1706 checksum += info_packet->hb3;
1707
1708 for (idx = 1; idx <= payload_size; idx++)
1709 checksum += info_packet->sb[idx];
1710
1711 /* PB0 = Checksum (one byte complement) */
1712 info_packet->sb[0] = (unsigned char) (0x100 - checksum);
1713
1714 info_packet->valid = true;
1715}
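
Editor's illustration (not part of the patch): the micro-Hz to whole-Hz conversion used for PB7/PB8 above, with a made-up 40-144 Hz FreeSync range; the driver then truncates each value to one byte.

	#include <stdio.h>

	int main(void)
	{
		unsigned int min_refresh_in_micro_hz = 40000000;
		unsigned int nominal_refresh_in_micro_hz = 144000000;

		unsigned int pb7 = min_refresh_in_micro_hz / 1000000;
		unsigned int pb8 = nominal_refresh_in_micro_hz / 1000000;

		printf("PB7 (min) = %u Hz, PB8 (max) = %u Hz\n", pb7, pb8);
		/* prints: PB7 (min) = 40 Hz, PB8 (max) = 144 Hz */
		return 0;
	}
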
1716
1717static void set_vsc_info_packet(struct core_stream *stream,
1718 struct hw_info_packet *info_packet)
1719{
1720 unsigned int vscPacketRevision = 0;
1721 unsigned int i;
1722
1723 if (stream->sink->link->public.psr_caps.psr_version != 0) {
1724 vscPacketRevision = 2;
1725 }
1726
1727 /* VSC packet not needed based on the features
1728 * supported by this DP display
1729 */
1730 if (vscPacketRevision == 0)
1731 return;
1732
1733 if (vscPacketRevision == 0x2) {
1734 /* Secondary-data Packet ID = 0*/
1735 info_packet->hb0 = 0x00;
1736 /* 07h - Packet Type Value indicating Video
1737 * Stream Configuration packet
1738 */
1739 info_packet->hb1 = 0x07;
1740 /* 02h = VSC SDP supporting 3D stereo and PSR
1741 * (applies to eDP v1.3 or higher).
1742 */
1743 info_packet->hb2 = 0x02;
1744 /* 08h = VSC packet supporting 3D stereo + PSR
1745 * (HB2 = 02h).
1746 */
1747 info_packet->hb3 = 0x08;
1748
1749 for (i = 0; i < 28; i++)
1750 info_packet->sb[i] = 0;
1751
1752 info_packet->valid = true;
1753 }
1754
1755 /*TODO: stereo 3D support and extend pixel encoding colorimetry*/
1756}
1757
1758void resource_validate_ctx_destruct(struct validate_context *context)
1759{
1760 int i, j;
1761
1762 for (i = 0; i < context->target_count; i++) {
1763 for (j = 0; j < context->target_status[i].surface_count; j++)
1764 dc_surface_release(
1765 context->target_status[i].surfaces[j]);
1766
1767 context->target_status[i].surface_count = 0;
1768 dc_target_release(&context->targets[i]->public);
1769 }
1770}
1771
1772/*
1773 * Copy src_ctx into dst_ctx and retain all surfaces and targets referenced
1774 * by the src_ctx
1775 */
1776void resource_validate_ctx_copy_construct(
1777 const struct validate_context *src_ctx,
1778 struct validate_context *dst_ctx)
1779{
1780 int i, j;
1781
1782 *dst_ctx = *src_ctx;
1783
1784 for (i = 0; i < dst_ctx->res_ctx.pool->pipe_count; i++) {
1785 struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
1786
1787 if (cur_pipe->top_pipe)
1788 cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1789
1790 if (cur_pipe->bottom_pipe)
1791 cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1792
1793 }
1794
1795 for (i = 0; i < dst_ctx->target_count; i++) {
1796 dc_target_retain(&dst_ctx->targets[i]->public);
1797 for (j = 0; j < dst_ctx->target_status[i].surface_count; j++)
1798 dc_surface_retain(
1799 dst_ctx->target_status[i].surfaces[j]);
1800 }
1801}
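
Editor's illustration (not part of the patch): why the top_pipe/bottom_pipe pointers are re-pointed after the context copy above. A shallow struct copy leaves those pointers aimed at the source array, so they are rebuilt from pipe_idx; simplified two-pipe mock-up, not the driver structs.

	#include <stdio.h>

	struct ex_pipe { int pipe_idx; struct ex_pipe *bottom_pipe; };
	struct ex_ctx  { struct ex_pipe pipe[2]; };

	int main(void)
	{
		struct ex_ctx src = { { { 0, NULL }, { 1, NULL } } };
		struct ex_ctx dst;
		int i;

		src.pipe[0].bottom_pipe = &src.pipe[1];

		dst = src;	/* shallow copy: dst.pipe[0].bottom_pipe still -> src */
		printf("before fixup: points into src? %s\n",
		       dst.pipe[0].bottom_pipe == &src.pipe[1] ? "yes" : "no");

		/* Re-point each link into the destination array by index. */
		for (i = 0; i < 2; i++)
			if (dst.pipe[i].bottom_pipe)
				dst.pipe[i].bottom_pipe =
					&dst.pipe[dst.pipe[i].bottom_pipe->pipe_idx];

		printf("after fixup : points into dst? %s\n",
		       dst.pipe[0].bottom_pipe == &dst.pipe[1] ? "yes" : "no");
		return 0;
	}
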
1802
1803struct clock_source *dc_resource_find_first_free_pll(
1804 struct resource_context *res_ctx)
1805{
1806 int i;
1807
1808 for (i = 0; i < res_ctx->pool->clk_src_count; ++i) {
1809 if (res_ctx->clock_source_ref_count[i] == 0)
1810 return res_ctx->pool->clock_sources[i];
1811 }
1812
1813 return NULL;
1814}
1815
1816void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
1817{
1818 enum signal_type signal = SIGNAL_TYPE_NONE;
1819 struct hw_info_frame info_frame = { { 0 } };
1820
1821 /* default all packets to invalid */
1822 info_frame.avi_info_packet.valid = false;
1823 info_frame.gamut_packet.valid = false;
1824 info_frame.vendor_info_packet.valid = false;
1825 info_frame.spd_packet.valid = false;
1826 info_frame.vsc_packet.valid = false;
1827
1828 signal = pipe_ctx->stream->signal;
1829
1830 	/* HDMI and DP have different info packets */
1831 if (dc_is_hdmi_signal(signal)) {
1832 set_avi_info_frame(
1833 &info_frame.avi_info_packet, pipe_ctx);
1834 set_vendor_info_packet(
1835 pipe_ctx->stream, &info_frame.vendor_info_packet);
1836 set_spd_info_packet(pipe_ctx->stream, &info_frame.spd_packet);
1837 }
1838
1839 	else if (dc_is_dp_signal(signal)) {
1840 		set_vsc_info_packet(pipe_ctx->stream, &info_frame.vsc_packet);
1841 		set_spd_info_packet(pipe_ctx->stream, &info_frame.spd_packet);
	}
1842
1843 translate_info_frame(&info_frame,
1844 &pipe_ctx->encoder_info_frame);
1845}
1846
1847enum dc_status resource_map_clock_resources(
1848 const struct core_dc *dc,
1849 struct validate_context *context)
1850{
1851 int i, j, k;
1852
1853 /* acquire new resources */
1854 for (i = 0; i < context->target_count; i++) {
1855 struct core_target *target = context->targets[i];
1856
1857 for (j = 0; j < target->public.stream_count; j++) {
1858 struct core_stream *stream =
1859 DC_STREAM_TO_CORE(target->public.streams[j]);
1860
1861 if (resource_is_stream_unchanged(dc->current_context, stream))
1862 continue;
1863
1864 for (k = 0; k < MAX_PIPES; k++) {
1865 struct pipe_ctx *pipe_ctx =
1866 &context->res_ctx.pipe_ctx[k];
1867
1868 if (context->res_ctx.pipe_ctx[k].stream != stream)
1869 continue;
1870
1871 if (dc_is_dp_signal(pipe_ctx->stream->signal)
1872 || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
1873 pipe_ctx->clock_source =
1874 context->res_ctx.pool->dp_clock_source;
1875 else {
1876 pipe_ctx->clock_source = NULL;
1877
1878 if (!dc->public.config.disable_disp_pll_sharing)
1879 					pipe_ctx->clock_source =
1880 						resource_find_used_clk_src_for_sharing(
1881 							&context->res_ctx, pipe_ctx);
1882
1883 if (pipe_ctx->clock_source == NULL)
1884 pipe_ctx->clock_source =
1885 dc_resource_find_first_free_pll(&context->res_ctx);
1886 }
1887
1888 if (pipe_ctx->clock_source == NULL)
1889 return DC_NO_CLOCK_SOURCE_RESOURCE;
1890
1891 resource_reference_clock_source(
1892 &context->res_ctx,
1893 pipe_ctx->clock_source);
1894
1895 /* only one cs per stream regardless of mpo */
1896 break;
1897 }
1898 }
1899 }
1900
1901 return DC_OK;
1902}
1903
1904/*
1905  * Note: We need to disable the output if the clock source changes, since
1906  * the BIOS performs an optimization and will not reprogram the PHY
1907  * unless it is already disabled.
1908 */
1909bool pipe_need_reprogram(
1910 struct pipe_ctx *pipe_ctx_old,
1911 struct pipe_ctx *pipe_ctx)
1912{
1913 if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
1914 return true;
1915
1916 if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
1917 return true;
1918
1919 if (pipe_ctx_old->audio != pipe_ctx->audio)
1920 return true;
1921
1922 if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
1923 && pipe_ctx_old->stream != pipe_ctx->stream)
1924 return true;
1925
1926 if (pipe_ctx_old->stream_enc != pipe_ctx->stream_enc)
1927 return true;
1928
1929 if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
1930 return true;
1931
1932
1933 return false;
1934}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
new file mode 100644
index 000000000000..67ae799b6f4f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dm_helpers.h"
28#include "core_types.h"
29
30/*******************************************************************************
31 * Private definitions
32 ******************************************************************************/
33
34struct sink {
35 struct core_sink protected;
36 int ref_count;
37};
38
39#define DC_SINK_TO_SINK(dc_sink) \
40 container_of(dc_sink, struct sink, protected.public)
41
42/*******************************************************************************
43 * Private functions
44 ******************************************************************************/
45
46static void destruct(struct sink *sink)
47{
48
49}
50
51static bool construct(struct sink *sink, const struct dc_sink_init_data *init_params)
52{
53
54 struct core_link *core_link = DC_LINK_TO_LINK(init_params->link);
55
56 sink->protected.public.sink_signal = init_params->sink_signal;
57 sink->protected.link = core_link;
58 sink->protected.ctx = core_link->ctx;
59 sink->protected.dongle_max_pix_clk = init_params->dongle_max_pix_clk;
60 sink->protected.converter_disable_audio =
61 init_params->converter_disable_audio;
62
63 return true;
64}
65
66/*******************************************************************************
67 * Public functions
68 ******************************************************************************/
69
70void dc_sink_retain(const struct dc_sink *dc_sink)
71{
72 struct sink *sink = DC_SINK_TO_SINK(dc_sink);
73
74 ++sink->ref_count;
75}
76
77void dc_sink_release(const struct dc_sink *dc_sink)
78{
79 struct sink *sink = DC_SINK_TO_SINK(dc_sink);
80
81 --sink->ref_count;
82
83 if (sink->ref_count == 0) {
84 destruct(sink);
85 dm_free(sink);
86 }
87}
88
89struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
90{
91 struct sink *sink = dm_alloc(sizeof(*sink));
92
93 if (NULL == sink)
94 goto alloc_fail;
95
96 if (false == construct(sink, init_params))
97 goto construct_fail;
98
99 /* TODO should we move this outside to where the assignment actually happens? */
100 dc_sink_retain(&sink->protected.public);
101
102 return &sink->protected.public;
103
104construct_fail:
105 dm_free(sink);
106
107alloc_fail:
108 return NULL;
109}
110
111/*******************************************************************************
112 * Protected functions - visible only inside of DC (not visible in DM)
113 ******************************************************************************/
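
Editor's illustration (not part of the patch): the create/retain/release reference-counting contract implemented by dc_sink above, reduced to a self-contained mock. dc_sink_create() already takes the creator's reference, each additional user retains, and the final release frees.

	#include <stdio.h>
	#include <stdlib.h>

	struct obj { int ref_count; };

	static struct obj *obj_create(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		if (o)
			o->ref_count = 1;	/* creator's reference */
		return o;
	}

	static void obj_retain(struct obj *o)
	{
		++o->ref_count;
	}

	static void obj_release(struct obj *o)
	{
		if (--o->ref_count == 0) {
			printf("freed\n");
			free(o);
		}
	}

	int main(void)
	{
		struct obj *o = obj_create();

		obj_retain(o);	/* a second user, e.g. a stream holding the sink */
		obj_release(o);	/* second user done */
		obj_release(o);	/* creator done -> prints "freed" */
		return 0;
	}
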
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
new file mode 100644
index 000000000000..8d6aa607e1f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dc.h"
28#include "core_types.h"
29#include "resource.h"
30
31/*******************************************************************************
32 * Private definitions
33 ******************************************************************************/
34
35struct stream {
36 struct core_stream protected;
37 int ref_count;
38};
39
40#define DC_STREAM_TO_STREAM(dc_stream) container_of(dc_stream, struct stream, protected.public)
41
42/*******************************************************************************
43 * Private functions
44 ******************************************************************************/
45
46static bool construct(struct core_stream *stream,
47 const struct dc_sink *dc_sink_data)
48{
49 uint32_t i = 0;
50
51 stream->sink = DC_SINK_TO_CORE(dc_sink_data);
52 stream->ctx = stream->sink->ctx;
53 stream->public.sink = dc_sink_data;
54
55 dc_sink_retain(dc_sink_data);
56
57 /* Copy audio modes */
58 /* TODO - Remove this translation */
59 for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
60 {
61 stream->public.audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
62 stream->public.audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
63 stream->public.audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
64 stream->public.audio_info.modes[i].sample_size = dc_sink_data->edid_caps.audio_modes[i].sample_size;
65 }
66 stream->public.audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count;
67 stream->public.audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency;
68 stream->public.audio_info.video_latency = dc_sink_data->edid_caps.video_latency;
69 memmove(
70 stream->public.audio_info.display_name,
71 dc_sink_data->edid_caps.display_name,
72 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
73 stream->public.audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id;
74 stream->public.audio_info.product_id = dc_sink_data->edid_caps.product_id;
75 stream->public.audio_info.flags.all = dc_sink_data->edid_caps.speaker_flags;
76
77 /* TODO - Unhardcode port_id */
78 stream->public.audio_info.port_id[0] = 0x5558859e;
79 stream->public.audio_info.port_id[1] = 0xd989449;
80
81 /* EDID CAP translation for HDMI 2.0 */
82 stream->public.timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
83
84 stream->status.link = &stream->sink->link->public;
85
86 return true;
87}
88
89static void destruct(struct core_stream *stream)
90{
91 dc_sink_release(&stream->sink->public);
92}
93
94void dc_stream_retain(const struct dc_stream *dc_stream)
95{
96 struct stream *stream = DC_STREAM_TO_STREAM(dc_stream);
97 stream->ref_count++;
98}
99
100void dc_stream_release(const struct dc_stream *public)
101{
102 struct stream *stream = DC_STREAM_TO_STREAM(public);
103 struct core_stream *protected = DC_STREAM_TO_CORE(public);
104
105 if (public != NULL) {
106 stream->ref_count--;
107
108 if (stream->ref_count == 0) {
109 destruct(protected);
110 dm_free(stream);
111 }
112 }
113}
114
115struct dc_stream *dc_create_stream_for_sink(
116 const struct dc_sink *dc_sink)
117{
118 struct core_sink *sink = DC_SINK_TO_CORE(dc_sink);
119 struct stream *stream;
120
121 if (sink == NULL)
122 goto alloc_fail;
123
124 stream = dm_alloc(sizeof(struct stream));
125
126 if (NULL == stream)
127 goto alloc_fail;
128
129 if (false == construct(&stream->protected, dc_sink))
130 goto construct_fail;
131
132 dc_stream_retain(&stream->protected.public);
133
134 return &stream->protected.public;
135
136construct_fail:
137 dm_free(stream);
138
139alloc_fail:
140 return NULL;
141}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
new file mode 100644
index 000000000000..b89d3b5d0ba0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -0,0 +1,213 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/* DC interface (public) */
27#include "dm_services.h"
28#include "dc.h"
29
30/* DC core (private) */
31#include "core_dc.h"
32#include "transform.h"
33
34/*******************************************************************************
35 * Private structures
36 ******************************************************************************/
37struct surface {
38 struct core_surface protected;
39 enum dc_irq_source irq_source;
40 int ref_count;
41};
42
43struct gamma {
44 struct core_gamma protected;
45 int ref_count;
46};
47
48#define DC_SURFACE_TO_SURFACE(dc_surface) container_of(dc_surface, struct surface, protected.public)
49#define CORE_SURFACE_TO_SURFACE(core_surface) container_of(core_surface, struct surface, protected)
50
51#define DC_GAMMA_TO_GAMMA(dc_gamma) \
52 container_of(dc_gamma, struct gamma, protected.public)
53#define CORE_GAMMA_TO_GAMMA(core_gamma) \
54 container_of(core_gamma, struct gamma, protected)
55
56/*******************************************************************************
57 * Private functions
58 ******************************************************************************/
59static bool construct(struct dc_context *ctx, struct surface *surface)
60{
61 surface->protected.ctx = ctx;
62 return true;
63}
64
65static void destruct(struct surface *surface)
66{
67
68}
69
70/*******************************************************************************
71 * Public functions
72 ******************************************************************************/
73void enable_surface_flip_reporting(struct dc_surface *dc_surface,
74 uint32_t controller_id)
75{
76 struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
77 surface->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
78 /*register_flip_interrupt(surface);*/
79}
80
81struct dc_surface *dc_create_surface(const struct dc *dc)
82{
83 struct core_dc *core_dc = DC_TO_CORE(dc);
84
85 struct surface *surface = dm_alloc(sizeof(*surface));
86
87 if (NULL == surface)
88 goto alloc_fail;
89
90 if (false == construct(core_dc->ctx, surface))
91 goto construct_fail;
92
93 dc_surface_retain(&surface->protected.public);
94
95 return &surface->protected.public;
96
97construct_fail:
98 dm_free(surface);
99
100alloc_fail:
101 return NULL;
102}
103
104const struct dc_surface_status *dc_surface_get_status(
105 const struct dc_surface *dc_surface)
106{
107 struct dc_surface_status *surface_status;
108 struct core_surface *core_surface;
109 struct core_dc *core_dc;
110 int i;
111
112 if (dc_surface == NULL)
113 return NULL;
114
115 core_surface = DC_SURFACE_TO_CORE(dc_surface);
116
117 if (core_surface == NULL || core_surface->ctx == NULL)
118 return NULL;
119
120 surface_status = &core_surface->status;
121
122 if (core_surface->ctx == NULL || core_surface->ctx->dc == NULL)
123 return NULL;
124
125 core_dc = DC_TO_CORE(core_surface->ctx->dc);
126
127
128 if (core_dc->current_context == NULL)
129 return NULL;
130
131 for (i = 0; i < core_dc->current_context->res_ctx.pool->pipe_count;
132 i++) {
133 struct pipe_ctx *pipe_ctx =
134 &core_dc->current_context->res_ctx.pipe_ctx[i];
135
136 if (pipe_ctx->surface !=
137 DC_SURFACE_TO_CORE(dc_surface))
138 continue;
139
140 core_dc->hwss.update_pending_status(pipe_ctx);
141 }
142
143 return surface_status;
144}
145
146void dc_surface_retain(const struct dc_surface *dc_surface)
147{
148 struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
149
150 ++surface->ref_count;
151}
152
153void dc_surface_release(const struct dc_surface *dc_surface)
154{
155 struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
156
157 --surface->ref_count;
158
159 if (surface->ref_count == 0) {
160 destruct(surface);
161 dm_free(surface);
162 }
163}
164
165static bool construct_gamma(struct gamma *gamma)
166{
167 return true;
168}
169
170static void destruct_gamma(struct gamma *gamma)
171{
172
173}
174
175void dc_gamma_retain(const struct dc_gamma *dc_gamma)
176{
177 struct gamma *gamma = DC_GAMMA_TO_GAMMA(dc_gamma);
178
179 ++gamma->ref_count;
180}
181
182void dc_gamma_release(const struct dc_gamma *dc_gamma)
183{
184 struct gamma *gamma = DC_GAMMA_TO_GAMMA(dc_gamma);
185 --gamma->ref_count;
186
187 if (gamma->ref_count == 0) {
188 destruct_gamma(gamma);
189 dm_free(gamma);
190 }
191}
192
193struct dc_gamma *dc_create_gamma(void)
194{
195 struct gamma *gamma = dm_alloc(sizeof(*gamma));
196
197 if (gamma == NULL)
198 goto alloc_fail;
199
200 if (false == construct_gamma(gamma))
201 goto construct_fail;
202
203 dc_gamma_retain(&gamma->protected.public);
204
205 return &gamma->protected.public;
206
207construct_fail:
208 dm_free(gamma);
209
210alloc_fail:
211 return NULL;
212}
213
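
As a hedged illustration of the surface lifecycle defined above (the dc pointer is assumed to come from dc_create(); the helper name is made up):

static void example_surface_lifecycle(const struct dc *dc)
{
	struct dc_surface *surface = dc_create_surface(dc);
	const struct dc_surface_status *status;

	if (surface == NULL)
		return;

	status = dc_surface_get_status(surface);
	if (status != NULL && status->is_flip_pending)
		; /* a previously requested flip has not been taken by hardware yet */

	dc_surface_release(surface);	/* frees once ref_count reaches zero */
}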
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_target.c b/drivers/gpu/drm/amd/display/dc/core/dc_target.c
new file mode 100644
index 000000000000..48eb7b0e0350
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_target.c
@@ -0,0 +1,334 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "core_types.h"
28#include "hw_sequencer.h"
29#include "resource.h"
30#include "ipp.h"
31#include "timing_generator.h"
32
33struct target {
34 struct core_target protected;
35 int ref_count;
36};
37
38#define DC_TARGET_TO_TARGET(dc_target) \
39 container_of(dc_target, struct target, protected.public)
40#define CORE_TARGET_TO_TARGET(core_target) \
41 container_of(core_target, struct target, protected)
42
43static void construct(
44 struct core_target *target,
45 struct dc_context *ctx,
46 struct dc_stream *dc_streams[],
47 uint8_t stream_count)
48{
49 uint8_t i;
50 for (i = 0; i < stream_count; i++) {
51 target->public.streams[i] = dc_streams[i];
52 dc_stream_retain(dc_streams[i]);
53 }
54
55 target->ctx = ctx;
56 target->public.stream_count = stream_count;
57}
58
59static void destruct(struct core_target *core_target)
60{
61 int i;
62
63 for (i = 0; i < core_target->public.stream_count; i++) {
64 dc_stream_release(
65 (struct dc_stream *)core_target->public.streams[i]);
66 core_target->public.streams[i] = NULL;
67 }
68}
69
70void dc_target_retain(const struct dc_target *dc_target)
71{
72 struct target *target = DC_TARGET_TO_TARGET(dc_target);
73
74 target->ref_count++;
75}
76
77void dc_target_release(const struct dc_target *dc_target)
78{
79 struct target *target = DC_TARGET_TO_TARGET(dc_target);
80 struct core_target *protected = DC_TARGET_TO_CORE(dc_target);
81
82 ASSERT(target->ref_count > 0);
83 target->ref_count--;
84 if (target->ref_count == 0) {
85 destruct(protected);
86 dm_free(target);
87 }
88}
89
90const struct dc_target_status *dc_target_get_status(
91 const struct dc_target* dc_target)
92{
93 uint8_t i;
94 struct core_target* target = DC_TARGET_TO_CORE(dc_target);
95 struct core_dc *dc = DC_TO_CORE(target->ctx->dc);
96
97 for (i = 0; i < dc->current_context->target_count; i++)
98 if (target == dc->current_context->targets[i])
99 return &dc->current_context->target_status[i];
100
101 return NULL;
102}
103
104struct dc_target *dc_create_target_for_streams(
105 struct dc_stream *dc_streams[],
106 uint8_t stream_count)
107{
108 struct core_stream *stream;
109 struct target *target;
110
111 if (0 == stream_count)
112 goto target_alloc_fail;
113
114 stream = DC_STREAM_TO_CORE(dc_streams[0]);
115
116 target = dm_alloc(sizeof(struct target));
117
118 if (NULL == target)
119 goto target_alloc_fail;
120
121 construct(&target->protected, stream->ctx, dc_streams, stream_count);
122
123 dc_target_retain(&target->protected.public);
124
125 return &target->protected.public;
126
127target_alloc_fail:
128 return NULL;
129}
130
131bool dc_target_is_connected_to_sink(
132 const struct dc_target * dc_target,
133 const struct dc_sink *dc_sink)
134{
135 struct core_target *target = DC_TARGET_TO_CORE(dc_target);
136 uint8_t i;
137 for (i = 0; i < target->public.stream_count; i++) {
138 if (target->public.streams[i]->sink == dc_sink)
139 return true;
140 }
141 return false;
142}
143
144/**
145 * Update the cursor attributes and set cursor surface address
146 */
147bool dc_target_set_cursor_attributes(
148 struct dc_target *dc_target,
149 const struct dc_cursor_attributes *attributes)
150{
151 uint8_t i, j;
152 struct core_target *target;
153 struct core_dc *core_dc;
154 struct resource_context *res_ctx;
155
156 if (NULL == dc_target) {
157 dm_error("DC: dc_target is NULL!\n");
158 return false;
159
160 }
161 if (NULL == attributes) {
162 dm_error("DC: attributes is NULL!\n");
163 return false;
164
165 }
166
167 target = DC_TARGET_TO_CORE(dc_target);
168 core_dc = DC_TO_CORE(target->ctx->dc);
169 res_ctx = &core_dc->current_context->res_ctx;
170
171 for (i = 0; i < target->public.stream_count; i++) {
172 for (j = 0; j < MAX_PIPES; j++) {
173 struct input_pixel_processor *ipp =
174 res_ctx->pipe_ctx[j].ipp;
175
176 if (res_ctx->pipe_ctx[j].stream !=
177 DC_STREAM_TO_CORE(target->public.streams[i]))
178 continue;
179
180			/* As of the writing of this code, the cursor is on the
181			 * top plane, so we only need to set it on the first pipe
182			 * we find. This may need to become DCE-specific later.
183 */
184 if (ipp->funcs->ipp_cursor_set_attributes(
185 ipp, attributes))
186 return true;
187 }
188 }
189
190 return false;
191}
192
193bool dc_target_set_cursor_position(
194 struct dc_target *dc_target,
195 const struct dc_cursor_position *position)
196{
197 uint8_t i, j;
198 struct core_target *target;
199 struct core_dc *core_dc;
200 struct resource_context *res_ctx;
201
202 if (NULL == dc_target) {
203 dm_error("DC: dc_target is NULL!\n");
204 return false;
205 }
206
207 if (NULL == position) {
208 dm_error("DC: cursor position is NULL!\n");
209 return false;
210 }
211
212 target = DC_TARGET_TO_CORE(dc_target);
213 core_dc = DC_TO_CORE(target->ctx->dc);
214 res_ctx = &core_dc->current_context->res_ctx;
215
216 for (i = 0; i < target->public.stream_count; i++) {
217 for (j = 0; j < MAX_PIPES; j++) {
218 struct input_pixel_processor *ipp =
219 res_ctx->pipe_ctx[j].ipp;
220
221 if (res_ctx->pipe_ctx[j].stream !=
222 DC_STREAM_TO_CORE(target->public.streams[i]))
223 continue;
224
225			/* As of the writing of this code, the cursor is on the
226			 * top plane, so we only need to set it on the first pipe
227			 * we find. This may need to become DCE-specific later.
228 */
229 ipp->funcs->ipp_cursor_set_position(ipp, position);
230 return true;
231 }
232 }
233
234 return false;
235}
236
237uint32_t dc_target_get_vblank_counter(const struct dc_target *dc_target)
238{
239 uint8_t i, j;
240 struct core_target *target = DC_TARGET_TO_CORE(dc_target);
241 struct core_dc *core_dc = DC_TO_CORE(target->ctx->dc);
242 struct resource_context *res_ctx =
243 &core_dc->current_context->res_ctx;
244
245 for (i = 0; i < target->public.stream_count; i++) {
246 for (j = 0; j < MAX_PIPES; j++) {
247 struct timing_generator *tg = res_ctx->pipe_ctx[j].tg;
248
249 if (res_ctx->pipe_ctx[j].stream !=
250 DC_STREAM_TO_CORE(target->public.streams[i]))
251 continue;
252
253 return tg->funcs->get_frame_count(tg);
254 }
255 }
256
257 return 0;
258}
259
260uint32_t dc_target_get_scanoutpos(
261 const struct dc_target *dc_target,
262 uint32_t *vbl,
263 uint32_t *position)
264{
265 uint8_t i, j;
266 struct core_target *target = DC_TARGET_TO_CORE(dc_target);
267 struct core_dc *core_dc = DC_TO_CORE(target->ctx->dc);
268 struct resource_context *res_ctx =
269 &core_dc->current_context->res_ctx;
270
271 for (i = 0; i < target->public.stream_count; i++) {
272 for (j = 0; j < MAX_PIPES; j++) {
273 struct timing_generator *tg = res_ctx->pipe_ctx[j].tg;
274
275 if (res_ctx->pipe_ctx[j].stream !=
276 DC_STREAM_TO_CORE(target->public.streams[i]))
277 continue;
278
279 return tg->funcs->get_scanoutpos(tg, vbl, position);
280 }
281 }
282
283 return 0;
284}
285
286void dc_target_log(
287 const struct dc_target *dc_target,
288 struct dal_logger *dm_logger,
289 enum dc_log_type log_type)
290{
291 int i;
292
293 const struct core_target *core_target =
294 CONST_DC_TARGET_TO_CORE(dc_target);
295
296 dm_logger_write(dm_logger,
297 log_type,
298 "core_target 0x%x: stream_count=%d\n",
299 core_target,
300 core_target->public.stream_count);
301
302 for (i = 0; i < core_target->public.stream_count; i++) {
303 const struct core_stream *core_stream =
304 DC_STREAM_TO_CORE(core_target->public.streams[i]);
305
306 dm_logger_write(dm_logger,
307 log_type,
308 "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d;\n",
309 core_stream,
310 core_stream->public.src.x,
311 core_stream->public.src.y,
312 core_stream->public.src.width,
313 core_stream->public.src.height,
314 core_stream->public.dst.x,
315 core_stream->public.dst.y,
316 core_stream->public.dst.width,
317 core_stream->public.dst.height);
318 dm_logger_write(dm_logger,
319 log_type,
320 "\tpix_clk_khz: %d, h_total: %d, v_total: %d\n",
321 core_stream->public.timing.pix_clk_khz,
322 core_stream->public.timing.h_total,
323 core_stream->public.timing.v_total);
324 dm_logger_write(dm_logger,
325 log_type,
326 "\tsink name: %s, serial: %d\n",
327 core_stream->sink->public.edid_caps.display_name,
328 core_stream->sink->public.edid_caps.serial_number);
329 dm_logger_write(dm_logger,
330 log_type,
331 "\tlink: %d\n",
332 core_stream->sink->link->public.link_index);
333 }
334}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
new file mode 100644
index 000000000000..5575484323b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -0,0 +1,780 @@
1/*
2 * Copyright 2012-14 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef DC_INTERFACE_H_
27#define DC_INTERFACE_H_
28
29#include "dc_types.h"
30#include "dpcd_defs.h"
31#include "grph_object_defs.h"
32#include "logger_types.h"
33#include "gpio_types.h"
34#include "link_service_types.h"
35
36#define MAX_TARGETS 6
37#define MAX_SURFACES 6
38#define MAX_SINKS_PER_LINK 4
39
40/*******************************************************************************
41 * Display Core Interfaces
42 ******************************************************************************/
43
44struct dc_caps {
45 uint32_t max_targets;
46 uint32_t max_links;
47 uint32_t max_audios;
48 uint32_t max_slave_planes;
49 uint32_t max_downscale_ratio;
50 uint32_t i2c_speed_in_khz;
51};
52
53
54struct dc_dcc_surface_param {
55 enum surface_pixel_format format;
56 struct dc_size surface_size;
57 enum dc_scan_direction scan;
58};
59
60struct dc_dcc_setting {
61 unsigned int max_compressed_blk_size;
62 unsigned int max_uncompressed_blk_size;
63 bool independent_64b_blks;
64};
65
66struct dc_surface_dcc_cap {
67 bool capable;
68 bool const_color_support;
69
70 union {
71 struct {
72 struct dc_dcc_setting rgb;
73 } grph;
74
75 struct {
76 struct dc_dcc_setting luma;
77 struct dc_dcc_setting chroma;
78 } video;
79 };
80};
81
82/* Forward declaration*/
83struct dc;
84struct dc_surface;
85struct validate_context;
86
87struct dc_cap_funcs {
88 int i;
89};
90
91struct dc_stream_funcs {
92 bool (*adjust_vmin_vmax)(struct dc *dc,
93 const struct dc_stream **stream,
94 int num_streams,
95 int vmin,
96 int vmax);
97
98 void (*stream_update_scaling)(const struct dc *dc,
99 const struct dc_stream *dc_stream,
100 const struct rect *src,
101 const struct rect *dst);
102 bool (*set_gamut_remap)(struct dc *dc,
103 const struct dc_stream **stream, int num_streams);
104 bool (*set_backlight)(struct dc *dc, unsigned int backlight_level,
105 unsigned int frame_ramp, const struct dc_stream *stream);
106 bool (*init_dmcu_backlight_settings)(struct dc *dc);
107 bool (*set_abm_level)(struct dc *dc, unsigned int abm_level);
108 bool (*set_psr_enable)(struct dc *dc, bool enable);
109 bool (*setup_psr)(struct dc *dc, const struct dc_stream *stream);
110};
111
112struct link_training_settings;
113
114struct dc_link_funcs {
115 void (*set_drive_settings)(struct dc *dc,
116 struct link_training_settings *lt_settings);
117 void (*perform_link_training)(struct dc *dc,
118 struct dc_link_settings *link_setting,
119 bool skip_video_pattern);
120 void (*set_preferred_link_settings)(struct dc *dc,
121 struct dc_link_settings *link_setting);
122 void (*enable_hpd)(const struct dc_link *link);
123 void (*disable_hpd)(const struct dc_link *link);
124 void (*set_test_pattern)(
125 const struct dc_link *link,
126 enum dp_test_pattern test_pattern,
127 const struct link_training_settings *p_link_settings,
128 const unsigned char *p_custom_pattern,
129 unsigned int cust_pattern_size);
130};
131
132/* Structure to hold configuration flags set by dm at dc creation. */
133struct dc_config {
134 bool gpu_vm_support;
135 bool disable_disp_pll_sharing;
136};
137
138struct dc_debug {
139 bool surface_visual_confirm;
140 bool max_disp_clk;
141 bool target_trace;
142 bool surface_trace;
143 bool validation_trace;
144 bool disable_stutter;
145 bool disable_dcc;
146 bool disable_dfs_bypass;
147 bool disable_power_gate;
148 bool disable_clock_gate;
149};
150
151struct dc {
152 struct dc_caps caps;
153 struct dc_cap_funcs cap_funcs;
154 struct dc_stream_funcs stream_funcs;
155 struct dc_link_funcs link_funcs;
156 struct dc_config config;
157 struct dc_debug debug;
158};
159
160enum frame_buffer_mode {
161 FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
162 FRAME_BUFFER_MODE_ZFB_ONLY,
163 FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL,
164} ;
165
166struct dchub_init_data {
167 bool dchub_initialzied;
168 bool dchub_info_valid;
169 int64_t zfb_phys_addr_base;
170 int64_t zfb_mc_base_addr;
171 uint64_t zfb_size_in_byte;
172 enum frame_buffer_mode fb_mode;
173};
174
175struct dc_init_data {
176 struct hw_asic_id asic_id;
177 void *driver; /* ctx */
178 struct cgs_device *cgs_device;
179
180 int num_virtual_links;
181 /*
182	 * If 'vbios_override' is not NULL, it will be used instead
183	 * of the real VBIOS. The intended use is diagnostics on FPGA.
184 */
185 struct dc_bios *vbios_override;
186 enum dce_environment dce_environment;
187
188 struct dc_config flags;
189};
190
191struct dc *dc_create(const struct dc_init_data *init_params);
192
193void dc_destroy(struct dc **dc);
194
195bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data);
196
197/*******************************************************************************
198 * Surface Interfaces
199 ******************************************************************************/
200
201enum {
202 RGB_256X3X16 = 256,
203 FLOAT_GAMMA_RAMP_MAX = 1025
204};
205
206enum dc_gamma_ramp_type {
207 GAMMA_RAMP_RBG256X3X16,
208 GAMMA_RAMP_FLOAT,
209};
210
211struct float_rgb {
212 struct fixed32_32 red;
213 struct fixed32_32 green;
214 struct fixed32_32 blue;
215};
216
217struct dc_gamma_ramp_float {
218 struct float_rgb scale;
219 struct float_rgb offset;
220 struct float_rgb gamma_curve[FLOAT_GAMMA_RAMP_MAX];
221};
222
223struct dc_gamma_ramp_rgb256x3x16 {
224 uint16_t red[RGB_256X3X16];
225 uint16_t green[RGB_256X3X16];
226 uint16_t blue[RGB_256X3X16];
227};
228
229struct dc_gamma {
230 enum dc_gamma_ramp_type type;
231 union {
232 struct dc_gamma_ramp_rgb256x3x16 gamma_ramp_rgb256x3x16;
233 struct dc_gamma_ramp_float gamma_ramp_float;
234 };
235 uint32_t size;
236};
237
238struct dc_surface {
239 bool visible;
240 bool flip_immediate;
241 struct dc_plane_address address;
242
243 struct scaling_taps scaling_quality;
244 struct rect src_rect;
245 struct rect dst_rect;
246 struct rect clip_rect;
247
248 union plane_size plane_size;
249 union dc_tiling_info tiling_info;
250 struct dc_plane_dcc_param dcc;
251 enum dc_color_space color_space;
252
253 enum surface_pixel_format format;
254 enum dc_rotation_angle rotation;
255 bool horizontal_mirror;
256 enum plane_stereo_format stereo_format;
257
258 const struct dc_gamma *gamma_correction;
259};
260
261struct dc_plane_info {
262 union plane_size plane_size;
263 union dc_tiling_info tiling_info;
264 enum surface_pixel_format format;
265 enum dc_rotation_angle rotation;
266 bool horizontal_mirror;
267 enum plane_stereo_format stereo_format;
268 enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
269 bool visible;
270};
271
272struct dc_scaling_info {
273 struct rect src_rect;
274 struct rect dst_rect;
275 struct rect clip_rect;
276 struct scaling_taps scaling_quality;
277};
278
279struct dc_surface_update {
280 const struct dc_surface *surface;
281
282 /* isr safe update parameters. null means no updates */
283 struct dc_flip_addrs *flip_addr;
284 struct dc_plane_info *plane_info;
285 struct dc_scaling_info *scaling_info;
286 /* following updates require alloc/sleep/spin that is not isr safe,
287 * null means no updates
288 */
289 struct dc_gamma *gamma;
290
291
292};
293/*
294 * This structure is filled in by dc_surface_get_status and contains
295 * the last requested address and the currently active address so the caller
296 * can determine whether there are any outstanding flips
297 */
298struct dc_surface_status {
299 struct dc_plane_address requested_address;
300 struct dc_plane_address current_address;
301 bool is_flip_pending;
302};
303
304/*
305 * Create a new surface with default parameters;
306 */
307struct dc_surface *dc_create_surface(const struct dc *dc);
308const struct dc_surface_status *dc_surface_get_status(
309 const struct dc_surface *dc_surface);
310
311void dc_surface_retain(const struct dc_surface *dc_surface);
312void dc_surface_release(const struct dc_surface *dc_surface);
313
314void dc_gamma_release(const struct dc_gamma *dc_gamma);
315struct dc_gamma *dc_create_gamma(void);
316
317/*
318 * This structure holds a surface address. There could be multiple addresses
319 * in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
320 * as frame durations and DCC format can also be set.
321 */
322struct dc_flip_addrs {
323 struct dc_plane_address address;
324 bool flip_immediate;
325 /* TODO: DCC format info */
326 /* TODO: add flip duration for FreeSync */
327};
328
329/*
330 * Optimized flip address update function.
331 *
332 * After this call:
333 * Surface addresses and flip attributes are programmed.
334 *	The surface flip occurs at the next configured time (h_sync or v_sync flip)
335 */
336void dc_flip_surface_addrs(struct dc *dc,
337 const struct dc_surface *const surfaces[],
338 struct dc_flip_addrs flip_addrs[],
339 uint32_t count);
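
A hedged page-flip sketch using this interface; the helper and the new address are placeholders, and the surface is assumed to already be committed to a target.

static void example_flip(struct dc *dc, const struct dc_surface *surface,
		struct dc_plane_address new_address)
{
	struct dc_flip_addrs flip = {
		.address = new_address,
		.flip_immediate = false,	/* flip at the next v_sync */
	};
	const struct dc_surface *const surfaces[] = { surface };

	dc_flip_surface_addrs(dc, surfaces, &flip, 1);
}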
340
341/*
342 * Set up surface attributes and associate to a target
343 * The surfaces parameter is an absolute set of all surfaces active for the target.
344 * If no surfaces are provided, the target will be blanked; no memory read.
345 * Any flip related attribute changes must be done through this interface.
346 *
347 * After this call:
348 *	Surface attributes are programmed and configured to be composed into the target.
349 * This does not trigger a flip. No surface address is programmed.
350 */
351
352bool dc_commit_surfaces_to_target(
353 struct dc *dc,
354 const struct dc_surface **dc_surfaces,
355 uint8_t surface_count,
356 struct dc_target *dc_target);
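
A minimal sketch of attaching a single visible surface to an already-committed target; passing a surface count of zero instead would blank the target, as described above.

static void example_commit_surface(struct dc *dc,
		const struct dc_surface *surface, struct dc_target *target)
{
	const struct dc_surface *surfaces[] = { surface };

	if (!dc_commit_surfaces_to_target(dc, surfaces, 1, target))
		dm_error("DC: failed to commit surfaces to target!\n");
}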
357
358bool dc_pre_update_surfaces_to_target(
359 struct dc *dc,
360 const struct dc_surface *const *new_surfaces,
361 uint8_t new_surface_count,
362 struct dc_target *dc_target);
363
364bool dc_post_update_surfaces_to_target(
365 struct dc *dc);
366
367void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *updates,
368 int surface_count, struct dc_target *dc_target);
369
370/*******************************************************************************
371 * Target Interfaces
372 ******************************************************************************/
373#define MAX_STREAM_NUM 1
374
375struct dc_target {
376 uint8_t stream_count;
377 const struct dc_stream *streams[MAX_STREAM_NUM];
378};
379
380/*
381 * Target status is returned from dc_target_get_status in order to get
382 * the IRQ source, current frame counter and currently attached surfaces.
383 */
384struct dc_target_status {
385 int primary_otg_inst;
386 int cur_frame_count;
387 int surface_count;
388 const struct dc_surface *surfaces[MAX_SURFACE_NUM];
389};
390
391struct dc_target *dc_create_target_for_streams(
392 struct dc_stream *dc_streams[],
393 uint8_t stream_count);
394
395/*
396 * Get the current target status.
397 */
398const struct dc_target_status *dc_target_get_status(
399 const struct dc_target* dc_target);
400
401void dc_target_retain(const struct dc_target *dc_target);
402void dc_target_release(const struct dc_target *dc_target);
403void dc_target_log(
404 const struct dc_target *dc_target,
405 struct dal_logger *dc_logger,
406 enum dc_log_type log_type);
407
408uint8_t dc_get_current_target_count(const struct dc *dc);
409struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i);
410
411bool dc_target_is_connected_to_sink(
412 const struct dc_target *dc_target,
413 const struct dc_sink *dc_sink);
414
415uint32_t dc_target_get_vblank_counter(const struct dc_target *dc_target);
416
417/* TODO: Return parsed values rather than direct register read
418 * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
419 * being refactored properly to be dce-specific
420 */
421uint32_t dc_target_get_scanoutpos(
422 const struct dc_target *dc_target,
423 uint32_t *vbl,
424 uint32_t *position);
425
426/*
427 * Structure to store surface/target associations for validation
428 */
429struct dc_validation_set {
430 const struct dc_target *target;
431 const struct dc_surface *surfaces[MAX_SURFACES];
432 uint8_t surface_count;
433};
434
435/*
436 * This function takes a set of resources and checks that they are cofunctional.
437 *
438 * After this call:
439 *	No hardware is programmed by this call. Only validation is done.
440 */
441bool dc_validate_resources(
442 const struct dc *dc,
443 const struct dc_validation_set set[],
444 uint8_t set_count);
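
An illustrative cofunctionality check, assuming the target and surface were created through the interfaces above; no hardware is touched.

static bool example_validate(const struct dc *dc,
		const struct dc_target *target,
		const struct dc_surface *surface)
{
	struct dc_validation_set set = {
		.target = target,
		.surfaces = { surface },
		.surface_count = 1,
	};

	return dc_validate_resources(dc, &set, 1);
}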
445
446/*
447 * This function takes a target and checks if it is guaranteed to be supported.
448 * Guaranteed means that MAX_COFUNC*target is supported.
449 *
450 * After this call:
451 *	No hardware is programmed by this call. Only validation is done.
452 */
453
454bool dc_validate_guaranteed(
455 const struct dc *dc,
456 const struct dc_target *dc_target);
457
458/*
459 * Set up the streams and links associated with the targets to drive the sinks
460 * The targets parameter is an absolute set of all active targets.
461 *
462 * After this call:
463 * Phy, Encoder, Timing Generator are programmed and enabled.
464 * New targets are enabled with blank stream; no memory read.
465 */
466bool dc_commit_targets(
467 struct dc *dc,
468 struct dc_target *targets[],
469 uint8_t target_count);
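
A hedged mode-set sketch tying the stream and target interfaces together; names are illustrative.

static struct dc_target *example_commit_one_target(struct dc *dc,
		struct dc_stream *stream)
{
	struct dc_stream *streams[] = { stream };
	struct dc_target *target = dc_create_target_for_streams(streams, 1);

	if (target != NULL && !dc_commit_targets(dc, &target, 1)) {
		dc_target_release(target);
		return NULL;
	}

	/* the target is now driven with a blank stream until surfaces commit */
	return target;
}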
470
471/*******************************************************************************
472 * Stream Interfaces
473 ******************************************************************************/
474struct dc_stream {
475 const struct dc_sink *sink;
476 struct dc_crtc_timing timing;
477
478 enum dc_color_space output_color_space;
479
480 struct rect src; /* viewport in target space*/
481 struct rect dst; /* stream addressable area */
482
483 struct audio_info audio_info;
484
485 bool ignore_msa_timing_param;
486
487 struct freesync_context freesync_ctx;
488
489 /* TODO: dithering */
490 /* TODO: transfer function (CSC/regamma/gamut remap) */
491 struct colorspace_transform gamut_remap_matrix;
492 struct csc_transform csc_color_matrix;
493 /* TODO: custom INFO packets */
494 /* TODO: ABM info (DMCU) */
495 /* TODO: PSR info */
496 /* TODO: CEA VIC */
497};
498
499/**
500 * Create a new default stream for the requested sink
501 */
502struct dc_stream *dc_create_stream_for_sink(const struct dc_sink *dc_sink);
503
504void dc_stream_retain(const struct dc_stream *dc_stream);
505void dc_stream_release(const struct dc_stream *dc_stream);
506
507struct dc_stream_status {
508 /*
509 * link this stream passes through
510 */
511 const struct dc_link *link;
512};
513
514const struct dc_stream_status *dc_stream_get_status(
515 const struct dc_stream *dc_stream);
516
517/*******************************************************************************
518 * Link Interfaces
519 ******************************************************************************/
520
521/*
522 * A link contains one or more sinks and their connected status.
523 * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
524 */
525struct dc_link {
526 const struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
527 unsigned int sink_count;
528 const struct dc_sink *local_sink;
529 unsigned int link_index;
530 enum dc_connection_type type;
531 enum signal_type connector_signal;
532 enum dc_irq_source irq_source_hpd;
533 enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
534	/* caps is the same as reported_link_cap. Link training uses
535	 * reported_link_cap. Will clean up. TODO
536 */
537 struct dc_link_settings reported_link_cap;
538 struct dc_link_settings verified_link_cap;
539 struct dc_link_settings max_link_setting;
540 struct dc_link_settings cur_link_settings;
541 struct dc_lane_settings cur_lane_setting;
542
543 uint8_t ddc_hw_inst;
544 uint8_t link_enc_hw_inst;
545
546 struct psr_caps psr_caps;
547 bool test_pattern_enabled;
548 union compliance_test_state compliance_test_state;
549};
550
551struct dpcd_caps {
552 union dpcd_rev dpcd_rev;
553 union max_lane_count max_ln_count;
554 union max_down_spread max_down_spread;
555
556 /* dongle type (DP converter, CV smart dongle) */
557 enum display_dongle_type dongle_type;
558 /* Dongle's downstream count. */
559 union sink_count sink_count;
560 /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
561	   indicates 'Frame Sequential-to-Frame Pack' conversion capability.*/
562 bool is_dp_hdmi_s3d_converter;
563
564 bool allow_invalid_MSA_timing_param;
565 bool panel_mode_edp;
566 uint32_t sink_dev_id;
567 uint32_t branch_dev_id;
568 int8_t branch_dev_name[6];
569 int8_t branch_hw_revision;
570};
571
572struct dc_link_status {
573 struct dpcd_caps *dpcd_caps;
574};
575
576const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
577
578/*
579 * Return an enumerated dc_link. dc_link order is constant and determined at
580 * boot time. They cannot be created or destroyed.
581 * Use dc_get_caps() to get number of links.
582 */
583const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index);
584
585/* Return id of physical connector represented by a dc_link at link_index.*/
586const struct graphics_object_id dc_get_link_id_at_index(
587 struct dc *dc, uint32_t link_index);
588
589/* Set backlight level of an embedded panel (eDP, LVDS). */
590bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
591 uint32_t frame_ramp, const struct dc_stream *stream);
592
593bool dc_link_init_dmcu_backlight_settings(const struct dc_link *dc_link);
594
595bool dc_link_set_abm_level(const struct dc_link *dc_link, uint32_t level);
596
597bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable);
598
599bool dc_link_setup_psr(const struct dc_link *dc_link,
600 const struct dc_stream *stream);
601
602/* Request DC to detect if there is a Panel connected.
603 * boot - If this call is during initial boot.
604 * Return false for any type of detection failure or MST detection.
605 * Return true otherwise, meaning further action is required (status update
606 * and OS notification).
607 */
608bool dc_link_detect(const struct dc_link *dc_link, bool boot);
609
610/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
611 * Return:
612 * true - Downstream port status changed. DM should call DC to do the
613 * detection.
614 * false - no change in Downstream port status. No further action required
615 * from DM. */
616bool dc_link_handle_hpd_rx_irq(const struct dc_link *dc_link);
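
A hedged sketch of how a DM interrupt handler might combine the two detection entry points above; is_short_pulse and the follow-up OS notification are assumptions.

static void example_handle_hpd(const struct dc_link *link, bool is_short_pulse)
{
	if (is_short_pulse) {
		/* DP short pulse: re-detect only if downstream status changed */
		if (dc_link_handle_hpd_rx_irq(link))
			dc_link_detect(link, false);
	} else {
		/* long pulse: full detection; true means DM must update its
		 * state and notify the OS */
		if (dc_link_detect(link, false))
			; /* update connector state, send hotplug event */
	}
}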
617
618struct dc_sink_init_data;
619
620struct dc_sink *dc_link_add_remote_sink(
621 const struct dc_link *dc_link,
622 const uint8_t *edid,
623 int len,
624 struct dc_sink_init_data *init_data);
625
626void dc_link_remove_remote_sink(
627 const struct dc_link *link,
628 const struct dc_sink *sink);
629
630/* Used by diagnostics for virtual link at the moment */
631void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink);
632
633void dc_link_dp_set_drive_settings(
634 struct dc_link *link,
635 struct link_training_settings *lt_settings);
636
637bool dc_link_dp_perform_link_training(
638 struct dc_link *link,
639 const struct dc_link_settings *link_setting,
640 bool skip_video_pattern);
641
642void dc_link_dp_enable_hpd(const struct dc_link *link);
643
644void dc_link_dp_disable_hpd(const struct dc_link *link);
645
646bool dc_link_dp_set_test_pattern(
647 const struct dc_link *link,
648 enum dp_test_pattern test_pattern,
649 const struct link_training_settings *p_link_settings,
650 const unsigned char *p_custom_pattern,
651 unsigned int cust_pattern_size);
652
653/*******************************************************************************
654 * Sink Interfaces - A sink corresponds to a display output device
655 ******************************************************************************/
656
657/*
658 * The sink structure contains EDID and other display device properties
659 */
660struct dc_sink {
661 enum signal_type sink_signal;
662 struct dc_edid dc_edid; /* raw edid */
663	struct dc_edid_caps edid_caps; /* parsed display caps */
664};
665
666void dc_sink_retain(const struct dc_sink *sink);
667void dc_sink_release(const struct dc_sink *sink);
668
669const struct audio **dc_get_audios(struct dc *dc);
670
671struct dc_sink_init_data {
672 enum signal_type sink_signal;
673 const struct dc_link *link;
674 uint32_t dongle_max_pix_clk;
675 bool converter_disable_audio;
676};
677
678struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
679
680/*******************************************************************************
681 * Cursor interfaces - To manage the cursor within a target
682 ******************************************************************************/
683/* TODO: Deprecated once we switch to dc_set_cursor_position */
684bool dc_target_set_cursor_attributes(
685 struct dc_target *dc_target,
686 const struct dc_cursor_attributes *attributes);
687
688bool dc_target_set_cursor_position(
689 struct dc_target *dc_target,
690 const struct dc_cursor_position *position);
691
692/* Newer interfaces */
693struct dc_cursor {
694 struct dc_plane_address address;
695 struct dc_cursor_attributes attributes;
696};
697
698/*
699 * Create a new cursor with default values for a given target.
700 */
701struct dc_cursor *dc_create_cursor_for_target(
702 const struct dc *dc,
703 struct dc_target *dc_target);
704
705/**
706 * Commit cursor attribute changes such as pixel format, dimensions and
707 * surface address.
708 *
709 * After this call:
710 * Cursor address and format is programmed to the new values.
711 * Cursor position is unmodified.
712 */
713bool dc_commit_cursor(
714 const struct dc *dc,
715 struct dc_cursor *cursor);
716
717/*
718 * Optimized cursor position update
719 *
720 * After this call:
721 * Cursor position will be programmed as well as enable/disable bit.
722 */
723bool dc_set_cursor_position(
724 const struct dc *dc,
725 struct dc_cursor *cursor,
726 struct dc_cursor_position *pos);
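
A hedged sketch of the newer cursor flow: commit the attributes once, then update the position per movement; the cursor is assumed to come from dc_create_cursor_for_target().

static void example_show_cursor(const struct dc *dc, struct dc_cursor *cursor,
		struct dc_cursor_position *pos)
{
	/* program pixel format, dimensions and address, then the position */
	if (dc_commit_cursor(dc, cursor))
		dc_set_cursor_position(dc, cursor, pos);
}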
727
728/*******************************************************************************
729 * Interrupt interfaces
730 ******************************************************************************/
731enum dc_irq_source dc_interrupt_to_irq_source(
732 struct dc *dc,
733 uint32_t src_id,
734 uint32_t ext_id);
735void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable);
736void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
737enum dc_irq_source dc_get_hpd_irq_source_at_index(
738 struct dc *dc, uint32_t link_index);
739
740/*******************************************************************************
741 * Power Interfaces
742 ******************************************************************************/
743
744void dc_set_power_state(
745 struct dc *dc,
746 enum dc_acpi_cm_power_state power_state,
747 enum dc_video_power_state video_power_state);
748void dc_resume(const struct dc *dc);
749
750/*******************************************************************************
751 * DDC Interfaces
752 ******************************************************************************/
753
754const struct ddc_service *dc_get_ddc_at_index(
755 struct dc *dc, uint32_t link_index);
756
757/*
758 * DPCD access interfaces
759 */
760
761bool dc_read_dpcd(
762 struct dc *dc,
763 uint32_t link_index,
764 uint32_t address,
765 uint8_t *data,
766 uint32_t size);
767
768bool dc_write_dpcd(
769 struct dc *dc,
770 uint32_t link_index,
771 uint32_t address,
772 const uint8_t *data,
773 uint32_t size);
774
775bool dc_submit_i2c(
776 struct dc *dc,
777 uint32_t link_index,
778 struct i2c_command *cmd);
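
An illustrative DPCD access: read the receiver's DPCD revision byte (DPCD address 0x0) on a given link; the wrapper is hypothetical.

static bool example_read_dpcd_rev(struct dc *dc, uint32_t link_index,
		uint8_t *dpcd_rev)
{
	/* *dpcd_rev ends up holding e.g. 0x12 for a DP 1.2 capable receiver */
	return dc_read_dpcd(dc, link_index, 0x0, dpcd_rev, sizeof(*dpcd_rev));
}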
779
780#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
new file mode 100644
index 000000000000..790c5bd51cb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -0,0 +1,224 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef DC_BIOS_TYPES_H
27#define DC_BIOS_TYPES_H
28
29/******************************************************************************
30 * Interface file for VBIOS implementations.
31 *
32 * The default implementation is inside DC.
33 * Display Manager (which instantiates DC) has the option to supply its own
34 * (external to DC) implementation of VBIOS, which will be called by DC, using
35 * this interface.
36 * (The intended use is Diagnostics, but other uses may appear.)
37 *****************************************************************************/
38
39#include "include/bios_parser_types.h"
40
41struct dc_vbios_funcs {
42 uint8_t (*get_connectors_number)(struct dc_bios *bios);
43
44 struct graphics_object_id (*get_encoder_id)(
45 struct dc_bios *bios,
46 uint32_t i);
47 struct graphics_object_id (*get_connector_id)(
48 struct dc_bios *bios,
49 uint8_t connector_index);
50 uint32_t (*get_dst_number)(
51 struct dc_bios *bios,
52 struct graphics_object_id id);
53
54 uint32_t (*get_gpio_record)(
55 struct dc_bios *dcb,
56 struct graphics_object_id id,
57 struct bp_gpio_cntl_info *gpio_record,
58 uint32_t record_size);
59
60 enum bp_result (*get_src_obj)(
61 struct dc_bios *bios,
62 struct graphics_object_id object_id, uint32_t index,
63 struct graphics_object_id *src_object_id);
64 enum bp_result (*get_dst_obj)(
65 struct dc_bios *bios,
66 struct graphics_object_id object_id, uint32_t index,
67 struct graphics_object_id *dest_object_id);
68
69 enum bp_result (*get_i2c_info)(
70 struct dc_bios *dcb,
71 struct graphics_object_id id,
72 struct graphics_object_i2c_info *info);
73
74 enum bp_result (*get_voltage_ddc_info)(
75 struct dc_bios *bios,
76 uint32_t index,
77 struct graphics_object_i2c_info *info);
78 enum bp_result (*get_thermal_ddc_info)(
79 struct dc_bios *bios,
80 uint32_t i2c_channel_id,
81 struct graphics_object_i2c_info *info);
82 enum bp_result (*get_hpd_info)(
83 struct dc_bios *bios,
84 struct graphics_object_id id,
85 struct graphics_object_hpd_info *info);
86 enum bp_result (*get_device_tag)(
87 struct dc_bios *bios,
88 struct graphics_object_id connector_object_id,
89 uint32_t device_tag_index,
90 struct connector_device_tag_info *info);
91 enum bp_result (*get_firmware_info)(
92 struct dc_bios *bios,
93 struct firmware_info *info);
94 enum bp_result (*get_spread_spectrum_info)(
95 struct dc_bios *bios,
96 enum as_signal_type signal,
97 uint32_t index,
98 struct spread_spectrum_info *ss_info);
99 uint32_t (*get_ss_entry_number)(
100 struct dc_bios *bios,
101 enum as_signal_type signal);
102 enum bp_result (*get_embedded_panel_info)(
103 struct dc_bios *bios,
104 struct embedded_panel_info *info);
105 enum bp_result (*get_gpio_pin_info)(
106 struct dc_bios *bios,
107 uint32_t gpio_id,
108 struct gpio_pin_info *info);
109 enum bp_result (*get_encoder_cap_info)(
110 struct dc_bios *bios,
111 struct graphics_object_id object_id,
112 struct bp_encoder_cap_info *info);
113
114 bool (*is_lid_status_changed)(
115 struct dc_bios *bios);
116 bool (*is_display_config_changed)(
117 struct dc_bios *bios);
118 bool (*is_accelerated_mode)(
119 struct dc_bios *bios);
120 void (*get_bios_event_info)(
121 struct dc_bios *bios,
122 struct bios_event_info *info);
123 void (*update_requested_backlight_level)(
124 struct dc_bios *bios,
125 uint32_t backlight_8bit);
126 uint32_t (*get_requested_backlight_level)(
127 struct dc_bios *bios);
128 void (*take_backlight_control)(
129 struct dc_bios *bios,
130 bool cntl);
131
132 bool (*is_active_display)(
133 struct dc_bios *bios,
134 enum signal_type signal,
135 const struct connector_device_tag_info *device_tag);
136 enum controller_id (*get_embedded_display_controller_id)(
137 struct dc_bios *bios);
138 uint32_t (*get_embedded_display_refresh_rate)(
139 struct dc_bios *bios);
140
141 void (*set_scratch_critical_state)(
142 struct dc_bios *bios,
143 bool state);
144 bool (*is_device_id_supported)(
145 struct dc_bios *bios,
146 struct device_id id);
147
148 /* COMMANDS */
149
150 enum bp_result (*encoder_control)(
151 struct dc_bios *bios,
152 struct bp_encoder_control *cntl);
153 enum bp_result (*transmitter_control)(
154 struct dc_bios *bios,
155 struct bp_transmitter_control *cntl);
156 enum bp_result (*crt_control)(
157 struct dc_bios *bios,
158 enum engine_id engine_id,
159 bool enable,
160 uint32_t pixel_clock);
161 enum bp_result (*enable_crtc)(
162 struct dc_bios *bios,
163 enum controller_id id,
164 bool enable);
165 enum bp_result (*adjust_pixel_clock)(
166 struct dc_bios *bios,
167 struct bp_adjust_pixel_clock_parameters *bp_params);
168 enum bp_result (*set_pixel_clock)(
169 struct dc_bios *bios,
170 struct bp_pixel_clock_parameters *bp_params);
171 enum bp_result (*set_dce_clock)(
172 struct dc_bios *bios,
173 struct bp_set_dce_clock_parameters *bp_params);
174 unsigned int (*get_smu_clock_info)(
175 struct dc_bios *bios);
176 enum bp_result (*enable_spread_spectrum_on_ppll)(
177 struct dc_bios *bios,
178 struct bp_spread_spectrum_parameters *bp_params,
179 bool enable);
180 enum bp_result (*program_crtc_timing)(
181 struct dc_bios *bios,
182 struct bp_hw_crtc_timing_parameters *bp_params);
183
184 enum bp_result (*crtc_source_select)(
185 struct dc_bios *bios,
186 struct bp_crtc_source_select *bp_params);
187 enum bp_result (*program_display_engine_pll)(
188 struct dc_bios *bios,
189 struct bp_pixel_clock_parameters *bp_params);
190
191 enum signal_type (*dac_load_detect)(
192 struct dc_bios *bios,
193 struct graphics_object_id encoder,
194 struct graphics_object_id connector,
195 enum signal_type display_signal);
196
197 enum bp_result (*enable_disp_power_gating)(
198 struct dc_bios *bios,
199 enum controller_id controller_id,
200 enum bp_pipe_control_action action);
201
202 void (*post_init)(struct dc_bios *bios);
203
204 void (*bios_parser_destroy)(struct dc_bios **dcb);
205};
206
207struct bios_registers {
208 uint32_t BIOS_SCRATCH_6;
209};
210
211struct dc_bios {
212 const struct dc_vbios_funcs *funcs;
213
214 uint8_t *bios;
215 uint32_t bios_size;
216
217 uint8_t *bios_local_image;
218
219 struct dc_context *ctx;
220 const struct bios_registers *regs;
221 struct integrated_info *integrated_info;
222};
223
224#endif /* DC_BIOS_TYPES_H */
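
Illustrative call through the table: DC code stays agnostic of whether the default parser or a DM-supplied implementation sits behind dc_bios->funcs; the wrapper name is made up.

static uint8_t example_count_connectors(struct dc_bios *bios)
{
	return bios->funcs->get_connectors_number(bios);
}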
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
new file mode 100644
index 000000000000..b143fe88f49f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef DC_DDC_TYPES_H_
26#define DC_DDC_TYPES_H_
27
28struct i2c_payload {
29 bool write;
30 uint8_t address;
31 uint32_t length;
32 uint8_t *data;
33};
34
35enum i2c_command_engine {
36 I2C_COMMAND_ENGINE_DEFAULT,
37 I2C_COMMAND_ENGINE_SW,
38 I2C_COMMAND_ENGINE_HW
39};
40
41struct i2c_command {
42 struct i2c_payload *payloads;
43 uint8_t number_of_payloads;
44
45 enum i2c_command_engine engine;
46
47 /* expressed in KHz
48 * zero means "use default value" */
49 uint32_t speed;
50};
51
52struct gpio_ddc_hw_info {
53 bool hw_supported;
54 uint32_t ddc_channel;
55};
56
57struct ddc {
58 struct gpio *pin_data;
59 struct gpio *pin_clock;
60 struct gpio_ddc_hw_info hw_info;
61 struct dc_context *ctx;
62};
63
64union ddc_wa {
65 struct {
66 uint32_t DP_SKIP_POWER_OFF:1;
67 uint32_t DP_AUX_POWER_UP_WA_DELAY:1;
68 } bits;
69 uint32_t raw;
70};
71
72struct ddc_flags {
73 uint8_t EDID_QUERY_DONE_ONCE:1;
74 uint8_t IS_INTERNAL_DISPLAY:1;
75 uint8_t FORCE_READ_REPEATED_START:1;
76 uint8_t EDID_STRESS_READ:1;
77
78};
79
80enum ddc_transaction_type {
81 DDC_TRANSACTION_TYPE_NONE = 0,
82 DDC_TRANSACTION_TYPE_I2C,
83 DDC_TRANSACTION_TYPE_I2C_OVER_AUX,
84 DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER,
85 DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER
86};
87
88enum display_dongle_type {
89 DISPLAY_DONGLE_NONE = 0,
90 /* Active converter types*/
91 DISPLAY_DONGLE_DP_VGA_CONVERTER,
92 DISPLAY_DONGLE_DP_DVI_CONVERTER,
93 DISPLAY_DONGLE_DP_HDMI_CONVERTER,
94 /* DP-HDMI/DVI passive dongles (Type 1 and Type 2)*/
95 DISPLAY_DONGLE_DP_DVI_DONGLE,
96 DISPLAY_DONGLE_DP_HDMI_DONGLE,
97 /* Other types of dongle*/
98 DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
99};
100
101struct ddc_service {
102 struct ddc *ddc_pin;
103 struct ddc_flags flags;
104 union ddc_wa wa;
105 enum ddc_transaction_type transaction_type;
106 enum display_dongle_type dongle_type;
107 struct dc_context *ctx;
108 struct core_link *link;
109
110 uint32_t address;
111 uint32_t edid_buf_len;
112 uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
113};
114
115#endif /* DC_DDC_TYPES_H_ */
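
A hedged sketch of building the single-payload write command that dc_submit_i2c() (declared in dc.h) consumes; the slave address is a placeholder.

static void example_build_i2c_write(struct i2c_command *cmd,
		struct i2c_payload *payload, uint8_t *data, uint32_t len)
{
	payload->write = true;
	payload->address = 0x37;	/* hypothetical 7-bit slave address */
	payload->length = len;
	payload->data = data;

	cmd->payloads = payload;
	cmd->number_of_payloads = 1;
	cmd->engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd->speed = 0;			/* 0 means "use default value" */
}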
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
new file mode 100644
index 000000000000..befc4985fe54
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef DC_DP_TYPES_H
27#define DC_DP_TYPES_H
28
29enum dc_lane_count {
30 LANE_COUNT_UNKNOWN = 0,
31 LANE_COUNT_ONE = 1,
32 LANE_COUNT_TWO = 2,
33 LANE_COUNT_FOUR = 4,
34 LANE_COUNT_EIGHT = 8,
35 LANE_COUNT_DP_MAX = LANE_COUNT_FOUR
36};
37
38/* This is actually a reference clock (27MHz) multiplier
39 * 162MBps bandwidth for a 1.62GHz link rate,
40 * 270MBps for 2.70GHz,
41 * 324MBps for 3.24GHz,
42 * 540MBps for 5.40GHz
43 * 810MBps for 8.10GHz
44 */
45enum dc_link_rate {
46 LINK_RATE_UNKNOWN = 0,
47 LINK_RATE_LOW = 0x06,
48 LINK_RATE_HIGH = 0x0A,
49 LINK_RATE_RBR2 = 0x0C,
50 LINK_RATE_HIGH2 = 0x14,
51 LINK_RATE_HIGH3 = 0x1E
52};
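
Since the enum values are raw 27 MHz reference multipliers, the per-lane figures quoted in the comment above can be recovered directly; a hedged helper, not part of the patch:

static inline unsigned int dc_link_rate_to_mbps(enum dc_link_rate rate)
{
	/* LINK_RATE_LOW (0x06) -> 162, LINK_RATE_HIGH (0x0A) -> 270, ... */
	return (unsigned int)rate * 27;
}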
53
54enum dc_link_spread {
55 LINK_SPREAD_DISABLED = 0x00,
56 /* 0.5 % downspread 30 kHz */
57 LINK_SPREAD_05_DOWNSPREAD_30KHZ = 0x10,
58 /* 0.5 % downspread 33 kHz */
59 LINK_SPREAD_05_DOWNSPREAD_33KHZ = 0x11
60};
61
62enum dc_voltage_swing {
63 VOLTAGE_SWING_LEVEL0 = 0, /* direct HW translation! */
64 VOLTAGE_SWING_LEVEL1,
65 VOLTAGE_SWING_LEVEL2,
66 VOLTAGE_SWING_LEVEL3,
67 VOLTAGE_SWING_MAX_LEVEL = VOLTAGE_SWING_LEVEL3
68};
69
70enum dc_pre_emphasis {
71 PRE_EMPHASIS_DISABLED = 0, /* direct HW translation! */
72 PRE_EMPHASIS_LEVEL1,
73 PRE_EMPHASIS_LEVEL2,
74 PRE_EMPHASIS_LEVEL3,
75 PRE_EMPHASIS_MAX_LEVEL = PRE_EMPHASIS_LEVEL3
76};
77/* Post Cursor 2 is optional for transmitter
78 * and it applies only to the main link operating at HBR2
79 */
80enum dc_post_cursor2 {
81 POST_CURSOR2_DISABLED = 0, /* direct HW translation! */
82 POST_CURSOR2_LEVEL1,
83 POST_CURSOR2_LEVEL2,
84 POST_CURSOR2_LEVEL3,
85 POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
86};
87
88struct dc_link_settings {
89 enum dc_lane_count lane_count;
90 enum dc_link_rate link_rate;
91 enum dc_link_spread link_spread;
92};
93
94struct dc_lane_settings {
95 enum dc_voltage_swing VOLTAGE_SWING;
96 enum dc_pre_emphasis PRE_EMPHASIS;
97 enum dc_post_cursor2 POST_CURSOR2;
98};
99
100struct dc_link_training_settings {
101 struct dc_link_settings link;
102 struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
103};
104
105#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
new file mode 100644
index 000000000000..3a80b0c08ae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -0,0 +1,144 @@
1/*
2 * dc_helper.c
3 *
4 * Created on: Aug 30, 2016
5 * Author: agrodzov
6 */
7#include "dm_services.h"
8#include <stdarg.h>
9
10uint32_t generic_reg_update_ex(const struct dc_context *ctx,
11 uint32_t addr, uint32_t reg_val, int n,
12 uint8_t shift1, uint32_t mask1, uint32_t field_value1,
13 ...)
14{
15 uint32_t shift, mask, field_value;
16 int i = 1;
17
18 va_list ap;
19 va_start(ap, field_value1);
20
21 reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
22
23 while (i < n) {
24 shift = va_arg(ap, uint32_t);
25 mask = va_arg(ap, uint32_t);
26 field_value = va_arg(ap, uint32_t);
27
28 reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
29 i++;
30 }
31
32 dm_write_reg(ctx, addr, reg_val);
33 va_end(ap);
34
35 return reg_val;
36}
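
A hypothetical read-modify-write of two fields in one register; the address and the FIELD_* shift/mask constants stand in for the generated register headers.

static void example_update_two_fields(const struct dc_context *ctx,
		uint32_t addr)
{
	uint32_t val = dm_read_reg(ctx, addr);

	/* n = 2: the explicit triplet plus one more passed through varargs */
	generic_reg_update_ex(ctx, addr, val, 2,
			FIELD_A__SHIFT, FIELD_A_MASK, 1,
			FIELD_B__SHIFT, FIELD_B_MASK, 0);
}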
37
38uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
39 uint8_t shift, uint32_t mask, uint32_t *field_value)
40{
41 uint32_t reg_val = dm_read_reg(ctx, addr);
42 *field_value = get_reg_field_value_ex(reg_val, mask, shift);
43 return reg_val;
44}
45
46uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
47 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
48 uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
49{
50 uint32_t reg_val = dm_read_reg(ctx, addr);
51 *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
52 *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
53 return reg_val;
54}
55
56uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
57 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
58 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
59 uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
60{
61 uint32_t reg_val = dm_read_reg(ctx, addr);
62 *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
63 *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
64 *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
65 return reg_val;
66}
67
68uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
69 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
70 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
71 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
72 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
73 uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
74{
75 uint32_t reg_val = dm_read_reg(ctx, addr);
76 *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
77 *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
78 *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
79 *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
80 *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
81 return reg_val;
82}
83
 84/* note: a va_args version of this is a bad idea: the output parameters are passed by
 85 * pointer, so the compiler cannot check for size mismatches (stack-corruption prone)
 86
87uint32_t generic_reg_get(const struct dc_context *ctx,
88 uint32_t addr, int n, ...)
89{
90 uint32_t shift, mask;
91 uint32_t *field_value;
92 uint32_t reg_val;
93 int i = 0;
94
95 reg_val = dm_read_reg(ctx, addr);
96
97 va_list ap;
98 va_start(ap, n);
99
100 while (i < n) {
101 shift = va_arg(ap, uint32_t);
102 mask = va_arg(ap, uint32_t);
103 field_value = va_arg(ap, uint32_t *);
104
105 *field_value = get_reg_field_value_ex(reg_val, mask, shift);
106 i++;
107 }
108
109 va_end(ap);
110
111 return reg_val;
112}
113*/
114
115uint32_t generic_reg_wait(const struct dc_context *ctx,
116 uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
117 unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
118 const char *func_name)
119{
120 uint32_t field_value;
121 uint32_t reg_val;
122 int i;
123
124 for (i = 0; i <= time_out_num_tries; i++) {
125 if (i) {
126 if (0 < delay_between_poll_us && delay_between_poll_us < 1000)
127 udelay(delay_between_poll_us);
128
129 if (delay_between_poll_us > 1000)
130 msleep(delay_between_poll_us/1000);
131 }
132
133 reg_val = dm_read_reg(ctx, addr);
134
135 field_value = get_reg_field_value_ex(reg_val, mask, shift);
136
137 if (field_value == condition_value)
138 return reg_val;
139 }
140
141 DC_ERR("REG_WAIT timeout %dus * %d tries - %s",
142 delay_between_poll_us, time_out_num_tries, func_name);
143 return reg_val;
144}
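
A hypothetical caller of the polling helper above (placeholder address and mask): wait for a status bit to read 1, polling every 100 us for at most 10 tries before the timeout message is logged.

static void example_wait_for_status(const struct dc_context *ctx)
{
	generic_reg_wait(ctx, 0x5678,		/* placeholder address */
			 0, 0x00000001, 1,	/* shift, mask, expected value */
			 100, 10,		/* 100 us between polls, 10 tries */
			 __func__);
}
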
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
new file mode 100644
index 000000000000..5605a5c96da7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -0,0 +1,588 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef DC_HW_TYPES_H
27#define DC_HW_TYPES_H
28
29#include "os_types.h"
30
31/******************************************************************************
32 * Data types for Virtual HW Layer of DAL3.
33 * (see DAL3 design documents for HW Layer definition)
34 *
35 * The intended uses are:
 36 * 1. Generation of pseudocode sequences for HW programming.
37 * 2. Implementation of real HW programming by HW Sequencer of DAL3.
38 *
39 * Note: do *not* add any types which are *not* used for HW programming - this
40 * will ensure separation of Logic layer from HW layer.
41 ******************************************************************************/
42
43union large_integer {
44 struct {
45 uint32_t low_part;
46 int32_t high_part;
47 };
48
49 struct {
50 uint32_t low_part;
51 int32_t high_part;
52 } u;
53
54 int64_t quad_part;
55};
56
57#define PHYSICAL_ADDRESS_LOC union large_integer
58
59enum dc_plane_addr_type {
60 PLN_ADDR_TYPE_GRAPHICS = 0,
61 PLN_ADDR_TYPE_GRPH_STEREO,
62 PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
63};
64
65struct dc_plane_address {
66 enum dc_plane_addr_type type;
67 union {
68 struct{
69 PHYSICAL_ADDRESS_LOC addr;
70 PHYSICAL_ADDRESS_LOC meta_addr;
71 union large_integer dcc_const_color;
72 } grph;
73
74 /*stereo*/
75 struct {
76 PHYSICAL_ADDRESS_LOC left_addr;
77 PHYSICAL_ADDRESS_LOC left_meta_addr;
78 union large_integer left_dcc_const_color;
79
80 PHYSICAL_ADDRESS_LOC right_addr;
81 PHYSICAL_ADDRESS_LOC right_meta_addr;
82 union large_integer right_dcc_const_color;
83
84 } grph_stereo;
85
86 /*video progressive*/
87 struct {
88 PHYSICAL_ADDRESS_LOC luma_addr;
89 PHYSICAL_ADDRESS_LOC luma_meta_addr;
90 union large_integer luma_dcc_const_color;
91
92 PHYSICAL_ADDRESS_LOC chroma_addr;
93 PHYSICAL_ADDRESS_LOC chroma_meta_addr;
94 union large_integer chroma_dcc_const_color;
95 } video_progressive;
96 };
97};
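
A minimal sketch (illustration, not part of the patch) of filling the graphics variant of this union: union large_integer lets a 64-bit GPU address be written through quad_part while the HW programming later reads the 32-bit low_part/high_part aliases. The function name is only illustrative.

static inline void example_set_grph_address(struct dc_plane_address *pa,
					    uint64_t gpu_addr)
{
	pa->type = PLN_ADDR_TYPE_GRAPHICS;
	pa->grph.addr.quad_part = (int64_t)gpu_addr;
	pa->grph.meta_addr.quad_part = 0;	/* no DCC metadata surface */
	pa->grph.dcc_const_color.quad_part = 0;
}
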
98
99struct dc_size {
100 uint32_t width;
101 uint32_t height;
102};
103
104struct rect {
105 int x;
106 int y;
107 uint32_t width;
108 uint32_t height;
109};
110
111union plane_size {
 112 /* The grph or video member is selected
 113 * based on the surface pixel format:
 114 * use the video structure if
 115 * format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
 116 * else use the grph structure
 117 */
118 struct {
119 struct rect surface_size;
120 /* Graphic surface pitch in pixels.
121 * In LINEAR_GENERAL mode, pitch
122 * is 32 pixel aligned.
123 */
124 uint32_t surface_pitch;
125
126 uint32_t meta_pitch;
127 } grph;
128
129 struct {
130 struct rect luma_size;
131 /* Graphic surface pitch in pixels.
132 * In LINEAR_GENERAL mode, pitch is
133 * 32 pixel aligned.
134 */
135 uint32_t luma_pitch;
136 uint32_t meta_luma_pitch;
137
138 struct rect chroma_size;
139 /* Graphic surface pitch in pixels.
140 * In LINEAR_GENERAL mode, pitch is
141 * 32 pixel aligned.
142 */
143 uint32_t chroma_pitch;
144 uint32_t meta_chroma_pitch;
145 } video;
146};
147
148struct dc_plane_dcc_param {
149 bool enable;
150
151 union {
152 struct {
153 uint32_t meta_pitch;
154 bool independent_64b_blks;
155 } grph;
156
157 struct {
158 uint32_t meta_pitch_l;
159 bool independent_64b_blks_l;
160
161 uint32_t meta_pitch_c;
162 bool independent_64b_blks_c;
163 } video;
164 };
165};
166
167/*Displayable pixel format in fb*/
168enum surface_pixel_format {
169 SURFACE_PIXEL_FORMAT_GRPH_BEGIN = 0,
 170 /*TO BE REMOVED: palette, 256 colors*/
171 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS =
172 SURFACE_PIXEL_FORMAT_GRPH_BEGIN,
173 /*16 bpp*/
174 SURFACE_PIXEL_FORMAT_GRPH_ARGB1555,
175 /*16 bpp*/
176 SURFACE_PIXEL_FORMAT_GRPH_RGB565,
177 /*32 bpp*/
178 SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
 179 /*32 bpp swapped*/
180 SURFACE_PIXEL_FORMAT_GRPH_BGRA8888,
181
182 SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010,
 183 /*swapped*/
184 SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010,
 185 /*TO BE REMOVED: swapped; XR_BIAS has no difference
 186 * in pixel layout from the previous format and we can
 187 * delete this after discussion*/
188 SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
189 /*64 bpp */
190 SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
191 /*float*/
192 SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
 193 /*swapped & float*/
194 SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
195 /*grow graphics here if necessary */
196
197 SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
198 SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
199 SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
200 SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
201 SURFACE_PIXEL_FORMAT_INVALID
202
203 /*grow 444 video here if necessary */
204};
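
A minimal sketch of the selection rule noted in union plane_size above, using the boundary value from this enum: video formats start at SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, everything below it is a graphics format. The helper name is only illustrative.

static inline uint32_t example_pick_pitch(enum surface_pixel_format format,
					  const union plane_size *size)
{
	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return size->video.luma_pitch;	/* YCbCr 4:2:0 surface */

	return size->grph.surface_pitch;	/* RGB/graphics surface */
}
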
205
206/* Pixel format */
207enum pixel_format {
208 /*graph*/
209 PIXEL_FORMAT_UNINITIALIZED,
210 PIXEL_FORMAT_INDEX8,
211 PIXEL_FORMAT_RGB565,
212 PIXEL_FORMAT_ARGB8888,
213 PIXEL_FORMAT_ARGB2101010,
214 PIXEL_FORMAT_ARGB2101010_XRBIAS,
215 PIXEL_FORMAT_FP16,
216 /*video*/
217 PIXEL_FORMAT_420BPP12,
218 /*end of pixel format definition*/
219 PIXEL_FORMAT_INVALID,
220
221 PIXEL_FORMAT_GRPH_BEGIN = PIXEL_FORMAT_INDEX8,
222 PIXEL_FORMAT_GRPH_END = PIXEL_FORMAT_FP16,
223 PIXEL_FORMAT_VIDEO_BEGIN = PIXEL_FORMAT_420BPP12,
224 PIXEL_FORMAT_VIDEO_END = PIXEL_FORMAT_420BPP12,
225 PIXEL_FORMAT_UNKNOWN
226};
227
228enum tile_split_values {
229 DC_DISPLAY_MICRO_TILING = 0x0,
230 DC_THIN_MICRO_TILING = 0x1,
231 DC_DEPTH_MICRO_TILING = 0x2,
232 DC_ROTATED_MICRO_TILING = 0x3,
233};
234
235/* TODO: These values come from hardware spec. We need to readdress this
236 * if they ever change.
237 */
238enum array_mode_values {
239 DC_ARRAY_LINEAR_GENERAL = 0,
240 DC_ARRAY_LINEAR_ALLIGNED,
241 DC_ARRAY_1D_TILED_THIN1,
242 DC_ARRAY_1D_TILED_THICK,
243 DC_ARRAY_2D_TILED_THIN1,
244 DC_ARRAY_PRT_TILED_THIN1,
245 DC_ARRAY_PRT_2D_TILED_THIN1,
246 DC_ARRAY_2D_TILED_THICK,
247 DC_ARRAY_2D_TILED_X_THICK,
248 DC_ARRAY_PRT_TILED_THICK,
249 DC_ARRAY_PRT_2D_TILED_THICK,
250 DC_ARRAY_PRT_3D_TILED_THIN1,
251 DC_ARRAY_3D_TILED_THIN1,
252 DC_ARRAY_3D_TILED_THICK,
253 DC_ARRAY_3D_TILED_X_THICK,
254 DC_ARRAY_PRT_3D_TILED_THICK,
255};
256
257enum tile_mode_values {
258 DC_ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
259 DC_ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
260};
261
262union dc_tiling_info {
263
264 struct {
265 /* Specifies the number of memory banks for tiling
266 * purposes.
267 * Only applies to 2D and 3D tiling modes.
268 * POSSIBLE VALUES: 2,4,8,16
269 */
270 unsigned int num_banks;
271 /* Specifies the number of tiles in the x direction
272 * to be incorporated into the same bank.
273 * Only applies to 2D and 3D tiling modes.
274 * POSSIBLE VALUES: 1,2,4,8
275 */
276 unsigned int bank_width;
277 unsigned int bank_width_c;
278 /* Specifies the number of tiles in the y direction to
279 * be incorporated into the same bank.
280 * Only applies to 2D and 3D tiling modes.
281 * POSSIBLE VALUES: 1,2,4,8
282 */
283 unsigned int bank_height;
284 unsigned int bank_height_c;
285 /* Specifies the macro tile aspect ratio. Only applies
286 * to 2D and 3D tiling modes.
287 */
288 unsigned int tile_aspect;
289 unsigned int tile_aspect_c;
290 /* Specifies the number of bytes that will be stored
291 * contiguously for each tile.
292 * If the tile data requires more storage than this
293 * amount, it is split into multiple slices.
294 * This field must not be larger than
295 * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
296 * Only applies to 2D and 3D tiling modes.
297 * For color render targets, TILE_SPLIT >= 256B.
298 */
299 enum tile_split_values tile_split;
300 enum tile_split_values tile_split_c;
301 /* Specifies the addressing within a tile.
302 * 0x0 - DISPLAY_MICRO_TILING
303 * 0x1 - THIN_MICRO_TILING
304 * 0x2 - DEPTH_MICRO_TILING
305 * 0x3 - ROTATED_MICRO_TILING
306 */
307 enum tile_mode_values tile_mode;
308 enum tile_mode_values tile_mode_c;
309 /* Specifies the number of pipes and how they are
310 * interleaved in the surface.
311 * Refer to memory addressing document for complete
312 * details and constraints.
313 */
314 unsigned int pipe_config;
315 /* Specifies the tiling mode of the surface.
316 * THIN tiles use an 8x8x1 tile size.
317 * THICK tiles use an 8x8x4 tile size.
318 * 2D tiling modes rotate banks for successive Z slices
319 * 3D tiling modes rotate pipes and banks for Z slices
320 * Refer to memory addressing document for complete
321 * details and constraints.
322 */
323 enum array_mode_values array_mode;
324 } gfx8;
325
326};
327
328/* Rotation angle */
329enum dc_rotation_angle {
330 ROTATION_ANGLE_0 = 0,
331 ROTATION_ANGLE_90,
332 ROTATION_ANGLE_180,
333 ROTATION_ANGLE_270,
334 ROTATION_ANGLE_COUNT
335};
336
337enum dc_scan_direction {
338 SCAN_DIRECTION_UNKNOWN = 0,
339 SCAN_DIRECTION_HORIZONTAL = 1, /* 0, 180 rotation */
340 SCAN_DIRECTION_VERTICAL = 2, /* 90, 270 rotation */
341};
342
343struct dc_cursor_position {
344 uint32_t x;
345 uint32_t y;
346
347 uint32_t x_hotspot;
348 uint32_t y_hotspot;
349
350 /*
351 * This parameter indicates whether HW cursor should be enabled
352 */
353 bool enable;
354
355 /*
356 * This parameter indicates whether cursor hot spot should be
357 * programmed
358 */
359 bool hot_spot_enable;
360};
361
362/* IPP related types */
363
364/* Used by both ipp amd opp functions*/
365/* TODO: to be consolidated with enum color_space */
366
367/*
368 * This enum is for programming CURSOR_MODE register field. What this register
369 * should be programmed to depends on OS requested cursor shape flags and what
370 * we stored in the cursor surface.
371 */
372enum dc_cursor_color_format {
373 CURSOR_MODE_MONO,
374 CURSOR_MODE_COLOR_1BIT_AND,
375 CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
376 CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
377};
378
379/*
380 * This is all the parameters required by DAL in order to update the cursor
381 * attributes, including the new cursor image surface address, size, hotspot
382 * location, color format, etc.
383 */
384
385union dc_cursor_attribute_flags {
386 struct {
387 uint32_t ENABLE_MAGNIFICATION:1;
388 uint32_t INVERSE_TRANSPARENT_CLAMPING:1;
389 uint32_t HORIZONTAL_MIRROR:1;
390 uint32_t VERTICAL_MIRROR:1;
391 uint32_t INVERT_PIXEL_DATA:1;
392 uint32_t ZERO_EXPANSION:1;
393 uint32_t MIN_MAX_INVERT:1;
394 uint32_t RESERVED:25;
395 } bits;
396 uint32_t value;
397};
398
399struct dc_cursor_attributes {
400 PHYSICAL_ADDRESS_LOC address;
401
 402 /* Width and height should correspond to cursor surface width x height */
403 uint32_t width;
404 uint32_t height;
405 uint32_t x_hot;
406 uint32_t y_hot;
407
408 enum dc_cursor_color_format color_format;
409
410 /* In case we support HW Cursor rotation in the future */
411 enum dc_rotation_angle rotation_angle;
412
413 union dc_cursor_attribute_flags attribute_flags;
414};
415
416/* OPP */
417
418enum dc_color_space {
419 COLOR_SPACE_UNKNOWN,
420 COLOR_SPACE_SRGB,
421 COLOR_SPACE_SRGB_LIMITED,
422 COLOR_SPACE_YPBPR601,
423 COLOR_SPACE_YPBPR709,
424 COLOR_SPACE_YCBCR601,
425 COLOR_SPACE_YCBCR709,
426 COLOR_SPACE_YCBCR601_LIMITED,
427 COLOR_SPACE_YCBCR709_LIMITED
428};
429
430enum dc_quantization_range {
431 QUANTIZATION_RANGE_UNKNOWN,
432 QUANTIZATION_RANGE_FULL,
433 QUANTIZATION_RANGE_LIMITED
434};
435
436/* XFM */
437
438/* used in struct dc_surface */
439struct scaling_taps {
440 uint32_t v_taps;
441 uint32_t h_taps;
442 uint32_t v_taps_c;
443 uint32_t h_taps_c;
444};
445
446enum dc_timing_standard {
447 TIMING_STANDARD_UNDEFINED,
448 TIMING_STANDARD_DMT,
449 TIMING_STANDARD_GTF,
450 TIMING_STANDARD_CVT,
451 TIMING_STANDARD_CVT_RB,
452 TIMING_STANDARD_CEA770,
453 TIMING_STANDARD_CEA861,
454 TIMING_STANDARD_HDMI,
455 TIMING_STANDARD_TV_NTSC,
456 TIMING_STANDARD_TV_NTSC_J,
457 TIMING_STANDARD_TV_PAL,
458 TIMING_STANDARD_TV_PAL_M,
459 TIMING_STANDARD_TV_PAL_CN,
460 TIMING_STANDARD_TV_SECAM,
461 TIMING_STANDARD_EXPLICIT,
462 /*!< For explicit timings from EDID, VBIOS, etc.*/
463 TIMING_STANDARD_USER_OVERRIDE,
464 /*!< For mode timing override by user*/
465 TIMING_STANDARD_MAX
466};
467
468enum dc_timing_3d_format {
469 TIMING_3D_FORMAT_NONE,
470 TIMING_3D_FORMAT_FRAME_ALTERNATE, /* No stereosync at all*/
471 TIMING_3D_FORMAT_INBAND_FA, /* Inband Frame Alternate (DVI/DP)*/
472 TIMING_3D_FORMAT_DP_HDMI_INBAND_FA, /* Inband FA to HDMI Frame Pack*/
473 /* for active DP-HDMI dongle*/
474 TIMING_3D_FORMAT_SIDEBAND_FA, /* Sideband Frame Alternate (eDP)*/
475 TIMING_3D_FORMAT_HW_FRAME_PACKING,
476 TIMING_3D_FORMAT_SW_FRAME_PACKING,
477 TIMING_3D_FORMAT_ROW_INTERLEAVE,
478 TIMING_3D_FORMAT_COLUMN_INTERLEAVE,
479 TIMING_3D_FORMAT_PIXEL_INTERLEAVE,
480 TIMING_3D_FORMAT_SIDE_BY_SIDE,
481 TIMING_3D_FORMAT_TOP_AND_BOTTOM,
482 TIMING_3D_FORMAT_SBS_SW_PACKED,
483 /* Side-by-side, packed by application/driver into 2D frame*/
484 TIMING_3D_FORMAT_TB_SW_PACKED,
485 /* Top-and-bottom, packed by application/driver into 2D frame*/
486
487 TIMING_3D_FORMAT_MAX,
488};
489
490enum dc_color_depth {
491 COLOR_DEPTH_UNDEFINED,
492 COLOR_DEPTH_666,
493 COLOR_DEPTH_888,
494 COLOR_DEPTH_101010,
495 COLOR_DEPTH_121212,
496 COLOR_DEPTH_141414,
497 COLOR_DEPTH_161616,
498 COLOR_DEPTH_COUNT
499};
500
501enum dc_pixel_encoding {
502 PIXEL_ENCODING_UNDEFINED,
503 PIXEL_ENCODING_RGB,
504 PIXEL_ENCODING_YCBCR422,
505 PIXEL_ENCODING_YCBCR444,
506 PIXEL_ENCODING_YCBCR420,
507 PIXEL_ENCODING_COUNT
508};
509
510enum dc_aspect_ratio {
511 ASPECT_RATIO_NO_DATA,
512 ASPECT_RATIO_4_3,
513 ASPECT_RATIO_16_9,
514 ASPECT_RATIO_64_27,
515 ASPECT_RATIO_256_135,
516 ASPECT_RATIO_FUTURE
517};
518
519enum scanning_type {
520 SCANNING_TYPE_NODATA = 0,
521 SCANNING_TYPE_OVERSCAN,
522 SCANNING_TYPE_UNDERSCAN,
523 SCANNING_TYPE_FUTURE,
524 SCANNING_TYPE_UNDEFINED
525};
526
527struct dc_crtc_timing_flags {
528 uint32_t INTERLACE :1;
 529 uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, the polarity
 530 is positive (reversed relative to the DAL1/video BIOS definition) */
 531 uint32_t VSYNC_POSITIVE_POLARITY :1; /* when set to 1, the polarity
 532 is positive (reversed relative to the DAL1/video BIOS definition) */
533
534 uint32_t HORZ_COUNT_BY_TWO:1;
535
 536 uint32_t EXCLUSIVE_3D :1; /* if this bit is set,
 537 the timing can be driven in 3D format only
 538 and there is no corresponding 2D timing*/
 539 uint32_t RIGHT_EYE_3D_POLARITY :1; /* 1 - means right eye polarity
 540 (right eye = '1', left eye = '0') */
 541 uint32_t SUB_SAMPLE_3D :1; /* 1 - left/right images are subsampled
 542 when mixed into the 3D image. 0 - summation (the 3D timing is doubled)*/
 543 uint32_t USE_IN_3D_VIEW_ONLY :1; /* Do not use this timing in 2D view,
 544 because the corresponding 2D timing is also present in the list*/
 545 uint32_t STEREO_3D_PREFERENCE :1; /* This is a 2D timing
 546 and we want to match the priority of the corresponding 3D timing*/
547 uint32_t Y_ONLY :1;
548
549 uint32_t YCBCR420 :1; /* TODO: shouldn't need this flag, should be a separate pixel format */
550 uint32_t DTD_COUNTER :5; /* values 1 to 16 */
551
552 /* HDMI 2.0 - Support scrambling for TMDS character
553 * rates less than or equal to 340Mcsc */
554 uint32_t LTE_340MCSC_SCRAMBLE:1;
555
556};
557
558struct dc_crtc_timing {
559
560 uint32_t h_total;
561 uint32_t h_border_left;
562 uint32_t h_addressable;
563 uint32_t h_border_right;
564 uint32_t h_front_porch;
565 uint32_t h_sync_width;
566
567 uint32_t v_total;
568 uint32_t v_border_top;
569 uint32_t v_addressable;
570 uint32_t v_border_bottom;
571 uint32_t v_front_porch;
572 uint32_t v_sync_width;
573
574 uint32_t pix_clk_khz;
575
576 uint32_t vic;
577 uint32_t hdmi_vic;
578 enum dc_timing_3d_format timing_3d_format;
579 enum dc_color_depth display_color_depth;
580 enum dc_pixel_encoding pixel_encoding;
581 enum dc_aspect_ratio aspect_ratio;
582 enum scanning_type scan_type;
583
584 struct dc_crtc_timing_flags flags;
585};
586
587#endif /* DC_HW_TYPES_H */
588
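
As an illustration of struct dc_crtc_timing (not part of the patch), the standard CEA-861 VIC 16 timing, 1920x1080p at 60 Hz, looks like this; members not listed default to zero/NONE.

static const struct dc_crtc_timing example_1080p60 = {
	.h_total       = 2200,
	.h_addressable = 1920,
	.h_front_porch = 88,
	.h_sync_width  = 44,

	.v_total       = 1125,
	.v_addressable = 1080,
	.v_front_porch = 4,
	.v_sync_width  = 5,

	.pix_clk_khz   = 148500,
	.vic           = 16,

	.display_color_depth = COLOR_DEPTH_888,
	.pixel_encoding      = PIXEL_ENCODING_RGB,
	.aspect_ratio        = ASPECT_RATIO_16_9,
	.flags = {
		.HSYNC_POSITIVE_POLARITY = 1,
		.VSYNC_POSITIVE_POLARITY = 1,
	},
};
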
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
new file mode 100644
index 000000000000..ae9fcca121e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -0,0 +1,493 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef DC_TYPES_H_
26#define DC_TYPES_H_
27
28#include "fixed32_32.h"
29#include "fixed31_32.h"
30#include "irq_types.h"
31#include "dc_dp_types.h"
32#include "dc_hw_types.h"
33#include "dal_types.h"
34
35/* forward declarations */
36struct dc_surface;
37struct dc_target;
38struct dc_stream;
39struct dc_link;
40struct dc_sink;
41struct dal;
42
43/********************************
44 * Environment definitions
45 ********************************/
46enum dce_environment {
47 DCE_ENV_PRODUCTION_DRV = 0,
 48 /* Emulation on FPGA, in the "Maximus" system.
 49 * This environment enforces that *only* DC registers are accessed.
 50 * (access to non-DC registers will hang the FPGA) */
51 DCE_ENV_FPGA_MAXIMUS,
52 /* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
53 * requirements of Diagnostics team. */
54 DCE_ENV_DIAG
55};
56
57/* Note: use these macro definitions instead of direct comparison! */
58#define IS_FPGA_MAXIMUS_DC(dce_environment) \
59 (dce_environment == DCE_ENV_FPGA_MAXIMUS)
60
61#define IS_DIAG_DC(dce_environment) \
62 (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
63
64struct hw_asic_id {
65 uint32_t chip_id;
66 uint32_t chip_family;
67 uint32_t pci_revision_id;
68 uint32_t hw_internal_rev;
69 uint32_t vram_type;
70 uint32_t vram_width;
71 uint32_t feature_flags;
72 uint32_t fake_paths_num;
73 void *atombios_base_address;
74};
75
76struct dc_context {
77 struct dc *dc;
78
79 void *driver_context; /* e.g. amdgpu_device */
80
81 struct dal_logger *logger;
82 void *cgs_device;
83
84 enum dce_environment dce_environment;
85 struct hw_asic_id asic_id;
86
87 /* todo: below should probably move to dc. to facilitate removal
88 * of AS we will store these here
89 */
90 enum dce_version dce_version;
91 struct dc_bios *dc_bios;
92 bool created_bios;
93 struct gpio_service *gpio_service;
94 struct i2caux *i2caux;
95};
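
A short usage sketch for the environment macros defined above (struct dc_context carries the dce_environment they inspect); as the comment says, callers test the environment through the macros rather than comparing the enum directly. The helper name is only illustrative.

static inline bool example_skip_non_dc_access(const struct dc_context *ctx)
{
	/* on the Maximus FPGA platform, touching non-DC registers hangs it */
	return IS_FPGA_MAXIMUS_DC(ctx->dce_environment);
}
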
96
97
98#define MAX_EDID_BUFFER_SIZE 512
99#define EDID_BLOCK_SIZE 128
100#define MAX_SURFACE_NUM 2
101#define NUM_PIXEL_FORMATS 10
102
103#include "dc_ddc_types.h"
104
105enum tiling_mode {
106 TILING_MODE_INVALID,
107 TILING_MODE_LINEAR,
108 TILING_MODE_TILED,
109 TILING_MODE_COUNT
110};
111
112enum view_3d_format {
113 VIEW_3D_FORMAT_NONE = 0,
114 VIEW_3D_FORMAT_FRAME_SEQUENTIAL,
115 VIEW_3D_FORMAT_SIDE_BY_SIDE,
116 VIEW_3D_FORMAT_TOP_AND_BOTTOM,
117 VIEW_3D_FORMAT_COUNT,
118 VIEW_3D_FORMAT_FIRST = VIEW_3D_FORMAT_FRAME_SEQUENTIAL
119};
120
121enum plane_stereo_format {
122 PLANE_STEREO_FORMAT_NONE = 0,
123 PLANE_STEREO_FORMAT_SIDE_BY_SIDE = 1,
124 PLANE_STEREO_FORMAT_TOP_AND_BOTTOM = 2,
125 PLANE_STEREO_FORMAT_FRAME_ALTERNATE = 3,
126 PLANE_STEREO_FORMAT_ROW_INTERLEAVED = 5,
127 PLANE_STEREO_FORMAT_COLUMN_INTERLEAVED = 6,
128 PLANE_STEREO_FORMAT_CHECKER_BOARD = 7
129};
130
 131/* TODO: Find a way to calculate the number of bits.
 132 * Please increase NUM_PIXEL_FORMATS if the pixel_format enum grows
 133 * (it counts formats from PIXEL_FORMAT_INDEX8 to PIXEL_FORMAT_444BPP32)
 134 */
135
136enum dc_edid_connector_type {
137 EDID_CONNECTOR_UNKNOWN = 0,
138 EDID_CONNECTOR_ANALOG = 1,
139 EDID_CONNECTOR_DIGITAL = 10,
140 EDID_CONNECTOR_DVI = 11,
141 EDID_CONNECTOR_HDMIA = 12,
142 EDID_CONNECTOR_MDDI = 14,
143 EDID_CONNECTOR_DISPLAYPORT = 15
144};
145
146enum dc_edid_status {
147 EDID_OK,
148 EDID_BAD_INPUT,
149 EDID_NO_RESPONSE,
150 EDID_BAD_CHECKSUM,
151};
152
153/* audio capability from EDID*/
154struct dc_cea_audio_mode {
155 uint8_t format_code; /* ucData[0] [6:3]*/
156 uint8_t channel_count; /* ucData[0] [2:0]*/
157 uint8_t sample_rate; /* ucData[1]*/
158 union {
159 uint8_t sample_size; /* for LPCM*/
160 /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz)*/
161 uint8_t max_bit_rate;
162 uint8_t audio_codec_vendor_specific; /* for Audio Formats 9-15*/
163 };
164};
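
An illustrative decode (not part of the patch) of one 3-byte CEA-861 Short Audio Descriptor into the structure above, following the bit positions given in the comments; the channel-count field is stored raw (the SAD encodes channels minus one). The function name is only illustrative.

static inline void example_parse_sad(const uint8_t sad[3],
				     struct dc_cea_audio_mode *mode)
{
	mode->format_code   = (sad[0] >> 3) & 0x0f;	/* byte 0, bits 6:3 */
	mode->channel_count = sad[0] & 0x07;		/* byte 0, bits 2:0 */
	mode->sample_rate   = sad[1];			/* byte 1 */
	mode->sample_size   = sad[2];			/* byte 2; meaning depends
							 * on the format code */
}
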
165
166struct dc_edid {
167 uint32_t length;
168 uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
169};
170
171/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
 172 * is used. In this case we assume the speaker locations are: front left, front
 173 * right and front center. */
174#define DEFAULT_SPEAKER_LOCATION 5
175
176#define DC_MAX_AUDIO_DESC_COUNT 16
177
178#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
179
180struct dc_edid_caps {
181 /* sink identification */
182 uint16_t manufacturer_id;
183 uint16_t product_id;
184 uint32_t serial_number;
185 uint8_t manufacture_week;
186 uint8_t manufacture_year;
187 uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
188
189 /* audio caps */
190 uint8_t speaker_flags;
191 uint32_t audio_mode_count;
192 struct dc_cea_audio_mode audio_modes[DC_MAX_AUDIO_DESC_COUNT];
193 uint32_t audio_latency;
194 uint32_t video_latency;
195
196 /*HDMI 2.0 caps*/
197 bool lte_340mcsc_scramble;
198
199 bool edid_hdmi;
200};
201
202struct view {
203 uint32_t width;
204 uint32_t height;
205};
206
207struct dc_mode_flags {
208 /* note: part of refresh rate flag*/
209 uint32_t INTERLACE :1;
210 /* native display timing*/
211 uint32_t NATIVE :1;
212 /* preferred is the recommended mode, one per display */
213 uint32_t PREFERRED :1;
 214 /* true if this mode should use reduced blanking timings;
 215 * _not_ related to the Reduced Blanking adjustment*/
216 uint32_t REDUCED_BLANKING :1;
 217 /* note: part of refresh rate flag*/
218 uint32_t VIDEO_OPTIMIZED_RATE :1;
219 /* should be reported to upper layers as mode_flags*/
220 uint32_t PACKED_PIXEL_FORMAT :1;
221 /*< preferred view*/
222 uint32_t PREFERRED_VIEW :1;
223 /* this timing should be used only in tiled mode*/
224 uint32_t TILED_MODE :1;
225 uint32_t DSE_MODE :1;
 226 /* Refresh rate divider when a Miracast sink is using a
 227 different rate than the output display device.
 228 Must be zero for wired displays and non-zero for
 229 Miracast displays*/
230 uint32_t MIRACAST_REFRESH_DIVIDER;
231};
232
233
234enum dc_timing_source {
235 TIMING_SOURCE_UNDEFINED,
236
 237 /* explicitly specified by user, most important*/
238 TIMING_SOURCE_USER_FORCED,
239 TIMING_SOURCE_USER_OVERRIDE,
240 TIMING_SOURCE_CUSTOM,
241 TIMING_SOURCE_EXPLICIT,
242
243 /* explicitly specified by the display device, more important*/
244 TIMING_SOURCE_EDID_CEA_SVD_3D,
245 TIMING_SOURCE_EDID_CEA_SVD_PREFERRED,
246 TIMING_SOURCE_EDID_CEA_SVD_420,
247 TIMING_SOURCE_EDID_DETAILED,
248 TIMING_SOURCE_EDID_ESTABLISHED,
249 TIMING_SOURCE_EDID_STANDARD,
250 TIMING_SOURCE_EDID_CEA_SVD,
251 TIMING_SOURCE_EDID_CVT_3BYTE,
252 TIMING_SOURCE_EDID_4BYTE,
253 TIMING_SOURCE_VBIOS,
254 TIMING_SOURCE_CV,
255 TIMING_SOURCE_TV,
256 TIMING_SOURCE_HDMI_VIC,
257
258 /* implicitly specified by display device, still safe but less important*/
259 TIMING_SOURCE_DEFAULT,
260
261 /* only used for custom base modes */
262 TIMING_SOURCE_CUSTOM_BASE,
263
 264 /* these timings might not work, least important*/
265 TIMING_SOURCE_RANGELIMIT,
266 TIMING_SOURCE_OS_FORCED,
267 TIMING_SOURCE_IMPLICIT,
268
269 /* only used by default mode list*/
270 TIMING_SOURCE_BASICMODE,
271
272 TIMING_SOURCE_COUNT
273};
274
275enum dc_timing_support_method {
276 TIMING_SUPPORT_METHOD_UNDEFINED,
277 TIMING_SUPPORT_METHOD_EXPLICIT,
278 TIMING_SUPPORT_METHOD_IMPLICIT,
279 TIMING_SUPPORT_METHOD_NATIVE
280};
281
282struct dc_mode_info {
283 uint32_t pixel_width;
284 uint32_t pixel_height;
285 uint32_t field_rate;
286 /* Vertical refresh rate for progressive modes.
287 * Field rate for interlaced modes.*/
288
289 enum dc_timing_standard timing_standard;
290 enum dc_timing_source timing_source;
291 struct dc_mode_flags flags;
292};
293
294enum dc_power_state {
295 DC_POWER_STATE_ON = 1,
296 DC_POWER_STATE_STANDBY,
297 DC_POWER_STATE_SUSPEND,
298 DC_POWER_STATE_OFF
299};
300
301/* DC PowerStates */
302enum dc_video_power_state {
303 DC_VIDEO_POWER_UNSPECIFIED = 0,
304 DC_VIDEO_POWER_ON = 1,
305 DC_VIDEO_POWER_STANDBY,
306 DC_VIDEO_POWER_SUSPEND,
307 DC_VIDEO_POWER_OFF,
308 DC_VIDEO_POWER_HIBERNATE,
309 DC_VIDEO_POWER_SHUTDOWN,
310 DC_VIDEO_POWER_ULPS, /* BACO or Ultra-Light-Power-State */
311 DC_VIDEO_POWER_AFTER_RESET,
312 DC_VIDEO_POWER_MAXIMUM
313};
314
315enum dc_acpi_cm_power_state {
316 DC_ACPI_CM_POWER_STATE_D0 = 1,
317 DC_ACPI_CM_POWER_STATE_D1 = 2,
318 DC_ACPI_CM_POWER_STATE_D2 = 4,
319 DC_ACPI_CM_POWER_STATE_D3 = 8
320};
321
322enum dc_connection_type {
323 dc_connection_none,
324 dc_connection_single,
325 dc_connection_mst_branch,
326 dc_connection_active_dongle
327};
328
329struct dc_csc_adjustments {
330 struct fixed31_32 contrast;
331 struct fixed31_32 saturation;
332 struct fixed31_32 brightness;
333 struct fixed31_32 hue;
334};
335
336enum {
337 MAX_LANES = 2,
338 MAX_COFUNC_PATH = 6,
339 LAYER_INDEX_PRIMARY = -1,
340};
341
342/* Scaling format */
343enum scaling_transformation {
344 SCALING_TRANSFORMATION_UNINITIALIZED,
345 SCALING_TRANSFORMATION_IDENTITY = 0x0001,
346 SCALING_TRANSFORMATION_CENTER_TIMING = 0x0002,
347 SCALING_TRANSFORMATION_FULL_SCREEN_SCALE = 0x0004,
348 SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE = 0x0008,
349 SCALING_TRANSFORMATION_DAL_DECIDE = 0x0010,
350 SCALING_TRANSFORMATION_INVALID = 0x80000000,
351
352 /* Flag the first and last */
353 SCALING_TRANSFORMATION_BEGING = SCALING_TRANSFORMATION_IDENTITY,
354 SCALING_TRANSFORMATION_END =
355 SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE
356};
357
358/* audio*/
359
360union audio_sample_rates {
361 struct sample_rates {
362 uint8_t RATE_32:1;
363 uint8_t RATE_44_1:1;
364 uint8_t RATE_48:1;
365 uint8_t RATE_88_2:1;
366 uint8_t RATE_96:1;
367 uint8_t RATE_176_4:1;
368 uint8_t RATE_192:1;
369 } rate;
370
371 uint8_t all;
372};
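
A small sketch showing how the .rate bitfields and the .all byte alias the same storage; this is the pattern check_audio_bandwidth() in dce_audio.c (later in this patch) uses to clear rates that do not fit the timing. The helper name is only illustrative.

static inline void example_limit_to_48khz(union audio_sample_rates *rates)
{
	rates->rate.RATE_192   = 0;
	rates->rate.RATE_176_4 = 0;
	rates->rate.RATE_96    = 0;
	rates->rate.RATE_88_2  = 0;	/* the 32/44.1/48 kHz bits in .all
					 * are left untouched */
}
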
373
374struct audio_speaker_flags {
375 uint32_t FL_FR:1;
376 uint32_t LFE:1;
377 uint32_t FC:1;
378 uint32_t RL_RR:1;
379 uint32_t RC:1;
380 uint32_t FLC_FRC:1;
381 uint32_t RLC_RRC:1;
382 uint32_t SUPPORT_AI:1;
383};
384
385struct audio_speaker_info {
386 uint32_t ALLSPEAKERS:7;
387 uint32_t SUPPORT_AI:1;
388};
389
390
391struct audio_info_flags {
392
393 union {
394
395 struct audio_speaker_flags speaker_flags;
396 struct audio_speaker_info info;
397
398 uint8_t all;
399 };
400};
401
402enum audio_format_code {
403 AUDIO_FORMAT_CODE_FIRST = 1,
404 AUDIO_FORMAT_CODE_LINEARPCM = AUDIO_FORMAT_CODE_FIRST,
405
406 AUDIO_FORMAT_CODE_AC3,
407 /*Layers 1 & 2 */
408 AUDIO_FORMAT_CODE_MPEG1,
409 /*MPEG1 Layer 3 */
410 AUDIO_FORMAT_CODE_MP3,
411 /*multichannel */
412 AUDIO_FORMAT_CODE_MPEG2,
413 AUDIO_FORMAT_CODE_AAC,
414 AUDIO_FORMAT_CODE_DTS,
415 AUDIO_FORMAT_CODE_ATRAC,
416 AUDIO_FORMAT_CODE_1BITAUDIO,
417 AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS,
418 AUDIO_FORMAT_CODE_DTS_HD,
419 AUDIO_FORMAT_CODE_MAT_MLP,
420 AUDIO_FORMAT_CODE_DST,
421 AUDIO_FORMAT_CODE_WMAPRO,
422 AUDIO_FORMAT_CODE_LAST,
423 AUDIO_FORMAT_CODE_COUNT =
424 AUDIO_FORMAT_CODE_LAST - AUDIO_FORMAT_CODE_FIRST
425};
426
427struct audio_mode {
428 /* ucData[0] [6:3] */
429 enum audio_format_code format_code;
430 /* ucData[0] [2:0] */
431 uint8_t channel_count;
432 /* ucData[1] */
433 union audio_sample_rates sample_rates;
434 union {
435 /* for LPCM */
436 uint8_t sample_size;
437 /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz) */
438 uint8_t max_bit_rate;
439 /* for Audio Formats 9-15 */
440 uint8_t vendor_specific;
441 };
442};
443
444struct audio_info {
445 struct audio_info_flags flags;
446 uint32_t video_latency;
447 uint32_t audio_latency;
448 uint32_t display_index;
449 uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
450 uint32_t manufacture_id;
451 uint32_t product_id;
452 /* PortID used for ContainerID when defined */
453 uint32_t port_id[2];
454 uint32_t mode_count;
455 /* this field must be last in this struct */
456 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
457};
458
459struct freesync_context {
460 bool supported;
461 bool enabled;
462 bool active;
463
464 unsigned int min_refresh_in_micro_hz;
465 unsigned int nominal_refresh_in_micro_hz;
466};
467
468struct colorspace_transform {
469 struct fixed31_32 matrix[12];
470 bool enable_remap;
471};
472
473struct csc_transform {
474 uint16_t matrix[12];
475 bool enable_adjustment;
476};
477
478struct psr_caps {
479 /* These parameters are from PSR capabilities reported by Sink DPCD */
480 unsigned char psr_version;
481 unsigned int psr_rfb_setup_time;
482 bool psr_exit_link_training_required;
483
484 /* These parameters are calculated in Driver,
485 * based on display timing and Sink capabilities.
486 * If VBLANK region is too small and Sink takes a long time
487 * to set up RFB, it may take an extra frame to enter PSR state.
488 */
489 bool psr_frame_capture_indication_req;
490 unsigned int psr_sdp_transmit_line_num_deadline;
491};
492
493#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
new file mode 100644
index 000000000000..bfca38170329
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -0,0 +1,14 @@
1#
2# Makefile for common 'dce' logic
 3# HW object files under this folder follow a similar pattern for HW programming
 4# - register offset and/or shift + mask stored in the dce_hw struct
5# - register programming through common macros that look up register
6# offset/shift/mask stored in dce_hw struct
7
8DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
9dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o
10
11
12AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
13
14AMD_DISPLAY_FILES += $(AMD_DAL_DCE)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
new file mode 100644
index 000000000000..dc44053e8575
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -0,0 +1,920 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "reg_helper.h"
27#include "dce_audio.h"
28#include "dce/dce_11_0_d.h"
29#include "dce/dce_11_0_sh_mask.h"
30
31#define DCE_AUD(audio)\
32 container_of(audio, struct dce_audio, base)
33
34#define CTX \
35 aud->base.ctx
36#define REG(reg)\
37 (aud->regs->reg)
38
39#undef FN
40#define FN(reg_name, field_name) \
41 aud->shifts->field_name, aud->masks->field_name
42
43#define IX_REG(reg)\
44 ix ## reg
45
46#define AZ_REG_READ(reg_name) \
47 read_indirect_azalia_reg(audio, IX_REG(reg_name))
48
49#define AZ_REG_WRITE(reg_name, value) \
50 write_indirect_azalia_reg(audio, IX_REG(reg_name), value)
51
52static void write_indirect_azalia_reg(struct audio *audio,
53 uint32_t reg_index,
54 uint32_t reg_data)
55{
56 struct dce_audio *aud = DCE_AUD(audio);
57
58 /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
59 REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
60 AZALIA_ENDPOINT_REG_INDEX, reg_index);
61
62 /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
63 REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
64 AZALIA_ENDPOINT_REG_DATA, reg_data);
65
66 dm_logger_write(CTX->logger, LOG_HW_AUDIO,
67 "AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
68 reg_index, reg_data);
69}
70
71static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
72{
73 struct dce_audio *aud = DCE_AUD(audio);
74
75 uint32_t value = 0;
76
77 /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
78 REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
79 AZALIA_ENDPOINT_REG_INDEX, reg_index);
80
81 /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
82 value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
83
84 dm_logger_write(CTX->logger, LOG_HW_AUDIO,
85 "AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
86 reg_index, value);
87
88 return value;
89}
90
91static bool is_audio_format_supported(
92 const struct audio_info *audio_info,
93 enum audio_format_code audio_format_code,
94 uint32_t *format_index)
95{
96 uint32_t index;
97 uint32_t max_channe_index = 0;
98 bool found = false;
99
100 if (audio_info == NULL)
101 return found;
102
103 /* pass through whole array */
104 for (index = 0; index < audio_info->mode_count; index++) {
105 if (audio_info->modes[index].format_code == audio_format_code) {
106 if (found) {
 107 /* format has multiple entries, choose the one with
 108 * the highest number of channels */
109 if (audio_info->modes[index].channel_count >
110 audio_info->modes[max_channe_index].channel_count) {
111 max_channe_index = index;
112 }
113 } else {
 114 /* format found, save its index */
115 found = true;
116 max_channe_index = index;
117 }
118 }
119 }
120
121 /* return index */
122 if (found && format_index != NULL)
123 *format_index = max_channe_index;
124
125 return found;
126}
127
128/*For HDMI, calculate if specified sample rates can fit into a given timing */
129static void check_audio_bandwidth_hdmi(
130 const struct audio_crtc_info *crtc_info,
131 uint32_t channel_count,
132 union audio_sample_rates *sample_rates)
133{
134 uint32_t samples;
135 uint32_t h_blank;
136 bool limit_freq_to_48_khz = false;
137 bool limit_freq_to_88_2_khz = false;
138 bool limit_freq_to_96_khz = false;
139 bool limit_freq_to_174_4_khz = false;
140
 141 /* For two-channel support, return whatever the sink supports, unmodified*/
142 if (channel_count > 2) {
143
144 /* Based on HDMI spec 1.3 Table 7.5 */
145 if ((crtc_info->requested_pixel_clock <= 27000) &&
146 (crtc_info->v_active <= 576) &&
147 !(crtc_info->interlaced) &&
148 !(crtc_info->pixel_repetition == 2 ||
149 crtc_info->pixel_repetition == 4)) {
150 limit_freq_to_48_khz = true;
151
152 } else if ((crtc_info->requested_pixel_clock <= 27000) &&
153 (crtc_info->v_active <= 576) &&
154 (crtc_info->interlaced) &&
155 (crtc_info->pixel_repetition == 2)) {
156 limit_freq_to_88_2_khz = true;
157
158 } else if ((crtc_info->requested_pixel_clock <= 54000) &&
159 (crtc_info->v_active <= 576) &&
160 !(crtc_info->interlaced)) {
161 limit_freq_to_174_4_khz = true;
162 }
163 }
164
165 /* Also do some calculation for the available Audio Bandwidth for the
166 * 8 ch (i.e. for the Layout 1 => ch > 2)
167 */
168 h_blank = crtc_info->h_total - crtc_info->h_active;
169
170 if (crtc_info->pixel_repetition)
171 h_blank *= crtc_info->pixel_repetition;
172
173 /*based on HDMI spec 1.3 Table 7.5 */
174 h_blank -= 58;
175 /*for Control Period */
176 h_blank -= 16;
177
178 samples = h_blank * 10;
179 /* Number of Audio Packets (multiplied by 10) per Line (for 8 ch number
180 * of Audio samples per line multiplied by 10 - Layout 1)
181 */
182 samples /= 32;
183 samples *= crtc_info->v_active;
184 /*Number of samples multiplied by 10, per second */
185 samples *= crtc_info->refresh_rate;
186 /*Number of Audio samples per second */
187 samples /= 10;
188
189 /* @todo do it after deep color is implemented
190 * 8xx - deep color bandwidth scaling
 191 * Extra bandwidth is available in deep color because the link runs faster than
 192 * the pixel rate. This has the effect of allowing more TMDS characters to
 193 * be transmitted during blanking
194 */
195
196 switch (crtc_info->color_depth) {
197 case COLOR_DEPTH_888:
198 samples *= 4;
199 break;
200 case COLOR_DEPTH_101010:
201 samples *= 5;
202 break;
203 case COLOR_DEPTH_121212:
204 samples *= 6;
205 break;
206 default:
207 samples *= 4;
208 break;
209 }
210
211 samples /= 4;
212
213 /*check limitation*/
214 if (samples < 88200)
215 limit_freq_to_48_khz = true;
216 else if (samples < 96000)
217 limit_freq_to_88_2_khz = true;
218 else if (samples < 176400)
219 limit_freq_to_96_khz = true;
220 else if (samples < 192000)
221 limit_freq_to_174_4_khz = true;
222
223 if (sample_rates != NULL) {
224 /* limit frequencies */
225 if (limit_freq_to_174_4_khz)
226 sample_rates->rate.RATE_192 = 0;
227
228 if (limit_freq_to_96_khz) {
229 sample_rates->rate.RATE_192 = 0;
230 sample_rates->rate.RATE_176_4 = 0;
231 }
232 if (limit_freq_to_88_2_khz) {
233 sample_rates->rate.RATE_192 = 0;
234 sample_rates->rate.RATE_176_4 = 0;
235 sample_rates->rate.RATE_96 = 0;
236 }
237 if (limit_freq_to_48_khz) {
238 sample_rates->rate.RATE_192 = 0;
239 sample_rates->rate.RATE_176_4 = 0;
240 sample_rates->rate.RATE_96 = 0;
241 sample_rates->rate.RATE_88_2 = 0;
242 }
243 }
244}
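
Worked numbers for the calculation above (illustration; assumes refresh_rate is in Hz): for 1080p60 at 8 bpc, h_blank = 2200 - 1920 = 280, which becomes 206 after subtracting 58 and 16; samples = 206 * 10 / 32 = 64, then * 1080 * 60 / 10 = 414,720; the COLOR_DEPTH_888 scaling (* 4 followed by / 4) leaves this unchanged, so at roughly 414,720 audio samples per second none of the frequency limits are triggered for multi-channel audio.
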
245
246/*For DP SST, calculate if specified sample rates can fit into a given timing */
247static void check_audio_bandwidth_dpsst(
248 const struct audio_crtc_info *crtc_info,
249 uint32_t channel_count,
250 union audio_sample_rates *sample_rates)
251{
252 /* do nothing */
253}
254
255/*For DP MST, calculate if specified sample rates can fit into a given timing */
256static void check_audio_bandwidth_dpmst(
257 const struct audio_crtc_info *crtc_info,
258 uint32_t channel_count,
259 union audio_sample_rates *sample_rates)
260{
261 /* do nothing */
262}
263
264static void check_audio_bandwidth(
265 const struct audio_crtc_info *crtc_info,
266 uint32_t channel_count,
267 enum signal_type signal,
268 union audio_sample_rates *sample_rates)
269{
270 switch (signal) {
271 case SIGNAL_TYPE_HDMI_TYPE_A:
272 check_audio_bandwidth_hdmi(
273 crtc_info, channel_count, sample_rates);
274 break;
275 case SIGNAL_TYPE_EDP:
276 case SIGNAL_TYPE_DISPLAY_PORT:
277 check_audio_bandwidth_dpsst(
278 crtc_info, channel_count, sample_rates);
279 break;
280 case SIGNAL_TYPE_DISPLAY_PORT_MST:
281 check_audio_bandwidth_dpmst(
282 crtc_info, channel_count, sample_rates);
283 break;
284 default:
285 break;
286 }
287}
288
289/* expose/not expose HBR capability to Audio driver */
290static void set_high_bit_rate_capable(
291 struct audio *audio,
292 bool capable)
293{
294 uint32_t value = 0;
295
296 /* set high bit rate audio capable*/
297 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR);
298
299 set_reg_field_value(value, capable,
300 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
301 HBR_CAPABLE);
302
303 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
304}
305
 306/* set video latency in ms/2+1 */
307static void set_video_latency(
308 struct audio *audio,
309 int latency_in_ms)
310{
311 uint32_t value = 0;
312
313 if ((latency_in_ms < 0) || (latency_in_ms > 255))
314 return;
315
316 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
317
318 set_reg_field_value(value, latency_in_ms,
319 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
320 VIDEO_LIPSYNC);
321
322 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
323 value);
324}
325
 326/* set audio latency in ms/2+1 */
327static void set_audio_latency(
328 struct audio *audio,
329 int latency_in_ms)
330{
331 uint32_t value = 0;
332
333 if (latency_in_ms < 0)
334 latency_in_ms = 0;
335
336 if (latency_in_ms > 255)
337 latency_in_ms = 255;
338
339 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
340
341 set_reg_field_value(value, latency_in_ms,
342 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
343 AUDIO_LIPSYNC);
344
345 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
346 value);
347}
348
349void dce_aud_az_enable(struct audio *audio)
350{
351 uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
352
353 if (get_reg_field_value(value,
354 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
355 AUDIO_ENABLED) != 1)
356 set_reg_field_value(value, 1,
357 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
358 AUDIO_ENABLED);
359
360 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
361}
362
363void dce_aud_az_disable(struct audio *audio)
364{
365 uint32_t value;
366
367 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
368
369 set_reg_field_value(value, 0,
370 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
371 AUDIO_ENABLED);
372
373 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
374}
375
376void dce_aud_az_configure(
377 struct audio *audio,
378 enum signal_type signal,
379 const struct audio_crtc_info *crtc_info,
380 const struct audio_info *audio_info)
381{
382 struct dce_audio *aud = DCE_AUD(audio);
383
384 uint32_t speakers = audio_info->flags.info.ALLSPEAKERS;
385 uint32_t value;
386 uint32_t field = 0;
387 enum audio_format_code audio_format_code;
388 uint32_t format_index;
389 uint32_t index;
390 bool is_ac3_supported = false;
391 union audio_sample_rates sample_rate;
392 uint32_t strlen = 0;
393
394 /* Speaker Allocation */
395 /*
396 uint32_t value;
397 uint32_t field = 0;*/
398 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
399
400 set_reg_field_value(value,
401 speakers,
402 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
403 SPEAKER_ALLOCATION);
404
405 /* LFE_PLAYBACK_LEVEL = LFEPBL
406 * LFEPBL = 0 : Unknown or refer to other information
407 * LFEPBL = 1 : 0dB playback
408 * LFEPBL = 2 : +10dB playback
 409 * LFEPBL = 3 : Reserved
410 */
411 set_reg_field_value(value,
412 0,
413 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
414 LFE_PLAYBACK_LEVEL);
415 /* todo: according to reg spec LFE_PLAYBACK_LEVEL is read only.
416 * why are we writing to it? DCE8 does not write this */
417
418
419 set_reg_field_value(value,
420 0,
421 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
422 HDMI_CONNECTION);
423
424 set_reg_field_value(value,
425 0,
426 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
427 DP_CONNECTION);
428
429 field = get_reg_field_value(value,
430 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
431 EXTRA_CONNECTION_INFO);
432
433 field &= ~0x1;
434
435 set_reg_field_value(value,
436 field,
437 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
438 EXTRA_CONNECTION_INFO);
439
440 /* set audio for output signal */
441 switch (signal) {
442 case SIGNAL_TYPE_HDMI_TYPE_A:
443 set_reg_field_value(value,
444 1,
445 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
446 HDMI_CONNECTION);
447
448 break;
449
450 case SIGNAL_TYPE_EDP:
451 case SIGNAL_TYPE_DISPLAY_PORT:
452 case SIGNAL_TYPE_DISPLAY_PORT_MST:
453 set_reg_field_value(value,
454 1,
455 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
456 DP_CONNECTION);
457 break;
458 default:
459 BREAK_TO_DEBUGGER();
460 break;
461 }
462
463 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value);
464
465 /* Audio Descriptors */
466 /* pass through all formats */
467 for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT;
468 format_index++) {
469 audio_format_code =
470 (AUDIO_FORMAT_CODE_FIRST + format_index);
471
472 /* those are unsupported, skip programming */
473 if (audio_format_code == AUDIO_FORMAT_CODE_1BITAUDIO ||
474 audio_format_code == AUDIO_FORMAT_CODE_DST)
475 continue;
476
477 value = 0;
478
479 /* check if supported */
480 if (is_audio_format_supported(
481 audio_info, audio_format_code, &index)) {
482 const struct audio_mode *audio_mode =
483 &audio_info->modes[index];
484 union audio_sample_rates sample_rates =
485 audio_mode->sample_rates;
486 uint8_t byte2 = audio_mode->max_bit_rate;
487
488 /* adjust specific properties */
489 switch (audio_format_code) {
490 case AUDIO_FORMAT_CODE_LINEARPCM: {
491 check_audio_bandwidth(
492 crtc_info,
493 audio_mode->channel_count,
494 signal,
495 &sample_rates);
496
497 byte2 = audio_mode->sample_size;
498
499 set_reg_field_value(value,
500 sample_rates.all,
501 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
502 SUPPORTED_FREQUENCIES_STEREO);
503 }
504 break;
505 case AUDIO_FORMAT_CODE_AC3:
506 is_ac3_supported = true;
507 break;
508 case AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS:
509 case AUDIO_FORMAT_CODE_DTS_HD:
510 case AUDIO_FORMAT_CODE_MAT_MLP:
511 case AUDIO_FORMAT_CODE_DST:
512 case AUDIO_FORMAT_CODE_WMAPRO:
513 byte2 = audio_mode->vendor_specific;
514 break;
515 default:
516 break;
517 }
518
519 /* fill audio format data */
520 set_reg_field_value(value,
521 audio_mode->channel_count - 1,
522 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
523 MAX_CHANNELS);
524
525 set_reg_field_value(value,
526 sample_rates.all,
527 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
528 SUPPORTED_FREQUENCIES);
529
530 set_reg_field_value(value,
531 byte2,
532 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
533 DESCRIPTOR_BYTE_2);
534 } /* if */
535
536 AZ_REG_WRITE(
537 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 + format_index,
538 value);
539 } /* for */
540
541 if (is_ac3_supported)
 542 /* todo: this register is global. Why program a global register here? */
543 REG_WRITE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS,
544 0x05);
545
546 /* check for 192khz/8-Ch support for HBR requirements */
547 sample_rate.all = 0;
548 sample_rate.rate.RATE_192 = 1;
549
550 check_audio_bandwidth(
551 crtc_info,
552 8,
553 signal,
554 &sample_rate);
555
556 set_high_bit_rate_capable(audio, sample_rate.rate.RATE_192);
557
558 /* Audio and Video Lipsync */
559 set_video_latency(audio, audio_info->video_latency);
560 set_audio_latency(audio, audio_info->audio_latency);
561
562 value = 0;
563 set_reg_field_value(value, audio_info->manufacture_id,
564 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
565 MANUFACTURER_ID);
566
567 set_reg_field_value(value, audio_info->product_id,
568 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
569 PRODUCT_ID);
570
571 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
572 value);
573
574 value = 0;
575
576 /*get display name string length */
577 while (audio_info->display_name[strlen++] != '\0') {
578 if (strlen >=
579 MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS)
580 break;
581 }
582 set_reg_field_value(value, strlen,
583 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
584 SINK_DESCRIPTION_LEN);
585
586 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
587 value);
588
589 /*
590 *write the port ID:
591 *PORT_ID0 = display index
592 *PORT_ID1 = 16bit BDF
593 *(format MSB->LSB: 8bit Bus, 5bit Device, 3bit Function)
594 */
595
596 value = 0;
597
598 set_reg_field_value(value, audio_info->port_id[0],
599 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2,
600 PORT_ID0);
601
602 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, value);
603
604 value = 0;
605 set_reg_field_value(value, audio_info->port_id[1],
606 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3,
607 PORT_ID1);
608
609 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, value);
610
611 /*write the 18 char monitor string */
612
613 value = 0;
614 set_reg_field_value(value, audio_info->display_name[0],
615 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
616 DESCRIPTION0);
617
618 set_reg_field_value(value, audio_info->display_name[1],
619 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
620 DESCRIPTION1);
621
622 set_reg_field_value(value, audio_info->display_name[2],
623 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
624 DESCRIPTION2);
625
626 set_reg_field_value(value, audio_info->display_name[3],
627 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
628 DESCRIPTION3);
629
630 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, value);
631
632 value = 0;
633 set_reg_field_value(value, audio_info->display_name[4],
634 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
635 DESCRIPTION4);
636
637 set_reg_field_value(value, audio_info->display_name[5],
638 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
639 DESCRIPTION5);
640
641 set_reg_field_value(value, audio_info->display_name[6],
642 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
643 DESCRIPTION6);
644
645 set_reg_field_value(value, audio_info->display_name[7],
646 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
647 DESCRIPTION7);
648
649 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, value);
650
651 value = 0;
652 set_reg_field_value(value, audio_info->display_name[8],
653 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
654 DESCRIPTION8);
655
656 set_reg_field_value(value, audio_info->display_name[9],
657 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
658 DESCRIPTION9);
659
660 set_reg_field_value(value, audio_info->display_name[10],
661 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
662 DESCRIPTION10);
663
664 set_reg_field_value(value, audio_info->display_name[11],
665 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
666 DESCRIPTION11);
667
668 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, value);
669
670 value = 0;
671 set_reg_field_value(value, audio_info->display_name[12],
672 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
673 DESCRIPTION12);
674
675 set_reg_field_value(value, audio_info->display_name[13],
676 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
677 DESCRIPTION13);
678
679 set_reg_field_value(value, audio_info->display_name[14],
680 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
681 DESCRIPTION14);
682
683 set_reg_field_value(value, audio_info->display_name[15],
684 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
685 DESCRIPTION15);
686
687 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, value);
688
689 value = 0;
690 set_reg_field_value(value, audio_info->display_name[16],
691 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
692 DESCRIPTION16);
693
694 set_reg_field_value(value, audio_info->display_name[17],
695 AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
696 DESCRIPTION17);
697
698 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
699}
700
701/*
 702* todo: wall clock related functionality probably belongs to clock_src.
703*/
704
705/* search pixel clock value for Azalia HDMI Audio */
706static bool get_azalia_clock_info_hdmi(
707 uint32_t crtc_pixel_clock_in_khz,
708 uint32_t actual_pixel_clock_in_khz,
709 struct azalia_clock_info *azalia_clock_info)
710{
711 if (azalia_clock_info == NULL)
712 return false;
713
714 /* audio_dto_phase= 24 * 10,000;
715 * 24MHz in [100Hz] units */
716 azalia_clock_info->audio_dto_phase =
717 24 * 10000;
718
719 /* audio_dto_module = PCLKFrequency * 10,000;
720 * [khz] -> [100Hz] */
721 azalia_clock_info->audio_dto_module =
722 actual_pixel_clock_in_khz * 10;
723
724 return true;
725}
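
Worked numbers for the helper above (illustration): with an actual pixel clock of 148,500 kHz (1080p60), audio_dto_module = 1,485,000 and audio_dto_phase = 240,000, both in 100 Hz units; the DTO then effectively scales the pixel clock by phase/module, i.e. 148.5 MHz * 240,000 / 1,485,000 = 24 MHz for the Azalia wall clock.
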
726
727static bool get_azalia_clock_info_dp(
728 uint32_t requested_pixel_clock_in_khz,
729 const struct audio_pll_info *pll_info,
730 struct azalia_clock_info *azalia_clock_info)
731{
732 if (pll_info == NULL || azalia_clock_info == NULL)
733 return false;
734
735 /* Reported dpDtoSourceClockInkhz value for
736 * DCE8 already adjusted for SS, do not need any
737 * adjustment here anymore
738 */
739
740 /*audio_dto_phase = 24 * 10,000;
741 * 24MHz in [100Hz] units */
742 azalia_clock_info->audio_dto_phase = 24 * 10000;
743
744 /*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
745 * [khz] ->[100Hz] */
746 azalia_clock_info->audio_dto_module =
747 pll_info->dp_dto_source_clock_in_khz * 10;
748
749 return true;
750}
751
752void dce_aud_wall_dto_setup(
753 struct audio *audio,
754 enum signal_type signal,
755 const struct audio_crtc_info *crtc_info,
756 const struct audio_pll_info *pll_info)
757{
758 struct dce_audio *aud = DCE_AUD(audio);
759
760 struct azalia_clock_info clock_info = { 0 };
761
762 if (dc_is_hdmi_signal(signal)) {
763 uint32_t src_sel;
764
765 /*DTO0 Programming goal:
766 -generate 24MHz, 128*Fs from 24MHz
767 -use DTO0 when an active HDMI port is connected
768 (optionally a DP is connected) */
769
770 /* calculate DTO settings */
771 get_azalia_clock_info_hdmi(
772 crtc_info->requested_pixel_clock,
773 crtc_info->calculated_pixel_clock,
774 &clock_info);
775
776 /* On TN/SI, Program DTO source select and DTO select before
777 programming DTO modulo and DTO phase. These bits must be
778 programmed first, otherwise there will be no HDMI audio at boot
779 up. This is a HW sequence change (different from old ASICs).
780 Caution when changing this programming sequence.
781
782 HDMI enabled, using DTO0
783 program master CRTC for DTO0 */
784 src_sel = pll_info->dto_source - DTO_SOURCE_ID0;
785 REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE,
786 DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel,
787 DCCG_AUDIO_DTO_SEL, 0);
788
789 /* module */
790 REG_UPDATE(DCCG_AUDIO_DTO0_MODULE,
791 DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module);
792
793 /* phase */
794 REG_UPDATE(DCCG_AUDIO_DTO0_PHASE,
795 DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase);
796 } else {
797 /*DTO1 Programming goal:
798 -generate 24MHz, 512*Fs, 128*Fs from 24MHz
799 -default is to use DTO1, and switch to DTO0 when an audio
800 master HDMI port is connected
801 -use as default for DP
802
803 calculate DTO settings */
804 get_azalia_clock_info_dp(
805 crtc_info->requested_pixel_clock,
806 pll_info,
807 &clock_info);
808
809 /* Program DTO select before programming DTO modulo and DTO
810 phase. default to use DTO1 */
811
812 REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
813 DCCG_AUDIO_DTO_SEL, 1);
814
815 REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
816 DCCG_AUDIO_DTO_SEL, 1);
817 /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1)
818 * Select 512*Fs for DP. TODO: the public register definition
819 * does not match the register header file;
820 * in the DCE11 version it is commented out while for DCE8 it is set to 1
821 */
822
823 /* module */
824 REG_UPDATE(DCCG_AUDIO_DTO1_MODULE,
825 DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module);
826
827 /* phase */
828 REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
829 DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
830
831 /* DAL2 code separates DCCG_AUDIO_DTO_SEL and
832 DCCG_AUDIO_DTO2_USE_512FBR_DTO programming into two different
833 locations; merging them here should not hurt */
834 /*value.bits.DCCG_AUDIO_DTO2_USE_512FBR_DTO = 1;
835 dal_write_reg(mmDCCG_AUDIO_DTO_SOURCE, value);*/
836 }
837}
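
A side note on the numbers programmed above: assuming the DCCG audio DTO behaves as a fractional rate generator (output = source clock * phase / module), the phase/module pair computed by get_azalia_clock_info_hdmi() recovers a fixed 24 MHz wall clock from any pixel clock. A small hosted-C sketch of that assumption:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t pclk_khz[] = { 25175, 74250, 148500, 594000 };
	unsigned int i;

	for (i = 0; i < sizeof(pclk_khz) / sizeof(pclk_khz[0]); ++i) {
		uint64_t phase  = 24 * 10000;			/* 24 MHz in 100 Hz units */
		uint64_t module = (uint64_t)pclk_khz[i] * 10;	/* kHz -> 100 Hz */
		uint64_t out_khz = (uint64_t)pclk_khz[i] * phase / module;

		/* always 24000 kHz, independent of the pixel clock */
		printf("pclk=%u kHz -> wall clock=%llu kHz\n",
		       (unsigned)pclk_khz[i], (unsigned long long)out_khz);
	}
	return 0;
}
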
838
839bool dce_aud_endpoint_valid(
840 struct audio *audio)
841{
842 uint32_t value;
843 uint32_t port_connectivity;
844
845 value = AZ_REG_READ(
846 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
847
848 port_connectivity = get_reg_field_value(value,
849 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
850 PORT_CONNECTIVITY);
851
852 return !(port_connectivity == 1);
853}
854
855/* initialize HW state */
856void dce_aud_hw_init(
857 struct audio *audio)
858{
859 struct dce_audio *aud = DCE_AUD(audio);
860
861 /* we only need to program the following registers once, so we only do
862 it for instance 0 */
863 if (audio->inst != 0)
864 return;
865
866 /* Support R5 - 32 kHz
867 * Support R6 - 44.1 kHz
868 * Support R7 - 48 kHz
869 */
870 REG_UPDATE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES,
871 AUDIO_RATE_CAPABILITIES, 0x70);
872
873 /*Keep alive bit to verify HW block in BU. */
874 REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
875 CLKSTOP, 1,
876 EPSS, 1);
877}
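
For reference, a sketch of how the 0x70 written above decomposes, assuming the standard HD Audio supported-rates bit positions (R5/R6/R7 -> bits 4/5/6); illustration only, not driver code.

#include <stdint.h>
#include <stdio.h>

#define RATE_32KHZ   (1u << 4)	/* R5 */
#define RATE_44_1KHZ (1u << 5)	/* R6 */
#define RATE_48KHZ   (1u << 6)	/* R7 */

int main(void)
{
	uint32_t caps = RATE_32KHZ | RATE_44_1KHZ | RATE_48KHZ;

	printf("AUDIO_RATE_CAPABILITIES = 0x%02x\n", (unsigned)caps);	/* 0x70 */
	return 0;
}
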
878
879static const struct audio_funcs funcs = {
880 .endpoint_valid = dce_aud_endpoint_valid,
881 .hw_init = dce_aud_hw_init,
882 .wall_dto_setup = dce_aud_wall_dto_setup,
883 .az_enable = dce_aud_az_enable,
884 .az_disable = dce_aud_az_disable,
885 .az_configure = dce_aud_az_configure,
886 .destroy = dce_aud_destroy,
887};
888
889void dce_aud_destroy(struct audio **audio)
890{
891 dm_free(*audio);
892 *audio = NULL;
893}
894
895struct audio *dce_audio_create(
896 struct dc_context *ctx,
897 unsigned int inst,
898 const struct dce_audio_registers *reg,
899 const struct dce_audio_shift *shifts,
900 const struct dce_aduio_mask *masks
901 )
902{
903 struct dce_audio *audio = dm_alloc(sizeof(*audio));
904
905 if (audio == NULL) {
906 ASSERT_CRITICAL(audio);
907 return NULL;
908 }
909
910 audio->base.ctx = ctx;
911 audio->base.inst = inst;
912 audio->base.funcs = &funcs;
913
914 audio->regs = reg;
915 audio->shifts = shifts;
916 audio->masks = masks;
917
918 return &audio->base;
919}
920
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
new file mode 100644
index 000000000000..bf97cd8c8221
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DAL_AUDIO_DCE_110_H__
26#define __DAL_AUDIO_DCE_110_H__
27
28#include "audio.h"
29
30#define AUD_COMMON_REG_LIST(id)\
31 SRI(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id),\
32 SRI(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id),\
33 SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS),\
34 SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES),\
35 SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES),\
36 SR(DCCG_AUDIO_DTO_SOURCE),\
37 SR(DCCG_AUDIO_DTO0_MODULE),\
38 SR(DCCG_AUDIO_DTO0_PHASE),\
39 SR(DCCG_AUDIO_DTO1_MODULE),\
40 SR(DCCG_AUDIO_DTO1_PHASE)
41
42
43 /* set field name */
44#define SF(reg_name, field_name, post_fix)\
45 .field_name = reg_name ## __ ## field_name ## post_fix
46
47
48#define AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)\
49 SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
50 SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
51 SF(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, mask_sh),\
52 SF(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, mask_sh),\
53 SF(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, mask_sh),\
54 SF(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, mask_sh),\
55 SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, mask_sh),\
56 SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, mask_sh),\
57 SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, EPSS, mask_sh)
58
59#define AUD_COMMON_MASK_SH_LIST(mask_sh)\
60 AUD_COMMON_MASK_SH_LIST_BASE(mask_sh),\
61 SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
62 SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh)
63
64
65struct dce_audio_registers {
66 uint32_t AZALIA_F0_CODEC_ENDPOINT_INDEX;
67 uint32_t AZALIA_F0_CODEC_ENDPOINT_DATA;
68
69 uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS;
70 uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES;
71 uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES;
72
73 uint32_t DCCG_AUDIO_DTO_SOURCE;
74 uint32_t DCCG_AUDIO_DTO0_MODULE;
75 uint32_t DCCG_AUDIO_DTO0_PHASE;
76 uint32_t DCCG_AUDIO_DTO1_MODULE;
77 uint32_t DCCG_AUDIO_DTO1_PHASE;
78
79 uint32_t AUDIO_RATE_CAPABILITIES;
80};
81
82struct dce_audio_shift {
83 uint8_t AZALIA_ENDPOINT_REG_INDEX;
84 uint8_t AZALIA_ENDPOINT_REG_DATA;
85
86 uint8_t AUDIO_RATE_CAPABILITIES;
87 uint8_t CLKSTOP;
88 uint8_t EPSS;
89
90 uint8_t DCCG_AUDIO_DTO0_SOURCE_SEL;
91 uint8_t DCCG_AUDIO_DTO_SEL;
92 uint8_t DCCG_AUDIO_DTO0_MODULE;
93 uint8_t DCCG_AUDIO_DTO0_PHASE;
94 uint8_t DCCG_AUDIO_DTO1_MODULE;
95 uint8_t DCCG_AUDIO_DTO1_PHASE;
96};
97
98struct dce_aduio_mask {
99 uint32_t AZALIA_ENDPOINT_REG_INDEX;
100 uint32_t AZALIA_ENDPOINT_REG_DATA;
101
102 uint32_t AUDIO_RATE_CAPABILITIES;
103 uint32_t CLKSTOP;
104 uint32_t EPSS;
105
106 uint32_t DCCG_AUDIO_DTO0_SOURCE_SEL;
107 uint32_t DCCG_AUDIO_DTO_SEL;
108 uint32_t DCCG_AUDIO_DTO0_MODULE;
109 uint32_t DCCG_AUDIO_DTO0_PHASE;
110 uint32_t DCCG_AUDIO_DTO1_MODULE;
111 uint32_t DCCG_AUDIO_DTO1_PHASE;
112};
113
114struct dce_audio {
115 struct audio base;
116 const struct dce_audio_registers *regs;
117 const struct dce_audio_shift *shifts;
118 const struct dce_aduio_mask *masks;
119};
120
121struct audio *dce_audio_create(
122 struct dc_context *ctx,
123 unsigned int inst,
124 const struct dce_audio_registers *reg,
125 const struct dce_audio_shift *shifts,
126 const struct dce_aduio_mask *masks);
127
128void dce_aud_destroy(struct audio **audio);
129
130void dce_aud_hw_init(struct audio *audio);
131
132void dce_aud_az_enable(struct audio *audio);
133void dce_aud_az_disable(struct audio *audio);
134
135void dce_aud_az_configure(struct audio *audio,
136 enum signal_type signal,
137 const struct audio_crtc_info *crtc_info,
138 const struct audio_info *audio_info);
139
140void dce_aud_wall_dto_setup(struct audio *audio,
141 enum signal_type signal,
142 const struct audio_crtc_info *crtc_info,
143 const struct audio_pll_info *pll_info);
144
145#endif /*__DAL_AUDIO_DCE_110_H__*/
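
A minimal sketch of how the SF() shift/mask macro in this header expands against the usual <reg>__<field>__SHIFT / <reg>__<field>_MASK register-header constants. The two #define values below are placeholders, not the real AZALIA bit layout.

#include <stdint.h>
#include <stdio.h>

/* placeholder register-header constants (hypothetical values) */
#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK   0x40000000

#define SF(reg_name, field_name, post_fix)\
	.field_name = reg_name ## __ ## field_name ## post_fix

struct example_shift { uint8_t CLKSTOP; };
struct example_mask  { uint32_t CLKSTOP; };

/* SF(..., CLKSTOP, __SHIFT) pastes to ..._POWER_STATES__CLKSTOP__SHIFT */
static const struct example_shift shift = {
	SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, __SHIFT)
};
static const struct example_mask mask = {
	SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, _MASK)
};

int main(void)
{
	printf("shift=%u mask=0x%08x\n",
	       (unsigned)shift.CLKSTOP, (unsigned)mask.CLKSTOP);
	return 0;
}
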
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
new file mode 100644
index 000000000000..80ac5d9efa71
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -0,0 +1,1264 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28
29#include "dc_types.h"
30#include "core_types.h"
31
32#include "include/grph_object_id.h"
33#include "include/logger_interface.h"
34
35#include "dce_clock_source.h"
36
37#include "reg_helper.h"
38
39#define REG(reg)\
40 (clk_src->regs->reg)
41
42#define CTX \
43 clk_src->base.ctx
44
45#undef FN
46#define FN(reg_name, field_name) \
47 clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
48
49#define FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM 6
50#define CALC_PLL_CLK_SRC_ERR_TOLERANCE 1
51#define MAX_PLL_CALC_ERROR 0xFFFFFFFF
52
53static const struct spread_spectrum_data *get_ss_data_entry(
54 struct dce110_clk_src *clk_src,
55 enum signal_type signal,
56 uint32_t pix_clk_khz)
57{
58
59 uint32_t entrys_num;
60 uint32_t i;
61 struct spread_spectrum_data *ss_parm = NULL;
62 struct spread_spectrum_data *ret = NULL;
63
64 switch (signal) {
65 case SIGNAL_TYPE_DVI_SINGLE_LINK:
66 case SIGNAL_TYPE_DVI_DUAL_LINK:
67 ss_parm = clk_src->dvi_ss_params;
68 entrys_num = clk_src->dvi_ss_params_cnt;
69 break;
70
71 case SIGNAL_TYPE_HDMI_TYPE_A:
72 ss_parm = clk_src->hdmi_ss_params;
73 entrys_num = clk_src->hdmi_ss_params_cnt;
74 break;
75
76 case SIGNAL_TYPE_DISPLAY_PORT:
77 case SIGNAL_TYPE_DISPLAY_PORT_MST:
78 case SIGNAL_TYPE_EDP:
79 case SIGNAL_TYPE_VIRTUAL:
80 ss_parm = clk_src->dp_ss_params;
81 entrys_num = clk_src->dp_ss_params_cnt;
82 break;
83
84 default:
85 ss_parm = NULL;
86 entrys_num = 0;
87 break;
88 }
89
90 if (ss_parm == NULL)
91 return ret;
92
93 for (i = 0; i < entrys_num; ++i, ++ss_parm) {
94 if (ss_parm->freq_range_khz >= pix_clk_khz) {
95 ret = ss_parm;
96 break;
97 }
98 }
99
100 return ret;
101}
102
103/**
104* Function: calculate_fb_and_fractional_fb_divider
105*
106* DESCRIPTION: Calculates feedback and fractional feedback divider values
107*
108* PARAMETERS:
109* target_pix_clk_khz           Desired frequency in kHz
110* ref_divider                  Reference divider (already known)
111* post_divider                 Post divider (already known)
112* feedback_divider_param       Pointer where to store the
113*                              calculated feedback divider value
114* fract_feedback_divider_param Pointer where to store the
115*                              calculated fractional feedback divider value
116*
117* RETURNS:
118* It fills the locations pointed to by feedback_divider_param
119* and fract_feedback_divider_param
120* It returns - true if the feedback divider is not 0
121*            - false otherwise (should never happen)
122*/
123static bool calculate_fb_and_fractional_fb_divider(
124 struct calc_pll_clock_source *calc_pll_cs,
125 uint32_t target_pix_clk_khz,
126 uint32_t ref_divider,
127 uint32_t post_divider,
128 uint32_t *feedback_divider_param,
129 uint32_t *fract_feedback_divider_param)
130{
131 uint64_t feedback_divider;
132
133 feedback_divider =
134 (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
135 feedback_divider *= 10;
136 /* additional factor, since we divide by 10 afterwards */
137 feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
138 feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz);
139
140/* Round to the supported number of precision digits.
141 * The following code replaces the old code (ullfeedbackDivider + 5)/10;
142 * for example, if the difference between the number
143 * of fractional feedback decimal points and the fractional FB divider precision
144 * is 2, then the equation becomes (ullfeedbackDivider + 5*100) / (10*100) */
145
146 feedback_divider += (uint64_t)
147 (5 * calc_pll_cs->fract_fb_divider_precision_factor);
148 feedback_divider =
149 div_u64(feedback_divider,
150 calc_pll_cs->fract_fb_divider_precision_factor * 10);
151 feedback_divider *= (uint64_t)
152 (calc_pll_cs->fract_fb_divider_precision_factor);
153
154 *feedback_divider_param =
155 div_u64_rem(
156 feedback_divider,
157 calc_pll_cs->fract_fb_divider_factor,
158 fract_feedback_divider_param);
159
160 if (*feedback_divider_param != 0)
161 return true;
162 return false;
163}
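
A standalone re-derivation of the divider math above for one concrete case (27 MHz reference, 148.5 MHz target, hypothetical ref/post dividers of 2, six fractional digits with equal precision). Plain uint64_t division stands in for the kernel's div_u64 helpers; illustration only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t ref_freq_khz = 27000;
	const uint64_t target_khz   = 148500;
	const uint64_t ref_div = 2, post_div = 2;
	const uint64_t fract_factor = 1000000;	/* 10^6: six decimal digits */
	const uint64_t precision_factor = 1;	/* precision equals digit count */

	uint64_t fb = target_khz * ref_div * post_div * 10 * fract_factor;
	fb /= ref_freq_khz;

	/* round to the supported precision, mirroring the code above */
	fb = (fb + 5 * precision_factor) / (precision_factor * 10);
	fb *= precision_factor;

	/* integer feedback divider 22, fractional part 0:
	 * VCO = 27000/2 * 22 = 297000 kHz, pixel clock = 297000/2 = 148500 kHz */
	printf("feedback=%" PRIu64 " fract=%" PRIu64 "\n",
	       fb / fract_factor, fb % fract_factor);
	return 0;
}
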
164
165/**
166* calc_fb_divider_checking_tolerance
167*
168* DESCRIPTION: Calculates feedback and fractional feedback divider values
169*              for the passed reference and post divider, checking for tolerance.
170* PARAMETERS:
171* pll_settings     Pointer to the pll_settings structure
172* ref_divider      Reference divider (already known)
173* post_divider     Post divider (already known)
174* tolerance        Tolerance the calculated pixel clock must be within
175*
176* RETURNS:
177* It fills the pll_settings structure with the PLL divider values
178* if the calculated values are within the required tolerance.
179* It returns - true if the error is within tolerance
180*            - false if the error is not within tolerance
181*/
182static bool calc_fb_divider_checking_tolerance(
183 struct calc_pll_clock_source *calc_pll_cs,
184 struct pll_settings *pll_settings,
185 uint32_t ref_divider,
186 uint32_t post_divider,
187 uint32_t tolerance)
188{
189 uint32_t feedback_divider;
190 uint32_t fract_feedback_divider;
191 uint32_t actual_calculated_clock_khz;
192 uint32_t abs_err;
193 uint64_t actual_calc_clk_khz;
194
195 calculate_fb_and_fractional_fb_divider(
196 calc_pll_cs,
197 pll_settings->adjusted_pix_clk,
198 ref_divider,
199 post_divider,
200 &feedback_divider,
201 &fract_feedback_divider);
202
203 /*Actual calculated value*/
204 actual_calc_clk_khz = (uint64_t)(feedback_divider *
205 calc_pll_cs->fract_fb_divider_factor) +
206 fract_feedback_divider;
207 actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
208 actual_calc_clk_khz =
209 div_u64(actual_calc_clk_khz,
210 ref_divider * post_divider *
211 calc_pll_cs->fract_fb_divider_factor);
212
213 actual_calculated_clock_khz = (uint32_t)(actual_calc_clk_khz);
214
215 abs_err = (actual_calculated_clock_khz >
216 pll_settings->adjusted_pix_clk)
217 ? actual_calculated_clock_khz -
218 pll_settings->adjusted_pix_clk
219 : pll_settings->adjusted_pix_clk -
220 actual_calculated_clock_khz;
221
222 if (abs_err <= tolerance) {
223 /*found good values*/
224 pll_settings->reference_freq = calc_pll_cs->ref_freq_khz;
225 pll_settings->reference_divider = ref_divider;
226 pll_settings->feedback_divider = feedback_divider;
227 pll_settings->fract_feedback_divider = fract_feedback_divider;
228 pll_settings->pix_clk_post_divider = post_divider;
229 pll_settings->calculated_pix_clk =
230 actual_calculated_clock_khz;
231 pll_settings->vco_freq =
232 actual_calculated_clock_khz * post_divider;
233 return true;
234 }
235 return false;
236}
237
238static bool calc_pll_dividers_in_range(
239 struct calc_pll_clock_source *calc_pll_cs,
240 struct pll_settings *pll_settings,
241 uint32_t min_ref_divider,
242 uint32_t max_ref_divider,
243 uint32_t min_post_divider,
244 uint32_t max_post_divider,
245 uint32_t err_tolerance)
246{
247 uint32_t ref_divider;
248 uint32_t post_divider;
249 uint32_t tolerance;
250
251/* err_tolerance is in units of 0.01%, e.g. 25 / 10000 = 0.0025 - acceptable error of 0.25%,
252 * or 1 / 10000 = 0.0001 - acceptable error of 0.01% */
253 tolerance = (pll_settings->adjusted_pix_clk * err_tolerance) /
254 10000;
255 if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE)
256 tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE;
257
258 for (
259 post_divider = max_post_divider;
260 post_divider >= min_post_divider;
261 --post_divider) {
262 for (
263 ref_divider = min_ref_divider;
264 ref_divider <= max_ref_divider;
265 ++ref_divider) {
266 if (calc_fb_divider_checking_tolerance(
267 calc_pll_cs,
268 pll_settings,
269 ref_divider,
270 post_divider,
271 tolerance)) {
272 return true;
273 }
274 }
275 }
276
277 return false;
278}
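
Worked example of the tolerance computation used above: err_tolerance is in units of 0.01% of the adjusted pixel clock, clamped to CALC_PLL_CLK_SRC_ERR_TOLERANCE (1 kHz). Hypothetical numbers, hosted C, illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t adjusted_pix_clk_khz = 148500;	/* hypothetical */
	uint32_t err_tolerance = 25;		/* 25 * 0.01% = 0.25% */

	uint32_t tolerance_khz = adjusted_pix_clk_khz * err_tolerance / 10000;

	if (tolerance_khz < 1)	/* CALC_PLL_CLK_SRC_ERR_TOLERANCE */
		tolerance_khz = 1;

	printf("tolerance = %u kHz\n", (unsigned)tolerance_khz);	/* 371 kHz */
	return 0;
}
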
279
280static uint32_t calculate_pixel_clock_pll_dividers(
281 struct calc_pll_clock_source *calc_pll_cs,
282 struct pll_settings *pll_settings)
283{
284 uint32_t err_tolerance;
285 uint32_t min_post_divider;
286 uint32_t max_post_divider;
287 uint32_t min_ref_divider;
288 uint32_t max_ref_divider;
289
290 if (pll_settings->adjusted_pix_clk == 0) {
291 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
292 "%s Bad requested pixel clock", __func__);
293 return MAX_PLL_CALC_ERROR;
294 }
295
296/* 1) Find Post divider ranges */
297 if (pll_settings->pix_clk_post_divider) {
298 min_post_divider = pll_settings->pix_clk_post_divider;
299 max_post_divider = pll_settings->pix_clk_post_divider;
300 } else {
301 min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider;
302 if (min_post_divider * pll_settings->adjusted_pix_clk <
303 calc_pll_cs->min_vco_khz) {
304 min_post_divider = calc_pll_cs->min_vco_khz /
305 pll_settings->adjusted_pix_clk;
306 if ((min_post_divider *
307 pll_settings->adjusted_pix_clk) <
308 calc_pll_cs->min_vco_khz)
309 min_post_divider++;
310 }
311
312 max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider;
313 if (max_post_divider * pll_settings->adjusted_pix_clk
314 > calc_pll_cs->max_vco_khz)
315 max_post_divider = calc_pll_cs->max_vco_khz /
316 pll_settings->adjusted_pix_clk;
317 }
318
319/* 2) Find Reference divider ranges
320 * When SS is enabled, or for Display Port even without SS,
321 * pll_settings->reference_divider is not zero.
322 * So calculate the PPLL FB and fractional FB divider
323 * using the passed reference divider */
324
325 if (pll_settings->reference_divider) {
326 min_ref_divider = pll_settings->reference_divider;
327 max_ref_divider = pll_settings->reference_divider;
328 } else {
329 min_ref_divider = ((calc_pll_cs->ref_freq_khz
330 / calc_pll_cs->max_pll_input_freq_khz)
331 > calc_pll_cs->min_pll_ref_divider)
332 ? calc_pll_cs->ref_freq_khz
333 / calc_pll_cs->max_pll_input_freq_khz
334 : calc_pll_cs->min_pll_ref_divider;
335
336 max_ref_divider = ((calc_pll_cs->ref_freq_khz
337 / calc_pll_cs->min_pll_input_freq_khz)
338 < calc_pll_cs->max_pll_ref_divider)
339 ? calc_pll_cs->ref_freq_khz /
340 calc_pll_cs->min_pll_input_freq_khz
341 : calc_pll_cs->max_pll_ref_divider;
342 }
343
344/* If some parameters are invalid we could have a scenario where "min" > "max",
345 * which would produce an endless loop later.
346 * We should investigate why we get the wrong parameters.
347 * But, following the same logic as when "adjustedPixelClock" is 0,
348 * it is better to return here than cause a system hang/watchdog timeout later.
349 * ## SVS Wed 15 Jul 2009 */
350
351 if (min_post_divider > max_post_divider) {
352 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
353 "%s Post divider range is invalid", __func__);
354 return MAX_PLL_CALC_ERROR;
355 }
356
357 if (min_ref_divider > max_ref_divider) {
358 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
359 "%s Reference divider range is invalid", __func__);
360 return MAX_PLL_CALC_ERROR;
361 }
362
363/* 3) Try to find PLL dividers given ranges
364 * starting with minimal error tolerance.
365 * Increase error tolerance until PLL dividers found*/
366 err_tolerance = MAX_PLL_CALC_ERROR;
367
368 while (!calc_pll_dividers_in_range(
369 calc_pll_cs,
370 pll_settings,
371 min_ref_divider,
372 max_ref_divider,
373 min_post_divider,
374 max_post_divider,
375 err_tolerance))
376 err_tolerance += (err_tolerance > 10)
377 ? (err_tolerance / 10)
378 : 1;
379
380 return err_tolerance;
381}
382
383static bool pll_adjust_pix_clk(
384 struct dce110_clk_src *clk_src,
385 struct pixel_clk_params *pix_clk_params,
386 struct pll_settings *pll_settings)
387{
388 uint32_t actual_pix_clk_khz = 0;
389 uint32_t requested_clk_khz = 0;
390 struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = {
391 0 };
392 enum bp_result bp_result;
393
394 switch (pix_clk_params->signal_type) {
395 case SIGNAL_TYPE_HDMI_TYPE_A: {
396 requested_clk_khz = pix_clk_params->requested_pix_clk;
397
398 switch (pix_clk_params->color_depth) {
399 case COLOR_DEPTH_101010:
400 requested_clk_khz = (requested_clk_khz * 5) >> 2;
401 break; /* x1.25*/
402 case COLOR_DEPTH_121212:
403 requested_clk_khz = (requested_clk_khz * 6) >> 2;
404 break; /* x1.5*/
405 case COLOR_DEPTH_161616:
406 requested_clk_khz = requested_clk_khz * 2;
407 break; /* x2.0*/
408 default:
409 break;
410 }
411
412 actual_pix_clk_khz = requested_clk_khz;
413 }
414 break;
415
416 case SIGNAL_TYPE_DISPLAY_PORT:
417 case SIGNAL_TYPE_DISPLAY_PORT_MST:
418 case SIGNAL_TYPE_EDP:
419 requested_clk_khz = pix_clk_params->requested_sym_clk;
420 actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
421 break;
422
423 default:
424 requested_clk_khz = pix_clk_params->requested_pix_clk;
425 actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
426 break;
427 }
428
429 bp_adjust_pixel_clock_params.pixel_clock = requested_clk_khz;
430 bp_adjust_pixel_clock_params.
431 encoder_object_id = pix_clk_params->encoder_object_id;
432 bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type;
433 bp_adjust_pixel_clock_params.
434 ss_enable = pix_clk_params->flags.ENABLE_SS;
435 bp_result = clk_src->bios->funcs->adjust_pixel_clock(
436 clk_src->bios, &bp_adjust_pixel_clock_params);
437 if (bp_result == BP_RESULT_OK) {
438 pll_settings->actual_pix_clk = actual_pix_clk_khz;
439 pll_settings->adjusted_pix_clk =
440 bp_adjust_pixel_clock_params.adjusted_pixel_clock;
441 pll_settings->reference_divider =
442 bp_adjust_pixel_clock_params.reference_divider;
443 pll_settings->pix_clk_post_divider =
444 bp_adjust_pixel_clock_params.pixel_clock_post_divider;
445
446 return true;
447 }
448
449 return false;
450}
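
The deep-color adjustment above scales the TMDS clock by bpc/24. A minimal sketch with a hypothetical 148.5 MHz pixel clock:

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_for_depth(uint32_t pix_clk_khz, unsigned int bpc)
{
	switch (bpc) {
	case 30: return (pix_clk_khz * 5) >> 2;	/* x1.25 */
	case 36: return (pix_clk_khz * 6) >> 2;	/* x1.5  */
	case 48: return pix_clk_khz * 2;	/* x2.0  */
	default: return pix_clk_khz;		/* 24 bpp: x1.0 */
	}
}

int main(void)
{
	printf("%u %u %u\n",
	       (unsigned)scale_for_depth(148500, 30),	/* 185625 */
	       (unsigned)scale_for_depth(148500, 36),	/* 222750 */
	       (unsigned)scale_for_depth(148500, 48));	/* 297000 */
	return 0;
}
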
451
452/**
453 * Calculate PLL dividers for the given clock value.
454 * First calls the VBIOS AdjustPixelClock exec table to check whether the
455 * requested pixel clock will be adjusted based on usage.
456 * Then calculates the PLL dividers for this adjusted clock using the
457 * preferred method (maximum VCO frequency).
458 *
459 * \return
460 * Calculation error in units of 0.01%
461 */
462
463static uint32_t dce110_get_pix_clk_dividers_helper (
464 struct dce110_clk_src *clk_src,
465 struct pll_settings *pll_settings,
466 struct pixel_clk_params *pix_clk_params)
467{
468 uint32_t addr = 0;
469 uint32_t value = 0;
470 uint32_t field = 0;
471 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
472
473 /* Check if reference clock is external (not pcie/xtalin)
474 * HW Dce80 spec:
475 * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
476 * 04 - HSYNCA, 05 - GENLK_CLK, 06 - PCIE_REFCLK, 07 - DVOCLK0 */
477 value = REG_READ(PLL_CNTL);
478 REG_GET(PLL_CNTL, PLL_REF_DIV_SRC, &field);
479 pll_settings->use_external_clk = (field > 1);
480
481 /* VBIOS by default always enables DP SS (spread on IDCLK) for DCE 8.0
482 * (since SI we no longer care about some older DP sinks which
483 * do not report SS support; no known issues) */
484 if ((pix_clk_params->flags.ENABLE_SS) ||
485 (dc_is_dp_signal(pix_clk_params->signal_type))) {
486
487 const struct spread_spectrum_data *ss_data = get_ss_data_entry(
488 clk_src,
489 pix_clk_params->signal_type,
490 pll_settings->adjusted_pix_clk);
491
492 if (NULL != ss_data)
493 pll_settings->ss_percentage = ss_data->percentage;
494 }
495
496 /* Check VBIOS AdjustPixelClock Exec table */
497 if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
498 /* Should never happen, ASSERT and fill up values to be able
499 * to continue. */
500 dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
501 "%s: Failed to adjust pixel clock!!", __func__);
502 pll_settings->actual_pix_clk =
503 pix_clk_params->requested_pix_clk;
504 pll_settings->adjusted_pix_clk =
505 pix_clk_params->requested_pix_clk;
506
507 if (dc_is_dp_signal(pix_clk_params->signal_type))
508 pll_settings->adjusted_pix_clk = 100000;
509 }
510
511 /* Calculate Dividers */
512 if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
513 /*Calculate Dividers by HDMI object, no SS case or SS case */
514 pll_calc_error =
515 calculate_pixel_clock_pll_dividers(
516 &clk_src->calc_pll_hdmi,
517 pll_settings);
518 else
519 /*Calculate Dividers by default object, no SS case or SS case */
520 pll_calc_error =
521 calculate_pixel_clock_pll_dividers(
522 &clk_src->calc_pll,
523 pll_settings);
524
525 return pll_calc_error;
526}
527
528static void dce112_get_pix_clk_dividers_helper (
529 struct dce110_clk_src *clk_src,
530 struct pll_settings *pll_settings,
531 struct pixel_clk_params *pix_clk_params)
532{
533 uint32_t actualPixelClockInKHz;
534
535 actualPixelClockInKHz = pix_clk_params->requested_pix_clk;
536 /* Calculate Dividers */
537 if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
538 switch (pix_clk_params->color_depth) {
539 case COLOR_DEPTH_101010:
540 actualPixelClockInKHz = (actualPixelClockInKHz * 5) >> 2;
541 break;
542 case COLOR_DEPTH_121212:
543 actualPixelClockInKHz = (actualPixelClockInKHz * 6) >> 2;
544 break;
545 case COLOR_DEPTH_161616:
546 actualPixelClockInKHz = actualPixelClockInKHz * 2;
547 break;
548 default:
549 break;
550 }
551 }
552 pll_settings->actual_pix_clk = actualPixelClockInKHz;
553 pll_settings->adjusted_pix_clk = actualPixelClockInKHz;
554 pll_settings->calculated_pix_clk = pix_clk_params->requested_pix_clk;
555}
556
557static uint32_t dce110_get_pix_clk_dividers(
558 struct clock_source *cs,
559 struct pixel_clk_params *pix_clk_params,
560 struct pll_settings *pll_settings)
561{
562 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
563 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
564
565 if (pix_clk_params == NULL || pll_settings == NULL
566 || pix_clk_params->requested_pix_clk == 0) {
567 dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
568 "%s: Invalid parameters!!\n", __func__);
569 return pll_calc_error;
570 }
571
572 memset(pll_settings, 0, sizeof(*pll_settings));
573
574 if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
575 cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
576 pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz;
577 pll_settings->calculated_pix_clk = clk_src->ext_clk_khz;
578 pll_settings->actual_pix_clk =
579 pix_clk_params->requested_pix_clk;
580 return 0;
581 }
582
583 switch (cs->ctx->dce_version) {
584 case DCE_VERSION_8_0:
585 case DCE_VERSION_10_0:
586 case DCE_VERSION_11_0:
587 pll_calc_error =
588 dce110_get_pix_clk_dividers_helper(clk_src,
589 pll_settings, pix_clk_params);
590 break;
591 case DCE_VERSION_11_2:
592 dce112_get_pix_clk_dividers_helper(clk_src,
593 pll_settings, pix_clk_params);
594 break;
595 default:
596 break;
597 }
598
599 return pll_calc_error;
600}
601
602static bool disable_spread_spectrum(struct dce110_clk_src *clk_src)
603{
604 enum bp_result result;
605 struct bp_spread_spectrum_parameters bp_ss_params = {0};
606
607 bp_ss_params.pll_id = clk_src->base.id;
608
609 /*Call ASICControl to process ATOMBIOS Exec table*/
610 result = clk_src->bios->funcs->enable_spread_spectrum_on_ppll(
611 clk_src->bios,
612 &bp_ss_params,
613 false);
614
615 return result == BP_RESULT_OK;
616}
617
618static bool calculate_ss(
619 const struct pll_settings *pll_settings,
620 const struct spread_spectrum_data *ss_data,
621 struct delta_sigma_data *ds_data)
622{
623 struct fixed32_32 fb_div;
624 struct fixed32_32 ss_amount;
625 struct fixed32_32 ss_nslip_amount;
626 struct fixed32_32 ss_ds_frac_amount;
627 struct fixed32_32 ss_step_size;
628 struct fixed32_32 modulation_time;
629
630 if (ds_data == NULL)
631 return false;
632 if (ss_data == NULL)
633 return false;
634 if (ss_data->percentage == 0)
635 return false;
636 if (pll_settings == NULL)
637 return false;
638
639 memset(ds_data, 0, sizeof(struct delta_sigma_data));
640
641 /* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/
642 /* 6 decimal point support in fractional feedback divider */
643 fb_div = dal_fixed32_32_from_fraction(
644 pll_settings->fract_feedback_divider, 1000000);
645 fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
646
647 ds_data->ds_frac_amount = 0;
648 /* spreadSpectrumPercentage is in units of 0.01%,
649 * so it has to be divided by 100 * 100 */
650 ss_amount = dal_fixed32_32_mul(
651 fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
652 100 * ss_data->percentage_divider));
653 ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
654
655 ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
656 dal_fixed32_32_from_int(ds_data->feedback_amount));
657 ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
658 ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
659
660 ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
661 dal_fixed32_32_from_int(ds_data->nfrac_amount));
662 ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
663 ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
664
665 /* compute SS_STEP_SIZE_DSFRAC */
666 modulation_time = dal_fixed32_32_from_fraction(
667 pll_settings->reference_freq * 1000,
668 pll_settings->reference_divider * ss_data->modulation_freq_hz);
669
670 if (ss_data->flags.CENTER_SPREAD)
671 modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
672 else
673 modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
674
675 ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
676 /* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
677 ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
678 ds_data->ds_frac_size = dal_fixed32_32_floor(ss_step_size);
679
680 return true;
681}
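
A floating-point sketch of the delta-sigma amount split above (feedback / nfrac-slip / ds-frac); the driver uses fixed32_32 fixed point, and the step-size part is omitted. Hypothetical inputs: feedback divider 22.0, 0.25% spread.

#include <math.h>
#include <stdio.h>

int main(void)
{
	double fb = 22.0 + 0.0 / 1e6;		/* feedback + fractional feedback */
	double pct = 25.0, pct_div = 100.0;	/* 25/(100*100) = 0.25% spread */

	double ss_amount = fb * pct / (100.0 * pct_div);
	int feedback_amount = (int)floor(ss_amount);
	double nslip = (ss_amount - feedback_amount) * 10.0;
	int nfrac_amount = (int)floor(nslip);
	int ds_frac_amount = (int)floor((nslip - nfrac_amount) * 65536.0);

	/* fb=0 nfrac=0 dsfrac=36044 for these inputs */
	printf("fb=%d nfrac=%d dsfrac=%d\n",
	       feedback_amount, nfrac_amount, ds_frac_amount);
	return 0;
}
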
682
683static bool enable_spread_spectrum(
684 struct dce110_clk_src *clk_src,
685 enum signal_type signal, struct pll_settings *pll_settings)
686{
687 struct bp_spread_spectrum_parameters bp_params = {0};
688 struct delta_sigma_data d_s_data;
689 const struct spread_spectrum_data *ss_data = NULL;
690
691 ss_data = get_ss_data_entry(
692 clk_src,
693 signal,
694 pll_settings->calculated_pix_clk);
695
696/* Pixel clock PLL has been programmed to generate desired pixel clock,
697 * now enable SS on pixel clock */
698/* TODO: is it OK to return true without doing anything? */
699 if (ss_data != NULL && pll_settings->ss_percentage != 0) {
700 if (calculate_ss(pll_settings, ss_data, &d_s_data)) {
701 bp_params.ds.feedback_amount =
702 d_s_data.feedback_amount;
703 bp_params.ds.nfrac_amount =
704 d_s_data.nfrac_amount;
705 bp_params.ds.ds_frac_size = d_s_data.ds_frac_size;
706 bp_params.ds_frac_amount =
707 d_s_data.ds_frac_amount;
708 bp_params.flags.DS_TYPE = 1;
709 bp_params.pll_id = clk_src->base.id;
710 bp_params.percentage = ss_data->percentage;
711 if (ss_data->flags.CENTER_SPREAD)
712 bp_params.flags.CENTER_SPREAD = 1;
713 if (ss_data->flags.EXTERNAL_SS)
714 bp_params.flags.EXTERNAL_SS = 1;
715
716 if (BP_RESULT_OK !=
717 clk_src->bios->funcs->
718 enable_spread_spectrum_on_ppll(
719 clk_src->bios,
720 &bp_params,
721 true))
722 return false;
723 } else
724 return false;
725 }
726 return true;
727}
728
729static void dce110_program_pixel_clk_resync(
730 struct dce110_clk_src *clk_src,
731 enum signal_type signal_type,
732 enum dc_color_depth colordepth)
733{
734 uint32_t value = 0;
735
736 REG_UPDATE(RESYNC_CNTL,
737 DCCG_DEEP_COLOR_CNTL1, 0);
738 /*
739 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
740 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
741 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
742 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
743 */
744 if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
745 return;
746
747 switch (colordepth) {
748 case COLOR_DEPTH_888:
749 REG_UPDATE(RESYNC_CNTL,
750 DCCG_DEEP_COLOR_CNTL1, 0);
751 break;
752 case COLOR_DEPTH_101010:
753 REG_UPDATE(RESYNC_CNTL,
754 DCCG_DEEP_COLOR_CNTL1, 1);
755 break;
756 case COLOR_DEPTH_121212:
757 REG_UPDATE(RESYNC_CNTL,
758 DCCG_DEEP_COLOR_CNTL1, 2);
759 break;
760 case COLOR_DEPTH_161616:
761 REG_UPDATE(RESYNC_CNTL,
762 DCCG_DEEP_COLOR_CNTL1, 3);
763 break;
764 default:
765 break;
766 }
767}
768
769static void dce112_program_pixel_clk_resync(
770 struct dce110_clk_src *clk_src,
771 enum signal_type signal_type,
772 enum dc_color_depth colordepth,
773 bool enable_ycbcr420)
774{
775 uint32_t value = 0;
776
777 REG_UPDATE(PIXCLK_RESYNC_CNTL,
778 PHYPLLA_DCCG_DEEP_COLOR_CNTL, 0);
779 /*
780 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
781 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
782 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
783 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
784 */
785 if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
786 return;
787
788 switch (colordepth) {
789 case COLOR_DEPTH_888:
790 REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
791 PHYPLLA_DCCG_DEEP_COLOR_CNTL, 0,
792 PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
793 break;
794 case COLOR_DEPTH_101010:
795 REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
796 PHYPLLA_DCCG_DEEP_COLOR_CNTL, 1,
797 PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
798 break;
799 case COLOR_DEPTH_121212:
800 REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
801 PHYPLLA_DCCG_DEEP_COLOR_CNTL, 2,
802 PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
803 break;
804 case COLOR_DEPTH_161616:
805 REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
806 PHYPLLA_DCCG_DEEP_COLOR_CNTL, 3,
807 PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
808 break;
809 default:
810 break;
811 }
812}
813
814static bool dce110_program_pix_clk(
815 struct clock_source *clk_src,
816 struct pixel_clk_params *pix_clk_params,
817 struct pll_settings *pll_settings)
818{
819 struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src);
820 struct bp_pixel_clock_parameters bp_pc_params = {0};
821
822 /* First disable SS.
823 * ATOMBIOS enables SS on the PLL for DP by default,
824 * so do not disable it here for DP.
825 */
826 if (clk_src->id != CLOCK_SOURCE_ID_EXTERNAL &&
827 !dc_is_dp_signal(pix_clk_params->signal_type) &&
828 clk_src->ctx->dce_version <= DCE_VERSION_11_0)
829 disable_spread_spectrum(dce110_clk_src);
830
831 /* ATOMBIOS expects the pixel rate adjusted by the deep color ratio */
832 bp_pc_params.controller_id = pix_clk_params->controller_id;
833 bp_pc_params.pll_id = clk_src->id;
834 bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk;
835 bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
836 bp_pc_params.signal_type = pix_clk_params->signal_type;
837
838 switch (clk_src->ctx->dce_version) {
839 case DCE_VERSION_11_2:
840 if (clk_src->id != CLOCK_SOURCE_ID_DP_DTO) {
841 bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
842 pll_settings->use_external_clk;
843 bp_pc_params.flags.SET_XTALIN_REF_SRC =
844 !pll_settings->use_external_clk;
845 if (pix_clk_params->flags.SUPPORT_YCBCR420) {
846 bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk / 2;
847 bp_pc_params.flags.SUPPORT_YUV_420 = 1;
848 }
849 }
850 if (dce110_clk_src->bios->funcs->set_pixel_clock(
851 dce110_clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
852 return false;
853 /* Resync deep color DTO */
854 if (clk_src->id != CLOCK_SOURCE_ID_DP_DTO)
855 dce112_program_pixel_clk_resync(dce110_clk_src,
856 pix_clk_params->signal_type,
857 pix_clk_params->color_depth,
858 pix_clk_params->flags.SUPPORT_YCBCR420);
859 break;
860 case DCE_VERSION_8_0:
861 case DCE_VERSION_10_0:
862 case DCE_VERSION_11_0:
863 bp_pc_params.reference_divider = pll_settings->reference_divider;
864 bp_pc_params.feedback_divider = pll_settings->feedback_divider;
865 bp_pc_params.fractional_feedback_divider =
866 pll_settings->fract_feedback_divider;
867 bp_pc_params.pixel_clock_post_divider =
868 pll_settings->pix_clk_post_divider;
869 bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
870 pll_settings->use_external_clk;
871
872 if (dce110_clk_src->bios->funcs->set_pixel_clock(
873 dce110_clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
874 return false;
875 /* Enable SS
876 * ATOMBIOS will enable SS for DP on the PLL (DP ID clock) by default.
877 * Per the HW display PLL team, SS control settings should be programmed
878 * during PLL reset, but they do not take effect
879 * until SS_EN is asserted. */
880 if (clk_src->id != CLOCK_SOURCE_ID_EXTERNAL
881 && pix_clk_params->flags.ENABLE_SS && !dc_is_dp_signal(
882 pix_clk_params->signal_type)) {
883
884 if (!enable_spread_spectrum(dce110_clk_src,
885 pix_clk_params->signal_type,
886 pll_settings))
887 return false;
888 /* Resync deep color DTO */
889 dce110_program_pixel_clk_resync(dce110_clk_src,
890 pix_clk_params->signal_type,
891 pix_clk_params->color_depth);
892 }
893 break;
894 default:
895 break;
896 }
897
898 return true;
899}
900
901static bool dce110_clock_source_power_down(
902 struct clock_source *clk_src)
903{
904 struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src);
905 enum bp_result bp_result;
906 struct bp_pixel_clock_parameters bp_pixel_clock_params = {0};
907
908 if (clk_src->dp_clk_src)
909 return true;
910
911 /* A pixel clock of 0 means power down the PLL */
912 bp_pixel_clock_params.controller_id = CONTROLLER_ID_UNDEFINED;
913 bp_pixel_clock_params.pll_id = clk_src->id;
914 bp_pixel_clock_params.flags.FORCE_PROGRAMMING_OF_PLL = 1;
915
916 /*Call ASICControl to process ATOMBIOS Exec table*/
917 bp_result = dce110_clk_src->bios->funcs->set_pixel_clock(
918 dce110_clk_src->bios,
919 &bp_pixel_clock_params);
920
921 return bp_result == BP_RESULT_OK;
922}
923
924/*****************************************/
925/* Constructor */
926/*****************************************/
927static const struct clock_source_funcs dce110_clk_src_funcs = {
928 .cs_power_down = dce110_clock_source_power_down,
929 .program_pix_clk = dce110_program_pix_clk,
930 .get_pix_clk_dividers = dce110_get_pix_clk_dividers
931};
932
933static void get_ss_info_from_atombios(
934 struct dce110_clk_src *clk_src,
935 enum as_signal_type as_signal,
936 struct spread_spectrum_data *spread_spectrum_data[],
937 uint32_t *ss_entries_num)
938{
939 enum bp_result bp_result = BP_RESULT_FAILURE;
940 struct spread_spectrum_info *ss_info;
941 struct spread_spectrum_data *ss_data;
942 struct spread_spectrum_info *ss_info_cur;
943 struct spread_spectrum_data *ss_data_cur;
944 uint32_t i;
945
946 if (ss_entries_num == NULL) {
947 dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
948 "Invalid entry !!!\n");
949 return;
950 }
951 if (spread_spectrum_data == NULL) {
952 dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
953 "Invalid array pointer!!!\n");
954 return;
955 }
956
957 spread_spectrum_data[0] = NULL;
958 *ss_entries_num = 0;
959
960 *ss_entries_num = clk_src->bios->funcs->get_ss_entry_number(
961 clk_src->bios,
962 as_signal);
963
964 if (*ss_entries_num == 0)
965 return;
966
967 ss_info = dm_alloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num));
968 ss_info_cur = ss_info;
969 if (ss_info == NULL)
970 return;
971
972 ss_data = dm_alloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num));
973 if (ss_data == NULL)
974 goto out_free_info;
975
976 for (i = 0, ss_info_cur = ss_info;
977 i < (*ss_entries_num);
978 ++i, ++ss_info_cur) {
979
980 bp_result = clk_src->bios->funcs->get_spread_spectrum_info(
981 clk_src->bios,
982 as_signal,
983 i,
984 ss_info_cur);
985
986 if (bp_result != BP_RESULT_OK)
987 goto out_free_data;
988 }
989
990 for (i = 0, ss_info_cur = ss_info, ss_data_cur = ss_data;
991 i < (*ss_entries_num);
992 ++i, ++ss_info_cur, ++ss_data_cur) {
993
994 if (ss_info_cur->type.STEP_AND_DELAY_INFO != false) {
995 dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
996 "Invalid ATOMBIOS SS Table!!!\n");
997 goto out_free_data;
998 }
999
1000 /* for HDMI check SS percentage,
1001 * if it is > 6 (0.06%), the ATOMBIOS table info is invalid*/
1002 if (as_signal == AS_SIGNAL_TYPE_HDMI
1003 && ss_info_cur->spread_spectrum_percentage > 6){
1004 /* invalid input, do nothing */
1005 dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
1006 "Invalid SS percentage ");
1007 dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
1008 "for HDMI in ATOMBIOS info Table!!!\n");
1009 continue;
1010 }
1011 if (ss_info_cur->spread_percentage_divider == 1000) {
1012 /* Keep the previous ATOMBIOS precision in case ATOMBIOS
1013 * starts reporting a new precision for these entries
1014 * (otherwise all code in the DCE specific classes
1015 * for all previous ASICs would need
1016 * to be updated for SS calculations,
1017 * Audio SS compensation and DP DTO SS compensation,
1018 * which assume a fixed SS percentage divider of 100) */
1019 ss_info_cur->spread_spectrum_percentage /= 10;
1020 ss_info_cur->spread_percentage_divider = 100;
1021 }
1022
1023 ss_data_cur->freq_range_khz = ss_info_cur->target_clock_range;
1024 ss_data_cur->percentage =
1025 ss_info_cur->spread_spectrum_percentage;
1026 ss_data_cur->percentage_divider =
1027 ss_info_cur->spread_percentage_divider;
1028 ss_data_cur->modulation_freq_hz =
1029 ss_info_cur->spread_spectrum_range;
1030
1031 if (ss_info_cur->type.CENTER_MODE)
1032 ss_data_cur->flags.CENTER_SPREAD = 1;
1033
1034 if (ss_info_cur->type.EXTERNAL)
1035 ss_data_cur->flags.EXTERNAL_SS = 1;
1036
1037 }
1038
1039 *spread_spectrum_data = ss_data;
1040 dm_free(ss_info);
1041 return;
1042
1043out_free_data:
1044 dm_free(ss_data);
1045 *ss_entries_num = 0;
1046out_free_info:
1047 dm_free(ss_info);
1048}
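
A small sketch of the precision normalization above: an entry reported with divider 1000 is rescaled to the legacy divider of 100 without changing the effective spread (hypothetical entry values, illustration only).

#include <stdio.h>

int main(void)
{
	unsigned int percentage = 250, divider = 1000;	/* 250/1000 = 0.25% */

	if (divider == 1000) {
		percentage /= 10;	/* keep the same effective percentage */
		divider = 100;
	}

	printf("%u/%u = %.2f%%\n", percentage, divider,
	       (double)percentage / divider);	/* 25/100 = 0.25% */
	return 0;
}
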
1049
1050static void ss_info_from_atombios_create(
1051 struct dce110_clk_src *clk_src)
1052{
1053 get_ss_info_from_atombios(
1054 clk_src,
1055 AS_SIGNAL_TYPE_DISPLAY_PORT,
1056 &clk_src->dp_ss_params,
1057 &clk_src->dp_ss_params_cnt);
1058 get_ss_info_from_atombios(
1059 clk_src,
1060 AS_SIGNAL_TYPE_HDMI,
1061 &clk_src->hdmi_ss_params,
1062 &clk_src->hdmi_ss_params_cnt);
1063 get_ss_info_from_atombios(
1064 clk_src,
1065 AS_SIGNAL_TYPE_DVI,
1066 &clk_src->dvi_ss_params,
1067 &clk_src->dvi_ss_params_cnt);
1068}
1069
1070static bool calc_pll_max_vco_construct(
1071 struct calc_pll_clock_source *calc_pll_cs,
1072 struct calc_pll_clock_source_init_data *init_data)
1073{
1074 uint32_t i;
1075 struct firmware_info fw_info = { { 0 } };
1076 if (calc_pll_cs == NULL ||
1077 init_data == NULL ||
1078 init_data->bp == NULL)
1079 return false;
1080
1081 if (init_data->bp->funcs->get_firmware_info(
1082 init_data->bp,
1083 &fw_info) != BP_RESULT_OK)
1084 return false;
1085
1086 calc_pll_cs->ctx = init_data->ctx;
1087 calc_pll_cs->ref_freq_khz = fw_info.pll_info.crystal_frequency;
1088 calc_pll_cs->min_vco_khz =
1089 fw_info.pll_info.min_output_pxl_clk_pll_frequency;
1090 calc_pll_cs->max_vco_khz =
1091 fw_info.pll_info.max_output_pxl_clk_pll_frequency;
1092
1093 if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0)
1094 calc_pll_cs->max_pll_input_freq_khz =
1095 init_data->max_override_input_pxl_clk_pll_freq_khz;
1096 else
1097 calc_pll_cs->max_pll_input_freq_khz =
1098 fw_info.pll_info.max_input_pxl_clk_pll_frequency;
1099
1100 if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0)
1101 calc_pll_cs->min_pll_input_freq_khz =
1102 init_data->min_override_input_pxl_clk_pll_freq_khz;
1103 else
1104 calc_pll_cs->min_pll_input_freq_khz =
1105 fw_info.pll_info.min_input_pxl_clk_pll_frequency;
1106
1107 calc_pll_cs->min_pix_clock_pll_post_divider =
1108 init_data->min_pix_clk_pll_post_divider;
1109 calc_pll_cs->max_pix_clock_pll_post_divider =
1110 init_data->max_pix_clk_pll_post_divider;
1111 calc_pll_cs->min_pll_ref_divider =
1112 init_data->min_pll_ref_divider;
1113 calc_pll_cs->max_pll_ref_divider =
1114 init_data->max_pll_ref_divider;
1115
1116 if (init_data->num_fract_fb_divider_decimal_point == 0 ||
1117 init_data->num_fract_fb_divider_decimal_point_precision >
1118 init_data->num_fract_fb_divider_decimal_point) {
1119 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
1120 "The dec point num or precision is incorrect!");
1121 return false;
1122 }
1123 if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
1124 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
1125 "Incorrect fract feedback divider precision num!");
1126 return false;
1127 }
1128
1129 calc_pll_cs->fract_fb_divider_decimal_points_num =
1130 init_data->num_fract_fb_divider_decimal_point;
1131 calc_pll_cs->fract_fb_divider_precision =
1132 init_data->num_fract_fb_divider_decimal_point_precision;
1133 calc_pll_cs->fract_fb_divider_factor = 1;
1134 for (i = 0; i < calc_pll_cs->fract_fb_divider_decimal_points_num; ++i)
1135 calc_pll_cs->fract_fb_divider_factor *= 10;
1136
1137 calc_pll_cs->fract_fb_divider_precision_factor = 1;
1138 for (
1139 i = 0;
1140 i < (calc_pll_cs->fract_fb_divider_decimal_points_num -
1141 calc_pll_cs->fract_fb_divider_precision);
1142 ++i)
1143 calc_pll_cs->fract_fb_divider_precision_factor *= 10;
1144
1145 return true;
1146}
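
For reference, a sketch of the factor setup at the end of the constructor above: with 6 fractional decimal digits and 6 digits of precision, the factors come out to 1000000 and 1 (hosted C, illustration only).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int points = 6, precision = 6, i;
	uint32_t factor = 1, precision_factor = 1;

	for (i = 0; i < points; ++i)
		factor *= 10;			/* 10^points */
	for (i = 0; i < points - precision; ++i)
		precision_factor *= 10;		/* 10^(points - precision) */

	printf("factor=%u precision_factor=%u\n",
	       (unsigned)factor, (unsigned)precision_factor);
	return 0;
}
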
1147
1148bool dce110_clk_src_construct(
1149 struct dce110_clk_src *clk_src,
1150 struct dc_context *ctx,
1151 struct dc_bios *bios,
1152 enum clock_source_id id,
1153 const struct dce110_clk_src_regs *regs,
1154 const struct dce110_clk_src_shift *cs_shift,
1155 const struct dce110_clk_src_mask *cs_mask)
1156{
1157 struct firmware_info fw_info = { { 0 } };
1158 struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi;
1159 struct calc_pll_clock_source_init_data calc_pll_cs_init_data;
1160
1161 clk_src->base.ctx = ctx;
1162 clk_src->bios = bios;
1163 clk_src->base.id = id;
1164 clk_src->base.funcs = &dce110_clk_src_funcs;
1165
1166 clk_src->regs = regs;
1167 clk_src->cs_shift = cs_shift;
1168 clk_src->cs_mask = cs_mask;
1169
1170 if (clk_src->bios->funcs->get_firmware_info(
1171 clk_src->bios, &fw_info) != BP_RESULT_OK) {
1172 ASSERT_CRITICAL(false);
1173 goto unexpected_failure;
1174 }
1175
1176 clk_src->ext_clk_khz =
1177 fw_info.external_clock_source_frequency_for_dp;
1178
1179 switch (clk_src->base.ctx->dce_version) {
1180 case DCE_VERSION_8_0:
1181 case DCE_VERSION_10_0:
1182 case DCE_VERSION_11_0:
1183
1184 /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
1185 calc_pll_cs_init_data.bp = bios;
1186 calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1;
1187 calc_pll_cs_init_data.max_pix_clk_pll_post_divider =
1188 clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
1189 calc_pll_cs_init_data.min_pll_ref_divider = 1;
1190 calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
1191 /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
1192 calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0;
1193 /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
1194 calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0;
1195 /*numberOfFractFBDividerDecimalPoints*/
1196 calc_pll_cs_init_data.num_fract_fb_divider_decimal_point =
1197 FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
1198 /*number of decimal point to round off for fractional feedback divider value*/
1199 calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision =
1200 FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
1201 calc_pll_cs_init_data.ctx = ctx;
1202
1203 /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */
1204 calc_pll_cs_init_data_hdmi.bp = bios;
1205 calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1;
1206 calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider =
1207 clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
1208 calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1;
1209 calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
1210 /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
1211 calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500;
1212 /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
1213 calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000;
1214 /*numberOfFractFBDividerDecimalPoints*/
1215 calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point =
1216 FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
1217 /*number of decimal point to round off for fractional feedback divider value*/
1218 calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision =
1219 FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
1220 calc_pll_cs_init_data_hdmi.ctx = ctx;
1221
1222 clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
1223
1224 if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
1225 return true;
1226
1227 /* PLL only from here on */
1228 ss_info_from_atombios_create(clk_src);
1229
1230 if (!calc_pll_max_vco_construct(
1231 &clk_src->calc_pll,
1232 &calc_pll_cs_init_data)) {
1233 ASSERT_CRITICAL(false);
1234 goto unexpected_failure;
1235 }
1236
1237 if (clk_src->ref_freq_khz == 48000) {
1238 calc_pll_cs_init_data_hdmi.
1239 min_override_input_pxl_clk_pll_freq_khz = 24000;
1240 calc_pll_cs_init_data_hdmi.
1241 max_override_input_pxl_clk_pll_freq_khz = 48000;
1242 } else if (clk_src->ref_freq_khz == 100000) {
1243 calc_pll_cs_init_data_hdmi.
1244 min_override_input_pxl_clk_pll_freq_khz = 25000;
1245 calc_pll_cs_init_data_hdmi.
1246 max_override_input_pxl_clk_pll_freq_khz = 50000;
1247 }
1248
1249 if (!calc_pll_max_vco_construct(
1250 &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) {
1251 ASSERT_CRITICAL(false);
1252 goto unexpected_failure;
1253 }
1254 break;
1255 default:
1256 break;
1257 }
1258
1259 return true;
1260
1261unexpected_failure:
1262 return false;
1263}
1264
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
new file mode 100644
index 000000000000..067e4ac0e67a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -0,0 +1,109 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_CLOCK_SOURCE_DCE_H__
26#define __DC_CLOCK_SOURCE_DCE_H__
27
28#include "../inc/clock_source.h"
29
30#define TO_DCE110_CLK_SRC(clk_src)\
31 container_of(clk_src, struct dce110_clk_src, base)
32
33#define CS_COMMON_REG_LIST_DCE_100_110(id) \
34 SRI(RESYNC_CNTL, PIXCLK, id), \
35 SRI(PLL_CNTL, BPHYC_PLL, id)
36
37#define CS_COMMON_REG_LIST_DCE_80(id) \
38 SRI(RESYNC_CNTL, PIXCLK, id), \
39 SRI(PLL_CNTL, DCCG_PLL, id)
40
41#define CS_COMMON_REG_LIST_DCE_112(id) \
42 SRI(PIXCLK_RESYNC_CNTL, PHYPLL, id)
43
44#define CS_SF(reg_name, field_name, post_fix)\
45 .field_name = reg_name ## __ ## field_name ## post_fix
46
47#define CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
48 CS_SF(PLL_CNTL, PLL_REF_DIV_SRC, mask_sh),\
49 CS_SF(PIXCLK1_RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, mask_sh),\
50 CS_SF(PLL_POST_DIV, PLL_POST_DIV_PIXCLK, mask_sh),\
51 CS_SF(PLL_REF_DIV, PLL_REF_DIV, mask_sh),\
52
53#define CS_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
54 CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
55 CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh),\
56
57#define CS_REG_FIELD_LIST(type) \
58 type PLL_REF_DIV_SRC; \
59 type DCCG_DEEP_COLOR_CNTL1; \
60 type PHYPLLA_DCCG_DEEP_COLOR_CNTL; \
61 type PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE; \
62 type PLL_POST_DIV_PIXCLK; \
63 type PLL_REF_DIV; \
64
65struct dce110_clk_src_shift {
66 CS_REG_FIELD_LIST(uint8_t)
67};
68
69struct dce110_clk_src_mask{
70 CS_REG_FIELD_LIST(uint32_t)
71};
72
73struct dce110_clk_src_regs {
74 uint32_t RESYNC_CNTL;
75 uint32_t PIXCLK_RESYNC_CNTL;
76 uint32_t PLL_CNTL;
77};
78
79struct dce110_clk_src {
80 struct clock_source base;
81 const struct dce110_clk_src_regs *regs;
82 const struct dce110_clk_src_mask *cs_mask;
83 const struct dce110_clk_src_shift *cs_shift;
84 struct dc_bios *bios;
85
86 struct spread_spectrum_data *dp_ss_params;
87 uint32_t dp_ss_params_cnt;
88 struct spread_spectrum_data *hdmi_ss_params;
89 uint32_t hdmi_ss_params_cnt;
90 struct spread_spectrum_data *dvi_ss_params;
91 uint32_t dvi_ss_params_cnt;
92
93 uint32_t ext_clk_khz;
94 uint32_t ref_freq_khz;
95
96 struct calc_pll_clock_source calc_pll;
97 struct calc_pll_clock_source calc_pll_hdmi;
98};
99
100bool dce110_clk_src_construct(
101 struct dce110_clk_src *clk_src,
102 struct dc_context *ctx,
103 struct dc_bios *bios,
104 enum clock_source_id,
105 const struct dce110_clk_src_regs *regs,
106 const struct dce110_clk_src_shift *cs_shift,
107 const struct dce110_clk_src_mask *cs_mask);
108
109#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
new file mode 100644
index 000000000000..dd1cf5e6e949
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -0,0 +1,195 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dce_hwseq.h"
27#include "reg_helper.h"
28#include "hw_sequencer.h"
29
30#define CTX \
31 hws->ctx
32#define REG(reg)\
33 hws->regs->reg
34
35#undef FN
36#define FN(reg_name, field_name) \
37 hws->shifts->field_name, hws->masks->field_name
38
39void dce_enable_fe_clock(struct dce_hwseq *hws,
40 unsigned int fe_inst, bool enable)
41{
42 REG_UPDATE(DCFE_CLOCK_CONTROL[fe_inst],
43 DCFE_CLOCK_ENABLE, enable);
44}
45
46void dce_pipe_control_lock(struct dce_hwseq *hws,
47 unsigned int blnd_inst,
48 enum pipe_lock_control control_mask,
49 bool lock)
50{
51 uint32_t lock_val = lock ? 1 : 0;
52 uint32_t dcp_grph, scl, dcp_grph_surf, blnd, update_lock_mode;
53
54 uint32_t val = REG_GET_5(BLND_V_UPDATE_LOCK[blnd_inst],
55 BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
56 BLND_SCL_V_UPDATE_LOCK, &scl,
57 BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, &dcp_grph_surf,
58 BLND_BLND_V_UPDATE_LOCK, &blnd,
59 BLND_V_UPDATE_LOCK_MODE, &update_lock_mode);
60
61 if (control_mask & PIPE_LOCK_CONTROL_GRAPHICS)
62 dcp_grph = lock_val;
63
64 if (control_mask & PIPE_LOCK_CONTROL_SCL)
65 scl = lock_val;
66
67 if (control_mask & PIPE_LOCK_CONTROL_SURFACE)
68 dcp_grph_surf = lock_val;
69
70 if (control_mask & PIPE_LOCK_CONTROL_BLENDER)
71 blnd = lock_val;
72
73 if (control_mask & PIPE_LOCK_CONTROL_MODE)
74 update_lock_mode = lock_val;
75
76 REG_SET_5(BLND_V_UPDATE_LOCK[blnd_inst], val,
77 BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
78 BLND_SCL_V_UPDATE_LOCK, scl,
79 BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, dcp_grph_surf,
80 BLND_BLND_V_UPDATE_LOCK, blnd,
81 BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
82
83 if (hws->wa.blnd_crtc_trigger)
84 if (!lock && (control_mask & PIPE_LOCK_CONTROL_BLENDER)) {
85 uint32_t value = REG_READ(CRTC_H_BLANK_START_END[blnd_inst]);
86 REG_WRITE(CRTC_H_BLANK_START_END[blnd_inst], value);
87 }
88}
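/* Usage sketch (illustrative only; the real call sites live in the DCE
 * hardware sequencers): a caller that reprograms pipe 0's scaler and
 * blender atomically might bracket the programming like this:
 *
 *	dce_pipe_control_lock(hws, 0,
 *		PIPE_LOCK_CONTROL_SCL | PIPE_LOCK_CONTROL_BLENDER, true);
 *	... program SCL/BLND registers ...
 *	dce_pipe_control_lock(hws, 0,
 *		PIPE_LOCK_CONTROL_SCL | PIPE_LOCK_CONTROL_BLENDER, false);
 *
 * Fields not selected in control_mask keep their current values, since the
 * function reads BLND_V_UPDATE_LOCK first and only rewrites the requested
 * fields.
 */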
89
90void dce_set_blender_mode(struct dce_hwseq *hws,
91 unsigned int blnd_inst,
92 enum blnd_mode mode)
93{
94 uint32_t feedthrough = 1;
95 uint32_t blnd_mode = 0;
96 uint32_t multiplied_mode = 0;
97 uint32_t alpha_mode = 2;
98
99 switch (mode) {
100 case BLND_MODE_OTHER_PIPE:
101 feedthrough = 0;
102 blnd_mode = 1;
103 alpha_mode = 0;
104 break;
105 case BLND_MODE_BLENDING:
106 feedthrough = 0;
107 blnd_mode = 2;
108 alpha_mode = 0;
109 multiplied_mode = 1;
110 break;
111 case BLND_MODE_CURRENT_PIPE:
112 default:
113 if (REG(BLND_CONTROL[blnd_inst]) == REG(BLNDV_CONTROL) ||
114 blnd_inst == 0)
115 feedthrough = 0;
116 break;
117 }
118
119 REG_UPDATE_4(BLND_CONTROL[blnd_inst],
120 BLND_FEEDTHROUGH_EN, feedthrough,
121 BLND_ALPHA_MODE, alpha_mode,
122 BLND_MODE, blnd_mode,
123 BLND_MULTIPLIED_MODE, multiplied_mode);
124}
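/* Summary of the programming above, derived from the switch statement
 * (illustrative; values are raw register field values):
 *
 *	mode                    FEEDTHROUGH_EN  BLND_MODE  ALPHA_MODE  MULTIPLIED_MODE
 *	BLND_MODE_OTHER_PIPE          0             1          0              0
 *	BLND_MODE_BLENDING            0             2          0              1
 *	BLND_MODE_CURRENT_PIPE      0 or 1          0          2              0
 *
 * For CURRENT_PIPE, feedthrough remains 1 only on a secondary blender
 * instance that is not the underlay (BLNDV) blender.
 */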
125
126
127static void dce_disable_sram_shut_down(struct dce_hwseq *hws)
128{
129 if (REG(DC_MEM_GLOBAL_PWR_REQ_CNTL))
130 REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL,
131 DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
132}
133
134static void dce_underlay_clock_enable(struct dce_hwseq *hws)
135{
136 /* todo: why do we need this at boot? is dce_enable_fe_clock enough? */
137 if (REG(DCFEV_CLOCK_CONTROL))
138 REG_UPDATE(DCFEV_CLOCK_CONTROL,
139 DCFEV_CLOCK_ENABLE, 1);
140}
141
142static void enable_hw_base_light_sleep(void)
143{
144 /* TODO: implement */
145}
146
147static void disable_sw_manual_control_light_sleep(void)
148{
149 /* TODO: implement */
150}
151
152void dce_clock_gating_power_up(struct dce_hwseq *hws,
153 bool enable)
154{
155 if (enable) {
156 enable_hw_base_light_sleep();
157 disable_sw_manual_control_light_sleep();
158 } else {
159 dce_disable_sram_shut_down(hws);
160 dce_underlay_clock_enable(hws);
161 }
162}
163
164void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
165 struct clock_source *clk_src,
166 unsigned int tg_inst)
167{
168 if (clk_src->id == CLOCK_SOURCE_ID_DP_DTO) {
169 REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
170 DP_DTO0_ENABLE, 1);
171
172 } else if (clk_src->id >= CLOCK_SOURCE_COMBO_PHY_PLL0) {
173 uint32_t rate_source = clk_src->id - CLOCK_SOURCE_COMBO_PHY_PLL0;
174
175 REG_UPDATE_2(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
176 PHYPLL_PIXEL_RATE_SOURCE, rate_source,
177 PIXEL_RATE_PLL_SOURCE, 0);
178
179 REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
180 DP_DTO0_ENABLE, 0);
181
182 } else if (clk_src->id <= CLOCK_SOURCE_ID_PLL2) {
183 uint32_t rate_source = clk_src->id - CLOCK_SOURCE_ID_PLL0;
184
185 REG_UPDATE_2(PIXEL_RATE_CNTL[tg_inst],
186 PIXEL_RATE_SOURCE, rate_source,
187 DP_DTO0_ENABLE, 0);
188
189 if (REG(PHYPLL_PIXEL_RATE_CNTL[tg_inst]))
190 REG_UPDATE(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
191 PIXEL_RATE_PLL_SOURCE, 1);
192 } else {
193 DC_ERR("unknown clock source");
194 }
195}
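/* Example of the mapping above (illustrative; it assumes the combo PHY PLL
 * and legacy PLL enum values are contiguous, as the subtractions imply): a
 * timing generator fed by CLOCK_SOURCE_COMBO_PHY_PLL2 programs
 * PHYPLL_PIXEL_RATE_SOURCE = 2 and clears DP_DTO0_ENABLE, while
 * CLOCK_SOURCE_ID_PLL1 programs PIXEL_RATE_SOURCE = 1 in the legacy
 * PIXEL_RATE_CNTL register instead.
 */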
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
new file mode 100644
index 000000000000..4af8d560a7ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -0,0 +1,250 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DCE_HWSEQ_H__
26#define __DCE_HWSEQ_H__
27
28#include "hw_sequencer.h"
29
30#define HWSEQ_DCEF_REG_LIST_DCE8() \
31 .DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
32 .DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
33 .DCFE_CLOCK_CONTROL[2] = mmCRTC2_CRTC_DCFE_CLOCK_CONTROL, \
34 .DCFE_CLOCK_CONTROL[3] = mmCRTC3_CRTC_DCFE_CLOCK_CONTROL, \
35 .DCFE_CLOCK_CONTROL[4] = mmCRTC4_CRTC_DCFE_CLOCK_CONTROL, \
36 .DCFE_CLOCK_CONTROL[5] = mmCRTC5_CRTC_DCFE_CLOCK_CONTROL
37
38#define HWSEQ_DCEF_REG_LIST() \
39 SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
40 SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
41 SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
42 SRII(DCFE_CLOCK_CONTROL, DCFE, 3), \
43 SRII(DCFE_CLOCK_CONTROL, DCFE, 4), \
44 SRII(DCFE_CLOCK_CONTROL, DCFE, 5), \
45 SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
46
47#define HWSEQ_BLND_REG_LIST() \
48 SRII(BLND_V_UPDATE_LOCK, BLND, 0), \
49 SRII(BLND_V_UPDATE_LOCK, BLND, 1), \
50 SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
51 SRII(BLND_V_UPDATE_LOCK, BLND, 3), \
52 SRII(BLND_V_UPDATE_LOCK, BLND, 4), \
53 SRII(BLND_V_UPDATE_LOCK, BLND, 5), \
54 SRII(BLND_CONTROL, BLND, 0), \
55 SRII(BLND_CONTROL, BLND, 1), \
56 SRII(BLND_CONTROL, BLND, 2), \
57 SRII(BLND_CONTROL, BLND, 3), \
58 SRII(BLND_CONTROL, BLND, 4), \
59 SRII(BLND_CONTROL, BLND, 5)
60
61#define HWSEQ_PIXEL_RATE_REG_LIST(blk) \
62 SRII(PIXEL_RATE_CNTL, blk, 0), \
63 SRII(PIXEL_RATE_CNTL, blk, 1), \
64 SRII(PIXEL_RATE_CNTL, blk, 2), \
65 SRII(PIXEL_RATE_CNTL, blk, 3), \
66 SRII(PIXEL_RATE_CNTL, blk, 4), \
67 SRII(PIXEL_RATE_CNTL, blk, 5)
68
69#define HWSEQ_PHYPLL_REG_LIST(blk) \
70 SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
71 SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \
72 SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2), \
73 SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
74 SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
75 SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
76
77#define HWSEQ_DCE11_REG_LIST_BASE() \
78 SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
79 SR(DCFEV_CLOCK_CONTROL), \
80 SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
81 SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
82 SRII(CRTC_H_BLANK_START_END, CRTC, 0),\
83 SRII(CRTC_H_BLANK_START_END, CRTC, 1),\
84 SRII(BLND_V_UPDATE_LOCK, BLND, 0),\
85 SRII(BLND_V_UPDATE_LOCK, BLND, 1),\
86 SRII(BLND_CONTROL, BLND, 0),\
87 SRII(BLND_CONTROL, BLND, 1),\
88 SR(BLNDV_CONTROL),\
89 HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
90
91#define HWSEQ_DCE8_REG_LIST() \
92 HWSEQ_DCEF_REG_LIST_DCE8(), \
93 HWSEQ_BLND_REG_LIST(), \
94 HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
95
96#define HWSEQ_DCE10_REG_LIST() \
97 HWSEQ_DCEF_REG_LIST(), \
98 HWSEQ_BLND_REG_LIST(), \
99 HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
100
101#define HWSEQ_ST_REG_LIST() \
102 HWSEQ_DCE11_REG_LIST_BASE(), \
103 .DCFE_CLOCK_CONTROL[2] = mmDCFEV_CLOCK_CONTROL, \
104 .CRTC_H_BLANK_START_END[2] = mmCRTCV_H_BLANK_START_END, \
105 .BLND_V_UPDATE_LOCK[2] = mmBLNDV_V_UPDATE_LOCK, \
106 .BLND_CONTROL[2] = mmBLNDV_CONTROL,
107
108#define HWSEQ_CZ_REG_LIST() \
109 HWSEQ_DCE11_REG_LIST_BASE(), \
110 SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
111 SRII(CRTC_H_BLANK_START_END, CRTC, 2), \
112 SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
113 SRII(BLND_CONTROL, BLND, 2), \
114 .DCFE_CLOCK_CONTROL[3] = mmDCFEV_CLOCK_CONTROL, \
115 .CRTC_H_BLANK_START_END[3] = mmCRTCV_H_BLANK_START_END, \
116 .BLND_V_UPDATE_LOCK[3] = mmBLNDV_V_UPDATE_LOCK, \
117 .BLND_CONTROL[3] = mmBLNDV_CONTROL
118
119#define HWSEQ_DCE112_REG_LIST() \
120 HWSEQ_DCE10_REG_LIST(), \
121 HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
122 HWSEQ_PHYPLL_REG_LIST(CRTC)
123
124struct dce_hwseq_registers {
125 uint32_t DCFE_CLOCK_CONTROL[6];
126 uint32_t DCFEV_CLOCK_CONTROL;
127 uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
128 uint32_t BLND_V_UPDATE_LOCK[6];
129 uint32_t BLND_CONTROL[6];
130 uint32_t BLNDV_CONTROL;
131
132 uint32_t CRTC_H_BLANK_START_END[6];
133 uint32_t PIXEL_RATE_CNTL[6];
134 uint32_t PHYPLL_PIXEL_RATE_CNTL[6];
135};
136 /* set field name */
137#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
138 .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
139
140#define HWS_SF1(blk_name, reg_name, field_name, post_fix)\
141 .field_name = blk_name ## reg_name ## __ ## blk_name ## field_name ## post_fix
142
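/* Expansion example: with post_fix __SHIFT,
 *
 *	HWS_SF(BLND_, CONTROL, BLND_MODE, __SHIFT)
 * expands to
 *	.BLND_MODE = BLND_CONTROL__BLND_MODE__SHIFT
 *
 * while the HWS_SF1 variant repeats the block prefix on the field name:
 *
 *	HWS_SF1(CRTC0_, PIXEL_RATE_CNTL, PIXEL_RATE_SOURCE, __SHIFT)
 * expands to
 *	.PIXEL_RATE_SOURCE = CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE__SHIFT
 *
 * The pasted __SHIFT/_MASK defines are assumed to come from the DCE
 * register sh_mask headers.
 */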
143
144#define HWSEQ_DCEF_MASK_SH_LIST(mask_sh, blk)\
145 HWS_SF(blk, CLOCK_CONTROL, DCFE_CLOCK_ENABLE, mask_sh),\
146 SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
147
148#define HWSEQ_BLND_MASK_SH_LIST(mask_sh, blk)\
149 HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
150 HWS_SF(blk, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
151 HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
152 HWS_SF(blk, V_UPDATE_LOCK, BLND_BLND_V_UPDATE_LOCK, mask_sh),\
153 HWS_SF(blk, V_UPDATE_LOCK, BLND_V_UPDATE_LOCK_MODE, mask_sh),\
154 HWS_SF(blk, CONTROL, BLND_FEEDTHROUGH_EN, mask_sh),\
155 HWS_SF(blk, CONTROL, BLND_ALPHA_MODE, mask_sh),\
156 HWS_SF(blk, CONTROL, BLND_MODE, mask_sh),\
157 HWS_SF(blk, CONTROL, BLND_MULTIPLIED_MODE, mask_sh)
158
159#define HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, blk)\
160 HWS_SF1(blk, PIXEL_RATE_CNTL, PIXEL_RATE_SOURCE, mask_sh),\
161 HWS_SF(blk, PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
162
163#define HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, blk)\
164 HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
165 HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
166
167#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
168 .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
169 HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
170 HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
171 HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
172 HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
173 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
174
175#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
176 HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
177 HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
178 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
179
180#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
181 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
182 SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
183 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
184
185#define HWSEQ_DCE112_MASK_SH_LIST(mask_sh)\
186 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
187 HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_)
188
189#define HWSEQ_REG_FIELD_LIST(type) \
190 type DCFE_CLOCK_ENABLE; \
191 type DCFEV_CLOCK_ENABLE; \
192 type DC_MEM_GLOBAL_PWR_REQ_DIS; \
193 type BLND_DCP_GRPH_V_UPDATE_LOCK; \
194 type BLND_SCL_V_UPDATE_LOCK; \
195 type BLND_DCP_GRPH_SURF_V_UPDATE_LOCK; \
196 type BLND_BLND_V_UPDATE_LOCK; \
197 type BLND_V_UPDATE_LOCK_MODE; \
198 type BLND_FEEDTHROUGH_EN; \
199 type BLND_ALPHA_MODE; \
200 type BLND_MODE; \
201 type BLND_MULTIPLIED_MODE; \
202 type DP_DTO0_ENABLE; \
203 type PIXEL_RATE_SOURCE; \
204 type PHYPLL_PIXEL_RATE_SOURCE; \
205 type PIXEL_RATE_PLL_SOURCE; \
206
207struct dce_hwseq_shift {
208	HWSEQ_REG_FIELD_LIST(uint8_t)
209};
210
211struct dce_hwseq_mask {
212	HWSEQ_REG_FIELD_LIST(uint32_t)
213};
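/* Illustrative sketch (variable names hypothetical): a resource file that
 * provides the SR()/SRII() macros resolving register names to mm* offsets
 * would typically build the three tables as
 *
 *	static const struct dce_hwseq_registers hwseq_reg = {
 *		HWSEQ_DCE10_REG_LIST()
 *	};
 *	static const struct dce_hwseq_shift hwseq_shift = {
 *		HWSEQ_DCE10_MASK_SH_LIST(__SHIFT)
 *	};
 *	static const struct dce_hwseq_mask hwseq_mask = {
 *		HWSEQ_DCE10_MASK_SH_LIST(_MASK)
 *	};
 */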
214
215struct dce_hwseq_wa {
216 bool blnd_crtc_trigger;
217};
218
219struct dce_hwseq {
220 struct dc_context *ctx;
221 const struct dce_hwseq_registers *regs;
222 const struct dce_hwseq_shift *shifts;
223 const struct dce_hwseq_mask *masks;
224 struct dce_hwseq_wa wa;
225};
226
227enum blnd_mode {
228 BLND_MODE_CURRENT_PIPE = 0,/* Data from current pipe only */
229 BLND_MODE_OTHER_PIPE, /* Data from other pipe only */
230 BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
231};
232
233void dce_enable_fe_clock(struct dce_hwseq *hwss,
234 unsigned int inst, bool enable);
235
236void dce_pipe_control_lock(struct dce_hwseq *hws,
237 unsigned int blnd_inst,
238 enum pipe_lock_control control_mask,
239 bool lock);
240
241void dce_set_blender_mode(struct dce_hwseq *hws,
242 unsigned int blnd_inst, enum blnd_mode mode);
243
244void dce_clock_gating_power_up(struct dce_hwseq *hws,
245 bool enable);
246
247void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
248 struct clock_source *clk_src,
249 unsigned int tg_inst);
250#endif /*__DCE_HWSEQ_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
new file mode 100644
index 000000000000..86e55d028cbf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -0,0 +1,2176 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "reg_helper.h"
27
28#include "core_types.h"
29#include "link_encoder.h"
30#include "dce_link_encoder.h"
31#include "stream_encoder.h"
32#include "i2caux_interface.h"
33#include "dc_bios_types.h"
34
35#include "gpio_service_interface.h"
36
37#include "dce/dce_11_0_d.h"
38#include "dce/dce_11_0_sh_mask.h"
39#include "dce/dce_11_0_enum.h"
40
41#ifndef ATOM_S2_CURRENT_BL_LEVEL_MASK
42#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
43#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
44#endif
45
46#ifndef ATOM_S2_CURRENT_BL_LEVEL_SHIFT
47#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
48#endif
49
50#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK
51#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
52#endif
53
54#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT
55#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
56#endif
57
58#define CTX \
59 enc110->base.ctx
60
61#define REG(reg)\
62 (enc110->link_regs->reg)
63
64#define AUX_REG(reg)\
65 (enc110->aux_regs->reg)
66
67#define HPD_REG(reg)\
68 (enc110->hpd_regs->reg)
69
70/* For current ASICs pixel clock - 600MHz */
71#define MAX_ENCODER_CLK 600000
72
73#define DCE11_UNIPHY_MAX_PIXEL_CLK_IN_KHZ 594000
74
75#define DEFAULT_AUX_MAX_DATA_SIZE 16
76#define AUX_MAX_DEFER_WRITE_RETRY 20
77/*
78 * @brief
79 * Trigger Source Select
80 * ASIC-dependent, actual values for register programming
81 */
82#define DCE110_DIG_FE_SOURCE_SELECT_INVALID 0x0
83#define DCE110_DIG_FE_SOURCE_SELECT_DIGA 0x1
84#define DCE110_DIG_FE_SOURCE_SELECT_DIGB 0x2
85#define DCE110_DIG_FE_SOURCE_SELECT_DIGC 0x4
86#define DCE110_DIG_FE_SOURCE_SELECT_DIGD 0x08
87#define DCE110_DIG_FE_SOURCE_SELECT_DIGE 0x10
88#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
89
90/* all values are in milliseconds */
91/* For eDP, after power-up/power-down,
92 * 300/500 msec max. delay from LCDVCC to black video generation */
93#define PANEL_POWER_UP_TIMEOUT 300
94#define PANEL_POWER_DOWN_TIMEOUT 500
95#define HPD_CHECK_INTERVAL 10
96
97/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
98#define TMDS_MIN_PIXEL_CLOCK 25000
99/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
100#define TMDS_MAX_PIXEL_CLOCK 165000
101/* For current ASICs pixel clock - 600MHz */
102#define MAX_ENCODER_CLOCK 600000
103
104/* Set the ABM Pipe */
105#define MCP_ABM_PIPE_SET 0x66
106/* Set the ABM level */
107#define MCP_ABM_LEVEL_SET 0x65
108/* Set backlight level */
109#define MCP_BL_SET 0x67
110
111/* PSR related commands */
112#define PSR_ENABLE 0x20
113#define PSR_EXIT 0x21
114#define PSR_SET 0x23
115
116/*TODO: Used for psr wakeup for set backlight level*/
117static unsigned int psr_crtc_offset;
118
119/* register settings need to be saved and restored; used at InitBacklight */
120static struct dce110_abm_backlight_registers stored_backlight_registers;
121
122enum {
123 DP_MST_UPDATE_MAX_RETRY = 50
124};
125
126#define DIG_REG(reg)\
127 (reg + enc110->offsets.dig)
128
129#define DP_REG(reg)\
130 (reg + enc110->offsets.dp)
131
132static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
133 .validate_output_with_stream =
134 dce110_link_encoder_validate_output_with_stream,
135 .hw_init = dce110_link_encoder_hw_init,
136 .setup = dce110_link_encoder_setup,
137 .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
138 .enable_dp_output = dce110_link_encoder_enable_dp_output,
139 .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
140 .disable_output = dce110_link_encoder_disable_output,
141 .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
142 .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
143 .update_mst_stream_allocation_table =
144 dce110_link_encoder_update_mst_stream_allocation_table,
145 .set_lcd_backlight_level = dce110_link_encoder_set_lcd_backlight_level,
146 .set_dmcu_backlight_level =
147 dce110_link_encoder_set_dmcu_backlight_level,
148 .init_dmcu_backlight_settings =
149 dce110_link_encoder_init_dmcu_backlight_settings,
150 .set_dmcu_abm_level = dce110_link_encoder_set_dmcu_abm_level,
151 .set_dmcu_psr_enable = dce110_link_encoder_set_dmcu_psr_enable,
152 .setup_dmcu_psr = dce110_link_encoder_setup_dmcu_psr,
153 .backlight_control = dce110_link_encoder_edp_backlight_control,
154 .power_control = dce110_link_encoder_edp_power_control,
155 .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
156 .enable_hpd = dce110_link_encoder_enable_hpd,
157 .disable_hpd = dce110_link_encoder_disable_hpd,
158 .destroy = dce110_link_encoder_destroy
159};
160
161
162static enum bp_result link_transmitter_control(
163 struct dce110_link_encoder *enc110,
164 struct bp_transmitter_control *cntl)
165{
166 enum bp_result result;
167 struct dc_bios *bp = enc110->base.ctx->dc_bios;
168
169 result = bp->funcs->transmitter_control(bp, cntl);
170
171 return result;
172}
173
174static void enable_phy_bypass_mode(
175 struct dce110_link_encoder *enc110,
176 bool enable)
177{
178 /* This register resides in DP back end block;
179 * transmitter is used for the offset */
180
181 REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
182
183}
184
185static void disable_prbs_symbols(
186 struct dce110_link_encoder *enc110,
187 bool disable)
188{
189 /* This register resides in DP back end block;
190 * transmitter is used for the offset */
191
192 REG_UPDATE_4(DP_DPHY_CNTL,
193 DPHY_ATEST_SEL_LANE0, disable,
194 DPHY_ATEST_SEL_LANE1, disable,
195 DPHY_ATEST_SEL_LANE2, disable,
196 DPHY_ATEST_SEL_LANE3, disable);
197}
198
199static void disable_prbs_mode(
200 struct dce110_link_encoder *enc110)
201{
202 /* This register resides in DP back end block;
203 * transmitter is used for the offset */
204
205 REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
206}
207
208static void program_pattern_symbols(
209 struct dce110_link_encoder *enc110,
210 uint16_t pattern_symbols[8])
211{
212 /* This register resides in DP back end block;
213 * transmitter is used for the offset */
214
215 REG_SET_3(DP_DPHY_SYM0, 0,
216 DPHY_SYM1, pattern_symbols[0],
217 DPHY_SYM2, pattern_symbols[1],
218 DPHY_SYM3, pattern_symbols[2]);
219
220 /* This register resides in DP back end block;
221 * transmitter is used for the offset */
222
223 REG_SET_3(DP_DPHY_SYM1, 0,
224 DPHY_SYM4, pattern_symbols[3],
225 DPHY_SYM5, pattern_symbols[4],
226 DPHY_SYM6, pattern_symbols[5]);
227
228 /* This register resides in DP back end block;
229 * transmitter is used for the offset */
230
231 REG_SET_2(DP_DPHY_SYM2, 0,
232 DPHY_SYM7, pattern_symbols[6],
233 DPHY_SYM8, pattern_symbols[7]);
234}
235
236static void set_dp_phy_pattern_d102(
237 struct dce110_link_encoder *enc110)
238{
239 /* Disable PHY Bypass mode to setup the test pattern */
240 enable_phy_bypass_mode(enc110, false);
241
242 /* For 10-bit PRBS or debug symbols
243 * please use the following sequence: */
244
245 /* Enable debug symbols on the lanes */
246
247 disable_prbs_symbols(enc110, true);
248
249 /* Disable PRBS mode,
250 * make sure DPHY_PRBS_CNTL.DPHY_PRBS_EN=0 */
251
252 disable_prbs_mode(enc110);
253
254 /* Program debug symbols to be output */
255 {
256 uint16_t pattern_symbols[8] = {
257 0x2AA, 0x2AA, 0x2AA, 0x2AA,
258 0x2AA, 0x2AA, 0x2AA, 0x2AA
259 };
260
261 program_pattern_symbols(enc110, pattern_symbols);
262 }
263
264 /* Enable phy bypass mode to enable the test pattern */
265
266 enable_phy_bypass_mode(enc110, true);
267}
268
269static void set_link_training_complete(
270 struct dce110_link_encoder *enc110,
271 bool complete)
272{
273 /* This register resides in DP back end block;
274 * transmitter is used for the offset */
275
276 REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
277
278}
279
280void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
281 struct link_encoder *enc,
282 uint32_t index)
283{
284 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
285 /* Write Training Pattern */
286
287 REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
288
289 /* Set HW Register Training Complete to false */
290
291 set_link_training_complete(enc110, false);
292
293 /* Disable PHY Bypass mode to output Training Pattern */
294
295 enable_phy_bypass_mode(enc110, false);
296
297 /* Disable PRBS mode,
298 * make sure DPHY_PRBS_CNTL.DPHY_PRBS_EN=0 */
299
300 disable_prbs_mode(enc110);
301}
302
303static void set_dp_phy_pattern_symbol_error(
304 struct dce110_link_encoder *enc110)
305{
306 /* Disable PHY Bypass mode to setup the test pattern */
307 uint32_t value = 0x0;
308
309 enable_phy_bypass_mode(enc110, false);
310
311 /* program correct panel mode*/
312 {
313 ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
314 /*DCE 120 does not have this reg*/
315
316 REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
317 }
318
319 /* A PRBS23 pattern is used for most DP electrical measurements. */
320
321 /* Enable PRBS symbols on the lanes */
322
323 disable_prbs_symbols(enc110, false);
324
325 /* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
326 {
327 REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
328 DPHY_PRBS_SEL, 1,
329 DPHY_PRBS_EN, 1);
330 }
331
332 /* Enable phy bypass mode to enable the test pattern */
333
334 enable_phy_bypass_mode(enc110, true);
335}
336
337static void set_dp_phy_pattern_prbs7(
338 struct dce110_link_encoder *enc110)
339{
340 /* Disable PHY Bypass mode to setup the test pattern */
341
342 enable_phy_bypass_mode(enc110, false);
343
344 /* A PRBS7 pattern is used for most DP electrical measurements. */
345
346 /* Enable PRBS symbols on the lanes */
347
348 disable_prbs_symbols(enc110, false);
349
350 /* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
351 {
352 REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
353 DPHY_PRBS_SEL, 0,
354 DPHY_PRBS_EN, 1);
355 }
356
357 /* Enable phy bypass mode to enable the test pattern */
358
359 enable_phy_bypass_mode(enc110, true);
360}
361
362static void set_dp_phy_pattern_80bit_custom(
363 struct dce110_link_encoder *enc110,
364 const uint8_t *pattern)
365{
366 /* Disable PHY Bypass mode to setup the test pattern */
367 enable_phy_bypass_mode(enc110, false);
368
369 /* Enable debug symbols on the lanes */
370
371 disable_prbs_symbols(enc110, true);
372
373 /* Enable PHY bypass mode to enable the test pattern */
374 /* TODO is it really needed ? */
375
376 enable_phy_bypass_mode(enc110, true);
377
378 /* Program 80 bit custom pattern */
379 {
380 uint16_t pattern_symbols[8];
381
382 pattern_symbols[0] =
383 ((pattern[1] & 0x03) << 8) | pattern[0];
384 pattern_symbols[1] =
385 ((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
386 pattern_symbols[2] =
387 ((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
388 pattern_symbols[3] =
389 (pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
390 pattern_symbols[4] =
391 ((pattern[6] & 0x03) << 8) | pattern[5];
392 pattern_symbols[5] =
393 ((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
394 pattern_symbols[6] =
395 ((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
396 pattern_symbols[7] =
397 (pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
398
399 program_pattern_symbols(enc110, pattern_symbols);
400 }
401
402 /* Enable phy bypass mode to enable the test pattern */
403
404 enable_phy_bypass_mode(enc110, true);
405}
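/* Worked example of the packing above: the 80-bit custom pattern is sliced
 * into eight 10-bit symbols, least-significant byte first. If
 * pattern[0] = 0xAA and pattern[1] = 0x03, then
 *
 *	pattern_symbols[0] = ((0x03 & 0x03) << 8) | 0xAA = 0x3AA
 *
 * i.e. symbol 0 carries pattern[0] in its low 8 bits and the low 2 bits of
 * pattern[1] in its top 2 bits; the remaining symbols continue the same
 * 10-bit slicing across pattern[1..9].
 */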
406
407static void set_dp_phy_pattern_hbr2_compliance(
408 struct dce110_link_encoder *enc110)
409{
410
411	/* Previously there was a register, DP_HBR2_EYE_PATTERN,
412	 * that was enabled to get the pattern.
413	 * It does not work with the latest spec change,
414	 * so we program the following registers manually.
415 *
416 * The following settings have been confirmed
417 * by Nick Chorney and Sandra Liu */
418
419 /* Disable PHY Bypass mode to setup the test pattern */
420
421 enable_phy_bypass_mode(enc110, false);
422
423 /* Setup DIG encoder in DP SST mode */
424
425 enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT);
426
427 /* program correct panel mode*/
428 {
429 ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
430
431 REG_WRITE(DP_DPHY_INTERNAL_CTRL, 0x0);
432 }
433
434 /* no vbid after BS (SR)
435	 * DP_LINK_FRAMING_CNTL change history (Sandra Liu):
436 * 11000260 / 11000104 / 110000FC */
437
438	/* TODO DP_LINK_FRAMING_CNTL should always use the hardware default
439	 * value, except when outputting the hbr2_compliance pattern for
440	 * physical PHY measurement. This is not a normal use case; SW should
441	 * reset this register to the hardware default value after the HBR2
442	 * eye pattern is no longer needed. */
443 BREAK_TO_DEBUGGER();
444 /* TODO: do we still need this, find out at compliance test
445 addr = mmDP_LINK_FRAMING_CNTL + fe_addr_offset;
446
447 value = dal_read_reg(ctx, addr);
448
449 set_reg_field_value(value, 0xFC,
450 DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL);
451 set_reg_field_value(value, 1,
452 DP_LINK_FRAMING_CNTL, DP_VBID_DISABLE);
453 set_reg_field_value(value, 1,
454 DP_LINK_FRAMING_CNTL, DP_VID_ENHANCED_FRAME_MODE);
455
456 dal_write_reg(ctx, addr, value);
457 */
458 /* swap every BS with SR */
459
460 REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
461
462 /*TODO add support for this test pattern
463 * support_dp_hbr2_eye_pattern
464 */
465
466 /* set link training complete */
467 set_link_training_complete(enc110, true);
468 /* do not enable video stream */
469
470 REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
471
472 /* Disable PHY Bypass mode to setup the test pattern */
473
474 enable_phy_bypass_mode(enc110, false);
475}
476
477static void set_dp_phy_pattern_passthrough_mode(
478 struct dce110_link_encoder *enc110,
479 enum dp_panel_mode panel_mode)
480{
481 uint32_t value;
482
483 /* program correct panel mode */
484 {
485 ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
486 value = REG_READ(DP_DPHY_INTERNAL_CTRL);
487
488 switch (panel_mode) {
489 case DP_PANEL_MODE_EDP:
490 value = 0x1;
491 break;
492 case DP_PANEL_MODE_SPECIAL:
493 value = 0x11;
494 break;
495 default:
496 value = 0x0;
497 break;
498 }
499
500 REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
501 }
502
503 REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
504
505 /* set link training complete */
506
507 set_link_training_complete(enc110, true);
508
509 /* Disable PHY Bypass mode to setup the test pattern */
510
511 enable_phy_bypass_mode(enc110, false);
512
513 /* Disable PRBS mode,
514 * make sure DPHY_PRBS_CNTL.DPHY_PRBS_EN=0 */
515
516 disable_prbs_mode(enc110);
517}
518
519/* return value is bit-vector */
520static uint8_t get_frontend_source(
521 enum engine_id engine)
522{
523 switch (engine) {
524 case ENGINE_ID_DIGA:
525 return DCE110_DIG_FE_SOURCE_SELECT_DIGA;
526 case ENGINE_ID_DIGB:
527 return DCE110_DIG_FE_SOURCE_SELECT_DIGB;
528 case ENGINE_ID_DIGC:
529 return DCE110_DIG_FE_SOURCE_SELECT_DIGC;
530 case ENGINE_ID_DIGD:
531 return DCE110_DIG_FE_SOURCE_SELECT_DIGD;
532 case ENGINE_ID_DIGE:
533 return DCE110_DIG_FE_SOURCE_SELECT_DIGE;
534 case ENGINE_ID_DIGF:
535 return DCE110_DIG_FE_SOURCE_SELECT_DIGF;
536 default:
537 ASSERT_CRITICAL(false);
538 return DCE110_DIG_FE_SOURCE_SELECT_INVALID;
539 }
540}
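/* The DCE110_DIG_FE_SOURCE_SELECT_* values above are one-hot bits, so the
 * returned values can be OR'ed together wherever the hardware interface
 * expects a bit-vector of front ends, e.g. DIGA | DIGB == 0x1 | 0x2 == 0x3.
 * Whether a particular register accepts more than one bit at a time is
 * hardware-specific.
 */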
541
542static void configure_encoder(
543 struct dce110_link_encoder *enc110,
544 const struct dc_link_settings *link_settings)
545{
546 /* set number of lanes */
547
548 REG_SET(DP_CONFIG, 0,
549 DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
550
551 /* setup scrambler */
552 REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
553}
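/* Note on the lane-count programming above: DP_UDI_LANES is written as
 * lane_count - LANE_COUNT_ONE, so assuming LANE_COUNT_ONE == 1 (i.e. the
 * enum counts lanes directly), a 4-lane link programs DP_UDI_LANES = 3 and
 * a 1-lane link programs 0.
 */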
554
555static bool is_panel_powered_on(struct dce110_link_encoder *enc110)
556{
557 bool ret;
558 uint32_t value;
559
560 REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
561 ret = value;
562
563 return ret == 1;
564}
565
566
567/* TODO duplicate of dc_link.c version */
568static struct gpio *get_hpd_gpio(const struct link_encoder *enc)
569{
570 enum bp_result bp_result;
571 struct dc_bios *dcb = enc->ctx->dc_bios;
572 struct graphics_object_hpd_info hpd_info;
573 struct gpio_pin_info pin_info;
574
575 if (dcb->funcs->get_hpd_info(dcb, enc->connector, &hpd_info) != BP_RESULT_OK)
576 return NULL;
577
578 bp_result = dcb->funcs->get_gpio_pin_info(dcb,
579 hpd_info.hpd_int_gpio_uid, &pin_info);
580
581 if (bp_result != BP_RESULT_OK) {
582 ASSERT(bp_result == BP_RESULT_NORECORD);
583 return NULL;
584 }
585
586 return dal_gpio_service_create_irq(
587 enc->ctx->gpio_service,
588 pin_info.offset,
589 pin_info.mask);
590}
591
592/*
593 * @brief
594 * eDP only.
595 */
596static void link_encoder_edp_wait_for_hpd_ready(
597 struct dce110_link_encoder *enc110,
598 bool power_up)
599{
600 struct dc_context *ctx = enc110->base.ctx;
601 struct graphics_object_id connector = enc110->base.connector;
602 struct gpio *hpd;
603 bool edp_hpd_high = false;
604 uint32_t time_elapsed = 0;
605 uint32_t timeout = power_up ?
606 PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;
607
608 if (dal_graphics_object_id_get_connector_id(connector) !=
609 CONNECTOR_ID_EDP) {
610 BREAK_TO_DEBUGGER();
611 return;
612 }
613
614 if (!power_up)
615	/* From KV, we will not wait for HPD to go low after turning off VCC -
616	 * instead, we will check the SW timer in power_up(). */
617 return;
618
619 /* when we power on/off the eDP panel,
620 * we need to wait until SENSE bit is high/low */
621
622 /* obtain HPD */
623 /* TODO what to do with this? */
624 hpd = get_hpd_gpio(&enc110->base);
625
626 if (!hpd) {
627 BREAK_TO_DEBUGGER();
628 return;
629 }
630
631 dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);
632
633 /* wait until timeout or panel detected */
634
635 do {
636 uint32_t detected = 0;
637
638 dal_gpio_get_value(hpd, &detected);
639
640 if (!(detected ^ power_up)) {
641 edp_hpd_high = true;
642 break;
643 }
644
645 msleep(HPD_CHECK_INTERVAL);
646
647 time_elapsed += HPD_CHECK_INTERVAL;
648 } while (time_elapsed < timeout);
649
650 dal_gpio_close(hpd);
651
652 dal_gpio_destroy_irq(&hpd);
653
654 if (false == edp_hpd_high) {
655 dm_logger_write(ctx->logger, LOG_ERROR,
656 "%s: wait timed out!\n", __func__);
657 }
658}
659
660/*
661 * @brief
662 * eDP only. Control the power of the eDP panel.
663 */
664void dce110_link_encoder_edp_power_control(
665 struct link_encoder *enc,
666 bool power_up)
667{
668 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
669 struct dc_context *ctx = enc110->base.ctx;
670 struct bp_transmitter_control cntl = { 0 };
671 enum bp_result bp_result;
672
673 if (dal_graphics_object_id_get_connector_id(enc110->base.connector) !=
674 CONNECTOR_ID_EDP) {
675 BREAK_TO_DEBUGGER();
676 return;
677 }
678
679 if ((power_up && !is_panel_powered_on(enc110)) ||
680 (!power_up && is_panel_powered_on(enc110))) {
681
682 /* Send VBIOS command to prompt eDP panel power */
683
684 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
685 "%s: Panel Power action: %s\n",
686 __func__, (power_up ? "On":"Off"));
687
688 cntl.action = power_up ?
689 TRANSMITTER_CONTROL_POWER_ON :
690 TRANSMITTER_CONTROL_POWER_OFF;
691 cntl.transmitter = enc110->base.transmitter;
692 cntl.connector_obj_id = enc110->base.connector;
693 cntl.coherent = false;
694 cntl.lanes_number = LANE_COUNT_FOUR;
695 cntl.hpd_sel = enc110->base.hpd_source;
696
697 bp_result = link_transmitter_control(enc110, &cntl);
698
699 if (BP_RESULT_OK != bp_result) {
700
701 dm_logger_write(ctx->logger, LOG_ERROR,
702 "%s: Panel Power bp_result: %d\n",
703 __func__, bp_result);
704 }
705 } else {
706 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
707 "%s: Skipping Panel Power action: %s\n",
708 __func__, (power_up ? "On":"Off"));
709 }
710
711 link_encoder_edp_wait_for_hpd_ready(enc110, true);
712}
713
714static void aux_initialize(
715 struct dce110_link_encoder *enc110)
716{
717 struct dc_context *ctx = enc110->base.ctx;
718 enum hpd_source_id hpd_source = enc110->base.hpd_source;
719 uint32_t addr = AUX_REG(AUX_CONTROL);
720 uint32_t value = dm_read_reg(ctx, addr);
721
722 set_reg_field_value(value, hpd_source, AUX_CONTROL, AUX_HPD_SEL);
723 set_reg_field_value(value, 0, AUX_CONTROL, AUX_LS_READ_EN);
724 dm_write_reg(ctx, addr, value);
725
726 addr = AUX_REG(AUX_DPHY_RX_CONTROL0);
727 value = dm_read_reg(ctx, addr);
728
729 /* 1/4 window (the maximum allowed) */
730 set_reg_field_value(value, 1,
731 AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW);
732 dm_write_reg(ctx, addr, value);
733
734}
735
736/*todo: cloned in stream enc, fix*/
737static bool is_panel_backlight_on(struct dce110_link_encoder *enc110)
738{
739 uint32_t value;
740
741 REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
742
743 return value;
744}
745
746/*todo: cloned in stream enc, fix*/
747/*
748 * @brief
749 * eDP only. Control the backlight of the eDP panel
750 */
751void dce110_link_encoder_edp_backlight_control(
752 struct link_encoder *enc,
753 bool enable)
754{
755 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
756 struct dc_context *ctx = enc110->base.ctx;
757 struct bp_transmitter_control cntl = { 0 };
758
759 if (dal_graphics_object_id_get_connector_id(enc110->base.connector)
760 != CONNECTOR_ID_EDP) {
761 BREAK_TO_DEBUGGER();
762 return;
763 }
764
765 if (enable && is_panel_backlight_on(enc110)) {
766 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
767 "%s: panel already powered up. Do nothing.\n",
768 __func__);
769 return;
770 }
771
772 if (!enable && !is_panel_powered_on(enc110)) {
773 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
774 "%s: panel already powered down. Do nothing.\n",
775 __func__);
776 return;
777 }
778
779 /* Send VBIOS command to control eDP panel backlight */
780
781 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
782 "%s: backlight action: %s\n",
783 __func__, (enable ? "On":"Off"));
784
785 cntl.action = enable ?
786 TRANSMITTER_CONTROL_BACKLIGHT_ON :
787 TRANSMITTER_CONTROL_BACKLIGHT_OFF;
788 /*cntl.engine_id = ctx->engine;*/
789 cntl.transmitter = enc110->base.transmitter;
790 cntl.connector_obj_id = enc110->base.connector;
791 /*todo: unhardcode*/
792 cntl.lanes_number = LANE_COUNT_FOUR;
793 cntl.hpd_sel = enc110->base.hpd_source;
794
795 /* For eDP, the following delays might need to be considered
796 * after link training completed:
797 * idle period - min. accounts for required BS-Idle pattern,
798	 * max. allows for source frame synchronization;
799	 * 50 msec max. delay from valid video data from source
800	 * to video on display or backlight enable.
801 *
802 * Disable the delay for now.
803 * Enable it in the future if necessary.
804 */
805 /* dc_service_sleep_in_milliseconds(50); */
806 link_transmitter_control(enc110, &cntl);
807}
808
809static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
810{
811 uint32_t value;
812
813 REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
814 return value;
815}
816
817static void link_encoder_disable(struct dce110_link_encoder *enc110)
818{
819 /* reset training pattern */
820 REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
821 DPHY_TRAINING_PATTERN_SEL, 0);
822
823 /* reset training complete */
824 REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
825
826 /* reset panel mode */
827 ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
828 REG_WRITE(DP_DPHY_INTERNAL_CTRL, 0);
829}
830
831static void hpd_initialize(
832 struct dce110_link_encoder *enc110)
833{
834 /* Associate HPD with DIG_BE */
835 enum hpd_source_id hpd_source = enc110->base.hpd_source;
836
837 REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
838}
839
840bool dce110_link_encoder_validate_dvi_output(
841 const struct dce110_link_encoder *enc110,
842 enum signal_type connector_signal,
843 enum signal_type signal,
844 const struct dc_crtc_timing *crtc_timing)
845{
846 uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
847
848 if (enc110->base.features.max_pixel_clock < TMDS_MAX_PIXEL_CLOCK)
849 max_pixel_clock = enc110->base.features.max_pixel_clock;
850
851 if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
852 max_pixel_clock <<= 1;
853
854	/* This handles the case of HDMI downgrade to DVI: we don't want to
855	 * cap the pixel clock if the DDI is not DVI.
856	 */
857 if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
858 connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
859 max_pixel_clock = enc110->base.features.max_pixel_clock;
860
861	/* DVI only supports RGB pixel encoding */
862 if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
863 return false;
864
865 if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
866 return false;
867
868 if (crtc_timing->pix_clk_khz > max_pixel_clock)
869 return false;
870
871 /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
872 switch (crtc_timing->display_color_depth) {
873 case COLOR_DEPTH_666:
874 case COLOR_DEPTH_888:
875 break;
876 case COLOR_DEPTH_101010:
877 case COLOR_DEPTH_161616:
878 if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
879 return false;
880 break;
881 default:
882 return false;
883 }
884
885 return true;
886}
887
888static bool dce110_link_encoder_validate_hdmi_output(
889 const struct dce110_link_encoder *enc110,
890 const struct dc_crtc_timing *crtc_timing,
891 int adjusted_pix_clk_khz)
892{
893 enum dc_color_depth max_deep_color =
894 enc110->base.features.max_hdmi_deep_color;
895
896 if (max_deep_color > enc110->base.features.max_deep_color)
897 max_deep_color = enc110->base.features.max_deep_color;
898
899 if (max_deep_color < crtc_timing->display_color_depth)
900 return false;
901
902 if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
903 return false;
904
905 if ((adjusted_pix_clk_khz == 0) ||
906 (adjusted_pix_clk_khz > enc110->base.features.max_hdmi_pixel_clock) ||
907 (adjusted_pix_clk_khz > enc110->base.features.max_pixel_clock))
908 return false;
909
910 /* DCE11 HW does not support 420 */
911 if (!enc110->base.features.ycbcr420_supported &&
912 crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
913 return false;
914
915 return true;
916}
917
918bool dce110_link_encoder_validate_rgb_output(
919 const struct dce110_link_encoder *enc110,
920 const struct dc_crtc_timing *crtc_timing)
921{
922 if (crtc_timing->pix_clk_khz > enc110->base.features.max_pixel_clock)
923 return false;
924
925 if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
926 return false;
927
928 return true;
929}
930
931bool dce110_link_encoder_validate_dp_output(
932 const struct dce110_link_encoder *enc110,
933 const struct dc_crtc_timing *crtc_timing)
934{
935 /* default RGB only */
936 if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
937 return true;
938
939 if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
940 return true;
941
942	/* for the DCE 8.x or later DP Y-only feature, we need the ASIC cap
943	 * plus FeatureSupportDPYonly; COLOR_DEPTH_666 is not supported */
944 if (crtc_timing->flags.Y_ONLY &&
945 enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
946 crtc_timing->display_color_depth != COLOR_DEPTH_666)
947 return true;
948
949 return false;
950}
951
952bool dce110_link_encoder_validate_wireless_output(
953 const struct dce110_link_encoder *enc110,
954 const struct dc_crtc_timing *crtc_timing)
955{
956 if (crtc_timing->pix_clk_khz > enc110->base.features.max_pixel_clock)
957 return false;
958
959 /* Wireless only supports YCbCr444 */
960 if (crtc_timing->pixel_encoding ==
961 PIXEL_ENCODING_YCBCR444)
962 return true;
963
964 return false;
965}
966
967bool dce110_link_encoder_construct(
968 struct dce110_link_encoder *enc110,
969 const struct encoder_init_data *init_data,
970 const struct dce110_link_enc_registers *link_regs,
971 const struct dce110_link_enc_aux_registers *aux_regs,
972 const struct dce110_link_enc_hpd_registers *hpd_regs)
973{
974 enc110->base.funcs = &dce110_lnk_enc_funcs;
975 enc110->base.ctx = init_data->ctx;
976 enc110->base.id = init_data->encoder;
977
978 enc110->base.hpd_source = init_data->hpd_source;
979 enc110->base.connector = init_data->connector;
980 enc110->base.input_signals = SIGNAL_TYPE_ALL;
981
982 enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
983
984 enc110->base.features.flags.raw = 0;
985
986 enc110->base.transmitter = init_data->transmitter;
987
988 enc110->base.features.flags.bits.IS_AUDIO_CAPABLE = true;
989
990 enc110->base.features.max_pixel_clock =
991 MAX_ENCODER_CLK;
992
993 enc110->base.features.max_deep_color = COLOR_DEPTH_121212;
994 enc110->base.features.max_hdmi_deep_color = COLOR_DEPTH_121212;
995
996	/* set the flag to indicate whether the driver polls the I2C data pin
997	 * while doing the DP sink detect
998 */
999
1000/* if (dal_adapter_service_is_feature_supported(as,
1001 FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
1002 enc110->base.features.flags.bits.
1003 DP_SINK_DETECT_POLL_DATA_PIN = true;*/
1004
1005 enc110->base.output_signals =
1006 SIGNAL_TYPE_DVI_SINGLE_LINK |
1007 SIGNAL_TYPE_DVI_DUAL_LINK |
1008 SIGNAL_TYPE_LVDS |
1009 SIGNAL_TYPE_DISPLAY_PORT |
1010 SIGNAL_TYPE_DISPLAY_PORT_MST |
1011 SIGNAL_TYPE_EDP |
1012 SIGNAL_TYPE_HDMI_TYPE_A;
1013
1014	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
1015	 * For non-MST UNIPHY, SW always assigns DIG_FE 1:1 mapped to DIG_BE.
1016	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
1017	 * preferred DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI
1018	 * and LVDS. The preferred DIG assignment is decided by board design.
1019	 * For DCE 8.0, there are at most 6 UNIPHYs; we assume board design
1020	 * and VBIOS will filter out a 7th UNIPHY for DCE 8.0.
1021	 * By this, adding DIGG should not hurt DCE 8.0.
1022	 * This lets DCE 8.1 share DCE 8.0 code as much as possible.
1023	 */
1024
1025 enc110->link_regs = link_regs;
1026 enc110->aux_regs = aux_regs;
1027 enc110->hpd_regs = hpd_regs;
1028
1029 switch (enc110->base.transmitter) {
1030 case TRANSMITTER_UNIPHY_A:
1031 enc110->base.preferred_engine = ENGINE_ID_DIGA;
1032 break;
1033 case TRANSMITTER_UNIPHY_B:
1034 enc110->base.preferred_engine = ENGINE_ID_DIGB;
1035 break;
1036 case TRANSMITTER_UNIPHY_C:
1037 enc110->base.preferred_engine = ENGINE_ID_DIGC;
1038 break;
1039 case TRANSMITTER_UNIPHY_D:
1040 enc110->base.preferred_engine = ENGINE_ID_DIGD;
1041 break;
1042 case TRANSMITTER_UNIPHY_E:
1043 enc110->base.preferred_engine = ENGINE_ID_DIGE;
1044 break;
1045 case TRANSMITTER_UNIPHY_F:
1046 enc110->base.preferred_engine = ENGINE_ID_DIGF;
1047 break;
1048 default:
1049 ASSERT_CRITICAL(false);
1050 enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
1051 }
1052
1053 dm_logger_write(init_data->ctx->logger, LOG_I2C_AUX,
1054 "Using channel: %s [%d]\n",
1055 DECODE_CHANNEL_ID(init_data->channel),
1056 init_data->channel);
1057
1058 /* Override features with DCE-specific values */
1059 {
1060 struct bp_encoder_cap_info bp_cap_info = {0};
1061 const struct dc_vbios_funcs *bp_funcs = enc110->base.ctx->dc_bios->funcs;
1062
1063 if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
1064 enc110->base.ctx->dc_bios, enc110->base.id,
1065 &bp_cap_info))
1066 enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
1067 bp_cap_info.DP_HBR2_CAP;
1068 }
1069 /* test pattern 3 support */
1070 enc110->base.features.flags.bits.IS_TPS3_CAPABLE = true;
1071
1072 enc110->base.features.flags.bits.IS_Y_ONLY_CAPABLE = false;
1073 /*
1074 dal_adapter_service_is_feature_supported(as,
1075 FEATURE_SUPPORT_DP_Y_ONLY);
1076*/
1077 enc110->base.features.flags.bits.IS_YCBCR_CAPABLE = true;
1078 /*
1079 dal_adapter_service_is_feature_supported(as,
1080 FEATURE_SUPPORT_DP_YUV);
1081 */
1082 return true;
1083}
1084
1085bool dce110_link_encoder_validate_output_with_stream(
1086 struct link_encoder *enc,
1087 struct pipe_ctx *pipe_ctx)
1088{
1089 struct core_stream *stream = pipe_ctx->stream;
1090 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1091 bool is_valid;
1092
1093 switch (pipe_ctx->stream->signal) {
1094 case SIGNAL_TYPE_DVI_SINGLE_LINK:
1095 case SIGNAL_TYPE_DVI_DUAL_LINK:
1096 is_valid = dce110_link_encoder_validate_dvi_output(
1097 enc110,
1098 stream->sink->link->public.connector_signal,
1099 pipe_ctx->stream->signal,
1100 &stream->public.timing);
1101 break;
1102 case SIGNAL_TYPE_HDMI_TYPE_A:
1103 is_valid = dce110_link_encoder_validate_hdmi_output(
1104 enc110,
1105 &stream->public.timing,
1106 stream->phy_pix_clk);
1107 break;
1108 case SIGNAL_TYPE_RGB:
1109 is_valid = dce110_link_encoder_validate_rgb_output(
1110 enc110, &stream->public.timing);
1111 break;
1112 case SIGNAL_TYPE_DISPLAY_PORT:
1113 case SIGNAL_TYPE_DISPLAY_PORT_MST:
1114 case SIGNAL_TYPE_EDP:
1115 is_valid = dce110_link_encoder_validate_dp_output(
1116 enc110, &stream->public.timing);
1117 break;
1118 case SIGNAL_TYPE_WIRELESS:
1119 is_valid = dce110_link_encoder_validate_wireless_output(
1120 enc110, &stream->public.timing);
1121 break;
1122 default:
1123 is_valid = true;
1124 break;
1125 }
1126
1127 return is_valid;
1128}
1129
1130void dce110_link_encoder_hw_init(
1131 struct link_encoder *enc)
1132{
1133 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1134 struct dc_context *ctx = enc110->base.ctx;
1135 struct bp_transmitter_control cntl = { 0 };
1136 enum bp_result result;
1137
1138 cntl.action = TRANSMITTER_CONTROL_INIT;
1139 cntl.engine_id = ENGINE_ID_UNKNOWN;
1140 cntl.transmitter = enc110->base.transmitter;
1141 cntl.connector_obj_id = enc110->base.connector;
1142 cntl.lanes_number = LANE_COUNT_FOUR;
1143 cntl.coherent = false;
1144 cntl.hpd_sel = enc110->base.hpd_source;
1145
1146 result = link_transmitter_control(enc110, &cntl);
1147
1148 if (result != BP_RESULT_OK) {
1149 dm_logger_write(ctx->logger, LOG_ERROR,
1150 "%s: Failed to execute VBIOS command table!\n",
1151 __func__);
1152 BREAK_TO_DEBUGGER();
1153 return;
1154 }
1155
1156 if (enc110->base.connector.id == CONNECTOR_ID_LVDS) {
1157 cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
1158
1159 result = link_transmitter_control(enc110, &cntl);
1160
1161 ASSERT(result == BP_RESULT_OK);
1162
1163 } else if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
1164 enc->funcs->power_control(&enc110->base, true);
1165 }
1166 aux_initialize(enc110);
1167
1168	/* Reinitialize HPD.
1169	 * hpd_initialize() will pass the DIG_FE id to the HW context.
1170	 * All other routines within the HW context will use fe_engine_offset
1171	 * as the DIG_FE id even if the caller passes a DIG_FE id.
1172	 * So this routine must be called first. */
1173 hpd_initialize(enc110);
1174}
1175
1176void dce110_link_encoder_destroy(struct link_encoder **enc)
1177{
1178 dm_free(TO_DCE110_LINK_ENC(*enc));
1179 *enc = NULL;
1180}
1181
1182void dce110_link_encoder_setup(
1183 struct link_encoder *enc,
1184 enum signal_type signal)
1185{
1186 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1187
1188 switch (signal) {
1189 case SIGNAL_TYPE_EDP:
1190 case SIGNAL_TYPE_DISPLAY_PORT:
1191 /* DP SST */
1192 REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
1193 break;
1194 case SIGNAL_TYPE_LVDS:
1195 /* LVDS */
1196 REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
1197 break;
1198 case SIGNAL_TYPE_DVI_SINGLE_LINK:
1199 case SIGNAL_TYPE_DVI_DUAL_LINK:
1200 /* TMDS-DVI */
1201 REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
1202 break;
1203 case SIGNAL_TYPE_HDMI_TYPE_A:
1204 /* TMDS-HDMI */
1205 REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
1206 break;
1207 case SIGNAL_TYPE_DISPLAY_PORT_MST:
1208 /* DP MST */
1209 REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
1210 break;
1211 default:
1212 ASSERT_CRITICAL(false);
1213 /* invalid mode ! */
1214 break;
1215 }
1216
1217}
1218
1219/* TODO: still need depth or just pass in adjusted pixel clock? */
1220void dce110_link_encoder_enable_tmds_output(
1221 struct link_encoder *enc,
1222 enum clock_source_id clock_source,
1223 enum dc_color_depth color_depth,
1224 bool hdmi,
1225 bool dual_link,
1226 uint32_t pixel_clock)
1227{
1228 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1229 struct dc_context *ctx = enc110->base.ctx;
1230 struct bp_transmitter_control cntl = { 0 };
1231 enum bp_result result;
1232
1233 /* Enable the PHY */
1234
1235 cntl.action = TRANSMITTER_CONTROL_ENABLE;
1236 cntl.engine_id = ENGINE_ID_UNKNOWN;
1237 cntl.transmitter = enc110->base.transmitter;
1238 cntl.pll_id = clock_source;
1239 if (hdmi) {
1240 cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1241 cntl.lanes_number = 4;
1242 } else if (dual_link) {
1243 cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1244 cntl.lanes_number = 8;
1245 } else {
1246 cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1247 cntl.lanes_number = 4;
1248 }
1249 cntl.hpd_sel = enc110->base.hpd_source;
1250
1251 cntl.pixel_clock = pixel_clock;
1252 cntl.color_depth = color_depth;
1253
1254 result = link_transmitter_control(enc110, &cntl);
1255
1256 if (result != BP_RESULT_OK) {
1257 dm_logger_write(ctx->logger, LOG_ERROR,
1258 "%s: Failed to execute VBIOS command table!\n",
1259 __func__);
1260 BREAK_TO_DEBUGGER();
1261 }
1262}
1263
1264/* enables DP PHY output */
1265void dce110_link_encoder_enable_dp_output(
1266 struct link_encoder *enc,
1267 const struct dc_link_settings *link_settings,
1268 enum clock_source_id clock_source)
1269{
1270 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1271 struct dc_context *ctx = enc110->base.ctx;
1272 struct bp_transmitter_control cntl = { 0 };
1273 enum bp_result result;
1274
1275 /* Enable the PHY */
1276
1277 /* number_of_lanes is used for pixel clock adjust,
1278 * but it's not passed to asic_control.
1279 * We need to set number of lanes manually.
1280 */
1281 configure_encoder(enc110, link_settings);
1282
1283 cntl.action = TRANSMITTER_CONTROL_ENABLE;
1284 cntl.engine_id = ENGINE_ID_UNKNOWN;
1285 cntl.transmitter = enc110->base.transmitter;
1286 cntl.pll_id = clock_source;
1287 cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
1288 cntl.lanes_number = link_settings->lane_count;
1289 cntl.hpd_sel = enc110->base.hpd_source;
1290 cntl.pixel_clock = link_settings->link_rate
1291 * LINK_RATE_REF_FREQ_IN_KHZ;
1292 /* TODO: check if undefined works */
1293 cntl.color_depth = COLOR_DEPTH_UNDEFINED;
1294
1295 result = link_transmitter_control(enc110, &cntl);
1296
1297 if (result != BP_RESULT_OK) {
1298 dm_logger_write(ctx->logger, LOG_ERROR,
1299 "%s: Failed to execute VBIOS command table!\n",
1300 __func__);
1301 BREAK_TO_DEBUGGER();
1302 }
1303}
1304
1305/* enables DP PHY output in MST mode */
1306void dce110_link_encoder_enable_dp_mst_output(
1307 struct link_encoder *enc,
1308 const struct dc_link_settings *link_settings,
1309 enum clock_source_id clock_source)
1310{
1311 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1312 struct dc_context *ctx = enc110->base.ctx;
1313 struct bp_transmitter_control cntl = { 0 };
1314 enum bp_result result;
1315
1316 /* Enable the PHY */
1317
1318 /* number_of_lanes is used for pixel clock adjust,
1319 * but it's not passed to asic_control.
1320 * We need to set number of lanes manually.
1321 */
1322 configure_encoder(enc110, link_settings);
1323
1324 cntl.action = TRANSMITTER_CONTROL_ENABLE;
1325 cntl.engine_id = ENGINE_ID_UNKNOWN;
1326 cntl.transmitter = enc110->base.transmitter;
1327 cntl.pll_id = clock_source;
1328 cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
1329 cntl.lanes_number = link_settings->lane_count;
1330 cntl.hpd_sel = enc110->base.hpd_source;
1331 cntl.pixel_clock = link_settings->link_rate
1332 * LINK_RATE_REF_FREQ_IN_KHZ;
1333 /* TODO: check if undefined works */
1334 cntl.color_depth = COLOR_DEPTH_UNDEFINED;
1335
1336 result = link_transmitter_control(enc110, &cntl);
1337
1338 if (result != BP_RESULT_OK) {
1339 dm_logger_write(ctx->logger, LOG_ERROR,
1340 "%s: Failed to execute VBIOS command table!\n",
1341 __func__);
1342 BREAK_TO_DEBUGGER();
1343 }
1344}
1345/*
1346 * @brief
1347 * Disable transmitter and its encoder
1348 */
1349void dce110_link_encoder_disable_output(
1350 struct link_encoder *enc,
1351 enum signal_type signal)
1352{
1353 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1354 struct dc_context *ctx = enc110->base.ctx;
1355 struct bp_transmitter_control cntl = { 0 };
1356 enum bp_result result;
1357
1358 if (!is_dig_enabled(enc110)) {
1359 /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
1360 return;
1361 }
1362	/* Powering down RX and disabling the GPU PHY should be paired.
1363	 * Disabling the PHY without powering down RX may cause
1364	 * symbol lock loss, upon which we will get a DP Sink interrupt. */
1365
1366 /* There is a case for the DP active dongles
1367 * where we want to disable the PHY but keep RX powered,
1368 * for those we need to ignore DP Sink interrupt
1369 * by checking lane count that has been set
1370 * on the last do_enable_output(). */
1371
1372 /* disable transmitter */
1373 cntl.action = TRANSMITTER_CONTROL_DISABLE;
1374 cntl.transmitter = enc110->base.transmitter;
1375 cntl.hpd_sel = enc110->base.hpd_source;
1376 cntl.signal = signal;
1377 cntl.connector_obj_id = enc110->base.connector;
1378
1379 result = link_transmitter_control(enc110, &cntl);
1380
1381 if (result != BP_RESULT_OK) {
1382 dm_logger_write(ctx->logger, LOG_ERROR,
1383 "%s: Failed to execute VBIOS command table!\n",
1384 __func__);
1385 BREAK_TO_DEBUGGER();
1386 return;
1387 }
1388
1389 /* disable encoder */
1390 if (dc_is_dp_signal(signal))
1391 link_encoder_disable(enc110);
1392
1393 if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
1394 /* power down eDP panel */
1395 /* TODO: Power control cause regression, we should implement
1396 * it properly, for now just comment it.
1397 *
1398 * link_encoder_edp_wait_for_hpd_ready(
1399 link_enc,
1400 link_enc->connector,
1401 false);
1402
1403 * link_encoder_edp_power_control(
1404 link_enc, false); */
1405 }
1406}
1407
1408void dce110_link_encoder_dp_set_lane_settings(
1409 struct link_encoder *enc,
1410 const struct link_training_settings *link_settings)
1411{
1412 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1413 union dpcd_training_lane_set training_lane_set = { { 0 } };
1414 int32_t lane = 0;
1415 struct bp_transmitter_control cntl = { 0 };
1416
1417 if (!link_settings) {
1418 BREAK_TO_DEBUGGER();
1419 return;
1420 }
1421
1422 cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
1423 cntl.transmitter = enc110->base.transmitter;
1424 cntl.connector_obj_id = enc110->base.connector;
1425 cntl.lanes_number = link_settings->link_settings.lane_count;
1426 cntl.hpd_sel = enc110->base.hpd_source;
1427 cntl.pixel_clock = link_settings->link_settings.link_rate *
1428 LINK_RATE_REF_FREQ_IN_KHZ;
1429
1430 for (lane = 0; lane < link_settings->link_settings.lane_count; ++lane) {
1431 /* translate lane settings */
1432
1433 training_lane_set.bits.VOLTAGE_SWING_SET =
1434 link_settings->lane_settings[lane].VOLTAGE_SWING;
1435 training_lane_set.bits.PRE_EMPHASIS_SET =
1436 link_settings->lane_settings[lane].PRE_EMPHASIS;
1437
1438 /* post cursor 2 setting only applies to HBR2 link rate */
1439 if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
1440 /* this is passed to VBIOS
1441 * to program post cursor 2 level */
1442
1443 training_lane_set.bits.POST_CURSOR2_SET =
1444 link_settings->lane_settings[lane].POST_CURSOR2;
1445 }
1446
1447 cntl.lane_select = lane;
1448 cntl.lane_settings = training_lane_set.raw;
1449
1450 /* call VBIOS table to set voltage swing and pre-emphasis */
1451 link_transmitter_control(enc110, &cntl);
1452 }
1453}
1454
1455/* set DP PHY test and training patterns */
1456void dce110_link_encoder_dp_set_phy_pattern(
1457 struct link_encoder *enc,
1458 const struct encoder_set_dp_phy_pattern_param *param)
1459{
1460 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1461
1462 switch (param->dp_phy_pattern) {
1463 case DP_TEST_PATTERN_TRAINING_PATTERN1:
1464 dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
1465 break;
1466 case DP_TEST_PATTERN_TRAINING_PATTERN2:
1467 dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
1468 break;
1469 case DP_TEST_PATTERN_TRAINING_PATTERN3:
1470 dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
1471 break;
1472 case DP_TEST_PATTERN_D102:
1473 set_dp_phy_pattern_d102(enc110);
1474 break;
1475 case DP_TEST_PATTERN_SYMBOL_ERROR:
1476 set_dp_phy_pattern_symbol_error(enc110);
1477 break;
1478 case DP_TEST_PATTERN_PRBS7:
1479 set_dp_phy_pattern_prbs7(enc110);
1480 break;
1481 case DP_TEST_PATTERN_80BIT_CUSTOM:
1482 set_dp_phy_pattern_80bit_custom(
1483 enc110, param->custom_pattern);
1484 break;
1485 case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
1486 set_dp_phy_pattern_hbr2_compliance(enc110);
1487 break;
1488 case DP_TEST_PATTERN_VIDEO_MODE: {
1489 set_dp_phy_pattern_passthrough_mode(
1490 enc110, param->dp_panel_mode);
1491 break;
1492 }
1493
1494 default:
1495 /* invalid phy pattern */
1496 ASSERT_CRITICAL(false);
1497 break;
1498 }
1499}
1500
1501static void fill_stream_allocation_row_info(
1502 const struct link_mst_stream_allocation *stream_allocation,
1503 uint32_t *src,
1504 uint32_t *slots)
1505{
1506 const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
1507
1508 if (stream_enc) {
1509 *src = stream_enc->id;
1510 *slots = stream_allocation->slot_count;
1511 } else {
1512 *src = 0;
1513 *slots = 0;
1514 }
1515}
1516
1517/* programs DP MST VC payload allocation */
1518void dce110_link_encoder_update_mst_stream_allocation_table(
1519 struct link_encoder *enc,
1520 const struct link_mst_stream_allocation_table *table)
1521{
1522 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1523 uint32_t value0 = 0;
1524 uint32_t value1 = 0;
1525 uint32_t value2 = 0;
1526 uint32_t slots = 0;
1527 uint32_t src = 0;
1528 uint32_t retries = 0;
1529
1530	/* For CZ, there are only 3 pipes, so the virtual channel count is up to 3. */
1531
1532 /* --- Set MSE Stream Attribute -
1533 * Setup VC Payload Table on Tx Side,
1534 * Issue allocation change trigger
1535 * to commit payload on both tx and rx side */
1536
1537 /* we should clean-up table each time */
1538
1539 if (table->stream_count >= 1) {
1540 fill_stream_allocation_row_info(
1541 &table->stream_allocations[0],
1542 &src,
1543 &slots);
1544 } else {
1545 src = 0;
1546 slots = 0;
1547 }
1548
1549 REG_UPDATE_2(DP_MSE_SAT0,
1550 DP_MSE_SAT_SRC0, src,
1551 DP_MSE_SAT_SLOT_COUNT0, slots);
1552
1553 if (table->stream_count >= 2) {
1554 fill_stream_allocation_row_info(
1555 &table->stream_allocations[1],
1556 &src,
1557 &slots);
1558 } else {
1559 src = 0;
1560 slots = 0;
1561 }
1562
1563 REG_UPDATE_2(DP_MSE_SAT0,
1564 DP_MSE_SAT_SRC1, src,
1565 DP_MSE_SAT_SLOT_COUNT1, slots);
1566
1567 if (table->stream_count >= 3) {
1568 fill_stream_allocation_row_info(
1569 &table->stream_allocations[2],
1570 &src,
1571 &slots);
1572 } else {
1573 src = 0;
1574 slots = 0;
1575 }
1576
1577 REG_UPDATE_2(DP_MSE_SAT1,
1578 DP_MSE_SAT_SRC2, src,
1579 DP_MSE_SAT_SLOT_COUNT2, slots);
1580
1581 if (table->stream_count >= 4) {
1582 fill_stream_allocation_row_info(
1583 &table->stream_allocations[3],
1584 &src,
1585 &slots);
1586 } else {
1587 src = 0;
1588 slots = 0;
1589 }
1590
1591 REG_UPDATE_2(DP_MSE_SAT1,
1592 DP_MSE_SAT_SRC3, src,
1593 DP_MSE_SAT_SLOT_COUNT3, slots);
1594
1595 /* --- wait for transaction finish */
1596
1597	/* send allocation change trigger (ACT):
1598	 * this step first sends the ACT,
1599	 * then double-buffers the SAT into the hardware,
1600	 * making the new allocation active on the DP MST link */
1601
1602
1603 /* DP_MSE_SAT_UPDATE:
1604 * 0 - No Action
1605 * 1 - Update SAT with trigger
1606 * 2 - Update SAT without trigger */
1607
1608 REG_UPDATE(DP_MSE_SAT_UPDATE,
1609 DP_MSE_SAT_UPDATE, 1);
1610
1611	/* Wait for the update to complete
1612	 * (i.e. the DP_MSE_SAT_UPDATE field is reset to 0),
1613	 * then wait for the transmission
1614	 * of at least 16 MTP headers on the immediate local link;
1615	 * i.e. the read-only DP_MSE_16_MTP_KEEPOUT field is reset to 0.
1616	 * A value of 1 indicates that the DP MST link
1617	 * is in the 16-MTP keepout region after a VC has been added.
1618	 * MST stream bandwidth (VC rate) can be configured
1619	 * after this bit is cleared. */
1620
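	/* Poll every 10 us, up to DP_MST_UPDATE_MAX_RETRY iterations, until
	 * both the SAT update bit and the 16-MTP keepout bit read back 0. */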
1621 do {
1622 udelay(10);
1623
1624 value0 = REG_READ(DP_MSE_SAT_UPDATE);
1625
1626 REG_GET(DP_MSE_SAT_UPDATE,
1627 DP_MSE_SAT_UPDATE, &value1);
1628
1629 REG_GET(DP_MSE_SAT_UPDATE,
1630 DP_MSE_16_MTP_KEEPOUT, &value2);
1631
1632		/* done once both the SAT update bit and the keepout bit are clear */
1633 if (!value1 && !value2)
1634 break;
1635 ++retries;
1636 } while (retries < DP_MST_UPDATE_MAX_RETRY);
1637}
1638
1639void dce110_link_encoder_set_lcd_backlight_level(
1640 struct link_encoder *enc,
1641 uint32_t level)
1642{
1643 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1644
1645 const uint32_t backlight_update_pending_max_retry = 1000;
1646
1647 uint32_t backlight_lock;
1648
1649 uint32_t i;
1650 uint32_t backlight_24bit;
1651 uint32_t backlight_17bit;
1652 uint32_t backlight_16bit;
1653 uint32_t masked_pwm_period;
1654 uint8_t rounding_bit;
1655 uint8_t bit_count;
1656 uint64_t active_duty_cycle;
1657 uint32_t pwm_period_bitcnt;
1658
1659 backlight_lock = REG_READ(BL_PWM_GRP1_REG_LOCK);
1660
1661 /*
1662 * 1. Convert 8-bit value to 17 bit U1.16 format
1663 * (1 integer, 16 fractional bits)
1664 */
1665
1666 /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
1667 * effectively multiplying value by 256/255
1668 * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
1669 */
1670 backlight_24bit = level * 0x10101;
1671
1672	/* 1.2 The upper 16 bits of the 24 bit value form the fraction; the lower
1673	 * 8 bits are used for rounding. Take the most significant of those lower
1674	 * 8 bits as the rounding bit, e.g. for 0xEFEFEF the rounding bit is 1
1675	 */
1676 rounding_bit = (backlight_24bit >> 7) & 1;
1677
1678 /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
1679 * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
1680 */
1681 backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
1682
1683 /*
1684 * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight
1685 * active duty cycle <= backlight period
1686 */
1687
1688 /* 2.1 Apply bitmask for backlight period value based on value of BITCNT
1689 */
1690 {
1691 REG_GET(BL_PWM_PERIOD_CNTL,
1692 BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt);
1693
1694 if (pwm_period_bitcnt == 0)
1695 bit_count = 16;
1696 else
1697 bit_count = pwm_period_bitcnt;
1698 }
1699
1700 /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
1701	REG_GET(BL_PWM_PERIOD_CNTL,
1702		BL_PWM_PERIOD, &masked_pwm_period);
1703	/* keep only the low bit_count bits of the period */
1704	masked_pwm_period &= (1 << bit_count) - 1;
1705
1706	/* 2.2 Calculate the integer active duty cycle. The upper 16 bits
1707	 * contain the integer component, the lower 16 bits the fractional
1708	 * component of the active duty cycle, e.g. 0x21BDC0 = 0xEFF0 * 0x24
1709	 */
1710 active_duty_cycle = backlight_17bit * masked_pwm_period;
1711
1712	/* 2.3 Calculate the 16 bit active duty cycle from the integer and
1713	 * fractional components: shift by bitCount, mask to 16 bits and add the
1714	 * rounding bit from the MSB of the fraction, e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFFF) + 0
1715	 */
1716 backlight_16bit = active_duty_cycle >> bit_count;
1717 backlight_16bit &= 0xFFFF;
1718 backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
1719
1720 REG_UPDATE(BL_PWM_CNTL,
1721 BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
1722
1723 /*
1724 * 3. Program register with updated value
1725 */
1726
1727	/* 3.1 Lock group 1 backlight registers */
1728
1729 REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
1730 BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1);
1731
1732 REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
1733 BL_PWM_GRP1_REG_LOCK, 1);
1734
1735	/* 3.2 Unlock group 1 backlight registers */
1736 REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
1737 BL_PWM_GRP1_REG_LOCK, 0);
1738
1739	/* 3.3 Wait for the update-pending bit to be cleared */
1740 for (i = 0; i < backlight_update_pending_max_retry; ++i) {
1741 REG_GET(BL_PWM_GRP1_REG_LOCK,
1742 BL_PWM_GRP1_REG_UPDATE_PENDING, &backlight_lock);
1743 if (!backlight_lock)
1744 break;
1745
1746 udelay(10);
1747 }
1748}
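
/*
 * Illustrative sketch only, not part of this patch: the level-to-duty-cycle
 * math from the function above, collapsed into one hypothetical helper.
 */
static inline uint32_t example_backlight_duty_cycle(uint32_t level,
	uint32_t pwm_period, uint8_t bit_count)
{
	uint32_t bl24 = level * 0x10101;                  /* 8-bit -> 24-bit */
	uint32_t bl17 = (bl24 >> 8) + ((bl24 >> 7) & 1);  /* round to U1.16 */
	uint64_t duty = (uint64_t)bl17 *
		(pwm_period & ((1u << bit_count) - 1));
	return ((duty >> bit_count) & 0xFFFF) +
		((duty >> (bit_count - 1)) & 0x1);
}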
1749
1750void dce110_link_encoder_set_dmcu_backlight_level(
1751 struct link_encoder *enc,
1752 uint32_t level,
1753 uint32_t frame_ramp,
1754 uint32_t controller_id)
1755{
1756 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1757 struct dc_context *ctx = enc110->base.ctx;
1758 unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
1759 unsigned int dmcu_wait_reg_ready_interval = 100;
1760 unsigned int backlight_17bit = level * 0x10101;
1761 unsigned char temp_uchar =
1762 (unsigned char)(((backlight_17bit & 0x80) >> 7) & 1);
1763 unsigned int regValue;
1764 uint32_t rampingBoundary = 0xFFFF;
1765 uint32_t s2;
1766
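	/* Same 8-bit to U1.16 conversion (scale by 0x10101, round on bit 7)
	 * as in dce110_link_encoder_set_lcd_backlight_level() above. */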
1767 backlight_17bit = (backlight_17bit >> 8) + temp_uchar;
1768
1769 /* set ramping boundary */
1770 REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
1771
1772 /* setDMCUParam_Pipe */
1773 REG_UPDATE_2(MASTER_COMM_CMD_REG,
1774 MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
1775 MASTER_COMM_CMD_REG_BYTE1, controller_id);
1776
1777 /* notifyDMCUMsg */
1778 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1779
1780 /* waitDMCUReadyForCmd */
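	/* DMCU mailbox handshake: parameters are written to
	 * MASTER_COMM_DATA_REGx, command bytes to MASTER_COMM_CMD_REG, and
	 * MASTER_COMM_INTERRUPT is then raised; the bit is presumably cleared
	 * by the DMCU firmware once the command is consumed, so poll here
	 * until it reads back 0 (up to roughly 801 * 100 us). */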
1781 do {
1782 dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
1783 regValue = REG_READ(MASTER_COMM_CNTL_REG);
1784 dmcu_max_retry_on_wait_reg_ready--;
1785 } while
1786 /* expected value is 0, loop while not 0*/
1787 ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
1788 dmcu_max_retry_on_wait_reg_ready > 0);
1789
1790 /* setDMCUParam_BL */
1791 REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17bit);
1792
1793 /* write ramp */
1794 REG_WRITE(MASTER_COMM_DATA_REG1, frame_ramp);
1795
1796 /* setDMCUParam_Cmd */
1797 REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET);
1798
1799 /* notifyDMCUMsg */
1800 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1801
1802 /* UpdateRequestedBacklightLevel */
1803 s2 = REG_READ(BIOS_SCRATCH_2);
1804
1805 s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
1806 level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
1807 ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
1808 s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
1809
1810 REG_WRITE(BIOS_SCRATCH_2, s2);
1811}
1812
1813void dce110_link_encoder_init_dmcu_backlight_settings(
1814 struct link_encoder *enc)
1815{
1816 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1817 uint32_t bl_pwm_cntl;
1818 uint32_t pwmCntl;
1819 uint32_t pwmCntl2;
1820 uint32_t periodCntl;
1821 uint32_t s2;
1822 uint32_t value;
1823
1824 bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
1825
1826	/* The value must not be 0, so we have to restore it.
1827	 * BIOS bug workaround - the period resets to zero;
1828	 * restore from the cached values, which are always correct.
1829	 */
1830 REG_GET(BL_PWM_CNTL,
1831 BL_ACTIVE_INT_FRAC_CNT, &value);
1832 if (value == 0 || bl_pwm_cntl == 1) {
1833 if (stored_backlight_registers.vBL_PWM_CNTL != 0) {
1834 pwmCntl = stored_backlight_registers.vBL_PWM_CNTL;
1835 REG_WRITE(BL_PWM_CNTL, pwmCntl);
1836
1837 pwmCntl2 = stored_backlight_registers.vBL_PWM_CNTL2;
1838 REG_WRITE(BL_PWM_CNTL2, pwmCntl2);
1839
1840 periodCntl =
1841 stored_backlight_registers.vBL_PWM_PERIOD_CNTL;
1842 REG_WRITE(BL_PWM_PERIOD_CNTL, periodCntl);
1843
1844 REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
1845 BL_PWM_REF_DIV,
1846 stored_backlight_registers.
1847 vLVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
1848 }
1849 } else {
1850 stored_backlight_registers.vBL_PWM_CNTL =
1851 REG_READ(BL_PWM_CNTL);
1852 stored_backlight_registers.vBL_PWM_CNTL2 =
1853 REG_READ(BL_PWM_CNTL2);
1854 stored_backlight_registers.vBL_PWM_PERIOD_CNTL =
1855 REG_READ(BL_PWM_PERIOD_CNTL);
1856
1857 REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
1858 &stored_backlight_registers.
1859 vLVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
1860 }
1861
1862 /* Have driver take backlight control
1863 * TakeBacklightControl(true)
1864 */
1865 s2 = REG_READ(BIOS_SCRATCH_2);
1866 s2 |= ATOM_S2_VRI_BRIGHT_ENABLE;
1867 REG_WRITE(BIOS_SCRATCH_2, s2);
1868
1869 /* Enable the backlight output */
1870 REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
1871
1872}
1873
1874void dce110_link_encoder_set_dmcu_abm_level(
1875 struct link_encoder *enc, uint32_t level)
1876{
1877 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1878 struct dc_context *ctx = enc110->base.ctx;
1879
1880 unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
1881 unsigned int dmcu_wait_reg_ready_interval = 100;
1882 unsigned int regValue;
1883
1884 /* waitDMCUReadyForCmd */
1885 do {
1886 dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
1887 regValue = REG_READ(MASTER_COMM_CNTL_REG);
1888 dmcu_max_retry_on_wait_reg_ready--;
1889 } while
1890 /* expected value is 0, loop while not 0*/
1891 ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
1892 dmcu_max_retry_on_wait_reg_ready > 0);
1893
1894 /* setDMCUParam_ABMLevel */
1895 REG_UPDATE_2(MASTER_COMM_CMD_REG,
1896 MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
1897 MASTER_COMM_CMD_REG_BYTE2, level);
1898
1899 /* notifyDMCUMsg */
1900 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1901}
1902
1903static void get_dmcu_psr_state(struct link_encoder *enc, uint32_t *psr_state)
1904{
1905 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1906 struct dc_context *ctx = enc110->base.ctx;
1907
1908 uint32_t count = 0;
1909 uint32_t psrStateOffset = 0xf0;
1910 uint32_t value;
1911
1912 /* Enable write access to IRAM */
1913 REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
1914
1915 do {
1916 dm_delay_in_microseconds(ctx, 2);
1917 REG_GET(DCI_MEM_PWR_STATUS,
1918 DMCU_IRAM_MEM_PWR_STATE, &value);
1919 } while
1920 (value != 0 && count++ < 10);
1921
1922 /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
1923 REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
1924
1925 /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
1926 *psr_state = REG_READ(DMCU_IRAM_RD_DATA);
1927
1928 /* Disable write access to IRAM after finished using IRAM
1929 * in order to allow dynamic sleep state
1930 */
1931 REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
1932}
1933
1934void dce110_link_encoder_set_dmcu_psr_enable(struct link_encoder *enc,
1935 bool enable)
1936{
1937 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1938 struct dc_context *ctx = enc110->base.ctx;
1939
1940 unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
1941 unsigned int dmcu_wait_reg_ready_interval = 100;
1942
1943 unsigned int regValue;
1944
1945 unsigned int retryCount;
1946 uint32_t psr_state = 0;
1947
1948 /* waitDMCUReadyForCmd */
1949 do {
1950 dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
1951 regValue = REG_READ(MASTER_COMM_CNTL_REG);
1952 dmcu_max_retry_on_wait_reg_ready--;
1953 } while
1954 /* expected value is 0, loop while not 0*/
1955 ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
1956 dmcu_max_retry_on_wait_reg_ready > 0);
1957
1958 /* setDMCUParam_Cmd */
1959 if (enable)
1960 REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_ENABLE);
1961 else
1962 REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_EXIT);
1963
1964 /* notifyDMCUMsg */
1965 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1966
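	/* Poll the PSR state reported by the DMCU until it reflects the
	 * requested transition, allowing up to 100 iterations of 10 us plus
	 * the IRAM read time per get_dmcu_psr_state() call. */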
1967 for (retryCount = 0; retryCount <= 100; retryCount++) {
1968 get_dmcu_psr_state(enc, &psr_state);
1969 if (enable) {
1970 if (psr_state != 0)
1971 break;
1972 } else {
1973 if (psr_state == 0)
1974 break;
1975 }
1976 dm_delay_in_microseconds(ctx, 10);
1977 }
1978}
1979
1980void dce110_link_encoder_setup_dmcu_psr(struct link_encoder *enc,
1981 struct psr_dmcu_context *psr_context)
1982{
1983 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1984 struct dc_context *ctx = enc110->base.ctx;
1985
1986 unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
1987 unsigned int dmcu_wait_reg_ready_interval = 100;
1988 unsigned int regValue;
1989
1990 union dce110_dmcu_psr_config_data_reg1 masterCmdData1;
1991 union dce110_dmcu_psr_config_data_reg2 masterCmdData2;
1992 union dce110_dmcu_psr_config_data_reg3 masterCmdData3;
1993
1994 if (psr_context->psrExitLinkTrainingRequired)
1995 REG_UPDATE(DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, 1);
1996 else {
1997 REG_UPDATE(DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, 0);
1998		/* In DCE 11 we can pre-program a Force SR register
1999		 * to trigger an SR symbol after 5 idle patterns have been
2000		 * transmitted. Upon PSR exit, the DMCU can set
2001		 * DPHY_LOAD_BS_COUNT_START = 1. Once 1 is written to
2002		 * DPHY_LOAD_BS_COUNT_START and the internal counter
2003		 * reaches DPHY_LOAD_BS_COUNT, the next BS symbol is
2004		 * replaced by an SR symbol once.
2005		 */
2006
2007 REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
2008 }
2009
2010 /* Enable static screen interrupts for PSR supported display */
2011 /* Disable the interrupt coming from other displays. */
2012 REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
2013 STATIC_SCREEN1_INT_TO_UC_EN, 0,
2014 STATIC_SCREEN2_INT_TO_UC_EN, 0,
2015 STATIC_SCREEN3_INT_TO_UC_EN, 0,
2016 STATIC_SCREEN4_INT_TO_UC_EN, 0);
2017
2018 switch (psr_context->controllerId) {
2019 /* Driver uses case 1 for unconfigured */
2020 case 1:
2021 psr_crtc_offset = mmCRTC0_CRTC_STATIC_SCREEN_CONTROL -
2022 mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
2023
2024 REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
2025 STATIC_SCREEN1_INT_TO_UC_EN, 1);
2026 break;
2027 case 2:
2028 psr_crtc_offset = mmCRTC1_CRTC_STATIC_SCREEN_CONTROL -
2029 mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
2030
2031 REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
2032 STATIC_SCREEN2_INT_TO_UC_EN, 1);
2033 break;
2034 case 3:
2035 psr_crtc_offset = mmCRTC2_CRTC_STATIC_SCREEN_CONTROL -
2036 mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
2037
2038 REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
2039 STATIC_SCREEN3_INT_TO_UC_EN, 1);
2040 break;
2041 case 4:
2042 psr_crtc_offset = mmCRTC3_CRTC_STATIC_SCREEN_CONTROL -
2043 mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
2044
2045 REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
2046 STATIC_SCREEN4_INT_TO_UC_EN, 1);
2047 break;
2048 case 5:
2049 psr_crtc_offset = mmCRTC4_CRTC_STATIC_SCREEN_CONTROL -
2050 mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
2051		/* CZ/NL only has 4 CRTCs!!
2052		 * This case is defined in the HW regspec but is not really valid.
2053		 * There is no interrupt enable mask for this instance.
2054		 */
2055 break;
2056 case 6:
2057 psr_crtc_offset = mmCRTC5_CRTC_STATIC_SCREEN_CONTROL -
2058 mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
2059 /* CZ/NL only has 4 CRTC!!
2060 * These are here because they are defined in HW regspec,
2061 * but not really valid. There is no interrupt enable mask
2062 * for these instances.
2063 */
2064 break;
2065 default:
2066 psr_crtc_offset = mmCRTC0_CRTC_STATIC_SCREEN_CONTROL -
2067 mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
2068
2069 REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
2070 STATIC_SCREEN1_INT_TO_UC_EN, 1);
2071 break;
2072 }
2073
2074 REG_UPDATE_2(DP_SEC_CNTL1,
2075 DP_SEC_GSP0_LINE_NUM, psr_context->sdpTransmitLineNumDeadline,
2076 DP_SEC_GSP0_PRIORITY, 1);
2077
2078 if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION) {
2079 REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
2080 }
2081
2082 /* waitDMCUReadyForCmd */
2083 do {
2084 dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
2085 regValue = REG_READ(MASTER_COMM_CNTL_REG);
2086 dmcu_max_retry_on_wait_reg_ready--;
2087 } while
2088 /* expected value is 0, loop while not 0*/
2089 ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
2090 dmcu_max_retry_on_wait_reg_ready > 0);
2091
2092 /* setDMCUParam_PSRHostConfigData */
2093 masterCmdData1.u32All = 0;
2094 masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
2095 masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
2096 masterCmdData1.bits.rfb_update_auto_en =
2097 psr_context->rfb_update_auto_en;
2098 masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
2099 masterCmdData1.bits.dcp_sel = psr_context->controllerId;
2100 masterCmdData1.bits.phy_type = psr_context->phyType;
2101 masterCmdData1.bits.frame_cap_ind =
2102 psr_context->psrFrameCaptureIndicationReq;
2103 masterCmdData1.bits.aux_chan = psr_context->channel;
2104 masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
2105 dm_write_reg(ctx, REG(MASTER_COMM_DATA_REG1),
2106 masterCmdData1.u32All);
2107
2108 masterCmdData2.u32All = 0;
2109 masterCmdData2.bits.dig_fe = psr_context->engineId;
2110 masterCmdData2.bits.dig_be = psr_context->transmitterId;
2111 masterCmdData2.bits.skip_wait_for_pll_lock =
2112 psr_context->skipPsrWaitForPllLock;
2113 masterCmdData2.bits.frame_delay = psr_context->frame_delay;
2114 masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
2115 masterCmdData2.bits.num_of_controllers =
2116 psr_context->numberOfControllers;
2117 dm_write_reg(ctx, REG(MASTER_COMM_DATA_REG2),
2118 masterCmdData2.u32All);
2119
2120 masterCmdData3.u32All = 0;
2121 masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
2122 dm_write_reg(ctx, REG(MASTER_COMM_DATA_REG3),
2123 masterCmdData3.u32All);
2124
2125 /* setDMCUParam_Cmd */
2126 REG_UPDATE(MASTER_COMM_CMD_REG,
2127 MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
2128
2129 /* notifyDMCUMsg */
2130 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
2131}
2132
2133void dce110_link_encoder_connect_dig_be_to_fe(
2134 struct link_encoder *enc,
2135 enum engine_id engine,
2136 bool connect)
2137{
2138 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
2139 uint32_t field;
2140
2141 if (engine != ENGINE_ID_UNKNOWN) {
2142
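		/* DIG_FE_SOURCE_SELECT is treated as a bitmask of front-end
		 * engines: OR in the engine's bit to connect it, clear the bit
		 * to disconnect it. */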
2143 REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
2144
2145 if (connect)
2146 field |= get_frontend_source(engine);
2147 else
2148 field &= ~get_frontend_source(engine);
2149
2150 REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
2151 }
2152}
2153
2154void dce110_link_encoder_enable_hpd(struct link_encoder *enc)
2155{
2156 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
2157 struct dc_context *ctx = enc110->base.ctx;
2158 uint32_t addr = HPD_REG(DC_HPD_CONTROL);
2159 uint32_t hpd_enable = 0;
2160 uint32_t value = dm_read_reg(ctx, addr);
2161
2162	hpd_enable = get_reg_field_value(value, DC_HPD_CONTROL, DC_HPD_EN);
2163
2164	if (hpd_enable == 0) {
2165		set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN);
		dm_write_reg(ctx, addr, value);
	}
2166}
2167
2168void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
2169{
2170 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
2171 struct dc_context *ctx = enc110->base.ctx;
2172 uint32_t addr = HPD_REG(DC_HPD_CONTROL);
2173 uint32_t value = dm_read_reg(ctx, addr);
2174
2175	set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
	dm_write_reg(ctx, addr, value);
2176}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
new file mode 100644
index 000000000000..1635b239402f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -0,0 +1,363 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_LINK_ENCODER__DCE110_H__
27#define __DC_LINK_ENCODER__DCE110_H__
28
29#include "link_encoder.h"
30
31#define TO_DCE110_LINK_ENC(link_encoder)\
32 container_of(link_encoder, struct dce110_link_encoder, base)
33
34#define AUX_REG_LIST(id)\
35 SRI(AUX_CONTROL, DP_AUX, id), \
36 SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
37
38#define HPD_REG_LIST(id)\
39 SRI(DC_HPD_CONTROL, HPD, id)
40
41#define LE_COMMON_REG_LIST_BASE(id) \
42 SR(BL_PWM_CNTL), \
43 SR(BL_PWM_GRP1_REG_LOCK), \
44 SR(BL_PWM_PERIOD_CNTL), \
45 SR(LVTMA_PWRSEQ_CNTL), \
46 SR(LVTMA_PWRSEQ_STATE), \
47 SR(BL_PWM_CNTL2), \
48 SR(LVTMA_PWRSEQ_REF_DIV), \
49 SR(MASTER_COMM_DATA_REG1), \
50 SR(MASTER_COMM_DATA_REG2), \
51 SR(MASTER_COMM_DATA_REG3), \
52 SR(MASTER_COMM_CMD_REG), \
53 SR(MASTER_COMM_CNTL_REG), \
54 SR(DMCU_RAM_ACCESS_CTRL), \
55 SR(DMCU_IRAM_RD_CTRL), \
56 SR(DMCU_IRAM_RD_DATA), \
57 SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
58 SR(SMU_INTERRUPT_CONTROL), \
59 SRI(DIG_BE_CNTL, DIG, id), \
60 SRI(DIG_BE_EN_CNTL, DIG, id), \
61 SRI(DP_CONFIG, DP, id), \
62 SRI(DP_DPHY_CNTL, DP, id), \
63 SRI(DP_DPHY_PRBS_CNTL, DP, id), \
64 SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
65 SRI(DP_DPHY_SYM0, DP, id), \
66 SRI(DP_DPHY_SYM1, DP, id), \
67 SRI(DP_DPHY_SYM2, DP, id), \
68 SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
69 SRI(DP_LINK_CNTL, DP, id), \
70 SRI(DP_LINK_FRAMING_CNTL, DP, id), \
71 SRI(DP_MSE_SAT0, DP, id), \
72 SRI(DP_MSE_SAT1, DP, id), \
73 SRI(DP_MSE_SAT2, DP, id), \
74 SRI(DP_MSE_SAT_UPDATE, DP, id), \
75 SRI(DP_SEC_CNTL, DP, id), \
76 SRI(DP_VID_STREAM_CNTL, DP, id), \
77 SRI(DP_DPHY_FAST_TRAINING, DP, id), \
78 SRI(DP_SEC_CNTL1, DP, id)
79
80#define LE_COMMON_REG_LIST(id)\
81 LE_COMMON_REG_LIST_BASE(id), \
82 SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
83 SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
84 SR(BIOS_SCRATCH_2), \
85 SR(BL1_PWM_USER_LEVEL), \
86 SR(DCI_MEM_PWR_STATUS)
87
88#define LE_DCE110_REG_LIST(id)\
89 LE_COMMON_REG_LIST_BASE(id), \
90 SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
91 SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
92 SR(BIOS_SCRATCH_2), \
93 SR(BL1_PWM_USER_LEVEL), \
94 SR(DCI_MEM_PWR_STATUS)
95
96#define LE_DCE80_REG_LIST(id)\
97 SR(BIOS_SCRATCH_2), \
98 SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
99 SR(BL1_PWM_USER_LEVEL), \
100 LE_COMMON_REG_LIST_BASE(id)
101
102
103struct dce110_link_enc_aux_registers {
104 uint32_t AUX_CONTROL;
105 uint32_t AUX_DPHY_RX_CONTROL0;
106};
107
108struct dce110_link_enc_hpd_registers {
109 uint32_t DC_HPD_CONTROL;
110};
111
112struct dce110_link_enc_registers {
113 /* BL registers */
114 uint32_t BL_PWM_CNTL;
115 uint32_t BL_PWM_GRP1_REG_LOCK;
116 uint32_t BL_PWM_PERIOD_CNTL;
117 uint32_t LVTMA_PWRSEQ_CNTL;
118 uint32_t LVTMA_PWRSEQ_STATE;
119 uint32_t BL_PWM_CNTL2;
120 uint32_t LVTMA_PWRSEQ_REF_DIV;
121
122 /* DMCU registers */
123 uint32_t BL1_PWM_USER_LEVEL;
124 uint32_t ABM0_BL1_PWM_USER_LEVEL;
125 uint32_t MASTER_COMM_DATA_REG1;
126 uint32_t MASTER_COMM_DATA_REG2;
127 uint32_t MASTER_COMM_DATA_REG3;
128 uint32_t MASTER_COMM_CMD_REG;
129 uint32_t MASTER_COMM_CNTL_REG;
130 uint32_t BIOS_SCRATCH_2;
131 uint32_t DMCU_RAM_ACCESS_CTRL;
132 uint32_t DCI_MEM_PWR_STATUS;
133 uint32_t DMU_MEM_PWR_CNTL;
134 uint32_t DMCU_IRAM_RD_CTRL;
135 uint32_t DMCU_IRAM_RD_DATA;
136 uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
137 uint32_t SMU_INTERRUPT_CONTROL;
138
139
140 /* Common DP registers */
141 uint32_t DIG_BE_CNTL;
142 uint32_t DIG_BE_EN_CNTL;
143 uint32_t DP_CONFIG;
144 uint32_t DP_DPHY_CNTL;
145 uint32_t DP_DPHY_INTERNAL_CTRL;
146 uint32_t DP_DPHY_PRBS_CNTL;
147 uint32_t DP_DPHY_SCRAM_CNTL;
148 uint32_t DP_DPHY_SYM0;
149 uint32_t DP_DPHY_SYM1;
150 uint32_t DP_DPHY_SYM2;
151 uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
152 uint32_t DP_LINK_CNTL;
153 uint32_t DP_LINK_FRAMING_CNTL;
154 uint32_t DP_MSE_SAT0;
155 uint32_t DP_MSE_SAT1;
156 uint32_t DP_MSE_SAT2;
157 uint32_t DP_MSE_SAT_UPDATE;
158 uint32_t DP_SEC_CNTL;
159 uint32_t DP_VID_STREAM_CNTL;
160 uint32_t DP_DPHY_FAST_TRAINING;
161 uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
162 uint32_t DP_SEC_CNTL1;
163};
164
165struct dce110_link_encoder {
166 struct link_encoder base;
167 const struct dce110_link_enc_registers *link_regs;
168 const struct dce110_link_enc_aux_registers *aux_regs;
169 const struct dce110_link_enc_hpd_registers *hpd_regs;
170};
171
172/*******************************************************************
173* MASTER_COMM_DATA_REG1 Bit position Data
174* 7:0 hyst_frames[7:0]
175* 14:8 hyst_lines[6:0]
176* 15 RFB_UPDATE_AUTO_EN
177* 18:16 phy_num[2:0]
178* 21:19 dcp_sel[2:0]
179* 22 phy_type
180* 23 frame_cap_ind
181* 26:24 aux_chan[2:0]
182* 30:27 aux_repeat[3:0]
183* 31:31 reserved[31:31]
184*******************************************************************/
185union dce110_dmcu_psr_config_data_reg1 {
186 struct {
187 unsigned int timehyst_frames:8; /*[7:0]*/
188 unsigned int hyst_lines:7; /*[14:8]*/
189 unsigned int rfb_update_auto_en:1; /*[15:15]*/
190 unsigned int dp_port_num:3; /*[18:16]*/
191 unsigned int dcp_sel:3; /*[21:19]*/
192 unsigned int phy_type:1; /*[22:22]*/
193 unsigned int frame_cap_ind:1; /*[23:23]*/
194 unsigned int aux_chan:3; /*[26:24]*/
195 unsigned int aux_repeat:4; /*[30:27]*/
196 unsigned int reserved:1; /*[31:31]*/
197 } bits;
198 unsigned int u32All;
199};
200
201/*******************************************************************
202* MASTER_COMM_DATA_REG2
203*******************************************************************/
204union dce110_dmcu_psr_config_data_reg2 {
205 struct {
206 unsigned int dig_fe:3; /*[2:0]*/
207 unsigned int dig_be:3; /*[5:3]*/
208 unsigned int skip_wait_for_pll_lock:1; /*[6:6]*/
209 unsigned int reserved:9; /*[15:7]*/
210 unsigned int frame_delay:8; /*[23:16]*/
211 unsigned int smu_phy_id:4; /*[27:24]*/
212 unsigned int num_of_controllers:4; /*[31:28]*/
213 } bits;
214 unsigned int u32All;
215};
216
217/*******************************************************************
218* MASTER_COMM_DATA_REG3
219*******************************************************************/
220union dce110_dmcu_psr_config_data_reg3 {
221 struct {
222 unsigned int psr_level:16; /*[15:0]*/
223 unsigned int link_rate:4; /*[19:16]*/
224 unsigned int reserved:12; /*[31:20]*/
225 } bits;
226 unsigned int u32All;
227};
228
229struct dce110_abm_backlight_registers {
230 unsigned int vBL_PWM_CNTL;
231 unsigned int vBL_PWM_CNTL2;
232 unsigned int vBL_PWM_PERIOD_CNTL;
233 unsigned int vLVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
234};
235
236bool dce110_link_encoder_construct(
237 struct dce110_link_encoder *enc110,
238 const struct encoder_init_data *init_data,
239 const struct dce110_link_enc_registers *link_regs,
240 const struct dce110_link_enc_aux_registers *aux_regs,
241 const struct dce110_link_enc_hpd_registers *hpd_regs);
242
243bool dce110_link_encoder_validate_dvi_output(
244 const struct dce110_link_encoder *enc110,
245 enum signal_type connector_signal,
246 enum signal_type signal,
247 const struct dc_crtc_timing *crtc_timing);
248
249bool dce110_link_encoder_validate_rgb_output(
250 const struct dce110_link_encoder *enc110,
251 const struct dc_crtc_timing *crtc_timing);
252
253bool dce110_link_encoder_validate_dp_output(
254 const struct dce110_link_encoder *enc110,
255 const struct dc_crtc_timing *crtc_timing);
256
257bool dce110_link_encoder_validate_wireless_output(
258 const struct dce110_link_encoder *enc110,
259 const struct dc_crtc_timing *crtc_timing);
260
261bool dce110_link_encoder_validate_output_with_stream(
262 struct link_encoder *enc,
263 struct pipe_ctx *pipe_ctx);
264
265/****************** HW programming ************************/
266
267/* initialize HW */ /* why do we initialize aux in here? */
268void dce110_link_encoder_hw_init(struct link_encoder *enc);
269
270void dce110_link_encoder_destroy(struct link_encoder **enc);
271
272/* program DIG_MODE in DIG_BE */
273/* TODO can this be combined with enable_output? */
274void dce110_link_encoder_setup(
275 struct link_encoder *enc,
276 enum signal_type signal);
277
278/* enables TMDS PHY output */
279/* TODO: still need depth or just pass in adjusted pixel clock? */
280void dce110_link_encoder_enable_tmds_output(
281 struct link_encoder *enc,
282 enum clock_source_id clock_source,
283 enum dc_color_depth color_depth,
284 bool hdmi,
285 bool dual_link,
286 uint32_t pixel_clock);
287
288/* enables DP PHY output */
289void dce110_link_encoder_enable_dp_output(
290 struct link_encoder *enc,
291 const struct dc_link_settings *link_settings,
292 enum clock_source_id clock_source);
293
294/* enables DP PHY output in MST mode */
295void dce110_link_encoder_enable_dp_mst_output(
296 struct link_encoder *enc,
297 const struct dc_link_settings *link_settings,
298 enum clock_source_id clock_source);
299
300/* disable PHY output */
301void dce110_link_encoder_disable_output(
302 struct link_encoder *link_enc,
303 enum signal_type signal);
304
305/* set DP lane settings */
306void dce110_link_encoder_dp_set_lane_settings(
307 struct link_encoder *enc,
308 const struct link_training_settings *link_settings);
309
310void dce110_link_encoder_dp_set_phy_pattern(
311 struct link_encoder *enc,
312 const struct encoder_set_dp_phy_pattern_param *param);
313
314/* programs DP MST VC payload allocation */
315void dce110_link_encoder_update_mst_stream_allocation_table(
316 struct link_encoder *enc,
317 const struct link_mst_stream_allocation_table *table);
318
319void dce110_link_encoder_set_lcd_backlight_level(
320 struct link_encoder *enc,
321 uint32_t level);
322
323void dce110_link_encoder_set_dmcu_backlight_level(
324 struct link_encoder *enc,
325 uint32_t level,
326 uint32_t frame_ramp,
327 uint32_t controller_id);
328
329void dce110_link_encoder_init_dmcu_backlight_settings(
330 struct link_encoder *enc);
331
332void dce110_link_encoder_set_dmcu_abm_level(
333 struct link_encoder *enc,
334 uint32_t level);
335
336void dce110_link_encoder_set_dmcu_psr_enable(
337 struct link_encoder *enc, bool enable);
338
339void dce110_link_encoder_setup_dmcu_psr(struct link_encoder *enc,
340 struct psr_dmcu_context *psr_context);
341
342void dce110_link_encoder_edp_backlight_control(
343 struct link_encoder *enc,
344 bool enable);
345
346void dce110_link_encoder_edp_power_control(
347 struct link_encoder *enc,
348 bool power_up);
349
350void dce110_link_encoder_connect_dig_be_to_fe(
351 struct link_encoder *enc,
352 enum engine_id engine,
353 bool connect);
354
355void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
356 struct link_encoder *enc,
357 uint32_t index);
358
359void dce110_link_encoder_enable_hpd(struct link_encoder *enc);
360
361void dce110_link_encoder_disable_hpd(struct link_encoder *enc);
362
363#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
new file mode 100644
index 000000000000..654731cccdcd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -0,0 +1,384 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "mem_input.h"
27#include "reg_helper.h"
28
29#define CTX \
30 mi->ctx
31#define REG(reg)\
32 mi->regs->reg
33
34#undef FN
35#define FN(reg_name, field_name) \
36 mi->shifts->field_name, mi->masks->field_name
37
38
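/* The *_WATERMARK_MASK fields in DPG_WATERMARK_MASK_CONTROL appear to select
 * which watermark set (0-3, i.e. A-D) the watermark write that follows will
 * target.
 */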
39static void program_urgency_watermark(struct mem_input *mi,
40 uint32_t wm_select,
41 uint32_t urgency_low_wm,
42 uint32_t urgency_high_wm)
43{
44 REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
45 URGENCY_WATERMARK_MASK, wm_select);
46
47 REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
48 URGENCY_LOW_WATERMARK, urgency_low_wm,
49 URGENCY_HIGH_WATERMARK, urgency_high_wm);
50}
51
52static void program_nbp_watermark(struct mem_input *mi,
53 uint32_t wm_select,
54 uint32_t nbp_wm)
55{
56 if (REG(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL)) {
57 REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
58 NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select);
59
60 REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
61 NB_PSTATE_CHANGE_ENABLE, 1,
62 NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
63 NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
64
65 REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
66 NB_PSTATE_CHANGE_WATERMARK, nbp_wm);
67 }
68}
69
70static void program_stutter_watermark(struct mem_input *mi,
71 uint32_t wm_select,
72 uint32_t stutter_mark)
73{
74 REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
75 STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
76
77 REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
78 STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
79}
80
81void dce_mem_input_program_display_marks(struct mem_input *mi,
82 struct bw_watermarks nbp,
83 struct bw_watermarks stutter,
84 struct bw_watermarks urgent,
85 uint32_t total_dest_line_time_ns)
86{
87 uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
88
89 program_urgency_watermark(mi, 0, /* set a */
90 urgent.a_mark, total_dest_line_time_ns);
91 program_urgency_watermark(mi, 1, /* set b */
92 urgent.b_mark, total_dest_line_time_ns);
93 program_urgency_watermark(mi, 2, /* set c */
94 urgent.c_mark, total_dest_line_time_ns);
95 program_urgency_watermark(mi, 3, /* set d */
96 urgent.d_mark, total_dest_line_time_ns);
97
98 REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
99 STUTTER_ENABLE, stutter_en,
100 STUTTER_IGNORE_FBC, 1);
101 program_nbp_watermark(mi, 0, nbp.a_mark); /* set a */
102 program_nbp_watermark(mi, 1, nbp.b_mark); /* set b */
103 program_nbp_watermark(mi, 2, nbp.c_mark); /* set c */
104 program_nbp_watermark(mi, 3, nbp.d_mark); /* set d */
105
106 program_stutter_watermark(mi, 0, stutter.a_mark); /* set a */
107 program_stutter_watermark(mi, 1, stutter.b_mark); /* set b */
108 program_stutter_watermark(mi, 2, stutter.c_mark); /* set c */
109 program_stutter_watermark(mi, 3, stutter.d_mark); /* set d */
110}
111
112static void program_tiling(struct mem_input *mi,
113 const union dc_tiling_info *info)
114{
115 if (mi->masks->GRPH_ARRAY_MODE) { /* GFX8 */
116 REG_UPDATE_9(GRPH_CONTROL,
117 GRPH_NUM_BANKS, info->gfx8.num_banks,
118 GRPH_BANK_WIDTH, info->gfx8.bank_width,
119 GRPH_BANK_HEIGHT, info->gfx8.bank_height,
120 GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect,
121 GRPH_TILE_SPLIT, info->gfx8.tile_split,
122 GRPH_MICRO_TILE_MODE, info->gfx8.tile_mode,
123 GRPH_PIPE_CONFIG, info->gfx8.pipe_config,
124 GRPH_ARRAY_MODE, info->gfx8.array_mode,
125 GRPH_COLOR_EXPANSION_MODE, 1);
126 /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */
127 /*
128 GRPH_Z, 0);
129 */
130 }
131}
132
133
134static void program_size_and_rotation(
135 struct mem_input *mi,
136 enum dc_rotation_angle rotation,
137 const union plane_size *plane_size)
138{
139 const struct rect *in_rect = &plane_size->grph.surface_size;
140 struct rect hw_rect = plane_size->grph.surface_size;
141 const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = {
142 [ROTATION_ANGLE_0] = 0,
143 [ROTATION_ANGLE_90] = 1,
144 [ROTATION_ANGLE_180] = 2,
145 [ROTATION_ANGLE_270] = 3,
146 };
147
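	/* For 90/270 degree rotation the surface viewport is transposed
	 * (x/y and width/height swapped) before being programmed below.
	 */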
148 if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) {
149 hw_rect.x = in_rect->y;
150 hw_rect.y = in_rect->x;
151
152 hw_rect.height = in_rect->width;
153 hw_rect.width = in_rect->height;
154 }
155
156 REG_SET(GRPH_X_START, 0,
157 GRPH_X_START, hw_rect.x);
158
159 REG_SET(GRPH_Y_START, 0,
160 GRPH_Y_START, hw_rect.y);
161
162 REG_SET(GRPH_X_END, 0,
163 GRPH_X_END, hw_rect.width);
164
165 REG_SET(GRPH_Y_END, 0,
166 GRPH_Y_END, hw_rect.height);
167
168 REG_SET(GRPH_PITCH, 0,
169 GRPH_PITCH, plane_size->grph.surface_pitch);
170
171 REG_SET(HW_ROTATION, 0,
172 GRPH_ROTATION_ANGLE, rotation_angles[rotation]);
173}
174
175static void program_grph_pixel_format(
176 struct mem_input *mi,
177 enum surface_pixel_format format)
178{
179 uint32_t red_xbar = 0, blue_xbar = 0; /* no swap */
180	uint32_t grph_depth = 0, grph_format = 0;
181 uint32_t sign = 0, floating = 0;
182
183 if (format == SURFACE_PIXEL_FORMAT_GRPH_BGRA8888 ||
184	/* TODO: it doesn't look like we handle BGRA here;
185	 * we should probably swap the endianness */
186 format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 ||
187 format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS ||
188 format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
189 /* ABGR formats */
190 red_xbar = 2;
191 blue_xbar = 2;
192 }
193
194 REG_SET_2(GRPH_SWAP_CNTL, 0,
195 GRPH_RED_CROSSBAR, red_xbar,
196 GRPH_BLUE_CROSSBAR, blue_xbar);
197
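	/* GRPH_DEPTH selects the bits per pixel (0: 8 bpp, 1: 16 bpp,
	 * 2: 32 bpp, 3: 64 bpp) and GRPH_FORMAT the layout within that depth,
	 * as inferred from the cases below.
	 */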
198 switch (format) {
199 case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
200 grph_depth = 0;
201 grph_format = 0;
202 break;
203 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
204 grph_depth = 1;
205 grph_format = 0;
206 break;
207 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
208 grph_depth = 1;
209 grph_format = 1;
210 break;
211 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
212 case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
213 grph_depth = 2;
214 grph_format = 0;
215 break;
216 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
217 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
218 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
219 grph_depth = 2;
220 grph_format = 1;
221 break;
222 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
223 sign = 1;
224 floating = 1;
225 /* no break */
226 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
227 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
228 grph_depth = 3;
229 grph_format = 0;
230 break;
231 default:
232 DC_ERR("unsupported grph pixel format");
233 break;
234 }
235
236 REG_UPDATE_2(GRPH_CONTROL,
237 GRPH_DEPTH, grph_depth,
238 GRPH_FORMAT, grph_format);
239
240 REG_UPDATE_4(PRESCALE_GRPH_CONTROL,
241 GRPH_PRESCALE_SELECT, floating,
242 GRPH_PRESCALE_R_SIGN, sign,
243 GRPH_PRESCALE_G_SIGN, sign,
244 GRPH_PRESCALE_B_SIGN, sign);
245}
246
247bool dce_mem_input_program_surface_config(struct mem_input *mi,
248 enum surface_pixel_format format,
249 union dc_tiling_info *tiling_info,
250 union plane_size *plane_size,
251 enum dc_rotation_angle rotation,
252 struct dc_plane_dcc_param *dcc,
253 bool horizontal_mirror)
254{
255 REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1);
256
257 program_tiling(mi, tiling_info);
258 program_size_and_rotation(mi, rotation, plane_size);
259
260 if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
261 format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
262 program_grph_pixel_format(mi, format);
263
264 return true;
265}
266
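/* Returns a wait budget in microseconds for a DMIF buffer switch: twice the
 * frame time derived from the timing, with a 30 ms per-frame minimum.
 * For example, h_total 2200, v_total 1125 and a 148500 kHz pixel clock give
 * a ~60 Hz refresh and a 16666 us frame time, which is clamped to 30000 us
 * and doubled to 60000 us.
 */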
267static uint32_t get_dmif_switch_time_us(
268 uint32_t h_total,
269 uint32_t v_total,
270 uint32_t pix_clk_khz)
271{
272 uint32_t frame_time;
273 uint32_t pixels_per_second;
274 uint32_t pixels_per_frame;
275 uint32_t refresh_rate;
276 const uint32_t us_in_sec = 1000000;
277 const uint32_t min_single_frame_time_us = 30000;
278 /*return double of frame time*/
279 const uint32_t single_frame_time_multiplier = 2;
280
281	if (!h_total || !v_total || !pix_clk_khz)
282 return single_frame_time_multiplier * min_single_frame_time_us;
283
284 /*TODO: should we use pixel format normalized pixel clock here?*/
285 pixels_per_second = pix_clk_khz * 1000;
286 pixels_per_frame = h_total * v_total;
287
288 if (!pixels_per_second || !pixels_per_frame) {
289 /* avoid division by zero */
290 ASSERT(pixels_per_frame);
291 ASSERT(pixels_per_second);
292 return single_frame_time_multiplier * min_single_frame_time_us;
293 }
294
295 refresh_rate = pixels_per_second / pixels_per_frame;
296
297 if (!refresh_rate) {
298 /* avoid division by zero*/
299 ASSERT(refresh_rate);
300 return single_frame_time_multiplier * min_single_frame_time_us;
301 }
302
303 frame_time = us_in_sec / refresh_rate;
304
305 if (frame_time < min_single_frame_time_us)
306 frame_time = min_single_frame_time_us;
307
308 frame_time *= single_frame_time_multiplier;
309
310 return frame_time;
311}
312
313void dce_mem_input_allocate_dmif(struct mem_input *mi,
314 uint32_t h_total,
315 uint32_t v_total,
316 uint32_t pix_clk_khz,
317 uint32_t total_stream_num)
318{
319 const uint32_t retry_delay = 10;
320 uint32_t retry_count = get_dmif_switch_time_us(
321 h_total,
322 v_total,
323 pix_clk_khz) / retry_delay;
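	/* REG_WAIT below polls every retry_delay microseconds, so the total
	 * wait budget is roughly two frame times (see get_dmif_switch_time_us).
	 */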
324
325 uint32_t pix_dur;
326 uint32_t buffers_allocated;
327 uint32_t dmif_buffer_control;
328
329 dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
330 DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
331
332 if (buffers_allocated == 2)
333 return;
334
335 REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
336 DMIF_BUFFERS_ALLOCATED, 2);
337
338 REG_WAIT(DMIF_BUFFER_CONTROL,
339 DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
340 retry_delay, retry_count);
341
342 if (pix_clk_khz != 0) {
343 pix_dur = 1000000000ULL / pix_clk_khz;
344
345 REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL1,
346 PIXEL_DURATION, pix_dur);
347 }
348
349 if (mi->wa.single_head_rdreq_dmif_limit) {
350		uint32_t enable = (total_stream_num > 1) ? 0 :
351			mi->wa.single_head_rdreq_dmif_limit;
352
353		REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
354			ENABLE, enable);
355 }
356}
357
358void dce_mem_input_free_dmif(struct mem_input *mi,
359 uint32_t total_stream_num)
360{
361 uint32_t buffers_allocated;
362 uint32_t dmif_buffer_control;
363
364 dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
365 DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
366
367 if (buffers_allocated == 0)
368 return;
369
370 REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
371 DMIF_BUFFERS_ALLOCATED, 0);
372
373 REG_WAIT(DMIF_BUFFER_CONTROL,
374 DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
375 10, 0xBB8);
376
377 if (mi->wa.single_head_rdreq_dmif_limit) {
378		uint32_t enable = (total_stream_num > 1) ? 0 :
379			mi->wa.single_head_rdreq_dmif_limit;
380
381		REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
382			ENABLE, enable);
383 }
384}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
new file mode 100644
index 000000000000..d5930a925fcb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -0,0 +1,217 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DCE_MEM_INPUT_H__
26#define __DCE_MEM_INPUT_H__
27
28#define MI_DCE_BASE_REG_LIST(id)\
29 SRI(GRPH_ENABLE, DCP, id),\
30 SRI(GRPH_CONTROL, DCP, id),\
31 SRI(GRPH_X_START, DCP, id),\
32 SRI(GRPH_Y_START, DCP, id),\
33 SRI(GRPH_X_END, DCP, id),\
34 SRI(GRPH_Y_END, DCP, id),\
35 SRI(GRPH_PITCH, DCP, id),\
36 SRI(HW_ROTATION, DCP, id),\
37 SRI(GRPH_SWAP_CNTL, DCP, id),\
38 SRI(PRESCALE_GRPH_CONTROL, DCP, id),\
39 SRI(DPG_PIPE_ARBITRATION_CONTROL1, DMIF_PG, id),\
40 SRI(DPG_WATERMARK_MASK_CONTROL, DMIF_PG, id),\
41 SRI(DPG_PIPE_URGENCY_CONTROL, DMIF_PG, id),\
42 SRI(DPG_PIPE_STUTTER_CONTROL, DMIF_PG, id),\
43 SRI(DMIF_BUFFER_CONTROL, PIPE, id)
44
45#define MI_REG_LIST(id)\
46 MI_DCE_BASE_REG_LIST(id),\
47 SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id)
48
49struct dce_mem_input_registers {
50 /* DCP */
51 uint32_t GRPH_ENABLE;
52 uint32_t GRPH_CONTROL;
53 uint32_t GRPH_X_START;
54 uint32_t GRPH_Y_START;
55 uint32_t GRPH_X_END;
56 uint32_t GRPH_Y_END;
57 uint32_t GRPH_PITCH;
58 uint32_t HW_ROTATION;
59 uint32_t GRPH_SWAP_CNTL;
60 uint32_t PRESCALE_GRPH_CONTROL;
61 /* DMIF_PG */
62 uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
63 uint32_t DPG_WATERMARK_MASK_CONTROL;
64 uint32_t DPG_PIPE_URGENCY_CONTROL;
65 uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
66 uint32_t DPG_PIPE_LOW_POWER_CONTROL;
67 uint32_t DPG_PIPE_STUTTER_CONTROL;
68 uint32_t DPG_PIPE_STUTTER_CONTROL2;
69 /* DCI */
70 uint32_t DMIF_BUFFER_CONTROL;
71 /* MC_HUB */
72 uint32_t MC_HUB_RDREQ_DMIF_LIMIT;
73};
74
75/* Set_Field_for_Block */
76#define SFB(blk_name, reg_name, field_name, post_fix)\
77 .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
78
79#define MI_GFX8_TILE_MASK_SH_LIST(mask_sh, blk)\
80 SFB(blk, GRPH_CONTROL, GRPH_BANK_HEIGHT, mask_sh),\
81 SFB(blk, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, mask_sh),\
82 SFB(blk, GRPH_CONTROL, GRPH_TILE_SPLIT, mask_sh),\
83 SFB(blk, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, mask_sh),\
84 SFB(blk, GRPH_CONTROL, GRPH_PIPE_CONFIG, mask_sh),\
85 SFB(blk, GRPH_CONTROL, GRPH_ARRAY_MODE, mask_sh),\
86 SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
87
88#define MI_DCP_MASK_SH_LIST(mask_sh, blk)\
89 SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
90 SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
91 SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
92 SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
93 SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
94 SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
95 SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
96 SFB(blk, GRPH_Y_END, GRPH_Y_END, mask_sh),\
97 SFB(blk, GRPH_PITCH, GRPH_PITCH, mask_sh),\
98 SFB(blk, HW_ROTATION, GRPH_ROTATION_ANGLE, mask_sh),\
99 SFB(blk, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, mask_sh),\
100 SFB(blk, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, mask_sh),\
101 SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, mask_sh),\
102 SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_R_SIGN, mask_sh),\
103 SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_G_SIGN, mask_sh),\
104 SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_B_SIGN, mask_sh)
105
106#define MI_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
107 SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\
108 SFB(blk, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, mask_sh),\
109 SFB(blk, DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, mask_sh),\
110 SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
111 SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
112 SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, mask_sh),\
113 SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_IGNORE_FBC, mask_sh),\
114 SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, mask_sh),\
115 SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, mask_sh)
116
117#define MI_DMIF_PG_MASK_SH_DCE(mask_sh, blk)\
118 SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
119 SFB(blk, DPG_WATERMARK_MASK_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
120 SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, mask_sh),\
121 SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
122 SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
123 SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, mask_sh)
124
125#define MI_DCE_MASK_SH_LIST(mask_sh)\
126 MI_DCP_MASK_SH_LIST(mask_sh,),\
127 MI_DMIF_PG_MASK_SH_LIST(mask_sh,),\
128 MI_DMIF_PG_MASK_SH_DCE(mask_sh,),\
129 MI_GFX8_TILE_MASK_SH_LIST(mask_sh,)
130
131#define MI_REG_FIELD_LIST(type) \
132 type GRPH_ENABLE; \
133 type GRPH_X_START; \
134 type GRPH_Y_START; \
135 type GRPH_X_END; \
136 type GRPH_Y_END; \
137 type GRPH_PITCH; \
138 type GRPH_ROTATION_ANGLE; \
139 type GRPH_RED_CROSSBAR; \
140 type GRPH_BLUE_CROSSBAR; \
141 type GRPH_PRESCALE_SELECT; \
142 type GRPH_PRESCALE_R_SIGN; \
143 type GRPH_PRESCALE_G_SIGN; \
144 type GRPH_PRESCALE_B_SIGN; \
145 type GRPH_DEPTH; \
146 type GRPH_FORMAT; \
147 type GRPH_NUM_BANKS; \
148 type GRPH_BANK_WIDTH;\
149 type GRPH_BANK_HEIGHT;\
150 type GRPH_MACRO_TILE_ASPECT;\
151 type GRPH_TILE_SPLIT;\
152 type GRPH_MICRO_TILE_MODE;\
153 type GRPH_PIPE_CONFIG;\
154 type GRPH_ARRAY_MODE;\
155 type GRPH_COLOR_EXPANSION_MODE;\
156 type GRPH_SW_MODE; \
157 type GRPH_NUM_SHADER_ENGINES; \
158 type GRPH_NUM_PIPES; \
159 type PIXEL_DURATION; \
160 type URGENCY_WATERMARK_MASK; \
161 type PSTATE_CHANGE_WATERMARK_MASK; \
162 type NB_PSTATE_CHANGE_WATERMARK_MASK; \
163 type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
164 type URGENCY_LOW_WATERMARK; \
165 type URGENCY_HIGH_WATERMARK; \
166 type NB_PSTATE_CHANGE_ENABLE; \
167 type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
168 type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
169 type NB_PSTATE_CHANGE_WATERMARK; \
170 type PSTATE_CHANGE_ENABLE; \
171 type PSTATE_CHANGE_URGENT_DURING_REQUEST; \
172 type PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
173 type PSTATE_CHANGE_WATERMARK; \
174 type STUTTER_ENABLE; \
175 type STUTTER_IGNORE_FBC; \
176 type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
177 type DMIF_BUFFERS_ALLOCATED; \
178 type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
179 type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\
180
181struct dce_mem_input_shift {
182 MI_REG_FIELD_LIST(uint8_t)
183};
184
185struct dce_mem_input_mask {
186 MI_REG_FIELD_LIST(uint32_t)
187};
188
189struct dce_mem_input_wa {
190 uint8_t single_head_rdreq_dmif_limit;
191};
192
193struct mem_input;
194bool dce_mem_input_program_surface_config(struct mem_input *mi,
195 enum surface_pixel_format format,
196 union dc_tiling_info *tiling_info,
197 union plane_size *plane_size,
198 enum dc_rotation_angle rotation,
199 struct dc_plane_dcc_param *dcc,
200 bool horizontal_mirror);
201
202void dce_mem_input_allocate_dmif(struct mem_input *mi,
203 uint32_t h_total,
204 uint32_t v_total,
205 uint32_t pix_clk_khz,
206 uint32_t total_stream_num);
207
208void dce_mem_input_free_dmif(struct mem_input *mi,
209 uint32_t total_stream_num);
210
211void dce_mem_input_program_display_marks(struct mem_input *mi,
212 struct bw_watermarks nbp,
213 struct bw_watermarks stutter,
214 struct bw_watermarks urgent,
215 uint32_t total_dest_line_time_ns);
216
217#endif /*__DCE_MEM_INPUT_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
new file mode 100644
index 000000000000..3aab86781be2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -0,0 +1,501 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "transform.h"
27
28const uint16_t filter_2tap_16p[18] = {
29 4096, 0,
30 3840, 256,
31 3584, 512,
32 3328, 768,
33 3072, 1024,
34 2816, 1280,
35 2560, 1536,
36 2304, 1792,
37 2048, 2048
38};
39
40const uint16_t filter_3tap_16p_upscale[27] = {
41 2048, 2048, 0,
42 1708, 2424, 16348,
43 1372, 2796, 16308,
44 1056, 3148, 16272,
45 768, 3464, 16244,
46 512, 3728, 16236,
47 296, 3928, 16252,
48 124, 4052, 16296,
49 0, 4096, 0
50};
51
52const uint16_t filter_3tap_16p_117[27] = {
53 2048, 2048, 0,
54 1824, 2276, 16376,
55 1600, 2496, 16380,
56 1376, 2700, 16,
57 1156, 2880, 52,
58 948, 3032, 108,
59 756, 3144, 192,
60 580, 3212, 296,
61 428, 3236, 428
62};
63
64const uint16_t filter_3tap_16p_150[27] = {
65 2048, 2048, 0,
66 1872, 2184, 36,
67 1692, 2308, 88,
68 1516, 2420, 156,
69 1340, 2516, 236,
70 1168, 2592, 328,
71 1004, 2648, 440,
72 844, 2684, 560,
73 696, 2696, 696
74};
75
76const uint16_t filter_3tap_16p_183[27] = {
77 2048, 2048, 0,
78 1892, 2104, 92,
79 1744, 2152, 196,
80 1592, 2196, 300,
81 1448, 2232, 412,
82 1304, 2256, 528,
83 1168, 2276, 648,
84 1032, 2288, 772,
85 900, 2292, 900
86};
87
88const uint16_t filter_4tap_16p_upscale[36] = {
89 0, 4096, 0, 0,
90 16240, 4056, 180, 16380,
91 16136, 3952, 404, 16364,
92 16072, 3780, 664, 16344,
93 16040, 3556, 952, 16312,
94 16036, 3284, 1268, 16272,
95 16052, 2980, 1604, 16224,
96 16084, 2648, 1952, 16176,
97 16128, 2304, 2304, 16128
98};
99
100const uint16_t filter_4tap_16p_117[36] = {
101 428, 3236, 428, 0,
102 276, 3232, 604, 16364,
103 148, 3184, 800, 16340,
104 44, 3104, 1016, 16312,
105 16344, 2984, 1244, 16284,
106 16284, 2832, 1488, 16256,
107 16244, 2648, 1732, 16236,
108 16220, 2440, 1976, 16220,
109 16212, 2216, 2216, 16212
110};
111
112const uint16_t filter_4tap_16p_150[36] = {
113 696, 2700, 696, 0,
114 560, 2700, 848, 16364,
115 436, 2676, 1008, 16348,
116 328, 2628, 1180, 16336,
117 232, 2556, 1356, 16328,
118 152, 2460, 1536, 16328,
119 84, 2344, 1716, 16332,
120 28, 2208, 1888, 16348,
121 16376, 2052, 2052, 16376
122};
123
124const uint16_t filter_4tap_16p_183[36] = {
125 940, 2208, 940, 0,
126 832, 2200, 1052, 4,
127 728, 2180, 1164, 16,
128 628, 2148, 1280, 36,
129 536, 2100, 1392, 60,
130 448, 2044, 1504, 92,
131 368, 1976, 1612, 132,
132 296, 1900, 1716, 176,
133 232, 1812, 1812, 232
134};
135
136const uint16_t filter_2tap_64p[66] = {
137 4096, 0,
138 4032, 64,
139 3968, 128,
140 3904, 192,
141 3840, 256,
142 3776, 320,
143 3712, 384,
144 3648, 448,
145 3584, 512,
146 3520, 576,
147 3456, 640,
148 3392, 704,
149 3328, 768,
150 3264, 832,
151 3200, 896,
152 3136, 960,
153 3072, 1024,
154 3008, 1088,
155 2944, 1152,
156 2880, 1216,
157 2816, 1280,
158 2752, 1344,
159 2688, 1408,
160 2624, 1472,
161 2560, 1536,
162 2496, 1600,
163 2432, 1664,
164 2368, 1728,
165 2304, 1792,
166 2240, 1856,
167 2176, 1920,
168 2112, 1984,
169 2048, 2048 };
170
171const uint16_t filter_3tap_64p_upscale[99] = {
172 2048, 2048, 0,
173 1960, 2140, 16376,
174 1876, 2236, 16364,
175 1792, 2328, 16356,
176 1708, 2424, 16348,
177 1620, 2516, 16336,
178 1540, 2612, 16328,
179 1456, 2704, 16316,
180 1372, 2796, 16308,
181 1292, 2884, 16296,
182 1212, 2976, 16288,
183 1136, 3060, 16280,
184 1056, 3148, 16272,
185 984, 3228, 16264,
186 908, 3312, 16256,
187 836, 3388, 16248,
188 768, 3464, 16244,
189 700, 3536, 16240,
190 636, 3604, 16236,
191 572, 3668, 16236,
192 512, 3728, 16236,
193 456, 3784, 16236,
194 400, 3836, 16240,
195 348, 3884, 16244,
196 296, 3928, 16252,
197 252, 3964, 16260,
198 204, 4000, 16268,
199 164, 4028, 16284,
200 124, 4052, 16296,
201 88, 4072, 16316,
202 56, 4084, 16336,
203 24, 4092, 16356,
204 0, 4096, 0
205};
206
207const uint16_t filter_3tap_64p_117[99] = {
208 2048, 2048, 0,
209 1992, 2104, 16380,
210 1936, 2160, 16380,
211 1880, 2220, 16376,
212 1824, 2276, 16376,
213 1768, 2332, 16376,
214 1712, 2388, 16376,
215 1656, 2444, 16376,
216 1600, 2496, 16380,
217 1544, 2548, 0,
218 1488, 2600, 4,
219 1432, 2652, 8,
220 1376, 2700, 16,
221 1320, 2748, 20,
222 1264, 2796, 32,
223 1212, 2840, 40,
224 1156, 2880, 52,
225 1104, 2920, 64,
226 1052, 2960, 80,
227 1000, 2996, 92,
228 948, 3032, 108,
229 900, 3060, 128,
230 852, 3092, 148,
231 804, 3120, 168,
232 756, 3144, 192,
233 712, 3164, 216,
234 668, 3184, 240,
235 624, 3200, 268,
236 580, 3212, 296,
237 540, 3220, 328,
238 500, 3228, 360,
239 464, 3232, 392,
240 428, 3236, 428
241};
242
243const uint16_t filter_3tap_64p_150[99] = {
244 2048, 2048, 0,
245 2004, 2080, 8,
246 1960, 2116, 16,
247 1916, 2148, 28,
248 1872, 2184, 36,
249 1824, 2216, 48,
250 1780, 2248, 60,
251 1736, 2280, 76,
252 1692, 2308, 88,
253 1648, 2336, 104,
254 1604, 2368, 120,
255 1560, 2392, 136,
256 1516, 2420, 156,
257 1472, 2444, 172,
258 1428, 2472, 192,
259 1384, 2492, 212,
260 1340, 2516, 236,
261 1296, 2536, 256,
262 1252, 2556, 280,
263 1212, 2576, 304,
264 1168, 2592, 328,
265 1124, 2608, 356,
266 1084, 2624, 384,
267 1044, 2636, 412,
268 1004, 2648, 440,
269 964, 2660, 468,
270 924, 2668, 500,
271 884, 2676, 528,
272 844, 2684, 560,
273 808, 2688, 596,
274 768, 2692, 628,
275 732, 2696, 664,
276 696, 2696, 696
277};
278
279const uint16_t filter_3tap_64p_183[99] = {
280 2048, 2048, 0,
281 2008, 2060, 20,
282 1968, 2076, 44,
283 1932, 2088, 68,
284 1892, 2104, 92,
285 1856, 2116, 120,
286 1816, 2128, 144,
287 1780, 2140, 168,
288 1744, 2152, 196,
289 1704, 2164, 220,
290 1668, 2176, 248,
291 1632, 2188, 272,
292 1592, 2196, 300,
293 1556, 2204, 328,
294 1520, 2216, 356,
295 1484, 2224, 384,
296 1448, 2232, 412,
297 1412, 2240, 440,
298 1376, 2244, 468,
299 1340, 2252, 496,
300 1304, 2256, 528,
301 1272, 2264, 556,
302 1236, 2268, 584,
303 1200, 2272, 616,
304 1168, 2276, 648,
305 1132, 2280, 676,
306 1100, 2284, 708,
307 1064, 2288, 740,
308 1032, 2288, 772,
309 996, 2292, 800,
310 964, 2292, 832,
311 932, 2292, 868,
312 900, 2292, 900
313};
314
315const uint16_t filter_4tap_64p_upscale[132] = {
316 0, 4096, 0, 0,
317 16344, 4092, 40, 0,
318 16308, 4084, 84, 16380,
319 16272, 4072, 132, 16380,
320 16240, 4056, 180, 16380,
321 16212, 4036, 232, 16376,
322 16184, 4012, 288, 16372,
323 16160, 3984, 344, 16368,
324 16136, 3952, 404, 16364,
325 16116, 3916, 464, 16360,
326 16100, 3872, 528, 16356,
327 16084, 3828, 596, 16348,
328 16072, 3780, 664, 16344,
329 16060, 3728, 732, 16336,
330 16052, 3676, 804, 16328,
331 16044, 3616, 876, 16320,
332 16040, 3556, 952, 16312,
333 16036, 3492, 1028, 16300,
334 16032, 3424, 1108, 16292,
335 16032, 3356, 1188, 16280,
336 16036, 3284, 1268, 16272,
337 16036, 3212, 1352, 16260,
338 16040, 3136, 1436, 16248,
339 16044, 3056, 1520, 16236,
340 16052, 2980, 1604, 16224,
341 16060, 2896, 1688, 16212,
342 16064, 2816, 1776, 16200,
343 16076, 2732, 1864, 16188,
344 16084, 2648, 1952, 16176,
345 16092, 2564, 2040, 16164,
346 16104, 2476, 2128, 16152,
347 16116, 2388, 2216, 16140,
348 16128, 2304, 2304, 16128 };
349
350const uint16_t filter_4tap_64p_117[132] = {
351 420, 3248, 420, 0,
352 380, 3248, 464, 16380,
353 344, 3248, 508, 16372,
354 308, 3248, 552, 16368,
355 272, 3240, 596, 16364,
356 236, 3236, 644, 16356,
357 204, 3224, 692, 16352,
358 172, 3212, 744, 16344,
359 144, 3196, 796, 16340,
360 116, 3180, 848, 16332,
361 88, 3160, 900, 16324,
362 60, 3136, 956, 16320,
363 36, 3112, 1012, 16312,
364 16, 3084, 1068, 16304,
365 16380, 3056, 1124, 16296,
366 16360, 3024, 1184, 16292,
367 16340, 2992, 1244, 16284,
368 16324, 2956, 1304, 16276,
369 16308, 2920, 1364, 16268,
370 16292, 2880, 1424, 16264,
371 16280, 2836, 1484, 16256,
372 16268, 2792, 1548, 16252,
373 16256, 2748, 1608, 16244,
374 16248, 2700, 1668, 16240,
375 16240, 2652, 1732, 16232,
376 16232, 2604, 1792, 16228,
377 16228, 2552, 1856, 16224,
378 16220, 2500, 1916, 16220,
379 16216, 2444, 1980, 16216,
380 16216, 2388, 2040, 16216,
381 16212, 2332, 2100, 16212,
382 16212, 2276, 2160, 16212,
383 16212, 2220, 2220, 16212 };
384
385const uint16_t filter_4tap_64p_150[132] = {
386 696, 2700, 696, 0,
387 660, 2704, 732, 16380,
388 628, 2704, 768, 16376,
389 596, 2704, 804, 16372,
390 564, 2700, 844, 16364,
391 532, 2696, 884, 16360,
392 500, 2692, 924, 16356,
393 472, 2684, 964, 16352,
394 440, 2676, 1004, 16352,
395 412, 2668, 1044, 16348,
396 384, 2656, 1088, 16344,
397 360, 2644, 1128, 16340,
398 332, 2632, 1172, 16336,
399 308, 2616, 1216, 16336,
400 284, 2600, 1260, 16332,
401 260, 2580, 1304, 16332,
402 236, 2560, 1348, 16328,
403 216, 2540, 1392, 16328,
404 196, 2516, 1436, 16328,
405 176, 2492, 1480, 16324,
406 156, 2468, 1524, 16324,
407 136, 2440, 1568, 16328,
408 120, 2412, 1612, 16328,
409 104, 2384, 1656, 16328,
410 88, 2352, 1700, 16332,
411 72, 2324, 1744, 16332,
412 60, 2288, 1788, 16336,
413 48, 2256, 1828, 16340,
414 36, 2220, 1872, 16344,
415 24, 2184, 1912, 16352,
416 12, 2148, 1952, 16356,
417 4, 2112, 1996, 16364,
418 16380, 2072, 2036, 16372 };
419
420const uint16_t filter_4tap_64p_183[132] = {
421 944, 2204, 944, 0,
422 916, 2204, 972, 0,
423 888, 2200, 996, 0,
424 860, 2200, 1024, 4,
425 832, 2196, 1052, 4,
426 808, 2192, 1080, 8,
427 780, 2188, 1108, 12,
428 756, 2180, 1140, 12,
429 728, 2176, 1168, 16,
430 704, 2168, 1196, 20,
431 680, 2160, 1224, 24,
432 656, 2152, 1252, 28,
433 632, 2144, 1280, 36,
434 608, 2132, 1308, 40,
435 584, 2120, 1336, 48,
436 560, 2112, 1364, 52,
437 536, 2096, 1392, 60,
438 516, 2084, 1420, 68,
439 492, 2072, 1448, 76,
440 472, 2056, 1476, 84,
441 452, 2040, 1504, 92,
442 428, 2024, 1532, 100,
443 408, 2008, 1560, 112,
444 392, 1992, 1584, 120,
445 372, 1972, 1612, 132,
446 352, 1956, 1636, 144,
447 336, 1936, 1664, 156,
448 316, 1916, 1688, 168,
449 300, 1896, 1712, 180,
450 284, 1876, 1736, 192,
451 268, 1852, 1760, 208,
452 252, 1832, 1784, 220,
453 236, 1808, 1808, 236 };
454
455const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
456{
457 if (ratio.value < dal_fixed31_32_one.value)
458 return filter_3tap_16p_upscale;
459 else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
460 return filter_3tap_16p_117;
461 else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
462 return filter_3tap_16p_150;
463 else
464 return filter_3tap_16p_183;
465}
466
467const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
468{
469 if (ratio.value < dal_fixed31_32_one.value)
470 return filter_3tap_64p_upscale;
471 else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
472 return filter_3tap_64p_117;
473 else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
474 return filter_3tap_64p_150;
475 else
476 return filter_3tap_64p_183;
477}
478
479const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
480{
481 if (ratio.value < dal_fixed31_32_one.value)
482 return filter_4tap_16p_upscale;
483 else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
484 return filter_4tap_16p_117;
485 else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
486 return filter_4tap_16p_150;
487 else
488 return filter_4tap_16p_183;
489}
490
491const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
492{
493 if (ratio.value < dal_fixed31_32_one.value)
494 return filter_4tap_64p_upscale;
495 else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
496 return filter_4tap_64p_117;
497 else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
498 return filter_4tap_64p_150;
499 else
500 return filter_4tap_64p_183;
501}
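All four get_filter_*() selectors above use the same breakpoints on the scaling ratio: below 1.0 take the upscale set, below 4/3 the _117 set, below 5/3 the _150 set, otherwise the _183 set (the suffixes presumably name the downscale ratios the coefficients were tuned for). A small stand-alone sketch of that selection, using a plain double in place of the driver's fixed31_32 type:

#include <stdio.h>

/* mirrors the ratio breakpoints used by get_filter_*tap_*p() */
static const char *pick_filter_set(double ratio)
{
	if (ratio < 1.0)
		return "upscale";
	else if (ratio < 4.0 / 3.0)	/* mild downscale */
		return "117";
	else if (ratio < 5.0 / 3.0)	/* moderate downscale */
		return "150";
	else				/* heavy downscale */
		return "183";
}

int main(void)
{
	static const double ratios[] = { 0.5, 1.0, 1.2, 1.5, 2.0 };
	unsigned i;

	for (i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("ratio %.2f -> filter_%s\n", ratios[i], pick_filter_set(ratios[i]));
	return 0;
}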
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
new file mode 100644
index 000000000000..842182ce93a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -0,0 +1,1302 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dc_bios_types.h"
27#include "dce_stream_encoder.h"
28#include "reg_helper.h"
29
30enum DP_PIXEL_ENCODING {
31DP_PIXEL_ENCODING_RGB444 = 0x00000000,
32DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
33DP_PIXEL_ENCODING_YCBCR444 = 0x00000002,
34DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003,
35DP_PIXEL_ENCODING_Y_ONLY = 0x00000004,
36DP_PIXEL_ENCODING_YCBCR420 = 0x00000005,
37DP_PIXEL_ENCODING_RESERVED = 0x00000006,
38};
39
40
41enum DP_COMPONENT_DEPTH {
42DP_COMPONENT_DEPTH_6BPC = 0x00000000,
43DP_COMPONENT_DEPTH_8BPC = 0x00000001,
44DP_COMPONENT_DEPTH_10BPC = 0x00000002,
45DP_COMPONENT_DEPTH_12BPC = 0x00000003,
46DP_COMPONENT_DEPTH_16BPC = 0x00000004,
47DP_COMPONENT_DEPTH_RESERVED = 0x00000005,
48};
49
50
51#define REG(reg)\
52 (enc110->regs->reg)
53
54#undef FN
55#define FN(reg_name, field_name) \
56 enc110->se_shift->field_name, enc110->se_mask->field_name
57
58#define VBI_LINE_0 0
59#define DP_BLANK_MAX_RETRY 20
60#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
61
62#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
63 #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
64 #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
65 #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
66 #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
67#endif
68
69enum {
70 DP_MST_UPDATE_MAX_RETRY = 50
71};
72
73#define DCE110_SE(audio)\
74 container_of(audio, struct dce110_stream_encoder, base)
75
76#define CTX \
77 enc110->base.ctx
78
79static void dce110_update_generic_info_packet(
80 struct dce110_stream_encoder *enc110,
81 uint32_t packet_index,
82 const struct encoder_info_packet *info_packet)
83{
84 uint32_t regval;
85 /* TODOFPGA Figure out a proper number for max_retries polling for lock
86 * use 50 for now.
87 */
88 uint32_t max_retries = 50;
89
90 if (REG(AFMT_VBI_PACKET_CONTROL1)) {
91 if (packet_index >= 8)
92 ASSERT(0);
93
94 /* poll until dig_update_lock is not locked -> asic internal signal;
95 * assume the otg master lock will unlock it
96 */
97 REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
98 1, 10, max_retries);
99
100 /* check if HW is reading GSP memory */
101 REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
102 1, 10, max_retries);
103
104 /* if HW is still reading GSP memory after the timeout, something is
105 * wrong: clear the GSP memory access conflict so that SW can
106 * write to GSP memory
107 */
108 REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
109 }
110 /* choose which generic packet to use */
111 {
112 regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
113 REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
114 AFMT_GENERIC_INDEX, packet_index);
115 }
116
117 /* write generic packet header
118 * (4th byte is for GENERIC0 only) */
119 {
120 REG_SET_4(AFMT_GENERIC_HDR, 0,
121 AFMT_GENERIC_HB0, info_packet->hb0,
122 AFMT_GENERIC_HB1, info_packet->hb1,
123 AFMT_GENERIC_HB2, info_packet->hb2,
124 AFMT_GENERIC_HB3, info_packet->hb3);
125 }
126
127 /* write generic packet contents
128 * (we never use the last 4 bytes)
129 * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers */
130 {
131 const uint32_t *content =
132 (const uint32_t *) &info_packet->sb[0];
133
134 REG_WRITE(AFMT_GENERIC_0, *content++);
135 REG_WRITE(AFMT_GENERIC_1, *content++);
136 REG_WRITE(AFMT_GENERIC_2, *content++);
137 REG_WRITE(AFMT_GENERIC_3, *content++);
138 REG_WRITE(AFMT_GENERIC_4, *content++);
139 REG_WRITE(AFMT_GENERIC_5, *content++);
140 REG_WRITE(AFMT_GENERIC_6, *content);
141 REG_WRITE(AFMT_GENERIC_7, 0);
142 }
143
144 if (!REG(AFMT_VBI_PACKET_CONTROL1)) {
145 /* force double-buffered packet update */
146 REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL,
147 AFMT_GENERIC0_UPDATE, (packet_index == 0),
148 AFMT_GENERIC2_UPDATE, (packet_index == 2));
149 }
150}
151
152static void dce110_update_hdmi_info_packet(
153 struct dce110_stream_encoder *enc110,
154 uint32_t packet_index,
155 const struct encoder_info_packet *info_packet)
156{
157 struct dc_context *ctx = enc110->base.ctx;
158 uint32_t cont, send, line;
159
160 if (info_packet->valid) {
161 dce110_update_generic_info_packet(
162 enc110,
163 packet_index,
164 info_packet);
165
166 /* enable transmission of packet(s) -
167 * packet transmission begins on the next frame */
168 cont = 1;
169 /* send packet(s) every frame */
170 send = 1;
171 /* select line number to send packets on */
172 line = 2;
173 } else {
174 cont = 0;
175 send = 0;
176 line = 0;
177 }
178
179 /* choose which generic packet control to use */
180 switch (packet_index) {
181 case 0:
182 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
183 HDMI_GENERIC0_CONT, cont,
184 HDMI_GENERIC0_SEND, send,
185 HDMI_GENERIC0_LINE, line);
186 break;
187 case 1:
188 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
189 HDMI_GENERIC1_CONT, cont,
190 HDMI_GENERIC1_SEND, send,
191 HDMI_GENERIC1_LINE, line);
192 break;
193 case 2:
194 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
195 HDMI_GENERIC0_CONT, cont,
196 HDMI_GENERIC0_SEND, send,
197 HDMI_GENERIC0_LINE, line);
198 break;
199 case 3:
200 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
201 HDMI_GENERIC1_CONT, cont,
202 HDMI_GENERIC1_SEND, send,
203 HDMI_GENERIC1_LINE, line);
204 break;
205 default:
206 /* invalid HW packet index */
207 dm_logger_write(
208 ctx->logger, LOG_WARNING,
209 "Invalid HW packet index: %s()\n",
210 __func__);
211 return;
212 }
213}
214
215/* setup stream encoder in dp mode */
216static void dce110_stream_encoder_dp_set_stream_attribute(
217 struct stream_encoder *enc,
218 struct dc_crtc_timing *crtc_timing,
219 enum dc_color_space output_color_space)
220{
221
222 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
223
224 /* set pixel encoding */
225 switch (crtc_timing->pixel_encoding) {
226 case PIXEL_ENCODING_YCBCR422:
227 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
228 DP_PIXEL_ENCODING_YCBCR422);
229 break;
230 case PIXEL_ENCODING_YCBCR444:
231 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
232 DP_PIXEL_ENCODING_YCBCR444);
233
234 if (crtc_timing->flags.Y_ONLY)
235 if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
236 /* HW testing only, no use case yet.
237 * Color depth of Y-only could be
238 * 8, 10, 12, 16 bits */
239 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
240 DP_PIXEL_ENCODING_Y_ONLY);
241 /* Note: DP_MSA_MISC1 bit 7 is the indicator
242 * of Y-only mode.
243 * This bit is set in HW if register
244 * DP_PIXEL_ENCODING is programmed to 0x4 */
245 break;
246 case PIXEL_ENCODING_YCBCR420:
247 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
248 DP_PIXEL_ENCODING_YCBCR420);
249 if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
250 REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
251
252 break;
253 default:
254 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
255 DP_PIXEL_ENCODING_RGB444);
256 break;
257 }
258
259 /* set color depth */
260
261 switch (crtc_timing->display_color_depth) {
262 case COLOR_DEPTH_666:
263 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
264 0);
265 break;
266 case COLOR_DEPTH_888:
267 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
268 DP_COMPONENT_DEPTH_8BPC);
269 break;
270 case COLOR_DEPTH_101010:
271 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
272 DP_COMPONENT_DEPTH_10BPC);
273
274 break;
275 case COLOR_DEPTH_121212:
276 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
277 DP_COMPONENT_DEPTH_12BPC);
278 break;
279 default:
280 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
281 DP_COMPONENT_DEPTH_6BPC);
282 break;
283 }
284
285 /* set dynamic range and YCbCr range */
286 if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE)
287 REG_UPDATE_2(
288 DP_PIXEL_FORMAT,
289 DP_DYN_RANGE, 0,
290 DP_YCBCR_RANGE, 0);
291
292}
293
294static void dce110_stream_encoder_set_stream_attribute_helper(
295 struct dce110_stream_encoder *enc110,
296 struct dc_crtc_timing *crtc_timing)
297{
298 if (enc110->regs->TMDS_CNTL) {
299 switch (crtc_timing->pixel_encoding) {
300 case PIXEL_ENCODING_YCBCR422:
301 REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 1);
302 break;
303 default:
304 REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 0);
305 break;
306 }
307 REG_UPDATE(TMDS_CNTL, TMDS_COLOR_FORMAT, 0);
308 } else if (enc110->regs->DIG_FE_CNTL) {
309 switch (crtc_timing->pixel_encoding) {
310 case PIXEL_ENCODING_YCBCR422:
311 REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
312 break;
313 default:
314 REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
315 break;
316 }
317 REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
318 }
319
320}
321
322/* setup stream encoder in hdmi mode */
323static void dce110_stream_encoder_hdmi_set_stream_attribute(
324 struct stream_encoder *enc,
325 struct dc_crtc_timing *crtc_timing,
326 int actual_pix_clk_khz,
327 bool enable_audio)
328{
329 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
330 struct bp_encoder_control cntl = {0};
331
332 cntl.action = ENCODER_CONTROL_SETUP;
333 cntl.engine_id = enc110->base.id;
334 cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
335 cntl.enable_dp_audio = enable_audio;
336 cntl.pixel_clock = actual_pix_clk_khz;
337 cntl.lanes_number = LANE_COUNT_FOUR;
338
339 if (enc110->base.bp->funcs->encoder_control(
340 enc110->base.bp, &cntl) != BP_RESULT_OK)
341 return;
342
343 dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
344
345 /* setup HDMI engine */
346 if (!enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
347 REG_UPDATE_3(HDMI_CONTROL,
348 HDMI_PACKET_GEN_VERSION, 1,
349 HDMI_KEEPOUT_MODE, 1,
350 HDMI_DEEP_COLOR_ENABLE, 0);
351 } else if (enc110->regs->DIG_FE_CNTL) {
352 REG_UPDATE_5(HDMI_CONTROL,
353 HDMI_PACKET_GEN_VERSION, 1,
354 HDMI_KEEPOUT_MODE, 1,
355 HDMI_DEEP_COLOR_ENABLE, 0,
356 HDMI_DATA_SCRAMBLE_EN, 0,
357 HDMI_CLOCK_CHANNEL_RATE, 0);
358 }
359
360 switch (crtc_timing->display_color_depth) {
361 case COLOR_DEPTH_888:
362 REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
363 break;
364 case COLOR_DEPTH_101010:
365 REG_UPDATE_2(HDMI_CONTROL,
366 HDMI_DEEP_COLOR_DEPTH, 1,
367 HDMI_DEEP_COLOR_ENABLE, 1);
368 break;
369 case COLOR_DEPTH_121212:
370 REG_UPDATE_2(HDMI_CONTROL,
371 HDMI_DEEP_COLOR_DEPTH, 2,
372 HDMI_DEEP_COLOR_ENABLE, 1);
373 break;
374 case COLOR_DEPTH_161616:
375 REG_UPDATE_2(HDMI_CONTROL,
376 HDMI_DEEP_COLOR_DEPTH, 3,
377 HDMI_DEEP_COLOR_ENABLE, 1);
378 break;
379 default:
380 break;
381 }
382
383 if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
384 if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
385 /* enable HDMI data scrambler
386 * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
387 * Clock channel frequency is 1/4 of character rate.
388 */
389 REG_UPDATE_2(HDMI_CONTROL,
390 HDMI_DATA_SCRAMBLE_EN, 1,
391 HDMI_CLOCK_CHANNEL_RATE, 1);
392 } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
393
394 /* TODO: New feature for DCE11, still need to implement */
395
396 /* enable HDMI data scrambler
397 * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
398 * Clock channel frequency is the same
399 * as character rate
400 */
401 REG_UPDATE_2(HDMI_CONTROL,
402 HDMI_DATA_SCRAMBLE_EN, 1,
403 HDMI_CLOCK_CHANNEL_RATE, 0);
404 }
405 }
406
407 REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
408 HDMI_GC_CONT, 1,
409 HDMI_GC_SEND, 1,
410 HDMI_NULL_SEND, 1);
411
412 /* following belongs to audio */
413 REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
414
415 REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
416
417 REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
418 VBI_LINE_0 + 2);
419
420 REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
421
422}
423
424/* setup stream encoder in dvi mode */
425static void dce110_stream_encoder_dvi_set_stream_attribute(
426 struct stream_encoder *enc,
427 struct dc_crtc_timing *crtc_timing,
428 bool is_dual_link)
429{
430 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
431 struct bp_encoder_control cntl = {0};
432
433 cntl.action = ENCODER_CONTROL_SETUP;
434 cntl.engine_id = enc110->base.id;
435 cntl.signal = is_dual_link ?
436 SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
437 cntl.enable_dp_audio = false;
438 cntl.pixel_clock = crtc_timing->pix_clk_khz;
439 cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
440
441 if (enc110->base.bp->funcs->encoder_control(
442 enc110->base.bp, &cntl) != BP_RESULT_OK)
443 return;
444
445 ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
446 ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
447 dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
448}
449
450static void dce110_stream_encoder_set_mst_bandwidth(
451 struct stream_encoder *enc,
452 struct fixed31_32 avg_time_slots_per_mtp)
453{
454 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
455 uint32_t x = dal_fixed31_32_floor(
456 avg_time_slots_per_mtp);
457 uint32_t y = dal_fixed31_32_ceil(
458 dal_fixed31_32_shl(
459 dal_fixed31_32_sub_int(
460 avg_time_slots_per_mtp,
461 x),
462 26));
463
464 {
465 REG_SET_2(DP_MSE_RATE_CNTL, 0,
466 DP_MSE_RATE_X, x,
467 DP_MSE_RATE_Y, y);
468 }
469
470 /* wait for update to be completed on the link */
471 /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
472 /* is reset to 0 (not pending) */
473 REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
474 0,
475 10, DP_MST_UPDATE_MAX_RETRY);
476}
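dce110_stream_encoder_set_mst_bandwidth() splits avg_time_slots_per_mtp into an integer part X and a fractional part Y scaled by 2^26 and rounded up, matching the shift-by-26 plus ceil above, before programming DP_MSE_RATE_X/Y. A quick arithmetic sketch of the same split with an ordinary double standing in for fixed31_32:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* split average time slots per MTP into integer X and 2^-26-unit fraction Y */
static void mse_rate_from_slots(double avg_slots, uint32_t *x, uint32_t *y)
{
	*x = (uint32_t)floor(avg_slots);
	*y = (uint32_t)ceil((avg_slots - *x) * (double)(1 << 26));
}

int main(void)
{
	uint32_t x, y;

	mse_rate_from_slots(3.25, &x, &y);
	printf("X=%u Y=%u\n", x, y);	/* X=3, Y=0.25 * 2^26 = 16777216 */
	return 0;
}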
477
478static void dce110_stream_encoder_update_hdmi_info_packets(
479 struct stream_encoder *enc,
480 const struct encoder_info_frame *info_frame)
481{
482 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
483
484 if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
485 enc110->se_mask->HDMI_AVI_INFO_SEND) {
486
487 if (info_frame->avi.valid) {
488 const uint32_t *content =
489 (const uint32_t *) &info_frame->avi.sb[0];
490
491 REG_WRITE(AFMT_AVI_INFO0, content[0]);
492
493 REG_WRITE(AFMT_AVI_INFO1, content[1]);
494
495 REG_WRITE(AFMT_AVI_INFO2, content[2]);
496
497 REG_WRITE(AFMT_AVI_INFO3, content[3]);
498
499 REG_UPDATE(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION,
500 info_frame->avi.hb1);
501
502 REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
503 HDMI_AVI_INFO_SEND, 1,
504 HDMI_AVI_INFO_CONT, 1);
505
506 REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE,
507 VBI_LINE_0 + 2);
508
509 } else {
510 REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
511 HDMI_AVI_INFO_SEND, 0,
512 HDMI_AVI_INFO_CONT, 0);
513 }
514 }
515
516 if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
517 enc110->se_mask->HDMI_AVI_INFO_SEND) {
518 dce110_update_hdmi_info_packet(enc110, 0, &info_frame->vendor);
519 dce110_update_hdmi_info_packet(enc110, 1, &info_frame->gamut);
520 dce110_update_hdmi_info_packet(enc110, 2, &info_frame->spd);
521 }
522
523}
524
525static void dce110_stream_encoder_stop_hdmi_info_packets(
526 struct stream_encoder *enc)
527{
528 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
529
530 /* stop generic packets 0 & 1 on HDMI */
531 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
532 HDMI_GENERIC1_CONT, 0,
533 HDMI_GENERIC1_LINE, 0,
534 HDMI_GENERIC1_SEND, 0,
535 HDMI_GENERIC0_CONT, 0,
536 HDMI_GENERIC0_LINE, 0,
537 HDMI_GENERIC0_SEND, 0);
538
539 /* stop generic packets 2 & 3 on HDMI */
540 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
541 HDMI_GENERIC0_CONT, 0,
542 HDMI_GENERIC0_LINE, 0,
543 HDMI_GENERIC0_SEND, 0,
544 HDMI_GENERIC1_CONT, 0,
545 HDMI_GENERIC1_LINE, 0,
546 HDMI_GENERIC1_SEND, 0);
547
548}
549
550static void dce110_stream_encoder_update_dp_info_packets(
551 struct stream_encoder *enc,
552 const struct encoder_info_frame *info_frame)
553{
554 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
555 uint32_t value = REG_READ(DP_SEC_CNTL);
556
557 if (info_frame->vsc.valid)
558 dce110_update_generic_info_packet(
559 enc110,
560 0, /* packetIndex */
561 &info_frame->vsc);
562
563 /* enable/disable transmission of packet(s).
564 * If enabled, packet transmission begins on the next frame
565 */
566 REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
567
568 /* This bit is the master enable bit.
569 * When enabling the secondary stream engine,
570 * this master bit must also be set.
571 * This register is shared with the audio info frame.
572 * Therefore we need to enable the master bit
573 * if at least one of the fields is not 0
574 */
575 if (value)
576 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
577}
578
579static void dce110_stream_encoder_stop_dp_info_packets(
580 struct stream_encoder *enc)
581{
582 /* stop generic packets on DP */
583 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
584 uint32_t value = REG_READ(DP_SEC_CNTL);
585
586 if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
587 REG_SET_7(DP_SEC_CNTL, 0,
588 DP_SEC_GSP0_ENABLE, 0,
589 DP_SEC_GSP1_ENABLE, 0,
590 DP_SEC_GSP2_ENABLE, 0,
591 DP_SEC_GSP3_ENABLE, 0,
592 DP_SEC_AVI_ENABLE, 0,
593 DP_SEC_MPG_ENABLE, 0,
594 DP_SEC_STREAM_ENABLE, 0);
595 }
596
597 /* this register is shared with the audio info frame.
598 * therefore we need to keep the master enabled
599 * if at least one of the fields is not 0 */
600
601 if (value)
602 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
603
604}
605
606static void dce110_stream_encoder_dp_blank(
607 struct stream_encoder *enc)
608{
609 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
610 uint32_t retries = 0;
611 uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
612
613 /* Note: For CZ, we are changing driver default to disable
614 * stream deferred to next VBLANK. If results are positive, we
615 * will make the same change to all DCE versions. There are a
616 * handful of panels that cannot handle disable stream at
617 * HBLANK and will result in a white line flash across the
618 * screen on stream disable. */
619
620 /* Specify the video stream disable point
621 * (2 = start of the next vertical blank) */
622 REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
623 /* Larger delay to wait until VBLANK - use max retry of
624 * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
625 * a little more because we may not trust delay accuracy.
626 */
627 max_retries = DP_BLANK_MAX_RETRY * 150;
628
629 /* disable DP stream */
630 REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
631
632 /* the encoder stops sending the video stream
633 * at the start of the vertical blanking.
634 * Poll for DP_VID_STREAM_STATUS == 0
635 */
636
637 REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
638 1,
639 10, max_retries);
640
641 ASSERT(retries <= max_retries);
642
643 /* Tell the DP encoder to ignore timing from CRTC, must be done after
644 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
645 * complete, stream status will be stuck in video stream enabled state,
646 * i.e. DP_VID_STREAM_STATUS stuck at 1.
647 */
648
649 REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
650}
651
652/* output video stream to link encoder */
653static void dce110_stream_encoder_dp_unblank(
654 struct stream_encoder *enc,
655 const struct encoder_unblank_param *param)
656{
657 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
658
659 if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
660 uint32_t n_vid = 0x8000;
661 uint32_t m_vid;
662
663 /* M / N = Fstream / Flink
664 * m_vid / n_vid = pixel rate / link rate
665 */
666
667 uint64_t m_vid_l = n_vid;
668
669 m_vid_l *= param->crtc_timing.pixel_clock;
670 m_vid_l = div_u64(m_vid_l,
671 param->link_settings.link_rate
672 * LINK_RATE_REF_FREQ_IN_KHZ);
673
674 m_vid = (uint32_t) m_vid_l;
675
676 /* enable auto measurement */
677
678 REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
679
680 /* auto measurement needs 1 full 0x8000 symbol cycle to kick in,
681 * therefore program initial values for Mvid and Nvid
682 */
683
684 REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
685
686 REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
687
688 REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
689 }
690
691 /* set DIG_START to 0x1 to resync FIFO */
692
693 REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
694
695 /* switch DP encoder to CRTC data */
696
697 REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
698
699 /* wait 100us for DIG/DP logic to prime
700 * (i.e. a few video lines)
701 */
702 udelay(100);
703
704 /* the hardware would start sending video at the start of the next DP
705 * frame (i.e. rising edge of the vblank).
706 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
707 * register has no effect on enable transition! HW always guarantees
708 * VID_STREAM enable at start of next frame, and this is not
709 * programmable
710 */
711
712 REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
713}
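The unblank path above seeds the M/N generator with Nvid = 0x8000 and Mvid = Nvid * pixel_clock / (link_rate * LINK_RATE_REF_FREQ_IN_KHZ). A worked example of that arithmetic, assuming the usual DisplayPort convention that link_rate carries the DPCD rate code (0x14 for HBR2) and that LINK_RATE_REF_FREQ_IN_KHZ is the 27 MHz reference; neither definition appears in this patch, so treat the numbers as illustrative:

#include <stdint.h>
#include <stdio.h>

/* assumed: link symbol clock (kHz) = DPCD rate code * 27000 */
#define ASSUMED_LINK_RATE_REF_FREQ_IN_KHZ 27000

/* same ratio the unblank path programs: m/n = pixel clock / link symbol clock */
static uint32_t dp_m_vid(uint32_t n_vid, uint32_t pixel_clock_khz,
			 uint32_t link_rate_code)
{
	uint64_t m = (uint64_t)n_vid * pixel_clock_khz;

	return (uint32_t)(m / ((uint64_t)link_rate_code *
			       ASSUMED_LINK_RATE_REF_FREQ_IN_KHZ));
}

int main(void)
{
	/* 297 MHz pixel clock over HBR2: 32768 * 297000 / 540000 = 18022 */
	printf("Nvid=0x8000 Mvid=%u\n", (unsigned)dp_m_vid(0x8000, 297000, 0x14));
	return 0;
}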
714
715
716#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
717#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
718
719#include "include/audio_types.h"
720
721/**
722* speakers_to_channels
723*
724* @brief
725* translate speakers to channels
726*
727* FL - Front Left
728* FR - Front Right
729* RL - Rear Left
730* RR - Rear Right
731* RC - Rear Center
732* FC - Front Center
733* FLC - Front Left Center
734* FRC - Front Right Center
735* RLC - Rear Left Center
736* RRC - Rear Right Center
737* LFE - Low Freq Effect
738*
739* FC
740* FLC FRC
741* FL FR
742*
743* LFE
744* ()
745*
746*
747* RL RR
748* RLC RRC
749* RC
750*
751* ch 8 7 6 5 4 3 2 1
752* 0b00000011 - - - - - - FR FL
753* 0b00000111 - - - - - LFE FR FL
754* 0b00001011 - - - - FC - FR FL
755* 0b00001111 - - - - FC LFE FR FL
756* 0b00010011 - - - RC - - FR FL
757* 0b00010111 - - - RC - LFE FR FL
758* 0b00011011 - - - RC FC - FR FL
759* 0b00011111 - - - RC FC LFE FR FL
760* 0b00110011 - - RR RL - - FR FL
761* 0b00110111 - - RR RL - LFE FR FL
762* 0b00111011 - - RR RL FC - FR FL
763* 0b00111111 - - RR RL FC LFE FR FL
764* 0b01110011 - RC RR RL - - FR FL
765* 0b01110111 - RC RR RL - LFE FR FL
766* 0b01111011 - RC RR RL FC - FR FL
767* 0b01111111 - RC RR RL FC LFE FR FL
768* 0b11110011 RRC RLC RR RL - - FR FL
769* 0b11110111 RRC RLC RR RL - LFE FR FL
770* 0b11111011 RRC RLC RR RL FC - FR FL
771* 0b11111111 RRC RLC RR RL FC LFE FR FL
772* 0b11000011 FRC FLC - - - - FR FL
773* 0b11000111 FRC FLC - - - LFE FR FL
774* 0b11001011 FRC FLC - - FC - FR FL
775* 0b11001111 FRC FLC - - FC LFE FR FL
776* 0b11010011 FRC FLC - RC - - FR FL
777* 0b11010111 FRC FLC - RC - LFE FR FL
778* 0b11011011 FRC FLC - RC FC - FR FL
779* 0b11011111 FRC FLC - RC FC LFE FR FL
780* 0b11110011 FRC FLC RR RL - - FR FL
781* 0b11110111 FRC FLC RR RL - LFE FR FL
782* 0b11111011 FRC FLC RR RL FC - FR FL
783* 0b11111111 FRC FLC RR RL FC LFE FR FL
784*
785* @param
786* speakers - speaker information as it comes from CEA audio block
787*/
788/* translate speakers to channels */
789
790union audio_cea_channels {
791 uint8_t all;
792 struct audio_cea_channels_bits {
793 uint32_t FL:1;
794 uint32_t FR:1;
795 uint32_t LFE:1;
796 uint32_t FC:1;
797 uint32_t RL_RC:1;
798 uint32_t RR:1;
799 uint32_t RC_RLC_FLC:1;
800 uint32_t RRC_FRC:1;
801 } channels;
802};
803
804struct audio_clock_info {
805 /* pixel clock frequency*/
806 uint32_t pixel_clock_in_10khz;
807 /* N - 32KHz audio */
808 uint32_t n_32khz;
809 /* CTS - 32KHz audio*/
810 uint32_t cts_32khz;
811 uint32_t n_44khz;
812 uint32_t cts_44khz;
813 uint32_t n_48khz;
814 uint32_t cts_48khz;
815};
816
817/* 25.2MHz/1.001*/
818/* 25.2MHz/1.001*/
819/* 25.2MHz*/
820/* 27MHz */
821/* 27MHz*1.001*/
822/* 27MHz*1.001*/
823/* 54MHz*/
824/* 54MHz*1.001*/
825/* 74.25MHz/1.001*/
826/* 74.25MHz*/
827/* 148.5MHz/1.001*/
828/* 148.5MHz*/
829
830static const struct audio_clock_info audio_clock_info_table[12] = {
831 {2517, 4576, 28125, 7007, 31250, 6864, 28125},
832 {2518, 4576, 28125, 7007, 31250, 6864, 28125},
833 {2520, 4096, 25200, 6272, 28000, 6144, 25200},
834 {2700, 4096, 27000, 6272, 30000, 6144, 27000},
835 {2702, 4096, 27027, 6272, 30030, 6144, 27027},
836 {2703, 4096, 27027, 6272, 30030, 6144, 27027},
837 {5400, 4096, 54000, 6272, 60000, 6144, 54000},
838 {5405, 4096, 54054, 6272, 60060, 6144, 54054},
839 {7417, 11648, 210937, 17836, 234375, 11648, 140625},
840 {7425, 4096, 74250, 6272, 82500, 6144, 74250},
841 {14835, 11648, 421875, 8918, 234375, 5824, 140625},
842 {14850, 4096, 148500, 6272, 165000, 6144, 148500}
843};
844
845static const struct audio_clock_info audio_clock_info_table_36bpc[12] = {
846 {2517, 9152, 84375, 7007, 48875, 9152, 56250},
847 {2518, 9152, 84375, 7007, 48875, 9152, 56250},
848 {2520, 4096, 37800, 6272, 42000, 6144, 37800},
849 {2700, 4096, 40500, 6272, 45000, 6144, 40500},
850 {2702, 8192, 81081, 6272, 45045, 8192, 54054},
851 {2703, 8192, 81081, 6272, 45045, 8192, 54054},
852 {5400, 4096, 81000, 6272, 90000, 6144, 81000},
853 {5405, 4096, 81081, 6272, 90090, 6144, 81081},
854 {7417, 11648, 316406, 17836, 351562, 11648, 210937},
855 {7425, 4096, 111375, 6272, 123750, 6144, 111375},
856 {14835, 11648, 632812, 17836, 703125, 11648, 421875},
857 {14850, 4096, 222750, 6272, 247500, 6144, 222750}
858};
859
860static const struct audio_clock_info audio_clock_info_table_48bpc[12] = {
861 {2517, 4576, 56250, 7007, 62500, 6864, 56250},
862 {2518, 4576, 56250, 7007, 62500, 6864, 56250},
863 {2520, 4096, 50400, 6272, 56000, 6144, 50400},
864 {2700, 4096, 54000, 6272, 60000, 6144, 54000},
865 {2702, 4096, 54054, 6267, 60060, 8192, 54054},
866 {2703, 4096, 54054, 6272, 60060, 8192, 54054},
867 {5400, 4096, 108000, 6272, 120000, 6144, 108000},
868 {5405, 4096, 108108, 6272, 120120, 6144, 108108},
869 {7417, 11648, 421875, 17836, 468750, 11648, 281250},
870 {7425, 4096, 148500, 6272, 165000, 6144, 148500},
871 {14835, 11648, 843750, 8918, 468750, 11648, 281250},
872 {14850, 4096, 297000, 6272, 330000, 6144, 297000}
873};
874
875union audio_cea_channels speakers_to_channels(
876 struct audio_speaker_flags speaker_flags)
877{
878 union audio_cea_channels cea_channels = {0};
879
880 /* these are one to one */
881 cea_channels.channels.FL = speaker_flags.FL_FR;
882 cea_channels.channels.FR = speaker_flags.FL_FR;
883 cea_channels.channels.LFE = speaker_flags.LFE;
884 cea_channels.channels.FC = speaker_flags.FC;
885
886 /* if Rear Left and Right exist move RC speaker to channel 7
887 * otherwise to channel 5
888 */
889 if (speaker_flags.RL_RR) {
890 cea_channels.channels.RL_RC = speaker_flags.RL_RR;
891 cea_channels.channels.RR = speaker_flags.RL_RR;
892 cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
893 } else {
894 cea_channels.channels.RL_RC = speaker_flags.RC;
895 }
896
897 /* FRONT Left Right Center and REAR Left Right Center are exclusive */
898 if (speaker_flags.FLC_FRC) {
899 cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
900 cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
901 } else {
902 cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
903 cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
904 }
905
906 return cea_channels;
907}
908
909uint32_t calc_max_audio_packets_per_line(
910 const struct audio_crtc_info *crtc_info)
911{
912 uint32_t max_packets_per_line;
913
914 max_packets_per_line =
915 crtc_info->h_total - crtc_info->h_active;
916
917 if (crtc_info->pixel_repetition)
918 max_packets_per_line *= crtc_info->pixel_repetition;
919
920 /* for other hdmi features */
921 max_packets_per_line -= 58;
922 /* for Control Period */
923 max_packets_per_line -= 16;
924 /* Number of Audio Packets per Line */
925 max_packets_per_line /= 32;
926
927 return max_packets_per_line;
928}
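calc_max_audio_packets_per_line() budgets the horizontal blanking interval: it reserves 58 pixels for other HDMI features and 16 for the control period, then divides the remainder by 32 pixel clocks per data island packet. A worked example with the standard CEA-861 1080p timing (2200 total, 1920 active, no pixel repetition):

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as calc_max_audio_packets_per_line() */
static uint32_t max_audio_packets(uint32_t h_total, uint32_t h_active,
				  uint32_t pixel_repetition)
{
	uint32_t n = h_total - h_active;

	if (pixel_repetition)
		n *= pixel_repetition;
	n -= 58;	/* reserved for other HDMI features */
	n -= 16;	/* control period */
	return n / 32;	/* 32 pixel clocks per audio packet */
}

int main(void)
{
	/* (2200 - 1920 - 58 - 16) / 32 = 6 */
	printf("%u packets per line\n", (unsigned)max_audio_packets(2200, 1920, 0));
	return 0;
}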
929
930bool get_audio_clock_info(
931 enum dc_color_depth color_depth,
932 uint32_t crtc_pixel_clock_in_khz,
933 uint32_t actual_pixel_clock_in_khz,
934 struct audio_clock_info *audio_clock_info)
935{
936 const struct audio_clock_info *clock_info;
937 uint32_t index;
938 uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
939 uint32_t audio_array_size;
940
941 if (audio_clock_info == NULL)
942 return false; /* should not happen */
943
944 switch (color_depth) {
945 case COLOR_DEPTH_161616:
946 clock_info = audio_clock_info_table_48bpc;
947 audio_array_size = ARRAY_SIZE(
948 audio_clock_info_table_48bpc);
949 break;
950 case COLOR_DEPTH_121212:
951 clock_info = audio_clock_info_table_36bpc;
952 audio_array_size = ARRAY_SIZE(
953 audio_clock_info_table_36bpc);
954 break;
955 default:
956 clock_info = audio_clock_info_table;
957 audio_array_size = ARRAY_SIZE(
958 audio_clock_info_table);
959 break;
960 }
961
962 if (clock_info != NULL) {
963 /* search for exact pixel clock in table */
964 for (index = 0; index < audio_array_size; index++) {
965 if (clock_info[index].pixel_clock_in_10khz >
966 crtc_pixel_clock_in_10khz)
967 break; /* not match */
968 else if (clock_info[index].pixel_clock_in_10khz ==
969 crtc_pixel_clock_in_10khz) {
970 /* match found */
971 *audio_clock_info = clock_info[index];
972 return true;
973 }
974 }
975 }
976
977 /* not found */
978 if (actual_pixel_clock_in_khz == 0)
979 actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
980
981 /* See the HDMI spec, the table entry under the
982 * pixel clock of "Other". */
983 audio_clock_info->pixel_clock_in_10khz =
984 actual_pixel_clock_in_khz / 10;
985 audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
986 audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
987 audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
988
989 audio_clock_info->n_32khz = 4096;
990 audio_clock_info->n_44khz = 6272;
991 audio_clock_info->n_48khz = 6144;
992
993 return true;
994}
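The N/CTS pairs in audio_clock_info_table are consistent with the standard HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * fs). A quick stand-alone check of the 27 MHz row ({2700, 4096, 27000, 6272, 30000, 6144, 27000}):

#include <stdint.h>
#include <stdio.h>

/* HDMI ACR relation: CTS = f_tmds * N / (128 * fs) */
static uint32_t acr_cts(uint64_t f_tmds_hz, uint32_t n, uint32_t fs_hz)
{
	return (uint32_t)(f_tmds_hz * n / (128ULL * fs_hz));
}

int main(void)
{
	printf("32 kHz  : N=4096 CTS=%u\n", (unsigned)acr_cts(27000000, 4096, 32000));
	printf("44.1 kHz: N=6272 CTS=%u\n", (unsigned)acr_cts(27000000, 6272, 44100));
	printf("48 kHz  : N=6144 CTS=%u\n", (unsigned)acr_cts(27000000, 6144, 48000));
	return 0;	/* prints 27000, 30000, 27000 - matching the table row */
}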
995
996static void dce110_se_audio_setup(
997 struct stream_encoder *enc,
998 unsigned int az_inst,
999 struct audio_info *audio_info)
1000{
1001 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1002
1003 uint32_t speakers = 0;
1004 uint32_t channels = 0;
1005
1006 ASSERT(audio_info);
1007 if (audio_info == NULL)
1008 /* This should not happen; return if it does so we don't crash (BSOD) */
1009 return;
1010
1011 speakers = audio_info->flags.info.ALLSPEAKERS;
1012 channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
1013
1014 /* setup the audio stream source select (audio -> dig mapping) */
1015 REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
1016
1017 /* Channel allocation */
1018 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
1019}
1020
1021static void dce110_se_setup_hdmi_audio(
1022 struct stream_encoder *enc,
1023 const struct audio_crtc_info *crtc_info)
1024{
1025 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1026
1027 struct audio_clock_info audio_clock_info = {0};
1028 uint32_t max_packets_per_line;
1029
1030 /* For now still do the calculation, although this field is ignored
1031 when HDMI_PACKET_GEN_VERSION above is set to 1 */
1032 max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
1033
1034 /* HDMI_AUDIO_PACKET_CONTROL */
1035 REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
1036 HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
1037 HDMI_AUDIO_DELAY_EN, 1);
1038
1039 /* AFMT_AUDIO_PACKET_CONTROL */
1040 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1041
1042 /* AFMT_AUDIO_PACKET_CONTROL2 */
1043 REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
1044 AFMT_AUDIO_LAYOUT_OVRD, 0,
1045 AFMT_60958_OSF_OVRD, 0);
1046
1047 /* HDMI_ACR_PACKET_CONTROL */
1048 REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
1049 HDMI_ACR_AUTO_SEND, 1,
1050 HDMI_ACR_SOURCE, 0,
1051 HDMI_ACR_AUDIO_PRIORITY, 0);
1052
1053 /* Program audio clock sample/regeneration parameters */
1054 if (get_audio_clock_info(
1055 crtc_info->color_depth,
1056 crtc_info->requested_pixel_clock,
1057 crtc_info->calculated_pixel_clock,
1058 &audio_clock_info)) {
1059
1060 /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
1061 REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
1062
1063 /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
1064 REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
1065
1066 /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
1067 REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
1068
1069 /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
1070 REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
1071
1072 /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
1073 REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
1074
1075 /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
1076 REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
1077
1078 /* The video driver cannot know in advance which sample rate will
1079 be used by the HD Audio driver, so the
1080 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
1081 programmed below in the interrupt callback */
1082 } /* if */
1083
1084 /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
1085 AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
1086 REG_UPDATE_2(AFMT_60958_0,
1087 AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
1088 AFMT_60958_CS_CLOCK_ACCURACY, 0);
1089
1090 /* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
1091 REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1092
1093 /* AFMT_60958_2: keep these settings until the
1094 * programming guide comes out */
1095 REG_UPDATE_6(AFMT_60958_2,
1096 AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
1097 AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
1098 AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
1099 AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
1100 AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
1101 AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1102}
1103
1104static void dce110_se_setup_dp_audio(
1105 struct stream_encoder *enc)
1106{
1107 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1108
1109 /* --- DP Audio packet configurations --- */
1110
1111 /* ATP Configuration */
1112 REG_SET(DP_SEC_AUD_N, 0,
1113 DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
1114
1115 /* Async/auto-calc timestamp mode */
1116 REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
1117 DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
1118
1119 /* --- The following are the registers
1120 * copied from the SetupHDMI --- */
1121
1122 /* AFMT_AUDIO_PACKET_CONTROL */
1123 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1124
1125 /* AFMT_AUDIO_PACKET_CONTROL2 */
1126 /* Program the ATP and AIP next */
1127 REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
1128 AFMT_AUDIO_LAYOUT_OVRD, 0,
1129 AFMT_60958_OSF_OVRD, 0);
1130
1131 /* AFMT_INFOFRAME_CONTROL0 */
1132 REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1133
1134 /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
1135 REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
1136}
1137
1138static void dce110_se_enable_audio_clock(
1139 struct stream_encoder *enc,
1140 bool enable)
1141{
1142 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1143
1144 if (REG(AFMT_CNTL) == 0)
1145 return; /* DCE8/10 does not have this register */
1146
1147 REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
1148
1149 /* wait for AFMT clock to turn on,
1150 * expectation: this should complete in 1-2 reads
1151 *
1152 * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
1153 *
1154 * TODO: wait for clock_on does not work well. May need HW
1155 * program sequence. But audio seems work normally even without wait
1156 * for clock_on status change
1157 */
1158}
1159
1160static void dce110_se_enable_dp_audio(
1161 struct stream_encoder *enc)
1162{
1163 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1164
1165 /* Enable Audio packets */
1166 REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1167
1168 /* Program the ATP and AIP next */
1169 REG_UPDATE_2(DP_SEC_CNTL,
1170 DP_SEC_ATP_ENABLE, 1,
1171 DP_SEC_AIP_ENABLE, 1);
1172
1173 /* Program STREAM_ENABLE after all the other enables. */
1174 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1175}
1176
1177static void dce110_se_disable_dp_audio(
1178 struct stream_encoder *enc)
1179{
1180 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1181 uint32_t value = REG_READ(DP_SEC_CNTL);
1182
1183 /* Disable Audio packets */
1184 REG_UPDATE_5(DP_SEC_CNTL,
1185 DP_SEC_ASP_ENABLE, 0,
1186 DP_SEC_ATP_ENABLE, 0,
1187 DP_SEC_AIP_ENABLE, 0,
1188 DP_SEC_ACM_ENABLE, 0,
1189 DP_SEC_STREAM_ENABLE, 0);
1190
1191 /* This register is shared with the encoder info frame. Therefore we need
1192 to keep the master enabled if at least one of the fields is not 0 */
1193 if (value != 0)
1194 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1195
1196}
1197
1198void dce110_se_audio_mute_control(
1199 struct stream_encoder *enc,
1200 bool mute)
1201{
1202 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1203
1204 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
1205}
1206
1207void dce110_se_dp_audio_setup(
1208 struct stream_encoder *enc,
1209 unsigned int az_inst,
1210 struct audio_info *info)
1211{
1212 dce110_se_audio_setup(enc, az_inst, info);
1213}
1214
1215void dce110_se_dp_audio_enable(
1216 struct stream_encoder *enc)
1217{
1218 dce110_se_enable_audio_clock(enc, true);
1219 dce110_se_setup_dp_audio(enc);
1220 dce110_se_enable_dp_audio(enc);
1221}
1222
1223void dce110_se_dp_audio_disable(
1224 struct stream_encoder *enc)
1225{
1226 dce110_se_disable_dp_audio(enc);
1227 dce110_se_enable_audio_clock(enc, false);
1228}
1229
1230void dce110_se_hdmi_audio_setup(
1231 struct stream_encoder *enc,
1232 unsigned int az_inst,
1233 struct audio_info *info,
1234 struct audio_crtc_info *audio_crtc_info)
1235{
1236 dce110_se_enable_audio_clock(enc, true);
1237 dce110_se_setup_hdmi_audio(enc, audio_crtc_info);
1238 dce110_se_audio_setup(enc, az_inst, info);
1239}
1240
1241void dce110_se_hdmi_audio_disable(
1242 struct stream_encoder *enc)
1243{
1244 dce110_se_enable_audio_clock(enc, false);
1245}
1246
1247static const struct stream_encoder_funcs dce110_str_enc_funcs = {
1248 .dp_set_stream_attribute =
1249 dce110_stream_encoder_dp_set_stream_attribute,
1250 .hdmi_set_stream_attribute =
1251 dce110_stream_encoder_hdmi_set_stream_attribute,
1252 .dvi_set_stream_attribute =
1253 dce110_stream_encoder_dvi_set_stream_attribute,
1254 .set_mst_bandwidth =
1255 dce110_stream_encoder_set_mst_bandwidth,
1256 .update_hdmi_info_packets =
1257 dce110_stream_encoder_update_hdmi_info_packets,
1258 .stop_hdmi_info_packets =
1259 dce110_stream_encoder_stop_hdmi_info_packets,
1260 .update_dp_info_packets =
1261 dce110_stream_encoder_update_dp_info_packets,
1262 .stop_dp_info_packets =
1263 dce110_stream_encoder_stop_dp_info_packets,
1264 .dp_blank =
1265 dce110_stream_encoder_dp_blank,
1266 .dp_unblank =
1267 dce110_stream_encoder_dp_unblank,
1268
1269 .audio_mute_control = dce110_se_audio_mute_control,
1270
1271 .dp_audio_setup = dce110_se_dp_audio_setup,
1272 .dp_audio_enable = dce110_se_dp_audio_enable,
1273 .dp_audio_disable = dce110_se_dp_audio_disable,
1274
1275 .hdmi_audio_setup = dce110_se_hdmi_audio_setup,
1276 .hdmi_audio_disable = dce110_se_hdmi_audio_disable,
1277};
1278
1279bool dce110_stream_encoder_construct(
1280 struct dce110_stream_encoder *enc110,
1281 struct dc_context *ctx,
1282 struct dc_bios *bp,
1283 enum engine_id eng_id,
1284 const struct dce110_stream_enc_registers *regs,
1285 const struct dce_stream_encoder_shift *se_shift,
1286 const struct dce_stream_encoder_mask *se_mask)
1287{
1288 if (!enc110)
1289 return false;
1290 if (!bp)
1291 return false;
1292
1293 enc110->base.funcs = &dce110_str_enc_funcs;
1294 enc110->base.ctx = ctx;
1295 enc110->base.id = eng_id;
1296 enc110->base.bp = bp;
1297 enc110->regs = regs;
1298 enc110->se_shift = se_shift;
1299 enc110->se_mask = se_mask;
1300
1301 return true;
1302}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
new file mode 100644
index 000000000000..458a37000956
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -0,0 +1,564 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_STREAM_ENCODER_DCE110_H__
27#define __DC_STREAM_ENCODER_DCE110_H__
28
29#include "stream_encoder.h"
30
31#define DCE110STRENC_FROM_STRENC(stream_encoder)\
32 container_of(stream_encoder, struct dce110_stream_encoder, base)
33
34#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
35 #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
36 #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
37 #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
38 #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
39#endif
40
41
42#define SE_COMMON_REG_LIST_DCE_BASE(id) \
43 SE_COMMON_REG_LIST_BASE(id),\
44 SRI(AFMT_AVI_INFO0, DIG, id), \
45 SRI(AFMT_AVI_INFO1, DIG, id), \
46 SRI(AFMT_AVI_INFO2, DIG, id), \
47 SRI(AFMT_AVI_INFO3, DIG, id)
48
49#define SE_COMMON_REG_LIST_BASE(id) \
50 SRI(AFMT_GENERIC_0, DIG, id), \
51 SRI(AFMT_GENERIC_1, DIG, id), \
52 SRI(AFMT_GENERIC_2, DIG, id), \
53 SRI(AFMT_GENERIC_3, DIG, id), \
54 SRI(AFMT_GENERIC_4, DIG, id), \
55 SRI(AFMT_GENERIC_5, DIG, id), \
56 SRI(AFMT_GENERIC_6, DIG, id), \
57 SRI(AFMT_GENERIC_7, DIG, id), \
58 SRI(AFMT_GENERIC_HDR, DIG, id), \
59 SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
60 SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
61 SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
62 SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
63 SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
64 SRI(AFMT_60958_0, DIG, id), \
65 SRI(AFMT_60958_1, DIG, id), \
66 SRI(AFMT_60958_2, DIG, id), \
67 SRI(DIG_FE_CNTL, DIG, id), \
68 SRI(HDMI_CONTROL, DIG, id), \
69 SRI(HDMI_GC, DIG, id), \
70 SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
71 SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
72 SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
73 SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
74 SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
75 SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
76 SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
77 SRI(HDMI_ACR_32_0, DIG, id),\
78 SRI(HDMI_ACR_32_1, DIG, id),\
79 SRI(HDMI_ACR_44_0, DIG, id),\
80 SRI(HDMI_ACR_44_1, DIG, id),\
81 SRI(HDMI_ACR_48_0, DIG, id),\
82 SRI(HDMI_ACR_48_1, DIG, id),\
83 SRI(TMDS_CNTL, DIG, id), \
84 SRI(DP_MSE_RATE_CNTL, DP, id), \
85 SRI(DP_MSE_RATE_UPDATE, DP, id), \
86 SRI(DP_PIXEL_FORMAT, DP, id), \
87 SRI(DP_SEC_CNTL, DP, id), \
88 SRI(DP_STEER_FIFO, DP, id), \
89 SRI(DP_VID_M, DP, id), \
90 SRI(DP_VID_N, DP, id), \
91 SRI(DP_VID_STREAM_CNTL, DP, id), \
92 SRI(DP_VID_TIMING, DP, id), \
93 SRI(DP_SEC_AUD_N, DP, id), \
94 SRI(DP_SEC_TIMESTAMP, DP, id)
95
96#define SE_COMMON_REG_LIST(id)\
97 SE_COMMON_REG_LIST_DCE_BASE(id), \
98 SRI(AFMT_CNTL, DIG, id)
99
100#define SE_SF(reg_name, field_name, post_fix)\
101 .field_name = reg_name ## __ ## field_name ## post_fix
102
103#define SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
104 SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
105 SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
106 SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
107 SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
108 SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
109 SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
110 SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
111 SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
112 SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
113 SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
114 SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
115 SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
116 SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
117 SE_SF(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
118 SE_SF(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
119 SE_SF(DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
120 SE_SF(DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
121 SE_SF(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
122 SE_SF(HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
123 SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
124 SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
125 SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
126 SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
127 SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
128 SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
129 SE_SF(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
130 SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
131 SE_SF(HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
132 SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
133 SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
134 SE_SF(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
135 SE_SF(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
136 SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
137 SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
138 SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
139 SE_SF(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
140 SE_SF(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
141 SE_SF(DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
142 SE_SF(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
143 SE_SF(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
144 SE_SF(DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
145 SE_SF(DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
146 SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
147 SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
148 SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
149 SE_SF(DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
150 SE_SF(DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
151 SE_SF(DP_VID_N, DP_VID_N, mask_sh),\
152 SE_SF(DP_VID_M, DP_VID_M, mask_sh),\
153 SE_SF(DIG_FE_CNTL, DIG_START, mask_sh),\
154 SE_SF(AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
155 SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
156 SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
157 SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
158 SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
159 SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
160 SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
161 SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
162 SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
163 SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
164 SE_SF(HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
165 SE_SF(HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
166 SE_SF(HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
167 SE_SF(HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
168 SE_SF(HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
169 SE_SF(HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
170 SE_SF(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
171 SE_SF(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
172 SE_SF(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
173 SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
174 SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
175 SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
176 SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
177 SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
178 SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
179 SE_SF(DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
180 SE_SF(DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
181 SE_SF(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
182 SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
183 SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
184 SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
185 SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh)
186
187#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
188 SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
189
190#define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\
191 SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
192 SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
193 SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh)
194
195#define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
196 SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
197 SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
198 SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
199 SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
200 SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
201 SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh)
202
203#define SE_COMMON_MASK_SH_LIST_DCE112(mask_sh)\
204 SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
205 SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
206 SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
207 SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
208 SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
209 SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
210 SE_SF(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
211
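The SE_SF() macro above builds designated initializers by token pasting, so the same field list can populate both a shift table and a mask table. A rough sketch of how a resource file might instantiate them (illustrative only; the struct types are defined just below, and __SHIFT/_MASK are the usual suffixes from the generated register headers):

/*
 * SE_SF(DIG_FE_CNTL, DIG_START, __SHIFT)
 *	expands to
 * .DIG_START = DIG_FE_CNTL__DIG_START__SHIFT
 */
static const struct dce_stream_encoder_shift se_shift = {
	SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
};

static const struct dce_stream_encoder_mask se_mask = {
	SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};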
212struct dce_stream_encoder_shift {
213 uint8_t AFMT_GENERIC_INDEX;
214 uint8_t AFMT_GENERIC0_UPDATE;
215 uint8_t AFMT_GENERIC2_UPDATE;
216 uint8_t AFMT_GENERIC_HB0;
217 uint8_t AFMT_GENERIC_HB1;
218 uint8_t AFMT_GENERIC_HB2;
219 uint8_t AFMT_GENERIC_HB3;
220 uint8_t AFMT_GENERIC_LOCK_STATUS;
221 uint8_t AFMT_GENERIC_CONFLICT;
222 uint8_t AFMT_GENERIC_CONFLICT_CLR;
223 uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
224 uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
225 uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
226 uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
227 uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
228 uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
229 uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
230 uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
231 uint8_t AFMT_GENERIC0_FRAME_UPDATE;
232 uint8_t AFMT_GENERIC1_FRAME_UPDATE;
233 uint8_t AFMT_GENERIC2_FRAME_UPDATE;
234 uint8_t AFMT_GENERIC3_FRAME_UPDATE;
235 uint8_t AFMT_GENERIC4_FRAME_UPDATE;
236 uint8_t AFMT_GENERIC5_FRAME_UPDATE;
237 uint8_t AFMT_GENERIC6_FRAME_UPDATE;
238 uint8_t AFMT_GENERIC7_FRAME_UPDATE;
239 uint8_t HDMI_GENERIC0_CONT;
240 uint8_t HDMI_GENERIC0_SEND;
241 uint8_t HDMI_GENERIC0_LINE;
242 uint8_t HDMI_GENERIC1_CONT;
243 uint8_t HDMI_GENERIC1_SEND;
244 uint8_t HDMI_GENERIC1_LINE;
245 uint8_t DP_PIXEL_ENCODING;
246 uint8_t DP_COMPONENT_DEPTH;
247 uint8_t DP_DYN_RANGE;
248 uint8_t DP_YCBCR_RANGE;
249 uint8_t HDMI_PACKET_GEN_VERSION;
250 uint8_t HDMI_KEEPOUT_MODE;
251 uint8_t HDMI_DEEP_COLOR_ENABLE;
252 uint8_t HDMI_CLOCK_CHANNEL_RATE;
253 uint8_t HDMI_DEEP_COLOR_DEPTH;
254 uint8_t HDMI_GC_CONT;
255 uint8_t HDMI_GC_SEND;
256 uint8_t HDMI_NULL_SEND;
257 uint8_t HDMI_DATA_SCRAMBLE_EN;
258 uint8_t HDMI_AUDIO_INFO_SEND;
259 uint8_t AFMT_AUDIO_INFO_UPDATE;
260 uint8_t HDMI_AUDIO_INFO_LINE;
261 uint8_t HDMI_GC_AVMUTE;
262 uint8_t DP_MSE_RATE_X;
263 uint8_t DP_MSE_RATE_Y;
264 uint8_t DP_MSE_RATE_UPDATE_PENDING;
265 uint8_t AFMT_AVI_INFO_VERSION;
266 uint8_t HDMI_AVI_INFO_SEND;
267 uint8_t HDMI_AVI_INFO_CONT;
268 uint8_t HDMI_AVI_INFO_LINE;
269 uint8_t DP_SEC_GSP0_ENABLE;
270 uint8_t DP_SEC_STREAM_ENABLE;
271 uint8_t DP_SEC_GSP1_ENABLE;
272 uint8_t DP_SEC_GSP2_ENABLE;
273 uint8_t DP_SEC_GSP3_ENABLE;
274 uint8_t DP_SEC_GSP4_ENABLE;
275 uint8_t DP_SEC_GSP5_ENABLE;
276 uint8_t DP_SEC_GSP6_ENABLE;
277 uint8_t DP_SEC_GSP7_ENABLE;
278 uint8_t DP_SEC_AVI_ENABLE;
279 uint8_t DP_SEC_MPG_ENABLE;
280 uint8_t DP_VID_STREAM_DIS_DEFER;
281 uint8_t DP_VID_STREAM_ENABLE;
282 uint8_t DP_VID_STREAM_STATUS;
283 uint8_t DP_STEER_FIFO_RESET;
284 uint8_t DP_VID_M_N_GEN_EN;
285 uint8_t DP_VID_N;
286 uint8_t DP_VID_M;
287 uint8_t DIG_START;
288 uint8_t AFMT_AUDIO_SRC_SELECT;
289 uint8_t AFMT_AUDIO_CHANNEL_ENABLE;
290 uint8_t HDMI_AUDIO_PACKETS_PER_LINE;
291 uint8_t HDMI_AUDIO_DELAY_EN;
292 uint8_t AFMT_60958_CS_UPDATE;
293 uint8_t AFMT_AUDIO_LAYOUT_OVRD;
294 uint8_t AFMT_60958_OSF_OVRD;
295 uint8_t HDMI_ACR_AUTO_SEND;
296 uint8_t HDMI_ACR_SOURCE;
297 uint8_t HDMI_ACR_AUDIO_PRIORITY;
298 uint8_t HDMI_ACR_CTS_32;
299 uint8_t HDMI_ACR_N_32;
300 uint8_t HDMI_ACR_CTS_44;
301 uint8_t HDMI_ACR_N_44;
302 uint8_t HDMI_ACR_CTS_48;
303 uint8_t HDMI_ACR_N_48;
304 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L;
305 uint8_t AFMT_60958_CS_CLOCK_ACCURACY;
306 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R;
307 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2;
308 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3;
309 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4;
310 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5;
311 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6;
312 uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7;
313 uint8_t DP_SEC_AUD_N;
314 uint8_t DP_SEC_TIMESTAMP_MODE;
315 uint8_t DP_SEC_ASP_ENABLE;
316 uint8_t DP_SEC_ATP_ENABLE;
317 uint8_t DP_SEC_AIP_ENABLE;
318 uint8_t DP_SEC_ACM_ENABLE;
319 uint8_t AFMT_AUDIO_SAMPLE_SEND;
320 uint8_t AFMT_AUDIO_CLOCK_EN;
321 uint8_t TMDS_PIXEL_ENCODING;
322 uint8_t TMDS_COLOR_FORMAT;
323 uint8_t DP_DB_DISABLE;
324 uint8_t DP_MSA_MISC0;
325 uint8_t DP_MSA_HTOTAL;
326 uint8_t DP_MSA_VTOTAL;
327 uint8_t DP_MSA_HSTART;
328 uint8_t DP_MSA_VSTART;
329 uint8_t DP_MSA_HSYNCWIDTH;
330 uint8_t DP_MSA_HSYNCPOLARITY;
331 uint8_t DP_MSA_VSYNCWIDTH;
332 uint8_t DP_MSA_VSYNCPOLARITY;
333 uint8_t DP_MSA_HWIDTH;
334 uint8_t DP_MSA_VHEIGHT;
335 uint8_t HDMI_DB_DISABLE;
336 uint8_t DP_VID_N_MUL;
337 uint8_t DP_VID_M_DOUBLE_VALUE_EN;
338};
339
340struct dce_stream_encoder_mask {
341 uint32_t AFMT_GENERIC_INDEX;
342 uint32_t AFMT_GENERIC0_UPDATE;
343 uint32_t AFMT_GENERIC2_UPDATE;
344 uint32_t AFMT_GENERIC_HB0;
345 uint32_t AFMT_GENERIC_HB1;
346 uint32_t AFMT_GENERIC_HB2;
347 uint32_t AFMT_GENERIC_HB3;
348 uint32_t AFMT_GENERIC_LOCK_STATUS;
349 uint32_t AFMT_GENERIC_CONFLICT;
350 uint32_t AFMT_GENERIC_CONFLICT_CLR;
351 uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
352 uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
353 uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
354 uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
355 uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
356 uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
357 uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
358 uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
359 uint32_t AFMT_GENERIC0_FRAME_UPDATE;
360 uint32_t AFMT_GENERIC1_FRAME_UPDATE;
361 uint32_t AFMT_GENERIC2_FRAME_UPDATE;
362 uint32_t AFMT_GENERIC3_FRAME_UPDATE;
363 uint32_t AFMT_GENERIC4_FRAME_UPDATE;
364 uint32_t AFMT_GENERIC5_FRAME_UPDATE;
365 uint32_t AFMT_GENERIC6_FRAME_UPDATE;
366 uint32_t AFMT_GENERIC7_FRAME_UPDATE;
367 uint32_t HDMI_GENERIC0_CONT;
368 uint32_t HDMI_GENERIC0_SEND;
369 uint32_t HDMI_GENERIC0_LINE;
370 uint32_t HDMI_GENERIC1_CONT;
371 uint32_t HDMI_GENERIC1_SEND;
372 uint32_t HDMI_GENERIC1_LINE;
373 uint32_t DP_PIXEL_ENCODING;
374 uint32_t DP_COMPONENT_DEPTH;
375 uint32_t DP_DYN_RANGE;
376 uint32_t DP_YCBCR_RANGE;
377 uint32_t HDMI_PACKET_GEN_VERSION;
378 uint32_t HDMI_KEEPOUT_MODE;
379 uint32_t HDMI_DEEP_COLOR_ENABLE;
380 uint32_t HDMI_CLOCK_CHANNEL_RATE;
381 uint32_t HDMI_DEEP_COLOR_DEPTH;
382 uint32_t HDMI_GC_CONT;
383 uint32_t HDMI_GC_SEND;
384 uint32_t HDMI_NULL_SEND;
385 uint32_t HDMI_DATA_SCRAMBLE_EN;
386 uint32_t HDMI_AUDIO_INFO_SEND;
387 uint32_t AFMT_AUDIO_INFO_UPDATE;
388 uint32_t HDMI_AUDIO_INFO_LINE;
389 uint32_t HDMI_GC_AVMUTE;
390 uint32_t DP_MSE_RATE_X;
391 uint32_t DP_MSE_RATE_Y;
392 uint32_t DP_MSE_RATE_UPDATE_PENDING;
393 uint32_t AFMT_AVI_INFO_VERSION;
394 uint32_t HDMI_AVI_INFO_SEND;
395 uint32_t HDMI_AVI_INFO_CONT;
396 uint32_t HDMI_AVI_INFO_LINE;
397 uint32_t DP_SEC_GSP0_ENABLE;
398 uint32_t DP_SEC_STREAM_ENABLE;
399 uint32_t DP_SEC_GSP1_ENABLE;
400 uint32_t DP_SEC_GSP2_ENABLE;
401 uint32_t DP_SEC_GSP3_ENABLE;
402 uint32_t DP_SEC_GSP4_ENABLE;
403 uint32_t DP_SEC_GSP5_ENABLE;
404 uint32_t DP_SEC_GSP6_ENABLE;
405 uint32_t DP_SEC_GSP7_ENABLE;
406 uint32_t DP_SEC_AVI_ENABLE;
407 uint32_t DP_SEC_MPG_ENABLE;
408 uint32_t DP_VID_STREAM_DIS_DEFER;
409 uint32_t DP_VID_STREAM_ENABLE;
410 uint32_t DP_VID_STREAM_STATUS;
411 uint32_t DP_STEER_FIFO_RESET;
412 uint32_t DP_VID_M_N_GEN_EN;
413 uint32_t DP_VID_N;
414 uint32_t DP_VID_M;
415 uint32_t DIG_START;
416 uint32_t AFMT_AUDIO_SRC_SELECT;
417 uint32_t AFMT_AUDIO_CHANNEL_ENABLE;
418 uint32_t HDMI_AUDIO_PACKETS_PER_LINE;
419 uint32_t HDMI_AUDIO_DELAY_EN;
420 uint32_t AFMT_60958_CS_UPDATE;
421 uint32_t AFMT_AUDIO_LAYOUT_OVRD;
422 uint32_t AFMT_60958_OSF_OVRD;
423 uint32_t HDMI_ACR_AUTO_SEND;
424 uint32_t HDMI_ACR_SOURCE;
425 uint32_t HDMI_ACR_AUDIO_PRIORITY;
426 uint32_t HDMI_ACR_CTS_32;
427 uint32_t HDMI_ACR_N_32;
428 uint32_t HDMI_ACR_CTS_44;
429 uint32_t HDMI_ACR_N_44;
430 uint32_t HDMI_ACR_CTS_48;
431 uint32_t HDMI_ACR_N_48;
432 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L;
433 uint32_t AFMT_60958_CS_CLOCK_ACCURACY;
434 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R;
435 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2;
436 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3;
437 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4;
438 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5;
439 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6;
440 uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7;
441 uint32_t DP_SEC_AUD_N;
442 uint32_t DP_SEC_TIMESTAMP_MODE;
443 uint32_t DP_SEC_ASP_ENABLE;
444 uint32_t DP_SEC_ATP_ENABLE;
445 uint32_t DP_SEC_AIP_ENABLE;
446 uint32_t DP_SEC_ACM_ENABLE;
447 uint32_t AFMT_AUDIO_SAMPLE_SEND;
448 uint32_t AFMT_AUDIO_CLOCK_EN;
449 uint32_t TMDS_PIXEL_ENCODING;
450 uint32_t TMDS_COLOR_FORMAT;
451 uint32_t DP_DB_DISABLE;
452 uint32_t DP_MSA_MISC0;
453 uint32_t DP_MSA_HTOTAL;
454 uint32_t DP_MSA_VTOTAL;
455 uint32_t DP_MSA_HSTART;
456 uint32_t DP_MSA_VSTART;
457 uint32_t DP_MSA_HSYNCWIDTH;
458 uint32_t DP_MSA_HSYNCPOLARITY;
459 uint32_t DP_MSA_VSYNCWIDTH;
460 uint32_t DP_MSA_VSYNCPOLARITY;
461 uint32_t DP_MSA_HWIDTH;
462 uint32_t DP_MSA_VHEIGHT;
463 uint32_t HDMI_DB_DISABLE;
464 uint32_t DP_VID_N_MUL;
465 uint32_t DP_VID_M_DOUBLE_VALUE_EN;
466};
467
468struct dce110_stream_enc_registers {
469 uint32_t AFMT_CNTL;
470 uint32_t AFMT_AVI_INFO0;
471 uint32_t AFMT_AVI_INFO1;
472 uint32_t AFMT_AVI_INFO2;
473 uint32_t AFMT_AVI_INFO3;
474 uint32_t AFMT_GENERIC_0;
475 uint32_t AFMT_GENERIC_1;
476 uint32_t AFMT_GENERIC_2;
477 uint32_t AFMT_GENERIC_3;
478 uint32_t AFMT_GENERIC_4;
479 uint32_t AFMT_GENERIC_5;
480 uint32_t AFMT_GENERIC_6;
481 uint32_t AFMT_GENERIC_7;
482 uint32_t AFMT_GENERIC_HDR;
483 uint32_t AFMT_INFOFRAME_CONTROL0;
484 uint32_t AFMT_VBI_PACKET_CONTROL;
485 uint32_t AFMT_VBI_PACKET_CONTROL1;
486 uint32_t AFMT_AUDIO_PACKET_CONTROL;
487 uint32_t AFMT_AUDIO_PACKET_CONTROL2;
488 uint32_t AFMT_AUDIO_SRC_CONTROL;
489 uint32_t AFMT_60958_0;
490 uint32_t AFMT_60958_1;
491 uint32_t AFMT_60958_2;
492 uint32_t DIG_FE_CNTL;
493 uint32_t DP_MSE_RATE_CNTL;
494 uint32_t DP_MSE_RATE_UPDATE;
495 uint32_t DP_PIXEL_FORMAT;
496 uint32_t DP_SEC_CNTL;
497 uint32_t DP_STEER_FIFO;
498 uint32_t DP_VID_M;
499 uint32_t DP_VID_N;
500 uint32_t DP_VID_STREAM_CNTL;
501 uint32_t DP_VID_TIMING;
502 uint32_t DP_SEC_AUD_N;
503 uint32_t DP_SEC_TIMESTAMP;
504 uint32_t HDMI_CONTROL;
505 uint32_t HDMI_GC;
506 uint32_t HDMI_GENERIC_PACKET_CONTROL0;
507 uint32_t HDMI_GENERIC_PACKET_CONTROL1;
508 uint32_t HDMI_GENERIC_PACKET_CONTROL2;
509 uint32_t HDMI_GENERIC_PACKET_CONTROL3;
510 uint32_t HDMI_INFOFRAME_CONTROL0;
511 uint32_t HDMI_INFOFRAME_CONTROL1;
512 uint32_t HDMI_VBI_PACKET_CONTROL;
513 uint32_t HDMI_AUDIO_PACKET_CONTROL;
514 uint32_t HDMI_ACR_PACKET_CONTROL;
515 uint32_t HDMI_ACR_32_0;
516 uint32_t HDMI_ACR_32_1;
517 uint32_t HDMI_ACR_44_0;
518 uint32_t HDMI_ACR_44_1;
519 uint32_t HDMI_ACR_48_0;
520 uint32_t HDMI_ACR_48_1;
521 uint32_t TMDS_CNTL;
522};
523
524struct dce110_stream_encoder {
525 struct stream_encoder base;
526 const struct dce110_stream_enc_registers *regs;
527 const struct dce_stream_encoder_shift *se_shift;
528 const struct dce_stream_encoder_mask *se_mask;
529};
530
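A brief usage sketch (the helper name below is hypothetical): DCE110STRENC_FROM_STRENC() wraps container_of() to recover the DCE-specific encoder from the generic stream_encoder pointer that the core passes around.

static const struct dce110_stream_enc_registers *
example_get_regs(struct stream_encoder *enc)
{
	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);

	/* the wrapper carries the per-instance registers and shift/mask tables */
	return enc110->regs;
}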
531bool dce110_stream_encoder_construct(
532 struct dce110_stream_encoder *enc110,
533 struct dc_context *ctx,
534 struct dc_bios *bp,
535 enum engine_id eng_id,
536 const struct dce110_stream_enc_registers *regs,
537 const struct dce_stream_encoder_shift *se_shift,
538 const struct dce_stream_encoder_mask *se_mask);
539
540
541void dce110_se_audio_mute_control(
542 struct stream_encoder *enc, bool mute);
543
544void dce110_se_dp_audio_setup(
545 struct stream_encoder *enc,
546 unsigned int az_inst,
547 struct audio_info *info);
548
549void dce110_se_dp_audio_enable(
550 struct stream_encoder *enc);
551
552void dce110_se_dp_audio_disable(
553 struct stream_encoder *enc);
554
555void dce110_se_hdmi_audio_setup(
556 struct stream_encoder *enc,
557 unsigned int az_inst,
558 struct audio_info *info,
559 struct audio_crtc_info *audio_crtc_info);
560
561void dce110_se_hdmi_audio_disable(
562 struct stream_encoder *enc);
563
564#endif /* __DC_STREAM_ENCODER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
new file mode 100644
index 000000000000..f47b6617f662
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -0,0 +1,1002 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dce_transform.h"
27#include "reg_helper.h"
28#include "opp.h"
29#include "basics/conversion.h"
30
31#define REG(reg) \
32 (xfm_dce->regs->reg)
33
34#undef FN
35#define FN(reg_name, field_name) \
36 xfm_dce->xfm_shift->field_name, xfm_dce->xfm_mask->field_name
37
38#define CTX \
39 xfm_dce->base.ctx
40
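The REG()/FN()/CTX macros only resolve names against the per-instance tables; the actual access goes through the generic helpers in reg_helper.h. A minimal sketch of the read-modify-write a field update boils down to (the helper below is illustrative, not the real implementation):

static inline uint32_t example_set_field(uint32_t reg_value, uint8_t shift,
					 uint32_t mask, uint32_t field_value)
{
	/* clear the field, then insert the new value at its shift position */
	return (reg_value & ~mask) | ((field_value << shift) & mask);
}

/*
 * REG_UPDATE_2(SCL_MODE, SCL_MODE, 1, SCL_PSCL_EN, 1) then amounts to:
 * read xfm_dce->regs->SCL_MODE, apply example_set_field() once per
 * (shift, mask) pair taken from xfm_dce->xfm_shift / xfm_dce->xfm_mask,
 * and write the result back.
 */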
41#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
42#define GAMUT_MATRIX_SIZE 12
43#define SCL_PHASES 16
44
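/* Note: dal_fixed31_32_u2d19() converts to u2.19 fixed point, in which 1.0 is
 * represented as 1 << 19, so IDENTITY_RATIO() is true exactly for a 1:1
 * scaling ratio. */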
45enum dcp_out_trunc_round_mode {
46 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
47 DCP_OUT_TRUNC_ROUND_MODE_ROUND
48};
49
50enum dcp_out_trunc_round_depth {
51 DCP_OUT_TRUNC_ROUND_DEPTH_14BIT,
52 DCP_OUT_TRUNC_ROUND_DEPTH_13BIT,
53 DCP_OUT_TRUNC_ROUND_DEPTH_12BIT,
54 DCP_OUT_TRUNC_ROUND_DEPTH_11BIT,
55 DCP_OUT_TRUNC_ROUND_DEPTH_10BIT,
56 DCP_OUT_TRUNC_ROUND_DEPTH_9BIT,
57 DCP_OUT_TRUNC_ROUND_DEPTH_8BIT
58};
59
60/* defines the various methods of bit reduction available for use */
61enum dcp_bit_depth_reduction_mode {
62 DCP_BIT_DEPTH_REDUCTION_MODE_DITHER,
63 DCP_BIT_DEPTH_REDUCTION_MODE_ROUND,
64 DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE,
65 DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED,
66 DCP_BIT_DEPTH_REDUCTION_MODE_INVALID
67};
68
69enum dcp_spatial_dither_mode {
70 DCP_SPATIAL_DITHER_MODE_AAAA,
71 DCP_SPATIAL_DITHER_MODE_A_AA_A,
72 DCP_SPATIAL_DITHER_MODE_AABBAABB,
73 DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC,
74 DCP_SPATIAL_DITHER_MODE_INVALID
75};
76
77enum dcp_spatial_dither_depth {
78 DCP_SPATIAL_DITHER_DEPTH_30BPP,
79 DCP_SPATIAL_DITHER_DEPTH_24BPP
80};
81
82static bool setup_scaling_configuration(
83 struct dce_transform *xfm_dce,
84 const struct scaler_data *data)
85{
86 struct dc_context *ctx = xfm_dce->base.ctx;
87
88 if (data->taps.h_taps + data->taps.v_taps <= 2) {
89 /* Set bypass */
90 REG_UPDATE_2(SCL_MODE, SCL_MODE, 0, SCL_PSCL_EN, 0);
91 return false;
92 }
93
94 REG_SET_2(SCL_TAP_CONTROL, 0,
95 SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
96 SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
97
98 if (data->format <= PIXEL_FORMAT_GRPH_END)
99 REG_UPDATE_2(SCL_MODE, SCL_MODE, 1, SCL_PSCL_EN, 1);
100 else
101 REG_UPDATE_2(SCL_MODE, SCL_MODE, 2, SCL_PSCL_EN, 1);
102
103 /* 1 - Replace out of bound pixels with edge */
104 REG_SET(SCL_CONTROL, 0, SCL_BOUNDARY_MODE, 1);
105
106 return true;
107}
108
109static void program_overscan(
110 struct dce_transform *xfm_dce,
111 const struct scaler_data *data)
112{
113 int overscan_right = data->h_active
114 - data->recout.x - data->recout.width;
115 int overscan_bottom = data->v_active
116 - data->recout.y - data->recout.height;
117
118 if (overscan_right < 0) {
119 BREAK_TO_DEBUGGER();
120 overscan_right = 0;
121 }
122 if (overscan_bottom < 0) {
123 BREAK_TO_DEBUGGER();
124 overscan_bottom = 0;
125 }
126
127 REG_SET_2(EXT_OVERSCAN_LEFT_RIGHT, 0,
128 EXT_OVERSCAN_LEFT, data->recout.x,
129 EXT_OVERSCAN_RIGHT, overscan_right);
130 REG_SET_2(EXT_OVERSCAN_TOP_BOTTOM, 0,
131 EXT_OVERSCAN_TOP, data->recout.y,
132 EXT_OVERSCAN_BOTTOM, overscan_bottom);
133}
134
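/* Worked example (hypothetical numbers): with h_active = 1920 and a recout of
 * x = 100, width = 1700, the right overscan is 1920 - 100 - 1700 = 120 pixels,
 * while the left overscan is simply recout.x = 100. */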
135static void program_multi_taps_filter(
136 struct dce_transform *xfm_dce,
137 int taps,
138 const uint16_t *coeffs,
139 enum ram_filter_type filter_type)
140{
141 int phase, pair;
142 int array_idx = 0;
143 int taps_pairs = (taps + 1) / 2;
144 int phases_to_program = SCL_PHASES / 2 + 1;
145
146 uint32_t power_ctl = 0;
147
148 if (!coeffs)
149 return;
150
151 /* We need to disable power gating on coeff memory to do programming */
152 if (REG(DCFE_MEM_PWR_CTRL)) {
153 power_ctl = REG_READ(DCFE_MEM_PWR_CTRL);
154 REG_SET(DCFE_MEM_PWR_CTRL, power_ctl, SCL_COEFF_MEM_PWR_DIS, 1);
155
156 REG_WAIT(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, 0, 1, 10);
157 }
158 for (phase = 0; phase < phases_to_program; phase++) {
159 /* We always program N/2 + 1 phases out of N total; the remaining N/2 - 1
160  * phases are mirrors. Phase 0 is unique, and phase N/2 is unique when N is even. */
161 for (pair = 0; pair < taps_pairs; pair++) {
162 uint16_t odd_coeff = 0;
163 uint16_t even_coeff = coeffs[array_idx];
164
165 REG_SET_3(SCL_COEF_RAM_SELECT, 0,
166 SCL_C_RAM_FILTER_TYPE, filter_type,
167 SCL_C_RAM_PHASE, phase,
168 SCL_C_RAM_TAP_PAIR_IDX, pair);
169
170 if (taps % 2 && pair == taps_pairs - 1)
171 array_idx++;
172 else {
173 odd_coeff = coeffs[array_idx + 1];
174 array_idx += 2;
175 }
176
177 REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
178 SCL_C_RAM_EVEN_TAP_COEF_EN, 1,
179 SCL_C_RAM_EVEN_TAP_COEF, even_coeff,
180 SCL_C_RAM_ODD_TAP_COEF_EN, 1,
181 SCL_C_RAM_ODD_TAP_COEF, odd_coeff);
182 }
183 }
184
185 /* We need to restore power gating on coeff memory to its initial state */
186 if (REG(DCFE_MEM_PWR_CTRL))
187 REG_WRITE(DCFE_MEM_PWR_CTRL, power_ctl);
188}
189
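/* Worked example (hypothetical 3-tap case): with SCL_PHASES = 16 we program
 * phases_to_program = 16/2 + 1 = 9 phases and taps_pairs = (3 + 1)/2 = 2 tap
 * pairs per phase; because the tap count is odd, the last pair of each phase
 * programs only an even coefficient and leaves the odd coefficient at 0. */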
190static void program_viewport(
191 struct dce_transform *xfm_dce,
192 const struct rect *view_port)
193{
194 REG_SET_2(VIEWPORT_START, 0,
195 VIEWPORT_X_START, view_port->x,
196 VIEWPORT_Y_START, view_port->y);
197
198 REG_SET_2(VIEWPORT_SIZE, 0,
199 VIEWPORT_HEIGHT, view_port->height,
200 VIEWPORT_WIDTH, view_port->width);
201
202 /* TODO: add stereo support */
203}
204
205static void calculate_inits(
206 struct dce_transform *xfm_dce,
207 const struct scaler_data *data,
208 struct scl_ratios_inits *inits)
209{
210 struct fixed31_32 h_init;
211 struct fixed31_32 v_init;
212
213 inits->h_int_scale_ratio =
214 dal_fixed31_32_u2d19(data->ratios.horz) << 5;
215 inits->v_int_scale_ratio =
216 dal_fixed31_32_u2d19(data->ratios.vert) << 5;
217
218 h_init =
219 dal_fixed31_32_div_int(
220 dal_fixed31_32_add(
221 data->ratios.horz,
222 dal_fixed31_32_from_int(data->taps.h_taps + 1)),
223 2);
224 inits->h_init.integer = dal_fixed31_32_floor(h_init);
225 inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
226
227 v_init =
228 dal_fixed31_32_div_int(
229 dal_fixed31_32_add(
230 data->ratios.vert,
231 dal_fixed31_32_from_int(data->taps.v_taps + 1)),
232 2);
233 inits->v_init.integer = dal_fixed31_32_floor(v_init);
234 inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
235}
236
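/* Worked example (hypothetical 2:1 horizontal downscale with 4 taps):
 * h_init = (2.0 + (4 + 1)) / 2 = 3.5, so h_init.integer = 3 and the 0.5
 * fraction is programmed as u0.19 shifted left by 5 (0.5 * 2^19 = 0x40000,
 * then << 5); the ratio itself is programmed as u2.19 << 5,
 * i.e. 2.0 -> 0x100000 << 5. */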
237static void program_scl_ratios_inits(
238 struct dce_transform *xfm_dce,
239 struct scl_ratios_inits *inits)
240{
241
242 REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
243 SCL_H_SCALE_RATIO, inits->h_int_scale_ratio);
244
245 REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
246 SCL_V_SCALE_RATIO, inits->v_int_scale_ratio);
247
248 REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
249 SCL_H_INIT_INT, inits->h_init.integer,
250 SCL_H_INIT_FRAC, inits->h_init.fraction);
251
252 REG_SET_2(SCL_VERT_FILTER_INIT, 0,
253 SCL_V_INIT_INT, inits->v_init.integer,
254 SCL_V_INIT_FRAC, inits->v_init.fraction);
255
256 REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
257}
258
259static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
260{
261 if (taps == 4)
262 return get_filter_4tap_16p(ratio);
263 else if (taps == 3)
264 return get_filter_3tap_16p(ratio);
265 else if (taps == 2)
266 return filter_2tap_16p;
267 else if (taps == 1)
268 return NULL;
269 else {
270 /* should never happen, bug */
271 BREAK_TO_DEBUGGER();
272 return NULL;
273 }
274}
275
276static void dce_transform_set_scaler(
277 struct transform *xfm,
278 const struct scaler_data *data)
279{
280 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
281 bool is_scaling_required;
282 bool filter_updated = false;
283 const uint16_t *coeffs_v, *coeffs_h;
284
285 /* Always use all three pieces of line buffer memory */
286 REG_SET_2(LB_MEMORY_CTRL, 0,
287 LB_MEMORY_CONFIG, 0,
288 LB_MEMORY_SIZE, xfm_dce->lb_memory_size);
289
290 /* 1. Program overscan */
291 program_overscan(xfm_dce, data);
292
293 /* 2. Program taps and configuration */
294 is_scaling_required = setup_scaling_configuration(xfm_dce, data);
295
296 if (is_scaling_required) {
297 /* 3. Calculate and program ratio, filter initialization */
298 struct scl_ratios_inits inits = { 0 };
299
300 calculate_inits(xfm_dce, data, &inits);
301
302 program_scl_ratios_inits(xfm_dce, &inits);
303
304 coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert);
305 coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz);
306
307 if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
308 /* 4. Program vertical filters */
309 if (xfm_dce->filter_v == NULL)
310 REG_SET(SCL_VERT_FILTER_CONTROL, 0,
311 SCL_V_2TAP_HARDCODE_COEF_EN, 0);
312 program_multi_taps_filter(
313 xfm_dce,
314 data->taps.v_taps,
315 coeffs_v,
316 FILTER_TYPE_RGB_Y_VERTICAL);
317 program_multi_taps_filter(
318 xfm_dce,
319 data->taps.v_taps,
320 coeffs_v,
321 FILTER_TYPE_ALPHA_VERTICAL);
322
323 /* 5. Program horizontal filters */
324 if (xfm_dce->filter_h == NULL)
325 REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
326 SCL_H_2TAP_HARDCODE_COEF_EN, 0);
327 program_multi_taps_filter(
328 xfm_dce,
329 data->taps.h_taps,
330 coeffs_h,
331 FILTER_TYPE_RGB_Y_HORIZONTAL);
332 program_multi_taps_filter(
333 xfm_dce,
334 data->taps.h_taps,
335 coeffs_h,
336 FILTER_TYPE_ALPHA_HORIZONTAL);
337
338 xfm_dce->filter_v = coeffs_v;
339 xfm_dce->filter_h = coeffs_h;
340 filter_updated = true;
341 }
342 }
343
344 /* 6. Program the viewport */
345 program_viewport(xfm_dce, &data->viewport);
346
347 /* 7. Set bit to flip to new coefficient memory */
348 if (filter_updated)
349 REG_UPDATE(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, 1);
350
351 REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en);
352}
353
354/*****************************************************************************
355 * set_clamp
356 *
357 * @param depth : bit depth to set the clamp to (should match denorm)
358 *
359 * @brief
360 * Programs clamp according to panel bit depth.
361 *
362 *******************************************************************************/
363static void set_clamp(
364 struct dce_transform *xfm_dce,
365 enum dc_color_depth depth)
366{
367 int clamp_max = 0;
368
369 /* At the clamp block the data will be MSB aligned, so we set the max
370 * clamp accordingly.
371 * For example, the max value for 6 bits MSB aligned (14 bit bus) would
372 * be "11 1111 0000 0000" in binary, so 0x3F00.
373 */
374 switch (depth) {
375 case COLOR_DEPTH_666:
376 /* 6bit MSB aligned on 14 bit bus '11 1111 0000 0000' */
377 clamp_max = 0x3F00;
378 break;
379 case COLOR_DEPTH_888:
380 /* 8bit MSB aligned on 14 bit bus '11 1111 1100 0000' */
381 clamp_max = 0x3FC0;
382 break;
383 case COLOR_DEPTH_101010:
384 /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
385 clamp_max = 0x3FFC;
386 break;
387 case COLOR_DEPTH_121212:
388 /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
389 clamp_max = 0x3FFF;
390 break;
391 default:
392 clamp_max = 0x3FC0;
393 BREAK_TO_DEBUGGER(); /* Invalid clamp bit depth */
394 }
395 REG_SET_2(OUT_CLAMP_CONTROL_B_CB, 0,
396 OUT_CLAMP_MIN_B_CB, 0,
397 OUT_CLAMP_MAX_B_CB, clamp_max);
398
399 REG_SET_2(OUT_CLAMP_CONTROL_G_Y, 0,
400 OUT_CLAMP_MIN_G_Y, 0,
401 OUT_CLAMP_MAX_G_Y, clamp_max);
402
403 REG_SET_2(OUT_CLAMP_CONTROL_R_CR, 0,
404 OUT_CLAMP_MIN_R_CR, 0,
405 OUT_CLAMP_MAX_R_CR, clamp_max);
406}
407
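/* Sanity check on the table above: an 8-bit maximum code of 0xFF left-aligned
 * on the 14-bit bus is 0xFF << (14 - 8) = 0x3FC0, matching the COLOR_DEPTH_888
 * case. */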
408/*******************************************************************************
409 * set_round
410 *
411 * @brief
412 * Programs Round/Truncate
413 *
414 * @param [in] mode :round or truncate
415 * @param [in] depth :bit depth to round/truncate to
416 OUT_ROUND_TRUNC_MODE 3:0 0xA Output data round or truncate mode
417 POSSIBLE VALUES:
418 00 - truncate to u0.12
419 01 - truncate to u0.11
420 02 - truncate to u0.10
421 03 - truncate to u0.9
422 04 - truncate to u0.8
423 05 - reserved
424 06 - truncate to u0.14
425 07 - truncate to u0.13
430 08 - round to u0.12
431 09 - round to u0.11
432 10 - round to u0.10
433 11 - round to u0.9
434 12 - round to u0.8
435 13 - reserved
436 14 - round to u0.14
437 15 - round to u0.13
438
439 ******************************************************************************/
440static void set_round(
441 struct dce_transform *xfm_dce,
442 enum dcp_out_trunc_round_mode mode,
443 enum dcp_out_trunc_round_depth depth)
444{
445 int depth_bits = 0;
446 int mode_bit = 0;
447
448 /* set up bit depth */
449 switch (depth) {
450 case DCP_OUT_TRUNC_ROUND_DEPTH_14BIT:
451 depth_bits = 6;
452 break;
453 case DCP_OUT_TRUNC_ROUND_DEPTH_13BIT:
454 depth_bits = 7;
455 break;
456 case DCP_OUT_TRUNC_ROUND_DEPTH_12BIT:
457 depth_bits = 0;
458 break;
459 case DCP_OUT_TRUNC_ROUND_DEPTH_11BIT:
460 depth_bits = 1;
461 break;
462 case DCP_OUT_TRUNC_ROUND_DEPTH_10BIT:
463 depth_bits = 2;
464 break;
465 case DCP_OUT_TRUNC_ROUND_DEPTH_9BIT:
466 depth_bits = 3;
467 break;
468 case DCP_OUT_TRUNC_ROUND_DEPTH_8BIT:
469 depth_bits = 4;
470 break;
471 default:
472 depth_bits = 4;
473 BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_depth */
474 }
475
476 /* set up round or truncate */
477 switch (mode) {
478 case DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE:
479 mode_bit = 0;
480 break;
481 case DCP_OUT_TRUNC_ROUND_MODE_ROUND:
482 mode_bit = 1;
483 break;
484 default:
485 BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_mode */
486 }
487
488 depth_bits |= mode_bit << 3;
489
490 REG_SET(OUT_ROUND_CONTROL, 0, OUT_ROUND_TRUNC_MODE, depth_bits);
491}
492
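/* Worked example: rounding to 10 bits selects depth_bits = 2 and mode_bit = 1,
 * so OUT_ROUND_TRUNC_MODE is programmed with (1 << 3) | 2 = 0xA, the
 * "round to u0.10" entry (and the register default) in the table above. */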
493/*****************************************************************************
494 * set_dither
495 *
496 * @brief
497 * Programs Dither
498 *
499 * @param [in] dither_enable : enable dither
500 * @param [in] dither_mode : dither mode to set
501 * @param [in] dither_depth : bit depth to dither to
502 * @param [in] frame_random_enable : enable frame random
503 * @param [in] rgb_random_enable : enable rgb random
504 * @param [in] highpass_random_enable : enable highpass random
505 *
506 ******************************************************************************/
507
508static void set_dither(
509 struct dce_transform *xfm_dce,
510 bool dither_enable,
511 enum dcp_spatial_dither_mode dither_mode,
512 enum dcp_spatial_dither_depth dither_depth,
513 bool frame_random_enable,
514 bool rgb_random_enable,
515 bool highpass_random_enable)
516{
517 int dither_depth_bits = 0;
518 int dither_mode_bits = 0;
519
520 switch (dither_mode) {
521 case DCP_SPATIAL_DITHER_MODE_AAAA:
522 dither_mode_bits = 0;
523 break;
524 case DCP_SPATIAL_DITHER_MODE_A_AA_A:
525 dither_mode_bits = 1;
526 break;
527 case DCP_SPATIAL_DITHER_MODE_AABBAABB:
528 dither_mode_bits = 2;
529 break;
530 case DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC:
531 dither_mode_bits = 3;
532 break;
533 default:
534 /* Invalid dcp_spatial_dither_mode */
535 BREAK_TO_DEBUGGER();
536 }
537
538 switch (dither_depth) {
539 case DCP_SPATIAL_DITHER_DEPTH_30BPP:
540 dither_depth_bits = 0;
541 break;
542 case DCP_SPATIAL_DITHER_DEPTH_24BPP:
543 dither_depth_bits = 1;
544 break;
545 default:
546 /* Invalid dcp_spatial_dither_depth */
547 BREAK_TO_DEBUGGER();
548 }
549
550 /* write the register */
551 REG_SET_6(DCP_SPATIAL_DITHER_CNTL, 0,
552 DCP_SPATIAL_DITHER_EN, dither_enable,
553 DCP_SPATIAL_DITHER_MODE, dither_mode_bits,
554 DCP_SPATIAL_DITHER_DEPTH, dither_depth_bits,
555 DCP_FRAME_RANDOM_ENABLE, frame_random_enable,
556 DCP_RGB_RANDOM_ENABLE, rgb_random_enable,
557 DCP_HIGHPASS_RANDOM_ENABLE, highpass_random_enable);
558}
559
560/*****************************************************************************
561 * dce_transform_bit_depth_reduction_program
562 *
563 * @brief
564 * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate,
565 * Dither) for dce
566 *
567 * @param depth : bit depth to set the clamp to (should match denorm)
568 *
569 ******************************************************************************/
570static void program_bit_depth_reduction(
571 struct dce_transform *xfm_dce,
572 enum dc_color_depth depth,
573 const struct bit_depth_reduction_params *bit_depth_params)
574{
575 enum dcp_bit_depth_reduction_mode depth_reduction_mode;
576 enum dcp_spatial_dither_mode spatial_dither_mode;
577 bool frame_random_enable;
578 bool rgb_random_enable;
579 bool highpass_random_enable;
580
581 ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
582
583 if (bit_depth_params->flags.SPATIAL_DITHER_ENABLED) {
584 depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DITHER;
585 frame_random_enable = true;
586 rgb_random_enable = true;
587 highpass_random_enable = true;
588
589 } else {
590 depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED;
591 frame_random_enable = false;
592 rgb_random_enable = false;
593 highpass_random_enable = false;
594 }
595
596 spatial_dither_mode = DCP_SPATIAL_DITHER_MODE_A_AA_A;
597
598 set_clamp(xfm_dce, depth);
599
600 switch (depth_reduction_mode) {
601 case DCP_BIT_DEPTH_REDUCTION_MODE_DITHER:
602 /* Spatial Dither: Set round/truncate to bypass (12bit),
603 * enable Dither (30bpp) */
604 set_round(xfm_dce,
605 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
606 DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
607
608 set_dither(xfm_dce, true, spatial_dither_mode,
609 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
610 rgb_random_enable, highpass_random_enable);
611 break;
612 case DCP_BIT_DEPTH_REDUCTION_MODE_ROUND:
613 /* Round: Enable round (10bit), disable Dither */
614 set_round(xfm_dce,
615 DCP_OUT_TRUNC_ROUND_MODE_ROUND,
616 DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
617
618 set_dither(xfm_dce, false, spatial_dither_mode,
619 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
620 rgb_random_enable, highpass_random_enable);
621 break;
622 case DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE: /* Truncate */
623 /* Truncate: Enable truncate (10bit), disable Dither */
624 set_round(xfm_dce,
625 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
626 DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
627
628 set_dither(xfm_dce, false, spatial_dither_mode,
629 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
630 rgb_random_enable, highpass_random_enable);
631 break;
632
633 case DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED: /* Disabled */
634 /* Disabled: Set round/truncate to bypass (12bit),
635 * disable Dither */
636 set_round(xfm_dce,
637 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
638 DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
639
640 set_dither(xfm_dce, false, spatial_dither_mode,
641 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
642 rgb_random_enable, highpass_random_enable);
643 break;
644 default:
645 /* Invalid DCP Depth reduction mode */
646 BREAK_TO_DEBUGGER();
647 break;
648 }
649}
650
651static int dce_transform_get_max_num_of_supported_lines(
652 struct dce_transform *xfm_dce,
653 enum lb_pixel_depth depth,
654 int pixel_width)
655{
656 int pixels_per_entries = 0;
657 int max_pixels_supports = 0;
658
659 ASSERT(pixel_width);
660
661 /* Find number of pixels that can fit into a single LB entry and
662 * take floor of the value since we cannot store a single pixel
663 * across multiple entries. */
664 switch (depth) {
665 case LB_PIXEL_DEPTH_18BPP:
666 pixels_per_entries = xfm_dce->lb_bits_per_entry / 18;
667 break;
668
669 case LB_PIXEL_DEPTH_24BPP:
670 pixels_per_entries = xfm_dce->lb_bits_per_entry / 24;
671 break;
672
673 case LB_PIXEL_DEPTH_30BPP:
674 pixels_per_entries = xfm_dce->lb_bits_per_entry / 30;
675 break;
676
677 case LB_PIXEL_DEPTH_36BPP:
678 pixels_per_entries = xfm_dce->lb_bits_per_entry / 36;
679 break;
680
681 default:
682 dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
683 "%s: Invalid LB pixel depth",
684 __func__);
685 BREAK_TO_DEBUGGER();
686 break;
687 }
688
689 ASSERT(pixels_per_entries);
690
691 max_pixels_supports =
692 pixels_per_entries *
693 xfm_dce->lb_memory_size;
694
695 return (max_pixels_supports / pixel_width);
696}
697
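/* Worked example (hypothetical 1920-wide viewport at 24bpp, using the defaults
 * set in dce_transform_construct()): 144 bits per entry holds 144 / 24 = 6
 * pixels, 1712 entries hold 6 * 1712 = 10272 pixels, and 10272 / 1920 = 5 full
 * lines are supported. */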
698static void set_denormalization(
699 struct dce_transform *xfm_dce,
700 enum dc_color_depth depth)
701{
702 int denorm_mode = 0;
703
704 switch (depth) {
705 case COLOR_DEPTH_666:
706 /* 63/64 for 6 bit output color depth */
707 denorm_mode = 1;
708 break;
709 case COLOR_DEPTH_888:
710 /* Unity for 8 bit output color depth
711 * because prescale is disabled by default */
712 denorm_mode = 0;
713 break;
714 case COLOR_DEPTH_101010:
715 /* 1023/1024 for 10 bit output color depth */
716 denorm_mode = 3;
717 break;
718 case COLOR_DEPTH_121212:
719 /* 4095/4096 for 12 bit output color depth */
720 denorm_mode = 5;
721 break;
722 case COLOR_DEPTH_141414:
723 case COLOR_DEPTH_161616:
724 default:
725 /* not a valid use case */
726 break;
727 }
728
729 REG_SET(DENORM_CONTROL, 0, DENORM_MODE, denorm_mode);
730}
731
732static void dce_transform_set_pixel_storage_depth(
733 struct transform *xfm,
734 enum lb_pixel_depth depth,
735 const struct bit_depth_reduction_params *bit_depth_params)
736{
737 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
738 int pixel_depth, expan_mode;
739 enum dc_color_depth color_depth;
740
741 switch (depth) {
742 case LB_PIXEL_DEPTH_18BPP:
743 color_depth = COLOR_DEPTH_666;
744 pixel_depth = 2;
745 expan_mode = 1;
746 break;
747 case LB_PIXEL_DEPTH_24BPP:
748 color_depth = COLOR_DEPTH_888;
749 pixel_depth = 1;
750 expan_mode = 1;
751 break;
752 case LB_PIXEL_DEPTH_30BPP:
753 color_depth = COLOR_DEPTH_101010;
754 pixel_depth = 0;
755 expan_mode = 1;
756 break;
757 case LB_PIXEL_DEPTH_36BPP:
758 color_depth = COLOR_DEPTH_121212;
759 pixel_depth = 3;
760 expan_mode = 0;
761 break;
762 default:
763 color_depth = COLOR_DEPTH_101010;
764 pixel_depth = 0;
765 expan_mode = 1;
766 BREAK_TO_DEBUGGER();
767 break;
768 }
769
770 set_denormalization(xfm_dce, color_depth);
771 program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params);
772
773 REG_UPDATE_2(LB_DATA_FORMAT,
774 PIXEL_DEPTH, pixel_depth,
775 PIXEL_EXPAN_MODE, expan_mode);
776
777 if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
778 /* We should not use unsupported capabilities
779  * unless required by a workaround */
780 dm_logger_write(xfm->ctx->logger, LOG_WARNING,
781 "%s: Capability not supported",
782 __func__);
783 }
784}
785
786static void program_gamut_remap(
787 struct dce_transform *xfm_dce,
788 const uint16_t *reg_val)
789{
790 if (reg_val) {
791 REG_SET_2(GAMUT_REMAP_C11_C12, 0,
792 GAMUT_REMAP_C11, reg_val[0],
793 GAMUT_REMAP_C12, reg_val[1]);
794 REG_SET_2(GAMUT_REMAP_C13_C14, 0,
795 GAMUT_REMAP_C13, reg_val[2],
796 GAMUT_REMAP_C14, reg_val[3]);
797 REG_SET_2(GAMUT_REMAP_C21_C22, 0,
798 GAMUT_REMAP_C21, reg_val[4],
799 GAMUT_REMAP_C22, reg_val[5]);
800 REG_SET_2(GAMUT_REMAP_C23_C24, 0,
801 GAMUT_REMAP_C23, reg_val[6],
802 GAMUT_REMAP_C24, reg_val[7]);
803 REG_SET_2(GAMUT_REMAP_C31_C32, 0,
804 GAMUT_REMAP_C31, reg_val[8],
805 GAMUT_REMAP_C32, reg_val[9]);
806 REG_SET_2(GAMUT_REMAP_C33_C34, 0,
807 GAMUT_REMAP_C33, reg_val[10],
808 GAMUT_REMAP_C34, reg_val[11]);
809
810 REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 1);
811 } else
812 REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 0);
813
814}
815
816/**
817 *****************************************************************************
818 * Function: dce_transform_set_gamut_remap
819 *
820 * @param [in] const struct xfm_grph_csc_adjustment *adjust
821 *
822 * @return
823 * void
824 *
825 * @note Calculates and applies a color temperature adjustment in the RGB color space
826 *
827 * @see
828 *
829 *****************************************************************************
830 */
831static void dce_transform_set_gamut_remap(
832 struct transform *xfm,
833 const struct xfm_grph_csc_adjustment *adjust)
834{
835 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
836
837 if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
838 /* Bypass if type is bypass or hw */
839 program_gamut_remap(xfm_dce, NULL);
840 else {
841 struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE];
842 uint16_t arr_reg_val[GAMUT_MATRIX_SIZE];
843
844 arr_matrix[0] = adjust->temperature_matrix[0];
845 arr_matrix[1] = adjust->temperature_matrix[1];
846 arr_matrix[2] = adjust->temperature_matrix[2];
847 arr_matrix[3] = dal_fixed31_32_zero;
848
849 arr_matrix[4] = adjust->temperature_matrix[3];
850 arr_matrix[5] = adjust->temperature_matrix[4];
851 arr_matrix[6] = adjust->temperature_matrix[5];
852 arr_matrix[7] = dal_fixed31_32_zero;
853
854 arr_matrix[8] = adjust->temperature_matrix[6];
855 arr_matrix[9] = adjust->temperature_matrix[7];
856 arr_matrix[10] = adjust->temperature_matrix[8];
857 arr_matrix[11] = dal_fixed31_32_zero;
858
859 convert_float_matrix(
860 arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE);
861
862 program_gamut_remap(xfm_dce, arr_reg_val);
863 }
864}
865
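/* Layout note: the 3x3 temperature matrix is expanded row by row into the 3x4
 * register matrix (GAMUT_MATRIX_SIZE = 12) with the fourth column of every row
 * fixed to 0, then convert_float_matrix() packs it into the 16-bit
 * GAMUT_REMAP_Cxx register values. */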
866static uint32_t decide_taps(struct fixed31_32 ratio, uint32_t in_taps, bool chroma)
867{
868 uint32_t taps;
869
870 if (IDENTITY_RATIO(ratio)) {
871 return 1;
872 } else if (in_taps != 0) {
873 taps = in_taps;
874 } else {
875 taps = 4;
876 }
877
878 if (chroma) {
879 taps /= 2;
880 if (taps < 2)
881 taps = 2;
882 }
883
884 return taps;
885}
886
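/* Worked examples: a 1:1 ratio always yields 1 tap; an explicit in_taps value
 * is used as-is for luma and halved for chroma; otherwise the default is
 * 4 taps (2 for chroma), with chroma always clamped to a minimum of 2 taps. */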
887
888bool dce_transform_get_optimal_number_of_taps(
889 struct transform *xfm,
890 struct scaler_data *scl_data,
891 const struct scaling_taps *in_taps)
892{
893 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
894 int pixel_width = scl_data->viewport.width;
895 int max_num_of_lines;
896
897 if (xfm_dce->prescaler_on &&
898 (scl_data->viewport.width > scl_data->recout.width))
899 pixel_width = scl_data->recout.width;
900
901 max_num_of_lines = dce_transform_get_max_num_of_supported_lines(
902 xfm_dce,
903 scl_data->lb_params.depth,
904 pixel_width);
905
906 /* Fail if in_taps are impossible */
907 if (in_taps->v_taps >= max_num_of_lines)
908 return false;
909
910 /*
911 * Set taps according to this policy (in this order)
912 * - Use 1 for no scaling
913 * - Use input taps
914 * - Use 4 and reduce as required by line buffer size
915 * - Decide chroma taps if chroma is scaled
916 *
917 * Ignore input chroma taps. Decide based on non-chroma
918 */
919 scl_data->taps.h_taps = decide_taps(scl_data->ratios.horz, in_taps->h_taps, false);
920 scl_data->taps.v_taps = decide_taps(scl_data->ratios.vert, in_taps->v_taps, false);
921 scl_data->taps.h_taps_c = decide_taps(scl_data->ratios.horz_c, in_taps->h_taps, true);
922 scl_data->taps.v_taps_c = decide_taps(scl_data->ratios.vert_c, in_taps->v_taps, true);
923
924 if (!IDENTITY_RATIO(scl_data->ratios.vert)) {
925 /* reduce v_taps if needed but ensure we have at least two */
926 if (in_taps->v_taps == 0
927 && max_num_of_lines <= scl_data->taps.v_taps
928 && scl_data->taps.v_taps > 1) {
929 scl_data->taps.v_taps = max_num_of_lines - 1;
930 }
931
932 if (scl_data->taps.v_taps <= 1)
933 return false;
934 }
935
936 if (!IDENTITY_RATIO(scl_data->ratios.vert_c)) {
937 /* reduce chroma v_taps if needed but ensure we have at least two */
938 if (max_num_of_lines <= scl_data->taps.v_taps_c && scl_data->taps.v_taps_c > 1) {
939 scl_data->taps.v_taps_c = max_num_of_lines - 1;
940 }
941
942 if (scl_data->taps.v_taps_c <= 1)
943 return false;
944 }
945
946 /* we've got valid taps */
947 return true;
948}
949
950static void dce_transform_reset(struct transform *xfm)
951{
952 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
953
954 xfm_dce->filter_h = NULL;
955 xfm_dce->filter_v = NULL;
956}
957
958
959static const struct transform_funcs dce_transform_funcs = {
960 .transform_reset = dce_transform_reset,
961 .transform_set_scaler =
962 dce_transform_set_scaler,
963 .transform_set_gamut_remap =
964 dce_transform_set_gamut_remap,
965 .transform_set_pixel_storage_depth =
966 dce_transform_set_pixel_storage_depth,
967 .transform_get_optimal_number_of_taps =
968 dce_transform_get_optimal_number_of_taps
969};
970
971/*****************************************/
972/* Constructor, Destructor */
973/*****************************************/
974
975bool dce_transform_construct(
976 struct dce_transform *xfm_dce,
977 struct dc_context *ctx,
978 uint32_t inst,
979 const struct dce_transform_registers *regs,
980 const struct dce_transform_shift *xfm_shift,
981 const struct dce_transform_mask *xfm_mask)
982{
983 xfm_dce->base.ctx = ctx;
984
985 xfm_dce->base.inst = inst;
986 xfm_dce->base.funcs = &dce_transform_funcs;
987
988 xfm_dce->regs = regs;
989 xfm_dce->xfm_shift = xfm_shift;
990 xfm_dce->xfm_mask = xfm_mask;
991
992 xfm_dce->prescaler_on = true;
993 xfm_dce->lb_pixel_depth_supported =
994 LB_PIXEL_DEPTH_18BPP |
995 LB_PIXEL_DEPTH_24BPP |
996 LB_PIXEL_DEPTH_30BPP;
997
998 xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
999 xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
1000
1001 return true;
1002}
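Roughly how a caller might construct and use the transform through the generic function table; a hedged sketch only, where ctx, xfm_regs, xfm_shift, xfm_mask and scl_data stand in for objects a real DCE resource file would provide:

/* illustrative fragment; storage normally comes from the resource pool */
static struct dce_transform xfm_dce;

if (dce_transform_construct(&xfm_dce, ctx, 0,
			    &xfm_regs[0], &xfm_shift, &xfm_mask)) {
	struct transform *xfm = &xfm_dce.base;

	/* callers only ever see the generic transform interface */
	xfm->funcs->transform_reset(xfm);
	xfm->funcs->transform_set_scaler(xfm, &scl_data);
}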
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
new file mode 100644
index 000000000000..897645e2889f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -0,0 +1,313 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef _DCE_DCE_TRANSFORM_H_
27#define _DCE_DCE_TRANSFORM_H_
28
29
30#include "transform.h"
31
32#define TO_DCE_TRANSFORM(transform)\
33 container_of(transform, struct dce_transform, base)
34
35#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
36#define LB_BITS_PER_ENTRY 144
37
38#define XFM_COMMON_REG_LIST_DCE_BASE(id) \
39 SRI(LB_DATA_FORMAT, LB, id), \
40 SRI(GAMUT_REMAP_CONTROL, DCP, id), \
41 SRI(GAMUT_REMAP_C11_C12, DCP, id), \
42 SRI(GAMUT_REMAP_C13_C14, DCP, id), \
43 SRI(GAMUT_REMAP_C21_C22, DCP, id), \
44 SRI(GAMUT_REMAP_C23_C24, DCP, id), \
45 SRI(GAMUT_REMAP_C31_C32, DCP, id), \
46 SRI(GAMUT_REMAP_C33_C34, DCP, id), \
47 SRI(DENORM_CONTROL, DCP, id), \
48 SRI(DCP_SPATIAL_DITHER_CNTL, DCP, id), \
49 SRI(OUT_ROUND_CONTROL, DCP, id), \
50 SRI(OUT_CLAMP_CONTROL_R_CR, DCP, id), \
51 SRI(OUT_CLAMP_CONTROL_G_Y, DCP, id), \
52 SRI(OUT_CLAMP_CONTROL_B_CB, DCP, id), \
53 SRI(SCL_MODE, SCL, id), \
54 SRI(SCL_TAP_CONTROL, SCL, id), \
55 SRI(SCL_CONTROL, SCL, id), \
56 SRI(EXT_OVERSCAN_LEFT_RIGHT, SCL, id), \
57 SRI(EXT_OVERSCAN_TOP_BOTTOM, SCL, id), \
58 SRI(SCL_VERT_FILTER_CONTROL, SCL, id), \
59 SRI(SCL_HORZ_FILTER_CONTROL, SCL, id), \
60 SRI(SCL_COEF_RAM_SELECT, SCL, id), \
61 SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
62 SRI(VIEWPORT_START, SCL, id), \
63 SRI(VIEWPORT_SIZE, SCL, id), \
64 SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
65 SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
66 SRI(SCL_HORZ_FILTER_INIT, SCL, id), \
67 SRI(SCL_VERT_FILTER_INIT, SCL, id), \
68 SRI(SCL_AUTOMATIC_MODE_CONTROL, SCL, id), \
69 SRI(LB_MEMORY_CTRL, LB, id), \
70 SRI(SCL_UPDATE, SCL, id)
71
72#define XFM_COMMON_REG_LIST_DCE100(id) \
73 XFM_COMMON_REG_LIST_DCE_BASE(id), \
74 SRI(DCFE_MEM_PWR_CTRL, CRTC, id), \
75 SRI(DCFE_MEM_PWR_STATUS, CRTC, id)
76
77#define XFM_COMMON_REG_LIST_DCE110(id) \
78 XFM_COMMON_REG_LIST_DCE_BASE(id), \
79 SRI(DCFE_MEM_PWR_CTRL, DCFE, id), \
80 SRI(DCFE_MEM_PWR_STATUS, DCFE, id)
81
82#define XFM_SF(reg_name, field_name, post_fix)\
83 .field_name = reg_name ## __ ## field_name ## post_fix
84
85#define XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
86 XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
87 XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
88 XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
89 XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
90 XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
91 XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
92 XFM_SF(OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
93 XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
94 XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
95 XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
96 XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
97 XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
98 XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
99 XFM_SF(DENORM_CONTROL, DENORM_MODE, mask_sh), \
100 XFM_SF(LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
101 XFM_SF(LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
102 XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
103 XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
104 XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
105 XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
106 XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
107 XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
108 XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
109 XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
110 XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
111 XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
112 XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
113 XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
114 XFM_SF(GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
115 XFM_SF(SCL_MODE, SCL_MODE, mask_sh), \
116 XFM_SF(SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
117 XFM_SF(SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
118 XFM_SF(SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
119 XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
120 XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
121 XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
122 XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
123 XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
124 XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
125 XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
126 XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
127 XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
128 XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
129 XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
130 XFM_SF(VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
131 XFM_SF(VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
132 XFM_SF(VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
133 XFM_SF(VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
134 XFM_SF(SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
135 XFM_SF(SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
136 XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
137 XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
138 XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
139 XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
140 XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
141 XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
142 XFM_SF(SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
143 XFM_SF(SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
144 XFM_SF(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
145 XFM_SF(LB_DATA_FORMAT, ALPHA_EN, mask_sh)
146
147#define XFM_COMMON_MASK_SH_LIST_DCE110(mask_sh) \
148 XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
149 XFM_SF(DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
150 XFM_SF(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
151 XFM_SF(SCL_MODE, SCL_PSCL_EN, mask_sh)
152
153#define XFM_REG_FIELD_LIST(type) \
154 type OUT_CLAMP_MIN_B_CB; \
155 type OUT_CLAMP_MAX_B_CB; \
156 type OUT_CLAMP_MIN_G_Y; \
157 type OUT_CLAMP_MAX_G_Y; \
158 type OUT_CLAMP_MIN_R_CR; \
159 type OUT_CLAMP_MAX_R_CR; \
160 type OUT_ROUND_TRUNC_MODE; \
161 type DCP_SPATIAL_DITHER_EN; \
162 type DCP_SPATIAL_DITHER_MODE; \
163 type DCP_SPATIAL_DITHER_DEPTH; \
164 type DCP_FRAME_RANDOM_ENABLE; \
165 type DCP_RGB_RANDOM_ENABLE; \
166 type DCP_HIGHPASS_RANDOM_ENABLE; \
167 type DENORM_MODE; \
168 type PIXEL_DEPTH; \
169 type PIXEL_EXPAN_MODE; \
170 type GAMUT_REMAP_C11; \
171 type GAMUT_REMAP_C12; \
172 type GAMUT_REMAP_C13; \
173 type GAMUT_REMAP_C14; \
174 type GAMUT_REMAP_C21; \
175 type GAMUT_REMAP_C22; \
176 type GAMUT_REMAP_C23; \
177 type GAMUT_REMAP_C24; \
178 type GAMUT_REMAP_C31; \
179 type GAMUT_REMAP_C32; \
180 type GAMUT_REMAP_C33; \
181 type GAMUT_REMAP_C34; \
182 type GRPH_GAMUT_REMAP_MODE; \
183 type SCL_MODE; \
184 type SCL_PSCL_EN; \
185 type SCL_H_NUM_OF_TAPS; \
186 type SCL_V_NUM_OF_TAPS; \
187 type SCL_BOUNDARY_MODE; \
188 type EXT_OVERSCAN_LEFT; \
189 type EXT_OVERSCAN_RIGHT; \
190 type EXT_OVERSCAN_TOP; \
191 type EXT_OVERSCAN_BOTTOM; \
192 type SCL_COEFF_MEM_PWR_DIS; \
193 type SCL_COEFF_MEM_PWR_STATE; \
194 type SCL_C_RAM_FILTER_TYPE; \
195 type SCL_C_RAM_PHASE; \
196 type SCL_C_RAM_TAP_PAIR_IDX; \
197 type SCL_C_RAM_EVEN_TAP_COEF_EN; \
198 type SCL_C_RAM_EVEN_TAP_COEF; \
199 type SCL_C_RAM_ODD_TAP_COEF_EN; \
200 type SCL_C_RAM_ODD_TAP_COEF; \
201 type VIEWPORT_X_START; \
202 type VIEWPORT_Y_START; \
203 type VIEWPORT_HEIGHT; \
204 type VIEWPORT_WIDTH; \
205 type SCL_H_SCALE_RATIO; \
206 type SCL_V_SCALE_RATIO; \
207 type SCL_H_INIT_INT; \
208 type SCL_H_INIT_FRAC; \
209 type SCL_V_INIT_INT; \
210 type SCL_V_INIT_FRAC; \
211 type LB_MEMORY_CONFIG; \
212 type LB_MEMORY_SIZE; \
213 type SCL_V_2TAP_HARDCODE_COEF_EN; \
214 type SCL_H_2TAP_HARDCODE_COEF_EN; \
215 type SCL_COEF_UPDATE_COMPLETE; \
216 type ALPHA_EN
217
218struct dce_transform_shift {
219 XFM_REG_FIELD_LIST(uint8_t);
220};
221
222struct dce_transform_mask {
223 XFM_REG_FIELD_LIST(uint32_t);
224};
225
226struct dce_transform_registers {
227 uint32_t LB_DATA_FORMAT;
228 uint32_t GAMUT_REMAP_CONTROL;
229 uint32_t GAMUT_REMAP_C11_C12;
230 uint32_t GAMUT_REMAP_C13_C14;
231 uint32_t GAMUT_REMAP_C21_C22;
232 uint32_t GAMUT_REMAP_C23_C24;
233 uint32_t GAMUT_REMAP_C31_C32;
234 uint32_t GAMUT_REMAP_C33_C34;
235 uint32_t DENORM_CONTROL;
236 uint32_t DCP_SPATIAL_DITHER_CNTL;
237 uint32_t OUT_ROUND_CONTROL;
238 uint32_t OUT_CLAMP_CONTROL_R_CR;
239 uint32_t OUT_CLAMP_CONTROL_G_Y;
240 uint32_t OUT_CLAMP_CONTROL_B_CB;
241 uint32_t SCL_MODE;
242 uint32_t SCL_TAP_CONTROL;
243 uint32_t SCL_CONTROL;
244 uint32_t EXT_OVERSCAN_LEFT_RIGHT;
245 uint32_t EXT_OVERSCAN_TOP_BOTTOM;
246 uint32_t SCL_VERT_FILTER_CONTROL;
247 uint32_t SCL_HORZ_FILTER_CONTROL;
248 uint32_t DCFE_MEM_PWR_CTRL;
249 uint32_t DCFE_MEM_PWR_STATUS;
250 uint32_t SCL_COEF_RAM_SELECT;
251 uint32_t SCL_COEF_RAM_TAP_DATA;
252 uint32_t VIEWPORT_START;
253 uint32_t VIEWPORT_SIZE;
254 uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
255 uint32_t SCL_VERT_FILTER_SCALE_RATIO;
256 uint32_t SCL_HORZ_FILTER_INIT;
257 uint32_t SCL_VERT_FILTER_INIT;
258 uint32_t SCL_AUTOMATIC_MODE_CONTROL;
259 uint32_t LB_MEMORY_CTRL;
260 uint32_t SCL_UPDATE;
261};
262
263struct init_int_and_frac {
264 uint32_t integer;
265 uint32_t fraction;
266};
267
268struct scl_ratios_inits {
269 uint32_t h_int_scale_ratio;
270 uint32_t v_int_scale_ratio;
271 struct init_int_and_frac h_init;
272 struct init_int_and_frac v_init;
273};
274
275enum ram_filter_type {
276 FILTER_TYPE_RGB_Y_VERTICAL = 0, /* 0 - RGB/Y Vertical filter */
277 FILTER_TYPE_CBCR_VERTICAL = 1, /* 1 - CbCr Vertical filter */
278	FILTER_TYPE_RGB_Y_HORIZONTAL = 2,	/* 2 - RGB/Y Horizontal filter */
279 FILTER_TYPE_CBCR_HORIZONTAL = 3, /* 3 - CbCr Horizontal filter */
280 FILTER_TYPE_ALPHA_VERTICAL = 4, /* 4 - Alpha Vertical filter. */
281 FILTER_TYPE_ALPHA_HORIZONTAL = 5, /* 5 - Alpha Horizontal filter. */
282};
283
284struct dce_transform {
285 struct transform base;
286 const struct dce_transform_registers *regs;
287 const struct dce_transform_shift *xfm_shift;
288 const struct dce_transform_mask *xfm_mask;
289
290 const uint16_t *filter_v;
291 const uint16_t *filter_h;
292 const uint16_t *filter_v_c;
293 const uint16_t *filter_h_c;
294 int lb_pixel_depth_supported;
295 int lb_memory_size;
296 int lb_bits_per_entry;
297 bool prescaler_on;
298};
299
300bool dce_transform_construct(struct dce_transform *xfm110,
301 struct dc_context *ctx,
302 uint32_t inst,
303 const struct dce_transform_registers *regs,
304 const struct dce_transform_shift *xfm_shift,
305 const struct dce_transform_mask *xfm_mask);
306
307bool dce_transform_get_optimal_number_of_taps(
308 struct transform *xfm,
309 struct scaler_data *scl_data,
310 const struct scaling_taps *in_taps);
311
312
313#endif /* _DCE_DCE_TRANSFORM_H_ */
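A brief aside on the header above: XFM_REG_FIELD_LIST(type) is instantiated twice, once with uint8_t to hold per-field bit shifts and once with uint32_t to hold per-field masks, and the XFM_SF()/XFM_COMMON_MASK_SH_LIST_* macros fill those structs from the generated __SHIFT/_MASK register definitions. The following is a minimal, self-contained sketch of how such parallel shift/mask tables drive a field update; the helper and the numeric values are invented for illustration, while the driver itself relies on its own register helpers (see the reg_helper.h include later in this patch).

#include <stdint.h>
#include <stdio.h>

/* Illustrative field list: one entry per register field, as in XFM_REG_FIELD_LIST. */
#define DEMO_REG_FIELD_LIST(type) \
	type SCL_H_NUM_OF_TAPS; \
	type SCL_V_NUM_OF_TAPS

struct demo_shift { DEMO_REG_FIELD_LIST(uint8_t); };
struct demo_mask  { DEMO_REG_FIELD_LIST(uint32_t); };

/* Made-up shift/mask values; the real ones come from the *_sh_mask.h headers. */
static const struct demo_shift sh = {
	.SCL_H_NUM_OF_TAPS = 0,
	.SCL_V_NUM_OF_TAPS = 8,
};
static const struct demo_mask mk = {
	.SCL_H_NUM_OF_TAPS = 0x0000000f,
	.SCL_V_NUM_OF_TAPS = 0x00000f00,
};

/* Insert a field value into a register word using its shift and mask. */
static uint32_t set_field(uint32_t reg, uint32_t value, uint8_t shift, uint32_t mask)
{
	return (reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, 4, sh.SCL_H_NUM_OF_TAPS, mk.SCL_H_NUM_OF_TAPS);
	reg = set_field(reg, 4, sh.SCL_V_NUM_OF_TAPS, mk.SCL_V_NUM_OF_TAPS);
	printf("0x%08x\n", reg); /* prints 0x00000404 */
	return 0;
}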
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
new file mode 100644
index 000000000000..ea40870624b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -0,0 +1,23 @@
1#
2# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of the HW CRTC block.
4
5DCE100 = dce100_resource.o dce100_hw_sequencer.o
6
7AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
8
9AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)
10
11
12###############################################################################
13# DCE 10x
14###############################################################################
15ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0
16TG_DCE100 = dce100_resource.o
17
18AMD_DAL_TG_DCE100 = $(addprefix \
19 $(AMDDALPATH)/dc/dce100/,$(TG_DCE100))
20
21AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
22endif
23
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
new file mode 100644
index 000000000000..e2fe024e1182
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26#include "dc.h"
27#include "core_dc.h"
28#include "core_types.h"
29#include "hw_sequencer.h"
30#include "dce100_hw_sequencer.h"
31#include "dce110/dce110_hw_sequencer.h"
32
33/* include DCE10 register header files */
34#include "dce/dce_10_0_d.h"
35#include "dce/dce_10_0_sh_mask.h"
36
37struct dce100_hw_seq_reg_offsets {
38 uint32_t blnd;
39 uint32_t crtc;
40};
41
42static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
43{
44 .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
45},
46{
47 .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
48},
49{
50 .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
51},
52{
53 .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
54},
55{
56 .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
57},
58{
59 .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
60}
61};
62
63#define HW_REG_CRTC(reg, id)\
64 (reg + reg_offsets[id].crtc)
65
66/*******************************************************************************
67 * Private definitions
68 ******************************************************************************/
69/***************************PIPE_CONTROL***********************************/
70
71static bool dce100_enable_display_power_gating(
72 struct core_dc *dc,
73 uint8_t controller_id,
74 struct dc_bios *dcb,
75 enum pipe_gating_control power_gating)
76{
77 enum bp_result bp_result = BP_RESULT_OK;
78 enum bp_pipe_control_action cntl;
79 struct dc_context *ctx = dc->ctx;
80
81 if (power_gating == PIPE_GATING_CONTROL_INIT)
82 cntl = ASIC_PIPE_INIT;
83 else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
84 cntl = ASIC_PIPE_ENABLE;
85 else
86 cntl = ASIC_PIPE_DISABLE;
87
88	if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)) {
89
90 bp_result = dcb->funcs->enable_disp_power_gating(
91 dcb, controller_id + 1, cntl);
92
93		/* Revert MASTER_UPDATE_MODE to 0 because bios sets it to 2
94		 * by default when the command table is called
95 */
96 dm_write_reg(ctx,
97 HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
98 0);
99 }
100
101 if (bp_result == BP_RESULT_OK)
102 return true;
103 else
104 return false;
105}
106
107static void set_display_mark_for_pipe_if_needed(struct core_dc *dc,
108 struct pipe_ctx *pipe_ctx,
109 struct validate_context *context)
110{
111	/* Do nothing until we have proper bandwidth calcs */
112}
113
114static void set_displaymarks(
115 const struct core_dc *dc, struct validate_context *context)
116{
117	/* Do nothing until we have proper bandwidth calcs */
118}
119
120static void set_bandwidth(struct core_dc *dc)
121{
122	/* Do nothing until we have proper bandwidth calcs */
123}
124
125
126/**************************************************************************/
127
128bool dce100_hw_sequencer_construct(struct core_dc *dc)
129{
130 dce110_hw_sequencer_construct(dc);
131
132	/* TODO: dce80 is an empty implementation at the moment */
133 dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
134 dc->hwss.set_displaymarks = set_displaymarks;
135 dc->hwss.increase_watermarks_for_pipe = set_display_mark_for_pipe_if_needed;
136 dc->hwss.set_bandwidth = set_bandwidth;
137
138 return true;
139}
140
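An aside on the file above: dce100_hw_sequencer_construct() first runs the DCE 11.0 constructor to populate the whole hw-sequencer function table, then overrides only the hooks that differ for DCE 10.x (power gating, watermarks, bandwidth). Below is a small, self-contained sketch of that construct-base-then-override shape; the demo_hwss type and hook names are invented for illustration and are not the driver's real hwss interface.

#include <stdbool.h>
#include <stdio.h>

/* Simplified ops table standing in for the hw-sequencer function pointers. */
struct demo_hwss {
	void (*set_bandwidth)(void);
	void (*set_displaymarks)(void);
};

static void base_set_bandwidth(void)    { puts("base set_bandwidth"); }
static void base_set_displaymarks(void) { puts("base set_displaymarks"); }
static void noop_set_bandwidth(void)    { /* no bandwidth calcs yet */ }

/* "Base" constructor fills in every hook. */
static void base_construct(struct demo_hwss *hwss)
{
	hwss->set_bandwidth = base_set_bandwidth;
	hwss->set_displaymarks = base_set_displaymarks;
}

/* Derived constructor reuses the base, then patches what differs. */
static bool derived_construct(struct demo_hwss *hwss)
{
	base_construct(hwss);
	hwss->set_bandwidth = noop_set_bandwidth;
	return true;
}

int main(void)
{
	struct demo_hwss hwss;

	derived_construct(&hwss);
	hwss.set_displaymarks(); /* inherited from the base constructor */
	hwss.set_bandwidth();    /* overridden no-op */
	return 0;
}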
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
new file mode 100644
index 000000000000..cf497ea605c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_HWSS_DCE100_H__
27#define __DC_HWSS_DCE100_H__
28
29#include "core_types.h"
30
31struct core_dc;
32
33bool dce100_hw_sequencer_construct(struct core_dc *dc);
34
35#endif /* __DC_HWSS_DCE100_H__ */
36
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
new file mode 100644
index 000000000000..16595dc875a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -0,0 +1,1085 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26
27#include "link_encoder.h"
28#include "stream_encoder.h"
29
30#include "resource.h"
31#include "include/irq_service_interface.h"
32#include "../virtual/virtual_stream_encoder.h"
33#include "dce110/dce110_resource.h"
34#include "dce110/dce110_timing_generator.h"
35#include "irq/dce110/irq_service_dce110.h"
36#include "dce/dce_link_encoder.h"
37#include "dce/dce_stream_encoder.h"
38#include "dce110/dce110_mem_input.h"
39#include "dce110/dce110_mem_input_v.h"
40#include "dce110/dce110_ipp.h"
41#include "dce/dce_transform.h"
42#include "dce110/dce110_opp.h"
43#include "dce/dce_clock_source.h"
44#include "dce/dce_audio.h"
45#include "dce/dce_hwseq.h"
46#include "dce100/dce100_hw_sequencer.h"
47
48#include "reg_helper.h"
49
50#include "dce/dce_10_0_d.h"
51#include "dce/dce_10_0_sh_mask.h"
52
53#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
54#include "gmc/gmc_8_2_d.h"
55#include "gmc/gmc_8_2_sh_mask.h"
56#endif
57
58#ifndef mmDP_DPHY_INTERNAL_CTRL
59 #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
60 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
61 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
62 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
63 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
64 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
65 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
66 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
67 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
68 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
69#endif
70
71#ifndef mmBIOS_SCRATCH_2
72 #define mmBIOS_SCRATCH_2 0x05CB
73 #define mmBIOS_SCRATCH_6 0x05CF
74#endif
75
76#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
77 #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
78 #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
79 #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
80 #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
81 #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
82 #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
83 #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
84 #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
85#endif
86
87#ifndef mmDP_DPHY_FAST_TRAINING
88 #define mmDP_DPHY_FAST_TRAINING 0x4ABC
89 #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
90 #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
91 #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
92 #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
93 #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
94 #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
95 #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
96#endif
97
98static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
99 {
100 .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
101 .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
102 },
103 {
104 .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
105 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
106 },
107 {
108 .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
109 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
110 },
111 {
112 .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
113 .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
114 },
115 {
116 .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
117 .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
118 },
119 {
120 .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
121 .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
122 }
123};
124
125static const struct dce110_mem_input_reg_offsets dce100_mi_reg_offsets[] = {
126 {
127 .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
128 .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
129 - mmDPG_WATERMARK_MASK_CONTROL),
130 .pipe = (mmPIPE0_DMIF_BUFFER_CONTROL
131 - mmPIPE0_DMIF_BUFFER_CONTROL),
132 },
133 {
134 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
135 .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
136 - mmDPG_WATERMARK_MASK_CONTROL),
137 .pipe = (mmPIPE1_DMIF_BUFFER_CONTROL
138 - mmPIPE0_DMIF_BUFFER_CONTROL),
139 },
140 {
141 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
142 .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
143 - mmDPG_WATERMARK_MASK_CONTROL),
144 .pipe = (mmPIPE2_DMIF_BUFFER_CONTROL
145 - mmPIPE0_DMIF_BUFFER_CONTROL),
146 },
147 {
148 .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
149 .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
150 - mmDPG_WATERMARK_MASK_CONTROL),
151 .pipe = (mmPIPE3_DMIF_BUFFER_CONTROL
152 - mmPIPE0_DMIF_BUFFER_CONTROL),
153 },
154 {
155 .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
156 .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
157 - mmDPG_WATERMARK_MASK_CONTROL),
158 .pipe = (mmPIPE4_DMIF_BUFFER_CONTROL
159 - mmPIPE0_DMIF_BUFFER_CONTROL),
160 },
161 {
162 .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
163 .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
164 - mmDPG_WATERMARK_MASK_CONTROL),
165 .pipe = (mmPIPE5_DMIF_BUFFER_CONTROL
166 - mmPIPE0_DMIF_BUFFER_CONTROL),
167 }
168};
169
170
171static const struct dce110_ipp_reg_offsets dce100_ipp_reg_offsets[] = {
172{
173 .dcp_offset = (mmDCP0_CUR_CONTROL - mmCUR_CONTROL),
174},
175{
176 .dcp_offset = (mmDCP1_CUR_CONTROL - mmCUR_CONTROL),
177},
178{
179 .dcp_offset = (mmDCP2_CUR_CONTROL - mmCUR_CONTROL),
180},
181{
182 .dcp_offset = (mmDCP3_CUR_CONTROL - mmCUR_CONTROL),
183},
184{
185 .dcp_offset = (mmDCP4_CUR_CONTROL - mmCUR_CONTROL),
186},
187{
188 .dcp_offset = (mmDCP5_CUR_CONTROL - mmCUR_CONTROL),
189}
190};
191
192
193
194/* set register offset */
195#define SR(reg_name)\
196 .reg_name = mm ## reg_name
197
198/* set register offset with instance */
199#define SRI(reg_name, block, id)\
200 .reg_name = mm ## block ## id ## _ ## reg_name
201
202
203#define transform_regs(id)\
204[id] = {\
205 XFM_COMMON_REG_LIST_DCE100(id)\
206}
207
208static const struct dce_transform_registers xfm_regs[] = {
209 transform_regs(0),
210 transform_regs(1),
211 transform_regs(2),
212 transform_regs(3),
213 transform_regs(4),
214 transform_regs(5)
215};
216
217static const struct dce_transform_shift xfm_shift = {
218 XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
219};
220
221static const struct dce_transform_mask xfm_mask = {
222 XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
223};
224
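An aside on the SR()/SRI() macros and the transform_regs(id) table above: SRI() token-pastes a block name, an instance id and a register name into one of the generated mm* offsets, so a single macro invocation per instance can populate a whole array of per-instance register structs. Below is a minimal sketch of the same token-pasting trick; the mm* values and names are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for generated register offsets (values invented for the demo). */
#define mmSCL0_SCL_MODE 0x1b40
#define mmSCL1_SCL_MODE 0x1d40

struct demo_regs {
	uint32_t SCL_MODE;
};

/* Same idea as SRI(): paste block, instance id and register name together. */
#define DEMO_SRI(reg_name, block, id) \
	.reg_name = mm ## block ## id ## _ ## reg_name

#define demo_inst_regs(id) \
[id] = { \
	DEMO_SRI(SCL_MODE, SCL, id) \
}

static const struct demo_regs demo_regs_table[] = {
	demo_inst_regs(0),
	demo_inst_regs(1),
};

int main(void)
{
	printf("inst0 SCL_MODE at 0x%x, inst1 at 0x%x\n",
	       demo_regs_table[0].SCL_MODE, demo_regs_table[1].SCL_MODE);
	return 0;
}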
225#define aux_regs(id)\
226[id] = {\
227 AUX_REG_LIST(id)\
228}
229
230static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
231 aux_regs(0),
232 aux_regs(1),
233 aux_regs(2),
234 aux_regs(3),
235 aux_regs(4),
236 aux_regs(5)
237};
238
239#define hpd_regs(id)\
240[id] = {\
241 HPD_REG_LIST(id)\
242}
243
244static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
245 hpd_regs(0),
246 hpd_regs(1),
247 hpd_regs(2),
248 hpd_regs(3),
249 hpd_regs(4),
250 hpd_regs(5)
251};
252
253#define link_regs(id)\
254[id] = {\
255 LE_DCE110_REG_LIST(id)\
256}
257
258static const struct dce110_link_enc_registers link_enc_regs[] = {
259 link_regs(0),
260 link_regs(1),
261 link_regs(2),
262 link_regs(3),
263 link_regs(4),
264 link_regs(5),
265 link_regs(6),
266};
267
268#define stream_enc_regs(id)\
269[id] = {\
270 SE_COMMON_REG_LIST_DCE_BASE(id),\
271 .AFMT_CNTL = 0,\
272}
273
274static const struct dce110_stream_enc_registers stream_enc_regs[] = {
275 stream_enc_regs(0),
276 stream_enc_regs(1),
277 stream_enc_regs(2),
278 stream_enc_regs(3),
279 stream_enc_regs(4),
280 stream_enc_regs(5),
281 stream_enc_regs(6)
282};
283
284static const struct dce_stream_encoder_shift se_shift = {
285 SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
286};
287
288static const struct dce_stream_encoder_mask se_mask = {
289 SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
290};
291
292#define audio_regs(id)\
293[id] = {\
294 AUD_COMMON_REG_LIST(id)\
295}
296
297static const struct dce_audio_registers audio_regs[] = {
298 audio_regs(0),
299 audio_regs(1),
300 audio_regs(2),
301 audio_regs(3),
302 audio_regs(4),
303 audio_regs(5),
304 audio_regs(6),
305};
306
307static const struct dce_audio_shift audio_shift = {
308 AUD_COMMON_MASK_SH_LIST(__SHIFT)
309};
310
311static const struct dce_aduio_mask audio_mask = {
312 AUD_COMMON_MASK_SH_LIST(_MASK)
313};
314
315#define clk_src_regs(id)\
316[id] = {\
317 CS_COMMON_REG_LIST_DCE_100_110(id),\
318}
319
320static const struct dce110_clk_src_regs clk_src_regs[] = {
321 clk_src_regs(0),
322 clk_src_regs(1),
323 clk_src_regs(2)
324};
325
326static const struct dce110_clk_src_shift cs_shift = {
327 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
328};
329
330static const struct dce110_clk_src_mask cs_mask = {
331 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
332};
333
334
335
336#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
337
338static const struct dce110_opp_reg_offsets dce100_opp_reg_offsets[] = {
339{
340 .fmt_offset = (mmFMT0_FMT_CONTROL - mmFMT_CONTROL),
341 .dcfe_offset = (mmCRTC0_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
342 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
343},
344{ .fmt_offset = (mmFMT1_FMT_CONTROL - mmFMT0_FMT_CONTROL),
345 .dcfe_offset = (mmCRTC1_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
346 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
347},
348{ .fmt_offset = (mmFMT2_FMT_CONTROL - mmFMT0_FMT_CONTROL),
349 .dcfe_offset = (mmCRTC2_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
350 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
351},
352{
353 .fmt_offset = (mmFMT3_FMT_CONTROL - mmFMT0_FMT_CONTROL),
354 .dcfe_offset = (mmCRTC3_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
355 .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
356},
357{ .fmt_offset = (mmFMT4_FMT_CONTROL - mmFMT0_FMT_CONTROL),
358 .dcfe_offset = (mmCRTC4_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
359 .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
360},
361{ .fmt_offset = (mmFMT5_FMT_CONTROL - mmFMT0_FMT_CONTROL),
362 .dcfe_offset = (mmCRTC5_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
363 .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
364}
365};
366
367static const struct bios_registers bios_regs = {
368 .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
369};
370
371static const struct resource_caps res_cap = {
372 .num_timing_generator = 6,
373 .num_audio = 6,
374 .num_stream_encoder = 6,
375 .num_pll = 3
376};
377
378#define CTX ctx
379#define REG(reg) mm ## reg
380
381#ifndef mmCC_DC_HDMI_STRAPS
382#define mmCC_DC_HDMI_STRAPS 0x1918
383#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
384#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
385#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
386#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
387#endif
388
389static void read_dce_straps(
390 struct dc_context *ctx,
391 struct resource_straps *straps)
392{
393 REG_GET_2(CC_DC_HDMI_STRAPS,
394 HDMI_DISABLE, &straps->hdmi_disable,
395 AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
396
397 REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
398}
399
400static struct audio *create_audio(
401 struct dc_context *ctx, unsigned int inst)
402{
403 return dce_audio_create(ctx, inst,
404 &audio_regs[inst], &audio_shift, &audio_mask);
405}
406
407static struct timing_generator *dce100_timing_generator_create(
408 struct dc_context *ctx,
409 uint32_t instance,
410 const struct dce110_timing_generator_offsets *offsets)
411{
412 struct dce110_timing_generator *tg110 =
413 dm_alloc(sizeof(struct dce110_timing_generator));
414
415 if (!tg110)
416 return NULL;
417
418 if (dce110_timing_generator_construct(tg110, ctx, instance,
419 offsets))
420 return &tg110->base;
421
422 BREAK_TO_DEBUGGER();
423 dm_free(tg110);
424 return NULL;
425}
426
427static struct stream_encoder *dce100_stream_encoder_create(
428 enum engine_id eng_id,
429 struct dc_context *ctx)
430{
431 struct dce110_stream_encoder *enc110 =
432 dm_alloc(sizeof(struct dce110_stream_encoder));
433
434 if (!enc110)
435 return NULL;
436
437 if (dce110_stream_encoder_construct(
438 enc110, ctx, ctx->dc_bios, eng_id,
439 &stream_enc_regs[eng_id], &se_shift, &se_mask))
440 return &enc110->base;
441
442 BREAK_TO_DEBUGGER();
443 dm_free(enc110);
444 return NULL;
445}
446
447#define SRII(reg_name, block, id)\
448 .reg_name[id] = mm ## block ## id ## _ ## reg_name
449
450static const struct dce_hwseq_registers hwseq_reg = {
451 HWSEQ_DCE10_REG_LIST()
452};
453
454static const struct dce_hwseq_shift hwseq_shift = {
455 HWSEQ_DCE10_MASK_SH_LIST(__SHIFT)
456};
457
458static const struct dce_hwseq_mask hwseq_mask = {
459 HWSEQ_DCE10_MASK_SH_LIST(_MASK)
460};
461
462static struct dce_hwseq *dce100_hwseq_create(
463 struct dc_context *ctx)
464{
465 struct dce_hwseq *hws = dm_alloc(sizeof(struct dce_hwseq));
466
467 if (hws) {
468 hws->ctx = ctx;
469 hws->regs = &hwseq_reg;
470 hws->shifts = &hwseq_shift;
471 hws->masks = &hwseq_mask;
472 }
473 return hws;
474}
475
476static const struct resource_create_funcs res_create_funcs = {
477 .read_dce_straps = read_dce_straps,
478 .create_audio = create_audio,
479 .create_stream_encoder = dce100_stream_encoder_create,
480 .create_hwseq = dce100_hwseq_create,
481};
482
483#define mi_inst_regs(id) { \
484 MI_REG_LIST(id), \
485 .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
486}
487static const struct dce_mem_input_registers mi_regs[] = {
488 mi_inst_regs(0),
489 mi_inst_regs(1),
490 mi_inst_regs(2),
491 mi_inst_regs(3),
492 mi_inst_regs(4),
493 mi_inst_regs(5),
494};
495
496static const struct dce_mem_input_shift mi_shifts = {
497 MI_DCE_MASK_SH_LIST(__SHIFT),
498 .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
499};
500
501static const struct dce_mem_input_mask mi_masks = {
502 MI_DCE_MASK_SH_LIST(_MASK),
503 .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
504};
505
506static struct mem_input *dce100_mem_input_create(
507 struct dc_context *ctx,
508 uint32_t inst,
509 const struct dce110_mem_input_reg_offsets *offset)
510{
511 struct dce110_mem_input *mem_input110 =
512 dm_alloc(sizeof(struct dce110_mem_input));
513
514 if (!mem_input110)
515 return NULL;
516
517 if (dce110_mem_input_construct(mem_input110, ctx, inst, offset)) {
518 struct mem_input *mi = &mem_input110->base;
519
520 mi->regs = &mi_regs[inst];
521 mi->shifts = &mi_shifts;
522 mi->masks = &mi_masks;
523 mi->wa.single_head_rdreq_dmif_limit = 2;
524 return mi;
525 }
526
527 BREAK_TO_DEBUGGER();
528 dm_free(mem_input110);
529 return NULL;
530}
531
532static void dce100_transform_destroy(struct transform **xfm)
533{
534 dm_free(TO_DCE_TRANSFORM(*xfm));
535 *xfm = NULL;
536}
537
538static struct transform *dce100_transform_create(
539 struct dc_context *ctx,
540 uint32_t inst)
541{
542 struct dce_transform *transform =
543 dm_alloc(sizeof(struct dce_transform));
544
545 if (!transform)
546 return NULL;
547
548 if (dce_transform_construct(transform, ctx, inst,
549 &xfm_regs[inst], &xfm_shift, &xfm_mask)) {
550 return &transform->base;
551 }
552
553 BREAK_TO_DEBUGGER();
554 dm_free(transform);
555 return NULL;
556}
557
558static struct input_pixel_processor *dce100_ipp_create(
559 struct dc_context *ctx,
560 uint32_t inst,
561 const struct dce110_ipp_reg_offsets *offsets)
562{
563 struct dce110_ipp *ipp =
564 dm_alloc(sizeof(struct dce110_ipp));
565
566 if (!ipp)
567 return NULL;
568
569 if (dce110_ipp_construct(ipp, ctx, inst, offsets))
570 return &ipp->base;
571
572 BREAK_TO_DEBUGGER();
573 dm_free(ipp);
574 return NULL;
575}
576
577struct link_encoder *dce100_link_encoder_create(
578 const struct encoder_init_data *enc_init_data)
579{
580 struct dce110_link_encoder *enc110 =
581 dm_alloc(sizeof(struct dce110_link_encoder));
582
583 if (!enc110)
584 return NULL;
585
586 if (dce110_link_encoder_construct(
587 enc110,
588 enc_init_data,
589 &link_enc_regs[enc_init_data->transmitter],
590 &link_enc_aux_regs[enc_init_data->channel - 1],
591 &link_enc_hpd_regs[enc_init_data->hpd_source])) {
592
593 enc110->base.features.ycbcr420_supported = false;
594 enc110->base.features.max_hdmi_pixel_clock = 300000;
595 return &enc110->base;
596 }
597
598 BREAK_TO_DEBUGGER();
599 dm_free(enc110);
600 return NULL;
601}
602
603struct output_pixel_processor *dce100_opp_create(
604 struct dc_context *ctx,
605 uint32_t inst,
606 const struct dce110_opp_reg_offsets *offset)
607{
608 struct dce110_opp *opp =
609 dm_alloc(sizeof(struct dce110_opp));
610
611 if (!opp)
612 return NULL;
613
614 if (dce110_opp_construct(opp,
615 ctx, inst, offset))
616 return &opp->base;
617
618 BREAK_TO_DEBUGGER();
619 dm_free(opp);
620 return NULL;
621}
622
623void dce100_opp_destroy(struct output_pixel_processor **opp)
624{
625 struct dce110_opp *dce110_opp;
626
627 if (!opp || !*opp)
628 return;
629
630 dce110_opp = FROM_DCE11_OPP(*opp);
631
632 dm_free(dce110_opp->regamma.coeff128_dx);
633 dm_free(dce110_opp->regamma.coeff128_oem);
634 dm_free(dce110_opp->regamma.coeff128);
635 dm_free(dce110_opp->regamma.axis_x_1025);
636 dm_free(dce110_opp->regamma.axis_x_256);
637 dm_free(dce110_opp->regamma.coordinates_x);
638 dm_free(dce110_opp->regamma.rgb_regamma);
639 dm_free(dce110_opp->regamma.rgb_resulted);
640 dm_free(dce110_opp->regamma.rgb_oem);
641 dm_free(dce110_opp->regamma.rgb_user);
642 dm_free(dce110_opp);
643
644 *opp = NULL;
645}
646
647struct clock_source *dce100_clock_source_create(
648 struct dc_context *ctx,
649 struct dc_bios *bios,
650 enum clock_source_id id,
651 const struct dce110_clk_src_regs *regs,
652 bool dp_clk_src)
653{
654 struct dce110_clk_src *clk_src =
655 dm_alloc(sizeof(struct dce110_clk_src));
656
657 if (!clk_src)
658 return NULL;
659
660 if (dce110_clk_src_construct(clk_src, ctx, bios, id,
661 regs, &cs_shift, &cs_mask)) {
662 clk_src->base.dp_clk_src = dp_clk_src;
663 return &clk_src->base;
664 }
665
666 BREAK_TO_DEBUGGER();
667 return NULL;
668}
669
670void dce100_clock_source_destroy(struct clock_source **clk_src)
671{
672 dm_free(TO_DCE110_CLK_SRC(*clk_src));
673 *clk_src = NULL;
674}
675
676static void destruct(struct dce110_resource_pool *pool)
677{
678 unsigned int i;
679
680 for (i = 0; i < pool->base.pipe_count; i++) {
681 if (pool->base.opps[i] != NULL)
682 dce100_opp_destroy(&pool->base.opps[i]);
683
684 if (pool->base.transforms[i] != NULL)
685 dce100_transform_destroy(&pool->base.transforms[i]);
686
687 if (pool->base.ipps[i] != NULL)
688 dce110_ipp_destroy(&pool->base.ipps[i]);
689
690 if (pool->base.mis[i] != NULL) {
691 dm_free(TO_DCE110_MEM_INPUT(pool->base.mis[i]));
692 pool->base.mis[i] = NULL;
693 }
694
695 if (pool->base.timing_generators[i] != NULL) {
696 dm_free(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
697 pool->base.timing_generators[i] = NULL;
698 }
699 }
700
701 for (i = 0; i < pool->base.stream_enc_count; i++) {
702 if (pool->base.stream_enc[i] != NULL)
703 dm_free(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
704 }
705
706 for (i = 0; i < pool->base.clk_src_count; i++) {
707 if (pool->base.clock_sources[i] != NULL)
708 dce100_clock_source_destroy(&pool->base.clock_sources[i]);
709 }
710
711 if (pool->base.dp_clock_source != NULL)
712 dce100_clock_source_destroy(&pool->base.dp_clock_source);
713
714 for (i = 0; i < pool->base.audio_count; i++) {
715 if (pool->base.audios[i] != NULL)
716 dce_aud_destroy(&pool->base.audios[i]);
717 }
718
719 if (pool->base.display_clock != NULL)
720 dal_display_clock_destroy(&pool->base.display_clock);
721
722 if (pool->base.irqs != NULL)
723 dal_irq_service_destroy(&pool->base.irqs);
724}
725
726static enum dc_status validate_mapped_resource(
727 const struct core_dc *dc,
728 struct validate_context *context)
729{
730 enum dc_status status = DC_OK;
731 uint8_t i, j, k;
732
733 for (i = 0; i < context->target_count; i++) {
734 struct core_target *target = context->targets[i];
735
736 for (j = 0; j < target->public.stream_count; j++) {
737 struct core_stream *stream =
738 DC_STREAM_TO_CORE(target->public.streams[j]);
739 struct core_link *link = stream->sink->link;
740
741 if (resource_is_stream_unchanged(dc->current_context, stream))
742 continue;
743
744 for (k = 0; k < MAX_PIPES; k++) {
745 struct pipe_ctx *pipe_ctx =
746 &context->res_ctx.pipe_ctx[k];
747
748 if (context->res_ctx.pipe_ctx[k].stream != stream)
749 continue;
750
751 if (!pipe_ctx->tg->funcs->validate_timing(
752 pipe_ctx->tg, &stream->public.timing))
753 return DC_FAIL_CONTROLLER_VALIDATE;
754
755 status = dce110_resource_build_pipe_hw_param(pipe_ctx);
756
757 if (status != DC_OK)
758 return status;
759
760 if (!link->link_enc->funcs->validate_output_with_stream(
761 link->link_enc,
762 pipe_ctx))
763 return DC_FAIL_ENC_VALIDATE;
764
765 /* TODO: validate audio ASIC caps, encoder */
766 status = dc_link_validate_mode_timing(stream,
767 link,
768 &stream->public.timing);
769
770 if (status != DC_OK)
771 return status;
772
773 resource_build_info_frame(pipe_ctx);
774
775				/* do not need to validate non-root pipes */
776 break;
777 }
778 }
779 }
780
781 return DC_OK;
782}
783
784enum dc_status dce100_validate_bandwidth(
785 const struct core_dc *dc,
786 struct validate_context *context)
787{
788 /* TODO implement when needed but for now hardcode max value*/
789 context->bw_results.dispclk_khz = 681000;
790
791 return DC_OK;
792}
793
794static bool dce100_validate_surface_sets(
795 const struct dc_validation_set set[],
796 int set_count)
797{
798 int i;
799
800 for (i = 0; i < set_count; i++) {
801 if (set[i].surface_count == 0)
802 continue;
803
804 if (set[i].surface_count > 1)
805 return false;
806
807 if (set[i].surfaces[0]->clip_rect.width
808 != set[i].target->streams[0]->src.width
809 || set[i].surfaces[0]->clip_rect.height
810 != set[i].target->streams[0]->src.height)
811 return false;
812 if (set[i].surfaces[0]->format
813 >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
814 return false;
815 }
816
817 return true;
818}
819
820enum dc_status dce100_validate_with_context(
821 const struct core_dc *dc,
822 const struct dc_validation_set set[],
823 int set_count,
824 struct validate_context *context)
825{
826 struct dc_context *dc_ctx = dc->ctx;
827 enum dc_status result = DC_ERROR_UNEXPECTED;
828 int i;
829
830 if (!dce100_validate_surface_sets(set, set_count))
831 return DC_FAIL_SURFACE_VALIDATE;
832
833 context->res_ctx.pool = dc->res_pool;
834
835 for (i = 0; i < set_count; i++) {
836 context->targets[i] = DC_TARGET_TO_CORE(set[i].target);
837 dc_target_retain(&context->targets[i]->public);
838 context->target_count++;
839 }
840
841 result = resource_map_pool_resources(dc, context);
842
843 if (result == DC_OK)
844 result = resource_map_clock_resources(dc, context);
845
846 if (!resource_validate_attach_surfaces(
847 set, set_count, dc->current_context, context)) {
848 DC_ERROR("Failed to attach surface to target!\n");
849 return DC_FAIL_ATTACH_SURFACES;
850 }
851
852 if (result == DC_OK)
853 result = validate_mapped_resource(dc, context);
854
855 if (result == DC_OK)
856 result = resource_build_scaling_params_for_context(dc, context);
857
858 if (result == DC_OK)
859 result = dce100_validate_bandwidth(dc, context);
860
861 return result;
862}
863
864enum dc_status dce100_validate_guaranteed(
865 const struct core_dc *dc,
866 const struct dc_target *dc_target,
867 struct validate_context *context)
868{
869 enum dc_status result = DC_ERROR_UNEXPECTED;
870
871 context->res_ctx.pool = dc->res_pool;
872
873 context->targets[0] = DC_TARGET_TO_CORE(dc_target);
874 dc_target_retain(&context->targets[0]->public);
875 context->target_count++;
876
877 result = resource_map_pool_resources(dc, context);
878
879 if (result == DC_OK)
880 result = resource_map_clock_resources(dc, context);
881
882 if (result == DC_OK)
883 result = validate_mapped_resource(dc, context);
884
885 if (result == DC_OK) {
886 validate_guaranteed_copy_target(
887 context, dc->public.caps.max_targets);
888 result = resource_build_scaling_params_for_context(dc, context);
889 }
890
891 if (result == DC_OK)
892 result = dce100_validate_bandwidth(dc, context);
893
894 return result;
895}
896
897static void dce100_destroy_resource_pool(struct resource_pool **pool)
898{
899 struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
900
901 destruct(dce110_pool);
902 dm_free(dce110_pool);
903 *pool = NULL;
904}
905
906static const struct resource_funcs dce100_res_pool_funcs = {
907 .destroy = dce100_destroy_resource_pool,
908 .link_enc_create = dce100_link_encoder_create,
909 .validate_with_context = dce100_validate_with_context,
910 .validate_guaranteed = dce100_validate_guaranteed,
911 .validate_bandwidth = dce100_validate_bandwidth
912};
913
914static bool construct(
915 uint8_t num_virtual_links,
916 struct core_dc *dc,
917 struct dce110_resource_pool *pool)
918{
919 unsigned int i;
920 struct dc_context *ctx = dc->ctx;
921 struct firmware_info info;
922 struct dc_bios *bp;
923 struct dm_pp_static_clock_info static_clk_info = {0};
924
925 ctx->dc_bios->regs = &bios_regs;
926
927 pool->base.res_cap = &res_cap;
928 pool->base.funcs = &dce100_res_pool_funcs;
929 pool->base.underlay_pipe_index = -1;
930
931 bp = ctx->dc_bios;
932
933 if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
934 info.external_clock_source_frequency_for_dp != 0) {
935 pool->base.dp_clock_source =
936 dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
937
938 pool->base.clock_sources[0] =
939 dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
940 pool->base.clock_sources[1] =
941 dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
942 pool->base.clock_sources[2] =
943 dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
944 pool->base.clk_src_count = 3;
945
946 } else {
947 pool->base.dp_clock_source =
948 dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
949
950 pool->base.clock_sources[0] =
951 dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
952 pool->base.clock_sources[1] =
953 dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
954 pool->base.clk_src_count = 2;
955 }
956
957 if (pool->base.dp_clock_source == NULL) {
958 dm_error("DC: failed to create dp clock source!\n");
959 BREAK_TO_DEBUGGER();
960 goto res_create_fail;
961 }
962
963 for (i = 0; i < pool->base.clk_src_count; i++) {
964 if (pool->base.clock_sources[i] == NULL) {
965 dm_error("DC: failed to create clock sources!\n");
966 BREAK_TO_DEBUGGER();
967 goto res_create_fail;
968 }
969 }
970
971 pool->base.display_clock = dal_display_clock_dce110_create(ctx);
972 if (pool->base.display_clock == NULL) {
973 dm_error("DC: failed to create display clock!\n");
974 BREAK_TO_DEBUGGER();
975 goto res_create_fail;
976 }
977
978
979 /* get static clock information for PPLIB or firmware, save
980 * max_clock_state
981 */
982 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) {
983 enum clocks_state max_clocks_state =
984 dce110_resource_convert_clock_state_pp_to_dc(
985 static_clk_info.max_clocks_state);
986
987 dal_display_clock_store_max_clocks_state(
988 pool->base.display_clock, max_clocks_state);
989 }
990 {
991 struct irq_service_init_data init_data;
992 init_data.ctx = dc->ctx;
993 pool->base.irqs = dal_irq_service_dce110_create(&init_data);
994 if (!pool->base.irqs)
995 goto res_create_fail;
996 }
997
998 /*************************************************
999	 * Resource + asic cap hardcoding                *
1000 *************************************************/
1001 pool->base.underlay_pipe_index = -1;
1002 pool->base.pipe_count = res_cap.num_timing_generator;
1003 dc->public.caps.max_downscale_ratio = 200;
1004 dc->public.caps.i2c_speed_in_khz = 40;
1005
1006 for (i = 0; i < pool->base.pipe_count; i++) {
1007 pool->base.timing_generators[i] =
1008 dce100_timing_generator_create(
1009 ctx,
1010 i,
1011 &dce100_tg_offsets[i]);
1012 if (pool->base.timing_generators[i] == NULL) {
1013 BREAK_TO_DEBUGGER();
1014 dm_error("DC: failed to create tg!\n");
1015 goto res_create_fail;
1016 }
1017
1018 pool->base.mis[i] = dce100_mem_input_create(ctx, i,
1019 &dce100_mi_reg_offsets[i]);
1020 if (pool->base.mis[i] == NULL) {
1021 BREAK_TO_DEBUGGER();
1022 dm_error(
1023 "DC: failed to create memory input!\n");
1024 goto res_create_fail;
1025 }
1026
1027 pool->base.ipps[i] = dce100_ipp_create(ctx, i,
1028 &dce100_ipp_reg_offsets[i]);
1029 if (pool->base.ipps[i] == NULL) {
1030 BREAK_TO_DEBUGGER();
1031 dm_error(
1032 "DC: failed to create input pixel processor!\n");
1033 goto res_create_fail;
1034 }
1035
1036 pool->base.transforms[i] = dce100_transform_create(ctx, i);
1037 if (pool->base.transforms[i] == NULL) {
1038 BREAK_TO_DEBUGGER();
1039 dm_error(
1040 "DC: failed to create transform!\n");
1041 goto res_create_fail;
1042 }
1043
1044 pool->base.opps[i] = dce100_opp_create(ctx, i, &dce100_opp_reg_offsets[i]);
1045 if (pool->base.opps[i] == NULL) {
1046 BREAK_TO_DEBUGGER();
1047 dm_error(
1048 "DC: failed to create output pixel processor!\n");
1049 goto res_create_fail;
1050 }
1051 }
1052
1053 if (!resource_construct(num_virtual_links, dc, &pool->base,
1054 &res_create_funcs))
1055 goto res_create_fail;
1056
1057 /* Create hardware sequencer */
1058 if (!dce100_hw_sequencer_construct(dc))
1059 goto res_create_fail;
1060
1061 return true;
1062
1063res_create_fail:
1064 destruct(pool);
1065
1066 return false;
1067}
1068
1069struct resource_pool *dce100_create_resource_pool(
1070 uint8_t num_virtual_links,
1071 struct core_dc *dc)
1072{
1073 struct dce110_resource_pool *pool =
1074 dm_alloc(sizeof(struct dce110_resource_pool));
1075
1076 if (!pool)
1077 return NULL;
1078
1079 if (construct(num_virtual_links, dc, pool))
1080 return &pool->base;
1081
1082 BREAK_TO_DEBUGGER();
1083 return NULL;
1084}
1085
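An editorial aside on construct()/destruct() above: each sub-object is created in turn, any failure jumps to a single res_create_fail label, and destruct() is written to tolerate a partially-initialized pool, freeing only what was actually allocated. Below is a minimal sketch of the same shape, using plain calloc/free in place of dm_alloc/dm_free; the demo_pool type and field names are illustrative only.

#include <stdbool.h>
#include <stdlib.h>

/* Simplified pool with two sub-objects standing in for clocks, TGs, etc. */
struct demo_pool {
	int *clock_source;
	int *display_clock;
};

/* Safe on partially-initialized pools: frees only what exists. */
static void demo_destruct(struct demo_pool *pool)
{
	free(pool->display_clock);
	pool->display_clock = NULL;
	free(pool->clock_source);
	pool->clock_source = NULL;
}

static bool demo_construct(struct demo_pool *pool)
{
	pool->clock_source = calloc(1, sizeof(*pool->clock_source));
	if (!pool->clock_source)
		goto create_fail;

	pool->display_clock = calloc(1, sizeof(*pool->display_clock));
	if (!pool->display_clock)
		goto create_fail;

	return true;

create_fail:
	demo_destruct(pool); /* single cleanup path, as in construct() above */
	return false;
}

int main(void)
{
	struct demo_pool pool = {0};

	if (!demo_construct(&pool))
		return 1;
	demo_destruct(&pool);
	return 0;
}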
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
new file mode 100644
index 000000000000..bfd7518c94c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
@@ -0,0 +1,19 @@
1/*
2 * dce100_resource.h
3 *
4 * Created on: 2016-01-20
5 * Author: qyang
6 */
7
8#ifndef DCE100_RESOURCE_H_
9#define DCE100_RESOURCE_H_
10
11struct core_dc;
12struct resource_pool;
13struct dc_validation_set;
14
15struct resource_pool *dce100_create_resource_pool(
16 uint8_t num_virtual_links,
17 struct core_dc *dc);
18
19#endif /* DCE100_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
new file mode 100644
index 000000000000..cd7a9095fa06
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -0,0 +1,15 @@
1#
2# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of the HW CRTC block.
4
5DCE110 = dce110_ipp.o dce110_ipp_cursor.o \
6dce110_ipp_gamma.o dce110_opp.o dce110_opp_csc.o \
7dce110_timing_generator.o dce110_opp_formatter.o dce110_opp_regamma.o \
8dce110_compressor.o dce110_mem_input.o dce110_hw_sequencer.o \
9dce110_resource.o \
10dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
11dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o
12
13AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110))
14
15AMD_DISPLAY_FILES += $(AMD_DAL_DCE110)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
new file mode 100644
index 000000000000..518150a414e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -0,0 +1,859 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_11_0_d.h"
29#include "dce/dce_11_0_sh_mask.h"
30#include "gmc/gmc_8_2_sh_mask.h"
31#include "gmc/gmc_8_2_d.h"
32
33#include "include/logger_interface.h"
34
35#include "dce110_compressor.h"
36
37#define DCP_REG(reg)\
38 (reg + cp110->offsets.dcp_offset)
39#define DMIF_REG(reg)\
40 (reg + cp110->offsets.dmif_offset)
41
42static const struct dce110_compressor_reg_offsets reg_offsets[] = {
43{
44 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
45 .dmif_offset =
46 (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
47 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
48},
49{
50 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
51 .dmif_offset =
52 (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
53 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
54},
55{
56 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
57 .dmif_offset =
58 (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
59 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
60}
61};
62
63static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
64
65enum fbc_idle_force {
66 /* Bit 0 - Display registers updated */
67 FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
68
69 /* Bit 2 - FBC_GRPH_COMP_EN register updated */
70 FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
71 /* Bit 3 - FBC_SRC_SEL register updated */
72 FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
73 /* Bit 4 - FBC_MIN_COMPRESSION register updated */
74 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
75 /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
76 FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
77 /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
78 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
79 /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
80 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
81
82 /* Bit 24 - Memory write to region 0 defined by MC registers. */
83 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
84 /* Bit 25 - Memory write to region 1 defined by MC registers */
85 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
86 /* Bit 26 - Memory write to region 2 defined by MC registers */
87 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
88 /* Bit 27 - Memory write to region 3 defined by MC registers. */
89 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
90
91 /* Bit 28 - Memory write from any client other than MCIF */
92 FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
93	/* Bit 29 - CG static screen signal is inactive */
94 FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
95};
96
97static uint32_t lpt_size_alignment(struct dce110_compressor *cp110)
98{
99 /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
100 return cp110->base.raw_size * cp110->base.banks_num *
101 cp110->base.dram_channels_num;
102}
103
104static uint32_t lpt_memory_control_config(struct dce110_compressor *cp110,
105 uint32_t lpt_control)
106{
107 /*LPT MC Config */
108 if (cp110->base.options.bits.LPT_MC_CONFIG == 1) {
109 /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
110 * 00 - 1 CHANNEL
111 * 01 - 2 CHANNELS
112 * 02 - 4 OR 6 CHANNELS
113 * (Only for discrete GPU, N/A for CZ)
114 * 03 - 8 OR 12 CHANNELS
115 * (Only for discrete GPU, N/A for CZ) */
116 switch (cp110->base.dram_channels_num) {
117 case 2:
118 set_reg_field_value(
119 lpt_control,
120 1,
121 LOW_POWER_TILING_CONTROL,
122 LOW_POWER_TILING_NUM_PIPES);
123 break;
124 case 1:
125 set_reg_field_value(
126 lpt_control,
127 0,
128 LOW_POWER_TILING_CONTROL,
129 LOW_POWER_TILING_NUM_PIPES);
130 break;
131 default:
132 dm_logger_write(
133 cp110->base.ctx->logger, LOG_WARNING,
134 "%s: Invalid LPT NUM_PIPES!!!",
135 __func__);
136 break;
137 }
138
139 /* The mapping for LPT NUM_BANKS is in
140 * GRPH_CONTROL.GRPH_NUM_BANKS register field
141 * Specifies the number of memory banks for tiling
142 * purposes. Only applies to 2D and 3D tiling modes.
143 * POSSIBLE VALUES:
144 * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
145 * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
146 * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
147 * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
148 switch (cp110->base.banks_num) {
149 case 16:
150 set_reg_field_value(
151 lpt_control,
152 3,
153 LOW_POWER_TILING_CONTROL,
154 LOW_POWER_TILING_NUM_BANKS);
155 break;
156 case 8:
157 set_reg_field_value(
158 lpt_control,
159 2,
160 LOW_POWER_TILING_CONTROL,
161 LOW_POWER_TILING_NUM_BANKS);
162 break;
163 case 4:
164 set_reg_field_value(
165 lpt_control,
166 1,
167 LOW_POWER_TILING_CONTROL,
168 LOW_POWER_TILING_NUM_BANKS);
169 break;
170 case 2:
171 set_reg_field_value(
172 lpt_control,
173 0,
174 LOW_POWER_TILING_CONTROL,
175 LOW_POWER_TILING_NUM_BANKS);
176 break;
177 default:
178 dm_logger_write(
179 cp110->base.ctx->logger, LOG_WARNING,
180 "%s: Invalid LPT NUM_BANKS!!!",
181 __func__);
182 break;
183 }
184
185 /* The mapping is in DMIF_ADDR_CALC.
186 * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
187 * Carrizo specifies the memory interleave per pipe.
188 * It effectively specifies the location of pipe bits in
189 * the memory address.
190 * POSSIBLE VALUES:
191 * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
192 * interleave
193 * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
194 * interleave
195 */
196 switch (cp110->base.channel_interleave_size) {
197 case 256: /*256B */
198 set_reg_field_value(
199 lpt_control,
200 0,
201 LOW_POWER_TILING_CONTROL,
202 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
203 break;
204 case 512: /*512B */
205 set_reg_field_value(
206 lpt_control,
207 1,
208 LOW_POWER_TILING_CONTROL,
209 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
210 break;
211 default:
212 dm_logger_write(
213 cp110->base.ctx->logger, LOG_WARNING,
214 "%s: Invalid LPT INTERLEAVE_SIZE!!!",
215 __func__);
216 break;
217 }
218
219 /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
220 * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
221 * for Carrizo. Specifies the size of dram row in bytes.
222 * This should match up with NOOFCOLS field in
223 * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
224 * This register DMIF_ADDR_CALC is not used by the
225 * hardware as it is only used for addrlib assertions.
226 * POSSIBLE VALUES:
227 * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
228 * boundary
229 * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
230 * boundary
231 * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
232 * boundary */
233 switch (cp110->base.raw_size) {
234 case 4096: /*4 KB */
235 set_reg_field_value(
236 lpt_control,
237 2,
238 LOW_POWER_TILING_CONTROL,
239 LOW_POWER_TILING_ROW_SIZE);
240 break;
241 case 2048:
242 set_reg_field_value(
243 lpt_control,
244 1,
245 LOW_POWER_TILING_CONTROL,
246 LOW_POWER_TILING_ROW_SIZE);
247 break;
248 case 1024:
249 set_reg_field_value(
250 lpt_control,
251 0,
252 LOW_POWER_TILING_CONTROL,
253 LOW_POWER_TILING_ROW_SIZE);
254 break;
255 default:
256 dm_logger_write(
257 cp110->base.ctx->logger, LOG_WARNING,
258 "%s: Invalid LPT ROW_SIZE!!!",
259 __func__);
260 break;
261 }
262 } else {
263 dm_logger_write(
264 cp110->base.ctx->logger, LOG_WARNING,
265 "%s: LPT MC Configuration is not provided",
266 __func__);
267 }
268
269 return lpt_control;
270}
271
272static bool is_source_bigger_than_epanel_size(
273 struct dce110_compressor *cp110,
274 uint32_t source_view_width,
275 uint32_t source_view_height)
276{
277 if (cp110->base.embedded_panel_h_size != 0 &&
278 cp110->base.embedded_panel_v_size != 0 &&
279 ((source_view_width * source_view_height) >
280 (cp110->base.embedded_panel_h_size *
281 cp110->base.embedded_panel_v_size)))
282 return true;
283
284 return false;
285}
286
287static uint32_t align_to_chunks_number_per_line(
288 struct dce110_compressor *cp110,
289 uint32_t pixels)
290{
291 return 256 * ((pixels + 255) / 256);
292}
293
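As an aside from the patch itself: align_to_chunks_number_per_line() above and the LPT surface-address alignment later in this file are both instances of the standard integer round-up idiom. A minimal, self-contained sketch in plain C, with nothing driver-specific assumed:

#include <stdint.h>
#include <stdio.h>

/* Round 'value' up to the next multiple of 'align' (align must be > 0). */
static uint32_t round_up_to(uint32_t value, uint32_t align)
{
	return ((value + (align - 1)) / align) * align;
}

int main(void)
{
	printf("%u\n", round_up_to(1920, 256)); /* 2048: 7.5 chunks padded to 8 */
	printf("%u\n", round_up_to(2048, 256)); /* 2048: already a multiple of 256 */
	return 0;
}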
294static void wait_for_fbc_state_changed(
295 struct dce110_compressor *cp110,
296 bool enabled)
297{
298 uint8_t counter = 0;
299 uint32_t addr = mmFBC_STATUS;
300 uint32_t value;
301
302 while (counter < 10) {
303 value = dm_read_reg(cp110->base.ctx, addr);
304 if (get_reg_field_value(
305 value,
306 FBC_STATUS,
307 FBC_ENABLE_STATUS) == enabled)
308 break;
309 udelay(10);
310 counter++;
311 }
312
313 if (counter == 10) {
314 dm_logger_write(
315 cp110->base.ctx->logger, LOG_WARNING,
316 "%s: wait counter exceeded, changes to HW not applied",
317 __func__);
318 }
319}
320
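Another aside: wait_for_fbc_state_changed() above is a bounded poll: read a status field, compare it with the expected state, back off with udelay(), and give up after a fixed number of attempts instead of blocking forever. A hedged sketch of that shape follows; read_status() and delay_microseconds() are invented stand-ins, not driver APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status;                        /* pretend hardware register */
static uint32_t read_status(void) { return fake_status; }
static void delay_microseconds(int us) { (void)us;  /* stub for the demo */ }

/* Poll the low status bit with a bounded retry count. */
static bool poll_until(uint32_t expected, int max_tries)
{
	int tries;

	for (tries = 0; tries < max_tries; tries++) {
		if ((read_status() & 0x1) == expected)
			return true;
		delay_microseconds(10);
	}
	return false; /* caller logs a warning rather than waiting forever */
}

int main(void)
{
	fake_status = 1;
	printf("%s\n", poll_until(1, 10) ? "ready" : "timed out");
	return 0;
}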
321void dce110_compressor_power_up_fbc(struct compressor *compressor)
322{
323 uint32_t value;
324 uint32_t addr;
325
326 addr = mmFBC_CNTL;
327 value = dm_read_reg(compressor->ctx, addr);
328 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
329 set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
330 set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
331 if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
332 /* HW needs to do power measurement comparison. */
333 set_reg_field_value(
334 value,
335 0,
336 FBC_CNTL,
337 FBC_COMP_CLK_GATE_EN);
338 }
339 dm_write_reg(compressor->ctx, addr, value);
340
341 addr = mmFBC_COMP_MODE;
342 value = dm_read_reg(compressor->ctx, addr);
343 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
344 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
345 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
346 dm_write_reg(compressor->ctx, addr, value);
347
348 addr = mmFBC_COMP_CNTL;
349 value = dm_read_reg(compressor->ctx, addr);
350 set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
351 dm_write_reg(compressor->ctx, addr, value);
352 /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
353 /* 1 ==> 4:1 */
354 /* 2 ==> 8:1 */
355 /* 0xF ==> 1:1 */
356 set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
357 dm_write_reg(compressor->ctx, addr, value);
358 compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
359
360 value = 0;
361 dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
362
363 value = 0xFFFFFF;
364 dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
365}
366
367void dce110_compressor_enable_fbc(
368 struct compressor *compressor,
369 uint32_t paths_num,
370 struct compr_addr_and_pitch_params *params)
371{
372 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
373
374 if (compressor->options.bits.FBC_SUPPORT &&
375 (compressor->options.bits.DUMMY_BACKEND == 0) &&
376 (!dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
377 (!is_source_bigger_than_epanel_size(
378 cp110,
379 params->source_view_width,
380 params->source_view_height))) {
381
382 uint32_t addr;
383 uint32_t value;
384
385 /* Before enabling FBC first need to enable LPT if applicable
386 * LPT state should always be changed (enable/disable) while FBC
387 * is disabled */
388 if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
389 (params->source_view_width *
390 params->source_view_height <=
391 dce11_one_lpt_channel_max_resolution)) {
392 dce110_compressor_enable_lpt(compressor);
393 }
394
395 addr = mmFBC_CNTL;
396 value = dm_read_reg(compressor->ctx, addr);
397 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
398 set_reg_field_value(
399 value,
400 params->inst,
401 FBC_CNTL, FBC_SRC_SEL);
402 dm_write_reg(compressor->ctx, addr, value);
403
404 /* Keep track of enum controller_id FBC is attached to */
405 compressor->is_enabled = true;
406 compressor->attached_inst = params->inst;
407 cp110->offsets = reg_offsets[params->inst - 1];
408
409		/* Toggle it as there is a bug in HW */
410 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
411 dm_write_reg(compressor->ctx, addr, value);
412 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
413 dm_write_reg(compressor->ctx, addr, value);
414
415 wait_for_fbc_state_changed(cp110, true);
416 }
417}
418
419void dce110_compressor_disable_fbc(struct compressor *compressor)
420{
421 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
422
423 if (compressor->options.bits.FBC_SUPPORT &&
424 dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
425 uint32_t reg_data;
426 /* Turn off compression */
427 reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
428 set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
429 dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
430
431 /* Reset enum controller_id to undefined */
432 compressor->attached_inst = 0;
433 compressor->is_enabled = false;
434
435 /* Whenever disabling FBC make sure LPT is disabled if LPT
436 * supported */
437 if (compressor->options.bits.LPT_SUPPORT)
438 dce110_compressor_disable_lpt(compressor);
439
440 wait_for_fbc_state_changed(cp110, false);
441 }
442}
443
444bool dce110_compressor_is_fbc_enabled_in_hw(
445 struct compressor *compressor,
446 uint32_t *inst)
447{
448 /* Check the hardware register */
449 uint32_t value;
450
451 value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
452 if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
453 if (inst != NULL)
454 *inst = compressor->attached_inst;
455 return true;
456 }
457
458 value = dm_read_reg(compressor->ctx, mmFBC_MISC);
459 if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
460 value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
461
462 if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
463 if (inst != NULL)
464 *inst =
465 compressor->attached_inst;
466 return true;
467 }
468 }
469 return false;
470}
471
472bool dce110_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
473{
474 /* Check the hardware register */
475 uint32_t value = dm_read_reg(compressor->ctx,
476 mmLOW_POWER_TILING_CONTROL);
477
478 return get_reg_field_value(
479 value,
480 LOW_POWER_TILING_CONTROL,
481 LOW_POWER_TILING_ENABLE);
482}
483
484void dce110_compressor_program_compressed_surface_address_and_pitch(
485 struct compressor *compressor,
486 struct compr_addr_and_pitch_params *params)
487{
488 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
489 uint32_t value = 0;
490 uint32_t fbc_pitch = 0;
491 uint32_t compressed_surf_address_low_part =
492 compressor->compr_surface_address.addr.low_part;
493
494 /* Clear content first. */
495 dm_write_reg(
496 compressor->ctx,
497 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
498 0);
499 dm_write_reg(compressor->ctx,
500 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
501
502 if (compressor->options.bits.LPT_SUPPORT) {
503 uint32_t lpt_alignment = lpt_size_alignment(cp110);
504
505 if (lpt_alignment != 0) {
506 compressed_surf_address_low_part =
507 ((compressed_surf_address_low_part
508 + (lpt_alignment - 1)) / lpt_alignment)
509 * lpt_alignment;
510 }
511 }
512
513 /* Write address, HIGH has to be first. */
514 dm_write_reg(compressor->ctx,
515 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
516 compressor->compr_surface_address.addr.high_part);
517 dm_write_reg(compressor->ctx,
518 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
519 compressed_surf_address_low_part);
520
521 fbc_pitch = align_to_chunks_number_per_line(
522 cp110,
523 params->source_view_width);
524
525 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
526 fbc_pitch = fbc_pitch / 8;
527 else
528 dm_logger_write(
529 compressor->ctx->logger, LOG_WARNING,
530 "%s: Unexpected DCE11 compression ratio",
531 __func__);
532
533 /* Clear content first. */
534 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
535
536 /* Write FBC Pitch. */
537 set_reg_field_value(
538 value,
539 fbc_pitch,
540 GRPH_COMPRESS_PITCH,
541 GRPH_COMPRESS_PITCH);
542 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
543
544}
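The LPT branch above rounds the low 32 bits of the compressed surface address up to the next multiple of the LPT alignment. A small standalone sketch of that align-up arithmetic; align_up() is a hypothetical helper and 0x1000 an illustrative alignment, not what lpt_size_alignment() would return:

#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next multiple of alignment (alignment != 0),
 * the same ((x + a - 1) / a) * a arithmetic used above. */
static uint32_t align_up(uint32_t addr, uint32_t alignment)
{
	return ((addr + (alignment - 1)) / alignment) * alignment;
}

int main(void)
{
	printf("0x%x\n", align_up(0x12345, 0x1000)); /* prints 0x13000 */
	return 0;
}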
545
546void dce110_compressor_disable_lpt(struct compressor *compressor)
547{
548 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
549 uint32_t value;
550 uint32_t addr;
551 uint32_t inx;
552
553 /* Disable all pipes LPT Stutter */
554 for (inx = 0; inx < 3; inx++) {
555 value =
556 dm_read_reg(
557 compressor->ctx,
558 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
559 set_reg_field_value(
560 value,
561 0,
562 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
563 STUTTER_ENABLE_NONLPTCH);
564 dm_write_reg(
565 compressor->ctx,
566 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
567 value);
568 }
569 /* Disable Underlay pipe LPT Stutter */
570 addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
571 value = dm_read_reg(compressor->ctx, addr);
572 set_reg_field_value(
573 value,
574 0,
575 DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
576 STUTTER_ENABLE_NONLPTCH);
577 dm_write_reg(compressor->ctx, addr, value);
578
579 /* Disable LPT */
580 addr = mmLOW_POWER_TILING_CONTROL;
581 value = dm_read_reg(compressor->ctx, addr);
582 set_reg_field_value(
583 value,
584 0,
585 LOW_POWER_TILING_CONTROL,
586 LOW_POWER_TILING_ENABLE);
587 dm_write_reg(compressor->ctx, addr, value);
588
589 /* Clear selection of Channel(s) containing Compressed Surface */
590 addr = mmGMCON_LPT_TARGET;
591 value = dm_read_reg(compressor->ctx, addr);
592 set_reg_field_value(
593 value,
594 0xFFFFFFFF,
595 GMCON_LPT_TARGET,
596 STCTRL_LPT_TARGET);
597 dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
598}
599
600void dce110_compressor_enable_lpt(struct compressor *compressor)
601{
602 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
603 uint32_t value;
604 uint32_t addr;
605 uint32_t value_control;
606 uint32_t channels;
607
608 /* Enable LPT Stutter from Display pipe */
609 value = dm_read_reg(compressor->ctx,
610 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
611 set_reg_field_value(
612 value,
613 1,
614 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
615 STUTTER_ENABLE_NONLPTCH);
616 dm_write_reg(compressor->ctx,
617 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
618
619 /* Enable Underlay pipe LPT Stutter */
620 addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
621 value = dm_read_reg(compressor->ctx, addr);
622 set_reg_field_value(
623 value,
624 1,
625 DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
626 STUTTER_ENABLE_NONLPTCH);
627 dm_write_reg(compressor->ctx, addr, value);
628
629 /* Selection of channel(s) containing the compressed surface: 0xffffffff
630 * will disable LPT.
631 * STCTRL_LPT_TARGETn corresponds to channel n. */
632 addr = mmLOW_POWER_TILING_CONTROL;
633 value_control = dm_read_reg(compressor->ctx, addr);
634 channels = get_reg_field_value(value_control,
635 LOW_POWER_TILING_CONTROL,
636 LOW_POWER_TILING_MODE);
637
638 addr = mmGMCON_LPT_TARGET;
639 value = dm_read_reg(compressor->ctx, addr);
640 set_reg_field_value(
641 value,
642 channels + 1, /* not mentioned in programming guide,
643 but follow DCE8.1 */
644 GMCON_LPT_TARGET,
645 STCTRL_LPT_TARGET);
646 dm_write_reg(compressor->ctx, addr, value);
647
648 /* Enable LPT */
649 addr = mmLOW_POWER_TILING_CONTROL;
650 value = dm_read_reg(compressor->ctx, addr);
651 set_reg_field_value(
652 value,
653 1,
654 LOW_POWER_TILING_CONTROL,
655 LOW_POWER_TILING_ENABLE);
656 dm_write_reg(compressor->ctx, addr, value);
657}
658
659void dce110_compressor_program_lpt_control(
660 struct compressor *compressor,
661 struct compr_addr_and_pitch_params *params)
662{
663 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
664 uint32_t rows_per_channel;
665 uint32_t lpt_alignment;
666 uint32_t source_view_width;
667 uint32_t source_view_height;
668 uint32_t lpt_control = 0;
669
670 if (!compressor->options.bits.LPT_SUPPORT)
671 return;
672
673 lpt_control = dm_read_reg(compressor->ctx,
674 mmLOW_POWER_TILING_CONTROL);
675
676 /* POSSIBLE VALUES for Low Power Tiling Mode:
677 * 00 - Use channel 0
678 * 01 - Use Channel 0 and 1
679 * 02 - Use Channel 0,1,2,3
680 * 03 - reserved */
681 switch (compressor->lpt_channels_num) {
682 /* case 2:
683 * Use Channel 0 & 1 / Not used for DCE 11 */
684 case 1:
685 /*Use Channel 0 for LPT for DCE 11 */
686 set_reg_field_value(
687 lpt_control,
688 0,
689 LOW_POWER_TILING_CONTROL,
690 LOW_POWER_TILING_MODE);
691 break;
692 default:
693 dm_logger_write(
694 compressor->ctx->logger, LOG_WARNING,
695 "%s: Invalid selected DRAM channels for LPT!!!",
696 __func__);
697 break;
698 }
699
700 lpt_control = lpt_memory_control_config(cp110, lpt_control);
701
702 /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
703 * FBC compressed surface pitch.
704 * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
705 * Surface Pitch) / (Row Size * Number of Channels *
706 * Number of Banks)). */
707 rows_per_channel = 0;
708 lpt_alignment = lpt_size_alignment(cp110);
709 source_view_width =
710 align_to_chunks_number_per_line(
711 cp110,
712 params->source_view_width);
713 source_view_height = (params->source_view_height + 1) & (~0x1);
714
715 if (lpt_alignment != 0) {
716 rows_per_channel = source_view_width * source_view_height * 4;
717 rows_per_channel =
718 (rows_per_channel % lpt_alignment) ?
719 (rows_per_channel / lpt_alignment + 1) :
720 rows_per_channel / lpt_alignment;
721 }
722
723 set_reg_field_value(
724 lpt_control,
725 rows_per_channel,
726 LOW_POWER_TILING_CONTROL,
727 LOW_POWER_TILING_ROWS_PER_CHAN);
728
729 dm_write_reg(compressor->ctx,
730 mmLOW_POWER_TILING_CONTROL, lpt_control);
731}
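The ternary above is a ceiling division of the width * height * 4 byte count by the LPT alignment; the kernel's DIV_ROUND_UP() macro expresses the same rounding. A standalone sketch with illustrative numbers (div_round_up() is a hypothetical helper; 1366x768 and 4096 are example values, not hardware-derived ones):

#include <stdint.h>
#include <stdio.h>

/* Ceiling division, equivalent to the (x % a) ? x / a + 1 : x / a form above. */
static uint32_t div_round_up(uint32_t n, uint32_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint32_t width = 1366, height = 768;  /* illustrative source view */
	uint32_t lpt_alignment = 4096;        /* illustrative alignment */
	uint32_t bytes = width * height * 4;

	printf("rows_per_channel = %u\n", div_round_up(bytes, lpt_alignment));
	/* 4196352 / 4096 = 1024.5, rounded up to 1025 */
	return 0;
}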
732
733/*
734 * DCE 11 Frame Buffer Compression Implementation
735 */
736
737void dce110_compressor_set_fbc_invalidation_triggers(
738 struct compressor *compressor,
739 uint32_t fbc_trigger)
740{
741 /* Disable the region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19).
742 * For DCE 11, regions cannot be used - they do not work with S/G.
743 */
744 uint32_t addr = mmFBC_CLIENT_REGION_MASK;
745 uint32_t value = dm_read_reg(compressor->ctx, addr);
746
747 set_reg_field_value(
748 value,
749 0,
750 FBC_CLIENT_REGION_MASK,
751 FBC_MEMORY_REGION_MASK);
752 dm_write_reg(compressor->ctx, addr, value);
753
754 /* Set up the events that clear all CSM entries (effectively marking
755 * the current compressed data invalid).
756 * For DCE 11 the CSM metadata value 11111 means "Not Compressed".
757 * Used as the initial value of the metadata sent to the compressor
758 * after invalidation, to indicate that the compressor should attempt
759 * to compress all chunks on the current pass. Also used when the chunk
760 * is not successfully written to memory.
761 * When this CSM value is detected, FBC reads from the uncompressed
762 * buffer. Set events according to the passed-in value; these events are
763 * valid for DCE11:
764 * - bit 0 - display register updated
765 * - bit 28 - memory write from any client except from MCIF
766 * - bit 29 - CG static screen signal is inactive
767 * In addition, DCE11.1 needs to set new DCE11.1-specific events
768 * that are used to trigger invalidation on certain register changes;
769 * for example, enabling Alpha Compression may trigger invalidation of
770 * FBC once the bit is set. These events are as follows:
771 * - Bit 2 - FBC_GRPH_COMP_EN register updated
772 * - Bit 3 - FBC_SRC_SEL register updated
773 * - Bit 4 - FBC_MIN_COMPRESSION register updated
774 * - Bit 5 - FBC_ALPHA_COMP_EN register updated
775 * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
776 * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
777 */
778 addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
779 value = dm_read_reg(compressor->ctx, addr);
780 set_reg_field_value(
781 value,
782 fbc_trigger |
783 FBC_IDLE_FORCE_GRPH_COMP_EN |
784 FBC_IDLE_FORCE_SRC_SEL_CHANGE |
785 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
786 FBC_IDLE_FORCE_ALPHA_COMP_EN |
787 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
788 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
789 FBC_IDLE_FORCE_CLEAR_MASK,
790 FBC_IDLE_FORCE_CLEAR_MASK);
791 dm_write_reg(compressor->ctx, addr, value);
792}
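The fbc_trigger argument is OR-ed into the same clear mask as the DCE11.1 register-update events. A hedged sketch of how a caller might compose the DCE11 events listed in the comment above; the FBC_TRIGGER_* names are hypothetical, only the bit positions (0, 28, 29) come from that comment:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical names; bit positions taken from the comment above. */
#define FBC_TRIGGER_DISPLAY_REG_UPDATE  (1u << 0)  /* display register updated */
#define FBC_TRIGGER_MEMORY_WRITE        (1u << 28) /* write from any client except MCIF */
#define FBC_TRIGGER_CG_STATIC_INACTIVE  (1u << 29) /* CG static screen signal inactive */

int main(void)
{
	uint32_t fbc_trigger = FBC_TRIGGER_DISPLAY_REG_UPDATE |
			       FBC_TRIGGER_MEMORY_WRITE |
			       FBC_TRIGGER_CG_STATIC_INACTIVE;

	printf("%#x\n", fbc_trigger); /* prints 0x30000001 */
	return 0;
}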
793
794bool dce110_compressor_construct(struct dce110_compressor *compressor,
795 struct dc_context *ctx)
796{
797 struct dc_bios *bp = ctx->dc_bios;
798 struct embedded_panel_info panel_info;
799
800 compressor->base.options.raw = 0;
801 compressor->base.options.bits.FBC_SUPPORT = true;
802 compressor->base.options.bits.LPT_SUPPORT = true;
803 compressor->base.options.bits.DUMMY_BACKEND = false;
804 compressor->base.options.bits.CLK_GATING_DISABLED = false;
805
806 compressor->base.ctx = ctx;
807 compressor->base.embedded_panel_h_size = 0;
808 compressor->base.embedded_panel_v_size = 0;
809 compressor->base.memory_bus_width = ctx->asic_id.vram_width;
810 compressor->base.allocated_size = 0;
811 compressor->base.preferred_requested_size = 0;
812 compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
813 compressor->base.banks_num = 0;
814 compressor->base.raw_size = 0;
815 compressor->base.channel_interleave_size = 0;
816 compressor->base.dram_channels_num = 0;
817 compressor->base.attached_inst = 0;
818 compressor->base.is_enabled = false;
819
820 /* For DCE 11 always use one DRAM channel for LPT */
821 compressor->base.lpt_channels_num = 1;
822
823 /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
824 * should not be supported. This check has to run after memory_bus_width
825 * has been initialized from the asic_id above. */
826 if (compressor->base.memory_bus_width == 64)
827 compressor->base.options.bits.LPT_SUPPORT = false;
828
829 if (BP_RESULT_OK ==
830 bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
831 compressor->base.embedded_panel_h_size =
832 panel_info.lcd_timing.horizontal_addressable;
833 compressor->base.embedded_panel_v_size =
834 panel_info.lcd_timing.vertical_addressable;
835 }
836 return true;
837}
838
839struct compressor *dce110_compressor_create(struct dc_context *ctx)
840{
841 struct dce110_compressor *cp110 =
842 dm_alloc(sizeof(struct dce110_compressor));
843
844 if (!cp110)
845 return NULL;
846
847 if (dce110_compressor_construct(cp110, ctx))
848 return &cp110->base;
849
850 BREAK_TO_DEBUGGER();
851 dm_free(cp110);
852 return NULL;
853}
854
855void dce110_compressor_destroy(struct compressor **compressor)
856{
857 dm_free(TO_DCE110_COMPRESSOR(*compressor));
858 *compressor = NULL;
859}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
new file mode 100644
index 000000000000..22af5be51581
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
@@ -0,0 +1,78 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_COMPRESSOR_DCE110_H__
26#define __DC_COMPRESSOR_DCE110_H__
27
28#include "../inc/compressor.h"
29
30#define TO_DCE110_COMPRESSOR(compressor)\
31 container_of(compressor, struct dce110_compressor, base)
32
33struct dce110_compressor_reg_offsets {
34 uint32_t dcp_offset;
35 uint32_t dmif_offset;
36};
37
38struct dce110_compressor {
39 struct compressor base;
40 struct dce110_compressor_reg_offsets offsets;
41};
42
43struct compressor *dce110_compressor_create(struct dc_context *ctx);
44
45bool dce110_compressor_construct(struct dce110_compressor *cp110,
46 struct dc_context *ctx);
47
48void dce110_compressor_destroy(struct compressor **cp);
49
50/* FBC RELATED */
51void dce110_compressor_power_up_fbc(struct compressor *cp);
52
53void dce110_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
54 struct compr_addr_and_pitch_params *params);
55
56void dce110_compressor_disable_fbc(struct compressor *cp);
57
58void dce110_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
59 uint32_t fbc_trigger);
60
61void dce110_compressor_program_compressed_surface_address_and_pitch(
62 struct compressor *cp,
63 struct compr_addr_and_pitch_params *params);
64
65bool dce110_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
66 uint32_t *fbc_mapped_crtc_id);
67
68/* LPT RELATED */
69void dce110_compressor_enable_lpt(struct compressor *cp);
70
71void dce110_compressor_disable_lpt(struct compressor *cp);
72
73void dce110_compressor_program_lpt_control(struct compressor *cp,
74 struct compr_addr_and_pitch_params *params);
75
76bool dce110_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
77
78#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
new file mode 100644
index 000000000000..1a682996b531
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -0,0 +1,1978 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26#include "dc.h"
27#include "dc_bios_types.h"
28#include "core_types.h"
29#include "core_status.h"
30#include "resource.h"
31#include "hw_sequencer.h"
32#include "dm_helpers.h"
33#include "dce110_hw_sequencer.h"
34#include "dce110_timing_generator.h"
35
36#include "bios/bios_parser_helper.h"
37#include "timing_generator.h"
38#include "mem_input.h"
39#include "opp.h"
40#include "ipp.h"
41#include "transform.h"
42#include "stream_encoder.h"
43#include "link_encoder.h"
44#include "clock_source.h"
45#include "gamma_calcs.h"
46#include "audio.h"
47#include "dce/dce_hwseq.h"
48
49/* include DCE11 register header files */
50#include "dce/dce_11_0_d.h"
51#include "dce/dce_11_0_sh_mask.h"
52
53struct dce110_hw_seq_reg_offsets {
54 uint32_t crtc;
55};
56
57static const struct dce110_hw_seq_reg_offsets reg_offsets[] = {
58{
59 .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
60},
61{
62 .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
63},
64{
65 .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
66},
67{
68 .crtc = (mmCRTCV_GSL_CONTROL - mmCRTC_GSL_CONTROL),
69}
70};
71
72#define HW_REG_BLND(reg, id)\
73 (reg + reg_offsets[id].blnd)
74
75#define HW_REG_CRTC(reg, id)\
76 (reg + reg_offsets[id].crtc)
77
78#define MAX_WATERMARK 0xFFFF
79#define SAFE_NBP_MARK 0x7FFF
80
81/*******************************************************************************
82 * Private definitions
83 ******************************************************************************/
84/***************************PIPE_CONTROL***********************************/
85static void dce110_init_pte(struct dc_context *ctx)
86{
87 uint32_t addr;
88 uint32_t value = 0;
89 uint32_t chunk_int = 0;
90 uint32_t chunk_mul = 0;
91
92 addr = mmUNP_DVMM_PTE_CONTROL;
93 value = dm_read_reg(ctx, addr);
94
95 set_reg_field_value(
96 value,
97 0,
98 DVMM_PTE_CONTROL,
99 DVMM_USE_SINGLE_PTE);
100
101 set_reg_field_value(
102 value,
103 1,
104 DVMM_PTE_CONTROL,
105 DVMM_PTE_BUFFER_MODE0);
106
107 set_reg_field_value(
108 value,
109 1,
110 DVMM_PTE_CONTROL,
111 DVMM_PTE_BUFFER_MODE1);
112
113 dm_write_reg(ctx, addr, value);
114
115 addr = mmDVMM_PTE_REQ;
116 value = dm_read_reg(ctx, addr);
117
118 chunk_int = get_reg_field_value(
119 value,
120 DVMM_PTE_REQ,
121 HFLIP_PTEREQ_PER_CHUNK_INT);
122
123 chunk_mul = get_reg_field_value(
124 value,
125 DVMM_PTE_REQ,
126 HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
127
128 if (chunk_int != 0x4 || chunk_mul != 0x4) {
129
130 set_reg_field_value(
131 value,
132 255,
133 DVMM_PTE_REQ,
134 MAX_PTEREQ_TO_ISSUE);
135
136 set_reg_field_value(
137 value,
138 4,
139 DVMM_PTE_REQ,
140 HFLIP_PTEREQ_PER_CHUNK_INT);
141
142 set_reg_field_value(
143 value,
144 4,
145 DVMM_PTE_REQ,
146 HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
147
148 dm_write_reg(ctx, addr, value);
149 }
150}
151/**************************************************************************/
152
153static void enable_display_pipe_clock_gating(
154 struct dc_context *ctx,
155 bool clock_gating)
156{
157 /*TODO*/
158}
159
160static bool dce110_enable_display_power_gating(
161 struct core_dc *dc,
162 uint8_t controller_id,
163 struct dc_bios *dcb,
164 enum pipe_gating_control power_gating)
165{
166 enum bp_result bp_result = BP_RESULT_OK;
167 enum bp_pipe_control_action cntl;
168 struct dc_context *ctx = dc->ctx;
169 unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
170
171 if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
172 return true;
173
174 if (power_gating == PIPE_GATING_CONTROL_INIT)
175 cntl = ASIC_PIPE_INIT;
176 else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
177 cntl = ASIC_PIPE_ENABLE;
178 else
179 cntl = ASIC_PIPE_DISABLE;
180
181 if (controller_id == underlay_idx)
182 controller_id = CONTROLLER_ID_UNDERLAY0 - 1;
183
184 if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
185
186 bp_result = dcb->funcs->enable_disp_power_gating(
187 dcb, controller_id + 1, cntl);
188
189 /* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
190 * by default when the command table is called.
191 *
192 * The BIOS parser accepts controller_id = 6 as indicative of
193 * the underlay pipe in dce110, but we do not support more
194 * than 3.
195 */
196 if (controller_id < CONTROLLER_ID_MAX - 1)
197 dm_write_reg(ctx,
198 HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
199 0);
200 }
201
202 if (power_gating != PIPE_GATING_CONTROL_ENABLE)
203 dce110_init_pte(ctx);
204
205 if (bp_result == BP_RESULT_OK)
206 return true;
207 else
208 return false;
209}
210
211static void build_prescale_params(struct ipp_prescale_params *prescale_params,
212 const struct core_surface *surface)
213{
214 prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
215
216 switch (surface->public.format) {
217 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
218 case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
219 prescale_params->scale = 0x2020;
220 break;
221 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
222 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
223 prescale_params->scale = 0x2008;
224 break;
225 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
226 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
227 prescale_params->scale = 0x2000;
228 break;
229 default:
230 ASSERT(false);
231 }
232}
233
234static bool set_gamma_ramp(
235 struct input_pixel_processor *ipp,
236 struct output_pixel_processor *opp,
237 const struct core_gamma *ramp,
238 const struct core_surface *surface)
239{
240 struct ipp_prescale_params prescale_params = { 0 };
241 struct pwl_params *regamma_params;
242 bool result = false;
243
244 regamma_params = dm_alloc(sizeof(struct pwl_params));
245 if (regamma_params == NULL)
246 goto regamma_alloc_fail;
247
248 regamma_params->hw_points_num = GAMMA_HW_POINTS_NUM;
249
250 opp->funcs->opp_power_on_regamma_lut(opp, true);
251
252 if (ipp) {
253 build_prescale_params(&prescale_params, surface);
254 ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
255 }
256
257 if (ramp && calculate_regamma_params(regamma_params, ramp, surface)) {
258
259 opp->funcs->opp_program_regamma_pwl(opp, regamma_params);
260 if (ipp)
261 ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_sRGB);
262 opp->funcs->opp_set_regamma_mode(opp, OPP_REGAMMA_USER);
263 } else {
264 if (ipp)
265 ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_BYPASS);
266 opp->funcs->opp_set_regamma_mode(opp, OPP_REGAMMA_BYPASS);
267 }
268
269 opp->funcs->opp_power_on_regamma_lut(opp, false);
270
271 result = true;
272
273 dm_free(regamma_params);
274
275regamma_alloc_fail:
276 return result;
277}
278
279static enum dc_status bios_parser_crtc_source_select(
280 struct pipe_ctx *pipe_ctx)
281{
282 struct dc_bios *dcb;
283 /* call the VBIOS table to set the CRTC source for the HW
284 * encoder block
285 * note: the video BIOS clears all FMT settings here. */
286 struct bp_crtc_source_select crtc_source_select = {0};
287 const struct core_sink *sink = pipe_ctx->stream->sink;
288
289 crtc_source_select.engine_id = pipe_ctx->stream_enc->id;
290 crtc_source_select.controller_id = pipe_ctx->pipe_idx + 1;
291 /*TODO: Need to un-hardcode color depth, dp_audio and account for
292 * the case where signal and sink signal is different (translator
293 * encoder)*/
294 crtc_source_select.signal = pipe_ctx->stream->signal;
295 crtc_source_select.enable_dp_audio = false;
296 crtc_source_select.sink_signal = pipe_ctx->stream->signal;
297 crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
298
299 dcb = sink->ctx->dc_bios;
300
301 if (BP_RESULT_OK != dcb->funcs->crtc_source_select(
302 dcb,
303 &crtc_source_select)) {
304 return DC_ERROR_UNEXPECTED;
305 }
306
307 return DC_OK;
308}
309
310void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
311{
312 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
313 pipe_ctx->stream_enc->funcs->update_hdmi_info_packets(
314 pipe_ctx->stream_enc,
315 &pipe_ctx->encoder_info_frame);
316 else if (dc_is_dp_signal(pipe_ctx->stream->signal))
317 pipe_ctx->stream_enc->funcs->update_dp_info_packets(
318 pipe_ctx->stream_enc,
319 &pipe_ctx->encoder_info_frame);
320}
321
322void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
323{
324 enum dc_lane_count lane_count =
325 pipe_ctx->stream->sink->link->public.cur_link_settings.lane_count;
326
327 struct dc_crtc_timing *timing = &pipe_ctx->stream->public.timing;
328 struct core_link *link = pipe_ctx->stream->sink->link;
329
330 /* 1. update AVI info frame (HDMI, DP)
331 * we always need to update info frame
332 */
333 uint32_t active_total_with_borders;
334 uint32_t early_control = 0;
335 struct timing_generator *tg = pipe_ctx->tg;
336
337 /* TODOFPGA may change to hwss.update_info_frame */
338 dce110_update_info_frame(pipe_ctx);
339 /* enable early control to avoid corruption on DP monitor */
340 active_total_with_borders =
341 timing->h_addressable
342 + timing->h_border_left
343 + timing->h_border_right;
344
345 if (lane_count != 0)
346 early_control = active_total_with_borders % lane_count;
347
348 if (early_control == 0)
349 early_control = lane_count;
350
351 tg->funcs->set_early_control(tg, early_control);
352
353 /* enable audio only within mode set */
354 if (pipe_ctx->audio != NULL) {
355 if (dc_is_dp_signal(pipe_ctx->stream->signal))
356 pipe_ctx->stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_enc);
357 }
358
359 /* For MST, multiple streams can go to only one link.
360 * Connect the DIG back end to the front end in enable_stream and
361 * disconnect them in disable_stream.
362 * This keeps the stream and link logic cleanly separated. */
363 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
364 pipe_ctx->stream_enc->id, true);
365
366}
367
368void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
369{
370 struct core_stream *stream = pipe_ctx->stream;
371 struct core_link *link = stream->sink->link;
372
373 if (pipe_ctx->audio) {
374 pipe_ctx->audio->funcs->az_disable(pipe_ctx->audio);
375
376 if (dc_is_dp_signal(pipe_ctx->stream->signal))
377 pipe_ctx->stream_enc->funcs->dp_audio_disable(
378 pipe_ctx->stream_enc);
379 else
380 pipe_ctx->stream_enc->funcs->hdmi_audio_disable(
381 pipe_ctx->stream_enc);
382
383 pipe_ctx->audio = NULL;
384
385 /* TODO: notify the audio driver if the audio modes list changed;
386 * add an audio mode list change flag */
387 /* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
388 * stream->stream_engine_id);
389 */
390 }
391
392 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
393 pipe_ctx->stream_enc->funcs->stop_hdmi_info_packets(
394 pipe_ctx->stream_enc);
395
396 if (dc_is_dp_signal(pipe_ctx->stream->signal))
397 pipe_ctx->stream_enc->funcs->stop_dp_info_packets(
398 pipe_ctx->stream_enc);
399
400 pipe_ctx->stream_enc->funcs->audio_mute_control(
401 pipe_ctx->stream_enc, true);
402
403
404 /* blank at encoder level */
405 if (dc_is_dp_signal(pipe_ctx->stream->signal))
406 pipe_ctx->stream_enc->funcs->dp_blank(pipe_ctx->stream_enc);
407
408 link->link_enc->funcs->connect_dig_be_to_fe(
409 link->link_enc,
410 pipe_ctx->stream_enc->id,
411 false);
412
413}
414
415void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
416 struct dc_link_settings *link_settings)
417{
418 struct encoder_unblank_param params = { { 0 } };
419
420 /* only 3 items below are used by unblank */
421 params.crtc_timing.pixel_clock =
422 pipe_ctx->stream->public.timing.pix_clk_khz;
423 params.link_settings.link_rate = link_settings->link_rate;
424 pipe_ctx->stream_enc->funcs->dp_unblank(pipe_ctx->stream_enc, &params);
425}
426
427static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
428{
429 switch (crtc_id) {
430 case CONTROLLER_ID_D0:
431 return DTO_SOURCE_ID0;
432 case CONTROLLER_ID_D1:
433 return DTO_SOURCE_ID1;
434 case CONTROLLER_ID_D2:
435 return DTO_SOURCE_ID2;
436 case CONTROLLER_ID_D3:
437 return DTO_SOURCE_ID3;
438 case CONTROLLER_ID_D4:
439 return DTO_SOURCE_ID4;
440 case CONTROLLER_ID_D5:
441 return DTO_SOURCE_ID5;
442 default:
443 return DTO_SOURCE_UNKNOWN;
444 }
445}
446
447static void build_audio_output(
448 const struct pipe_ctx *pipe_ctx,
449 struct audio_output *audio_output)
450{
451 const struct core_stream *stream = pipe_ctx->stream;
452 audio_output->engine_id = pipe_ctx->stream_enc->id;
453
454 audio_output->signal = pipe_ctx->stream->signal;
455
456 /* audio_crtc_info */
457
458 audio_output->crtc_info.h_total =
459 stream->public.timing.h_total;
460
461 /*
462 * Audio packets are sent during the actual CRTC blank physical signal,
463 * so we need to specify the actual active signal portion
464 */
465 audio_output->crtc_info.h_active =
466 stream->public.timing.h_addressable
467 + stream->public.timing.h_border_left
468 + stream->public.timing.h_border_right;
469
470 audio_output->crtc_info.v_active =
471 stream->public.timing.v_addressable
472 + stream->public.timing.v_border_top
473 + stream->public.timing.v_border_bottom;
474
475 audio_output->crtc_info.pixel_repetition = 1;
476
477 audio_output->crtc_info.interlaced =
478 stream->public.timing.flags.INTERLACE;
479
480 audio_output->crtc_info.refresh_rate =
481 (stream->public.timing.pix_clk_khz*1000)/
482 (stream->public.timing.h_total*stream->public.timing.v_total);
483
484 audio_output->crtc_info.color_depth =
485 stream->public.timing.display_color_depth;
486
487 audio_output->crtc_info.requested_pixel_clock =
488 pipe_ctx->pix_clk_params.requested_pix_clk;
489
490 /*
491 * TODO - Investigate why calculated pixel clk has to be
492 * requested pixel clk
493 */
494 audio_output->crtc_info.calculated_pixel_clock =
495 pipe_ctx->pix_clk_params.requested_pix_clk;
496
497 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
498 pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
499 audio_output->pll_info.dp_dto_source_clock_in_khz =
500 dal_display_clock_get_dp_ref_clk_frequency(
501 pipe_ctx->dis_clk);
502 }
503
504 audio_output->pll_info.feed_back_divider =
505 pipe_ctx->pll_settings.feedback_divider;
506
507 audio_output->pll_info.dto_source =
508 translate_to_dto_source(
509 pipe_ctx->pipe_idx + 1);
510
511 /* TODO hard code to enable for now. Need get from stream */
512 audio_output->pll_info.ss_enabled = true;
513
514 audio_output->pll_info.ss_percentage =
515 pipe_ctx->pll_settings.ss_percentage;
516}
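The refresh_rate computed above is simply the pixel clock divided by the total frame size. A quick standalone check of that formula with illustrative CEA 1080p@60 timing values (not taken from any real stream state):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pix_clk_khz = 148500;            /* 148.5 MHz pixel clock */
	uint32_t h_total = 2200, v_total = 1125;  /* CEA 1080p60 totals */

	uint32_t refresh_rate = (pix_clk_khz * 1000) / (h_total * v_total);

	printf("%u Hz\n", refresh_rate); /* 148500000 / 2475000 = 60 Hz */
	return 0;
}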
517
518static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
519 struct tg_color *color)
520{
521 uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4;
522
523 switch (pipe_ctx->scl_data.format) {
524 case PIXEL_FORMAT_ARGB8888:
525 /* set border color to red */
526 color->color_r_cr = color_value;
527 break;
528
529 case PIXEL_FORMAT_ARGB2101010:
530 /* set border color to blue */
531 color->color_b_cb = color_value;
532 break;
533 case PIXEL_FORMAT_420BPP12:
534 /* set border color to green */
535 color->color_g_y = color_value;
536 break;
537 case PIXEL_FORMAT_FP16:
538 /* set border color to white */
539 color->color_r_cr = color_value;
540 color->color_b_cb = color_value;
541 color->color_g_y = color_value;
542 break;
543 default:
544 break;
545 }
546}
547
548static void program_scaler(const struct core_dc *dc,
549 const struct pipe_ctx *pipe_ctx)
550{
551 struct tg_color color = {0};
552
553 if (dc->public.debug.surface_visual_confirm)
554 get_surface_visual_confirm_color(pipe_ctx, &color);
555 else
556 color_space_to_black_color(dc,
557 pipe_ctx->stream->public.output_color_space,
558 &color);
559
560 pipe_ctx->xfm->funcs->transform_set_pixel_storage_depth(
561 pipe_ctx->xfm,
562 pipe_ctx->scl_data.lb_params.depth,
563 &pipe_ctx->stream->bit_depth_params);
564
565 if (pipe_ctx->tg->funcs->set_overscan_blank_color)
566 pipe_ctx->tg->funcs->set_overscan_blank_color(
567 pipe_ctx->tg,
568 &color);
569
570 pipe_ctx->xfm->funcs->transform_set_scaler(pipe_ctx->xfm,
571 &pipe_ctx->scl_data);
572}
573
574static enum dc_status prog_pixclk_crtc_otg(
575 struct pipe_ctx *pipe_ctx,
576 struct validate_context *context,
577 struct core_dc *dc)
578{
579 struct core_stream *stream = pipe_ctx->stream;
580 struct pipe_ctx *pipe_ctx_old = &dc->current_context->res_ctx.
581 pipe_ctx[pipe_ctx->pipe_idx];
582 struct tg_color black_color = {0};
583
584 if (!pipe_ctx_old->stream) {
585
586 /* program blank color */
587 color_space_to_black_color(dc,
588 stream->public.output_color_space, &black_color);
589 pipe_ctx->tg->funcs->set_blank_color(
590 pipe_ctx->tg,
591 &black_color);
592 /*
593 * Must blank the CRTC after disabling power gating and before any
594 * programming, otherwise the CRTC will hang in a bad state
595 */
596 pipe_ctx->tg->funcs->set_blank(pipe_ctx->tg, true);
597
598 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
599 pipe_ctx->clock_source,
600 &pipe_ctx->pix_clk_params,
601 &pipe_ctx->pll_settings)) {
602 BREAK_TO_DEBUGGER();
603 return DC_ERROR_UNEXPECTED;
604 }
605
606 pipe_ctx->tg->funcs->program_timing(
607 pipe_ctx->tg,
608 &stream->public.timing,
609 true);
610 }
611
612 if (!pipe_ctx_old->stream) {
613 if (false == pipe_ctx->tg->funcs->enable_crtc(
614 pipe_ctx->tg)) {
615 BREAK_TO_DEBUGGER();
616 return DC_ERROR_UNEXPECTED;
617 }
618 }
619
620 return DC_OK;
621}
622
623static enum dc_status apply_single_controller_ctx_to_hw(
624 struct pipe_ctx *pipe_ctx,
625 struct validate_context *context,
626 struct core_dc *dc)
627{
628 struct core_stream *stream = pipe_ctx->stream;
629 struct pipe_ctx *pipe_ctx_old = &dc->current_context->res_ctx.
630 pipe_ctx[pipe_ctx->pipe_idx];
631
632 /* */
633 dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc);
634
635 pipe_ctx->opp->funcs->opp_set_dyn_expansion(
636 pipe_ctx->opp,
637 COLOR_SPACE_YCBCR601,
638 stream->public.timing.display_color_depth,
639 pipe_ctx->stream->signal);
640
641 pipe_ctx->opp->funcs->opp_program_fmt(
642 pipe_ctx->opp,
643 &stream->bit_depth_params,
644 &stream->clamping);
645
646 /* FPGA does not program backend */
647 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
648 return DC_OK;
649
650 /* TODO: move to stream encoder */
651 if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
652 if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) {
653 BREAK_TO_DEBUGGER();
654 return DC_ERROR_UNEXPECTED;
655 }
656
657 if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
658 stream->sink->link->link_enc->funcs->setup(
659 stream->sink->link->link_enc,
660 pipe_ctx->stream->signal);
661
662 if (dc_is_dp_signal(pipe_ctx->stream->signal))
663 pipe_ctx->stream_enc->funcs->dp_set_stream_attribute(
664 pipe_ctx->stream_enc,
665 &stream->public.timing,
666 stream->public.output_color_space);
667
668 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
669 pipe_ctx->stream_enc->funcs->hdmi_set_stream_attribute(
670 pipe_ctx->stream_enc,
671 &stream->public.timing,
672 stream->phy_pix_clk,
673 pipe_ctx->audio != NULL);
674
675 if (dc_is_dvi_signal(pipe_ctx->stream->signal))
676 pipe_ctx->stream_enc->funcs->dvi_set_stream_attribute(
677 pipe_ctx->stream_enc,
678 &stream->public.timing,
679 (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
680 true : false);
681
682 if (!pipe_ctx_old->stream) {
683 core_link_enable_stream(pipe_ctx);
684
685 if (dc_is_dp_signal(pipe_ctx->stream->signal))
686 dce110_unblank_stream(pipe_ctx,
687 &stream->sink->link->public.cur_link_settings);
688 }
689
690 pipe_ctx->scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
691 /* program_scaler and allocate_mem_input are not new asic */
692 if (!pipe_ctx_old || memcmp(&pipe_ctx_old->scl_data,
693 &pipe_ctx->scl_data,
694 sizeof(struct scaler_data)) != 0)
695 program_scaler(dc, pipe_ctx);
696
697 /* mst support - use total stream count */
698 pipe_ctx->mi->funcs->allocate_mem_input(
699 pipe_ctx->mi,
700 stream->public.timing.h_total,
701 stream->public.timing.v_total,
702 stream->public.timing.pix_clk_khz,
703 context->target_count);
704
705 return DC_OK;
706}
707
708/******************************************************************************/
709
710static void power_down_encoders(struct core_dc *dc)
711{
712 int i;
713
714 for (i = 0; i < dc->link_count; i++) {
715 dc->links[i]->link_enc->funcs->disable_output(
716 dc->links[i]->link_enc, SIGNAL_TYPE_NONE);
717 }
718}
719
720static void power_down_controllers(struct core_dc *dc)
721{
722 int i;
723
724 for (i = 0; i < dc->res_pool->pipe_count; i++) {
725 dc->res_pool->timing_generators[i]->funcs->disable_crtc(
726 dc->res_pool->timing_generators[i]);
727 }
728}
729
730static void power_down_clock_sources(struct core_dc *dc)
731{
732 int i;
733
734 if (dc->res_pool->dp_clock_source->funcs->cs_power_down(
735 dc->res_pool->dp_clock_source) == false)
736 dm_error("Failed to power down pll! (dp clk src)\n");
737
738 for (i = 0; i < dc->res_pool->clk_src_count; i++) {
739 if (dc->res_pool->clock_sources[i]->funcs->cs_power_down(
740 dc->res_pool->clock_sources[i]) == false)
741 dm_error("Failed to power down pll! (clk src index=%d)\n", i);
742 }
743}
744
745static void power_down_all_hw_blocks(struct core_dc *dc)
746{
747 power_down_encoders(dc);
748
749 power_down_controllers(dc);
750
751 power_down_clock_sources(dc);
752}
753
754static void disable_vga_and_power_gate_all_controllers(
755 struct core_dc *dc)
756{
757 int i;
758 struct timing_generator *tg;
759 struct dc_context *ctx = dc->ctx;
760
761 for (i = 0; i < dc->res_pool->pipe_count; i++) {
762 tg = dc->res_pool->timing_generators[i];
763
764 tg->funcs->disable_vga(tg);
765
766 /* Enable CLOCK gating for each pipe BEFORE controller
767 * powergating. */
768 enable_display_pipe_clock_gating(ctx,
769 true);
770
771 dc->hwss.power_down_front_end(
772 dc, &dc->current_context->res_ctx.pipe_ctx[i]);
773 }
774}
775
776/**
777 * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
778 * 1. Power down all DC HW blocks
779 * 2. Disable VGA engine on all controllers
780 * 3. Enable power gating for controller
781 * 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS)
782 */
783void dce110_enable_accelerated_mode(struct core_dc *dc)
784{
785 power_down_all_hw_blocks(dc);
786
787 disable_vga_and_power_gate_all_controllers(dc);
788 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
789}
790
791/**
792 * Call display_engine_clock_dce80 to perform the Dclk programming.
793 */
794void dce110_set_display_clock(struct validate_context *context)
795{
796 /* Program the display engine clock.
797 * Check whether DFS bypass mode is supported; the DFS bypass feature is
798 * only available when the BIOS GPU info table reports support. */
799
800 if (/*dal_adapter_service_is_dfs_bypass_enabled()*/ false) {
801 /*TODO: set_display_clock_dfs_bypass(
802 hws,
803 path_set,
804 context->res_ctx.pool->display_clock,
805 context->res_ctx.min_clocks.min_dclk_khz);*/
806 } else {
807 /*
808 * TODO: need to either port the workaround from the DAL2 function
809 * getActualRequiredDisplayClock or program the display clock without
810 * calling the VBIOS. Currently we temporarily work
811 * around this by increasing the dispclk by 15 percent
812 */
813 dal_display_clock_set_clock(
814 context->res_ctx.pool->display_clock,
815 context->bw_results.dispclk_khz * 115 / 100);
816 }
817
818
819 /* TODO: When changing display engine clock, DMCU WaitLoop must be
820 * reconfigured in order to maintain the same delays within DMCU
821 * programming sequences. */
822}
823
824static uint32_t compute_pstate_blackout_duration(
825 struct bw_fixed blackout_duration,
826 const struct core_stream *stream)
827{
828 uint32_t total_dest_line_time_ns;
829 uint32_t pstate_blackout_duration_ns;
830
831 pstate_blackout_duration_ns = 1000 * blackout_duration.value >> 24;
832
833 total_dest_line_time_ns = 1000000UL *
834 stream->public.timing.h_total /
835 stream->public.timing.pix_clk_khz +
836 pstate_blackout_duration_ns;
837
838 return total_dest_line_time_ns;
839}
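As a worked example of the line-time term above with illustrative values: for h_total = 2200 and pix_clk_khz = 148500, 1000000 * 2200 / 148500 is roughly 14814 ns per destination line, to which the blackout duration in ns is added. A standalone sketch of the same arithmetic; treating bw_fixed as carrying 24 fractional bits of microseconds is an assumption suggested by the >> 24 above, not something stated in this file:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only. */
	uint64_t blackout_fixed = 50ull << 24;  /* 50 us in an assumed 24-bit fixed format */
	uint32_t h_total = 2200, pix_clk_khz = 148500;

	uint32_t blackout_ns = 1000 * blackout_fixed >> 24;
	uint32_t total_ns = 1000000UL * h_total / pix_clk_khz + blackout_ns;

	printf("%u ns\n", total_ns); /* 14814 + 50000 = 64814 ns */
	return 0;
}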
840
841/* get the index of the pipe_ctx as if there were no gaps in the pipe_ctx array */
842int get_bw_result_idx(
843 struct resource_context *res_ctx,
844 int pipe_idx)
845{
846 int i, collapsed_idx;
847
848 if (res_ctx->pipe_ctx[pipe_idx].top_pipe)
849 return 3;
850
851 collapsed_idx = 0;
852 for (i = 0; i < pipe_idx; i++) {
853 if (res_ctx->pipe_ctx[i].stream)
854 collapsed_idx++;
855 }
856
857 return collapsed_idx;
858}
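The collapsed index above counts how many lower-numbered pipes have a stream attached, so sparsely used pipes map onto a dense bw_results array, while pipes that have a top_pipe (the bottom of a blend, e.g. the underlay) are routed to the fixed slot 3. A small standalone illustration of that counting, using a hypothetical occupancy pattern:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical occupancy: true means the pipe at that index has a stream. */
static const bool has_stream[6] = { true, false, true, false, false, true };

static int collapsed_idx(int pipe_idx)
{
	int i, idx = 0;

	for (i = 0; i < pipe_idx; i++)
		if (has_stream[i])
			idx++;
	return idx;
}

int main(void)
{
	/* Pipes 0, 2 and 5 carry streams; they collapse to indices 0, 1 and 2. */
	printf("%d %d %d\n", collapsed_idx(0), collapsed_idx(2), collapsed_idx(5));
	return 0;
}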
859
860static bool is_watermark_set_a_greater(
861 const struct bw_watermarks *set_a,
862 const struct bw_watermarks *set_b)
863{
864 if (set_a->a_mark > set_b->a_mark
865 || set_a->b_mark > set_b->b_mark
866 || set_a->c_mark > set_b->c_mark
867 || set_a->d_mark > set_b->d_mark)
868 return true;
869 return false;
870}
871
872static bool did_watermarks_increase(
873 struct pipe_ctx *pipe_ctx,
874 struct validate_context *context,
875 struct validate_context *old_context)
876{
877 int collapsed_pipe_idx = get_bw_result_idx(&context->res_ctx,
878 pipe_ctx->pipe_idx);
879 int old_collapsed_pipe_idx = get_bw_result_idx(&old_context->res_ctx,
880 pipe_ctx->pipe_idx);
881 struct pipe_ctx *old_pipe_ctx = &old_context->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
882
883 if (!old_pipe_ctx->stream)
884 return true;
885
886 if (is_watermark_set_a_greater(
887 &context->bw_results.nbp_state_change_wm_ns[collapsed_pipe_idx],
888 &old_context->bw_results.nbp_state_change_wm_ns[old_collapsed_pipe_idx]))
889 return true;
890 if (is_watermark_set_a_greater(
891 &context->bw_results.stutter_exit_wm_ns[collapsed_pipe_idx],
892 &old_context->bw_results.stutter_exit_wm_ns[old_collapsed_pipe_idx]))
893 return true;
894 if (is_watermark_set_a_greater(
895 &context->bw_results.urgent_wm_ns[collapsed_pipe_idx],
896 &old_context->bw_results.urgent_wm_ns[old_collapsed_pipe_idx]))
897 return true;
898
899 return false;
900}
901
902static void program_wm_for_pipe(struct core_dc *dc,
903 struct pipe_ctx *pipe_ctx,
904 struct validate_context *context)
905{
906 int total_dest_line_time_ns = compute_pstate_blackout_duration(
907 dc->bw_vbios.blackout_duration,
908 pipe_ctx->stream);
909 int bw_result_idx = get_bw_result_idx(&context->res_ctx,
910 pipe_ctx->pipe_idx);
911
912 pipe_ctx->mi->funcs->mem_input_program_display_marks(
913 pipe_ctx->mi,
914 context->bw_results.nbp_state_change_wm_ns[bw_result_idx],
915 context->bw_results.stutter_exit_wm_ns[bw_result_idx],
916 context->bw_results.urgent_wm_ns[bw_result_idx],
917 total_dest_line_time_ns);
918
919 if (pipe_ctx->top_pipe)
920 pipe_ctx->mi->funcs->mem_input_program_chroma_display_marks(
921 pipe_ctx->mi,
922 context->bw_results.nbp_state_change_wm_ns[bw_result_idx + 1],
923 context->bw_results.stutter_exit_wm_ns[bw_result_idx + 1],
924 context->bw_results.urgent_wm_ns[bw_result_idx + 1],
925 total_dest_line_time_ns);
926}
927
928void dce110_set_displaymarks(
929 const struct core_dc *dc,
930 struct validate_context *context)
931{
932 uint8_t i, num_pipes;
933 unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
934
935 for (i = 0, num_pipes = 0; i < MAX_PIPES; i++) {
936 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
937 uint32_t total_dest_line_time_ns;
938
939 if (pipe_ctx->stream == NULL)
940 continue;
941
942 total_dest_line_time_ns = compute_pstate_blackout_duration(
943 dc->bw_vbios.blackout_duration, pipe_ctx->stream);
944 pipe_ctx->mi->funcs->mem_input_program_display_marks(
945 pipe_ctx->mi,
946 context->bw_results.nbp_state_change_wm_ns[num_pipes],
947 context->bw_results.stutter_exit_wm_ns[num_pipes],
948 context->bw_results.urgent_wm_ns[num_pipes],
949 total_dest_line_time_ns);
950 if (i == underlay_idx) {
951 num_pipes++;
952 pipe_ctx->mi->funcs->mem_input_program_chroma_display_marks(
953 pipe_ctx->mi,
954 context->bw_results.nbp_state_change_wm_ns[num_pipes],
955 context->bw_results.stutter_exit_wm_ns[num_pipes],
956 context->bw_results.urgent_wm_ns[num_pipes],
957 total_dest_line_time_ns);
958 }
959 num_pipes++;
960 }
961}
962
963static void set_safe_displaymarks(struct resource_context *res_ctx)
964{
965 int i;
966 int underlay_idx = res_ctx->pool->underlay_pipe_index;
967 struct bw_watermarks max_marks = {
968 MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
969 struct bw_watermarks nbp_marks = {
970 SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK };
971
972 for (i = 0; i < MAX_PIPES; i++) {
973 if (res_ctx->pipe_ctx[i].stream == NULL)
974 continue;
975
976 res_ctx->pipe_ctx[i].mi->funcs->mem_input_program_display_marks(
977 res_ctx->pipe_ctx[i].mi,
978 nbp_marks,
979 max_marks,
980 max_marks,
981 MAX_WATERMARK);
982 if (i == underlay_idx)
983 res_ctx->pipe_ctx[i].mi->funcs->mem_input_program_chroma_display_marks(
984 res_ctx->pipe_ctx[i].mi,
985 nbp_marks,
986 max_marks,
987 max_marks,
988 MAX_WATERMARK);
989 }
990}
991
992static void switch_dp_clock_sources(
993 const struct core_dc *dc,
994 struct resource_context *res_ctx)
995{
996 uint8_t i;
997 for (i = 0; i < MAX_PIPES; i++) {
998 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
999
1000 if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
1001 continue;
1002
1003 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
1004 struct clock_source *clk_src =
1005 resource_find_used_clk_src_for_sharing(
1006 res_ctx, pipe_ctx);
1007
1008 if (clk_src &&
1009 clk_src != pipe_ctx->clock_source) {
1010 resource_unreference_clock_source(
1011 res_ctx, pipe_ctx->clock_source);
1012 pipe_ctx->clock_source = clk_src;
1013 resource_reference_clock_source(res_ctx, clk_src);
1014
1015 dce_crtc_switch_to_clk_src(dc->hwseq, clk_src, i);
1016 }
1017 }
1018 }
1019}
1020
1021/*******************************************************************************
1022 * Public functions
1023 ******************************************************************************/
1024
1025static void reset_single_pipe_hw_ctx(
1026 const struct core_dc *dc,
1027 struct pipe_ctx *pipe_ctx,
1028 struct validate_context *context)
1029{
1030 core_link_disable_stream(pipe_ctx);
1031 if (!pipe_ctx->tg->funcs->set_blank(pipe_ctx->tg, true)) {
1032 dm_error("DC: failed to blank crtc!\n");
1033 BREAK_TO_DEBUGGER();
1034 }
1035 pipe_ctx->tg->funcs->disable_crtc(pipe_ctx->tg);
1036 pipe_ctx->mi->funcs->free_mem_input(
1037 pipe_ctx->mi, context->target_count);
1038 resource_unreference_clock_source(
1039 &context->res_ctx, pipe_ctx->clock_source);
1040
1041 dc->hwss.power_down_front_end((struct core_dc *)dc, pipe_ctx);
1042
1043 pipe_ctx->stream = NULL;
1044}
1045
1046static void set_drr(struct pipe_ctx **pipe_ctx,
1047 int num_pipes, int vmin, int vmax)
1048{
1049 int i = 0;
1050 struct drr_params params = {0};
1051
1052 params.vertical_total_max = vmax;
1053 params.vertical_total_min = vmin;
1054
1055 /* TODO: If multiple pipes are to be supported, you need
1056 * some GSL stuff
1057 */
1058
1059 for (i = 0; i < num_pipes; i++) {
1060 pipe_ctx[i]->tg->funcs->set_drr(pipe_ctx[i]->tg, &params);
1061 }
1062}
1063
1064static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
1065 int num_pipes, int value)
1066{
1067 unsigned int i;
1068
1069 for (i = 0; i < num_pipes; i++)
1070 pipe_ctx[i]->tg->funcs->
1071 set_static_screen_control(pipe_ctx[i]->tg, value);
1072}
1073
1074 /* Unit: kHz. Before mode set, get the pixel clock from the context; the
1075 * ASIC registers may not be programmed yet.
1076 * TODO: after mode set (pre_mode_set = false),
1077 * we may read the PLL register to get the pixel clock
1078 */
1079static uint32_t get_max_pixel_clock_for_all_paths(
1080 struct core_dc *dc,
1081 struct validate_context *context,
1082 bool pre_mode_set)
1083{
1084 uint32_t max_pix_clk = 0;
1085 int i;
1086
1087 if (!pre_mode_set) {
1088 /* TODO: read ASIC register to get pixel clock */
1089 ASSERT(0);
1090 }
1091
1092 for (i = 0; i < MAX_PIPES; i++) {
1093 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1094
1095 if (pipe_ctx->stream == NULL)
1096 continue;
1097
1098 /* do not check underlay */
1099 if (pipe_ctx->top_pipe)
1100 continue;
1101
1102 if (pipe_ctx->pix_clk_params.requested_pix_clk > max_pix_clk)
1103 max_pix_clk =
1104 pipe_ctx->pix_clk_params.requested_pix_clk;
1105 }
1106
1107 if (max_pix_clk == 0)
1108 ASSERT(0);
1109
1110 return max_pix_clk;
1111}
1112
1113/*
1114 * Find the clock state based on the requested clock. If the clock value is 0,
1115 * simply set the clock state as requested without looking it up by clock value
1116 */
1117static void apply_min_clocks(
1118 struct core_dc *dc,
1119 struct validate_context *context,
1120 enum clocks_state *clocks_state,
1121 bool pre_mode_set)
1122{
1123 struct state_dependent_clocks req_clocks = {0};
1124 struct pipe_ctx *pipe_ctx;
1125 int i;
1126
1127 for (i = 0; i < MAX_PIPES; i++) {
1128 pipe_ctx = &context->res_ctx.pipe_ctx[i];
1129 if (pipe_ctx->dis_clk != NULL)
1130 break;
1131 }
1132
1133 if (!pre_mode_set) {
1134 /* set clock_state without verification */
1135 if (dal_display_clock_set_min_clocks_state(
1136 pipe_ctx->dis_clk, *clocks_state))
1137 return;
1138
1139 /* TODOFPGA */
1140 }
1141
1142 /* get the required state based on state dependent clocks:
1143 * display clock and pixel clock
1144 */
1145 req_clocks.display_clk_khz = context->bw_results.dispclk_khz;
1146
1147 req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
1148 dc, context, true);
1149
1150 if (dal_display_clock_get_required_clocks_state(
1151 pipe_ctx->dis_clk, &req_clocks, clocks_state)) {
1152 dal_display_clock_set_min_clocks_state(
1153 pipe_ctx->dis_clk, *clocks_state);
1154 } else {
1155 }
1156}
1157
1158static enum dc_status apply_ctx_to_hw_fpga(
1159 struct core_dc *dc,
1160 struct validate_context *context)
1161{
1162 enum dc_status status = DC_ERROR_UNEXPECTED;
1163 int i;
1164
1165 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
1166 struct pipe_ctx *pipe_ctx_old =
1167 &dc->current_context->res_ctx.pipe_ctx[i];
1168 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1169
1170 if (pipe_ctx->stream == NULL)
1171 continue;
1172
1173 if (pipe_ctx->stream == pipe_ctx_old->stream)
1174 continue;
1175
1176 status = apply_single_controller_ctx_to_hw(
1177 pipe_ctx,
1178 context,
1179 dc);
1180
1181 if (status != DC_OK)
1182 return status;
1183 }
1184
1185 return DC_OK;
1186}
1187
1188static void reset_hw_ctx_wrap(
1189 struct core_dc *dc,
1190 struct validate_context *context)
1191{
1192 int i;
1193
1194 /* Reset old context */
1195 /* look up the targets that have been removed since last commit */
1196 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
1197 struct pipe_ctx *pipe_ctx_old =
1198 &dc->current_context->res_ctx.pipe_ctx[i];
1199 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1200
1201 /* Note: We need to disable the output if the clock sources change,
1202 * since the BIOS performs an optimization and does not apply the change
1203 * when switching the PHY unless it is already disabled.
1204 */
1205
1206 /* Skip underlay pipe since it will be handled in commit surface*/
1207 if (!pipe_ctx_old->stream || pipe_ctx_old->top_pipe)
1208 continue;
1209
1210 if (!pipe_ctx->stream ||
1211 pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
1212 reset_single_pipe_hw_ctx(
1213 dc, pipe_ctx_old, dc->current_context);
1214 }
1215}
1216
1217/*TODO: const validate_context*/
1218enum dc_status dce110_apply_ctx_to_hw(
1219 struct core_dc *dc,
1220 struct validate_context *context)
1221{
1222 struct dc_bios *dcb = dc->ctx->dc_bios;
1223 enum dc_status status;
1224 int i;
1225 bool programmed_audio_dto = false;
1226 enum clocks_state clocks_state = CLOCKS_STATE_INVALID;
1227
1228 /* Reset old context */
1229 /* look up the targets that have been removed since last commit */
1230 dc->hwss.reset_hw_ctx_wrap(dc, context);
1231
1232 /* Skip applying if no targets */
1233 if (context->target_count <= 0)
1234 return DC_OK;
1235
1236 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1237 apply_ctx_to_hw_fpga(dc, context);
1238 return DC_OK;
1239 }
1240
1241 /* Apply new context */
1242 dcb->funcs->set_scratch_critical_state(dcb, true);
1243
1244 /* below is for real asic only */
1245 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
1246 struct pipe_ctx *pipe_ctx_old =
1247 &dc->current_context->res_ctx.pipe_ctx[i];
1248 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1249
1250 if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
1251 continue;
1252
1253 if (pipe_ctx->stream == pipe_ctx_old->stream) {
1254 if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
1255 dce_crtc_switch_to_clk_src(dc->hwseq,
1256 pipe_ctx->clock_source, i);
1257 continue;
1258 }
1259
1260 dc->hwss.enable_display_power_gating(
1261 dc, i, dc->ctx->dc_bios,
1262 PIPE_GATING_CONTROL_DISABLE);
1263 }
1264
1265 set_safe_displaymarks(&context->res_ctx);
1266 /*TODO: when pplib works*/
1267 apply_min_clocks(dc, context, &clocks_state, true);
1268
1269 if (context->bw_results.dispclk_khz
1270 > dc->current_context->bw_results.dispclk_khz)
1271 dc->hwss.set_display_clock(context);
1272
1273 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
1274 struct pipe_ctx *pipe_ctx_old =
1275 &dc->current_context->res_ctx.pipe_ctx[i];
1276 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1277
1278 if (pipe_ctx->stream == NULL)
1279 continue;
1280
1281 if (pipe_ctx->stream == pipe_ctx_old->stream)
1282 continue;
1283
1284 if (pipe_ctx->top_pipe)
1285 continue;
1286
1287 if (context->res_ctx.pipe_ctx[i].audio != NULL) {
1288 /* Setup audio rate clock source */
1289 /* Issue:
1290 * Audio lag happened on a DP monitor when unplugging an HDMI monitor
1291 *
1292 * Cause:
1293 * In case DP and HDMI are connected, or HDMI only, DCCG_AUDIO_DTO_SEL
1294 * is set to either dto0 or dto1 and audio should work fine.
1295 * In case only DP is connected, DCCG_AUDIO_DTO_SEL should be dto1;
1296 * setting it to dto0 will cause audio lag.
1297 *
1298 * Solution:
1299 * The audio wall DTO setup is not optimized. During mode set, iterate
1300 * over pipe_ctx, find the first available pipe with audio, and set up the
1301 * audio wall DTO per topology instead of per pipe.
1302 */
1303 struct audio_output audio_output;
1304 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1305
1306 build_audio_output(pipe_ctx, &audio_output);
1307
1308 if (dc_is_dp_signal(pipe_ctx->stream->signal))
1309 pipe_ctx->stream_enc->funcs->dp_audio_setup(
1310 pipe_ctx->stream_enc,
1311 pipe_ctx->audio->inst,
1312 &pipe_ctx->stream->public.audio_info);
1313 else
1314 pipe_ctx->stream_enc->funcs->hdmi_audio_setup(
1315 pipe_ctx->stream_enc,
1316 pipe_ctx->audio->inst,
1317 &pipe_ctx->stream->public.audio_info,
1318 &audio_output.crtc_info);
1319
1320 pipe_ctx->audio->funcs->az_configure(
1321 pipe_ctx->audio,
1322 pipe_ctx->stream->signal,
1323 &audio_output.crtc_info,
1324 &pipe_ctx->stream->public.audio_info);
1325
1326 if (!programmed_audio_dto) {
1327 pipe_ctx->audio->funcs->wall_dto_setup(
1328 pipe_ctx->audio,
1329 pipe_ctx->stream->signal,
1330 &audio_output.crtc_info,
1331 &audio_output.pll_info);
1332 programmed_audio_dto = true;
1333 }
1334 }
1335
1336 status = apply_single_controller_ctx_to_hw(
1337 pipe_ctx,
1338 context,
1339 dc);
1340
1341 if (DC_OK != status)
1342 return status;
1343 }
1344
1345 dc->hwss.set_displaymarks(dc, context);
1346
1347 /* to save power */
1348 apply_min_clocks(dc, context, &clocks_state, false);
1349
1350 dcb->funcs->set_scratch_critical_state(dcb, false);
1351
1352 switch_dp_clock_sources(dc, &context->res_ctx);
1353
1354 return DC_OK;
1355}
1356
1357/*******************************************************************************
1358 * Front End programming
1359 ******************************************************************************/
1360static void set_default_colors(struct pipe_ctx *pipe_ctx)
1361{
1362 struct default_adjustment default_adjust = { 0 };
1363
1364 default_adjust.force_hw_default = false;
1365 if (pipe_ctx->surface == NULL)
1366 default_adjust.in_color_space = COLOR_SPACE_SRGB;
1367 else
1368 default_adjust.in_color_space =
1369 pipe_ctx->surface->public.color_space;
1370 if (pipe_ctx->stream == NULL)
1371 default_adjust.out_color_space = COLOR_SPACE_SRGB;
1372 else
1373 default_adjust.out_color_space =
1374 pipe_ctx->stream->public.output_color_space;
1375 default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
1376 default_adjust.surface_pixel_format = pipe_ctx->scl_data.format;
1377
1378 /* display color depth */
1379 default_adjust.color_depth =
1380 pipe_ctx->stream->public.timing.display_color_depth;
1381
1382 /* Lb color depth */
1383 default_adjust.lb_color_depth = pipe_ctx->scl_data.lb_params.depth;
1384
1385 pipe_ctx->opp->funcs->opp_set_csc_default(
1386 pipe_ctx->opp, &default_adjust);
1387}
1388
1389static void program_blender(const struct core_dc *dc,
1390 struct pipe_ctx *pipe_ctx)
1391{
1392 enum blnd_mode blender_mode = BLND_MODE_CURRENT_PIPE;
1393
1394 if (pipe_ctx->bottom_pipe) {
1395 if (pipe_ctx->bottom_pipe->surface->public.visible) {
1396 if (pipe_ctx->surface->public.visible)
1397 blender_mode = BLND_MODE_BLENDING;
1398 else
1399 blender_mode = BLND_MODE_OTHER_PIPE;
1400 }
1401 }
1402 dce_set_blender_mode(dc->hwseq, pipe_ctx->pipe_idx, blender_mode);
1403}
1404
1405/**
1406 * TODO REMOVE, USE UPDATE INSTEAD
1407 */
1408static void set_plane_config(
1409 const struct core_dc *dc,
1410 struct pipe_ctx *pipe_ctx,
1411 struct resource_context *res_ctx)
1412{
1413 struct mem_input *mi = pipe_ctx->mi;
1414 struct core_surface *surface = pipe_ctx->surface;
1415 struct xfm_grph_csc_adjustment adjust;
1416 struct out_csc_color_matrix tbl_entry;
1417 unsigned int i;
1418
1419 memset(&adjust, 0, sizeof(adjust));
1420 memset(&tbl_entry, 0, sizeof(tbl_entry));
1421 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1422
1423 dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
1424
1425 set_default_colors(pipe_ctx);
1426 if (pipe_ctx->stream->public.csc_color_matrix.enable_adjustment
1427 == true) {
1428 tbl_entry.color_space =
1429 pipe_ctx->stream->public.output_color_space;
1430
1431 for (i = 0; i < 12; i++)
1432 tbl_entry.regval[i] =
1433 pipe_ctx->stream->public.csc_color_matrix.matrix[i];
1434
1435 pipe_ctx->opp->funcs->opp_set_csc_adjustment
1436 (pipe_ctx->opp, &tbl_entry);
1437 }
1438
1439 if (pipe_ctx->stream->public.gamut_remap_matrix.enable_remap == true) {
1440 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1441 adjust.temperature_matrix[0] =
1442 pipe_ctx->stream->
1443 public.gamut_remap_matrix.matrix[0];
1444 adjust.temperature_matrix[1] =
1445 pipe_ctx->stream->
1446 public.gamut_remap_matrix.matrix[1];
1447 adjust.temperature_matrix[2] =
1448 pipe_ctx->stream->
1449 public.gamut_remap_matrix.matrix[2];
1450 adjust.temperature_matrix[3] =
1451 pipe_ctx->stream->
1452 public.gamut_remap_matrix.matrix[4];
1453 adjust.temperature_matrix[4] =
1454 pipe_ctx->stream->
1455 public.gamut_remap_matrix.matrix[5];
1456 adjust.temperature_matrix[5] =
1457 pipe_ctx->stream->
1458 public.gamut_remap_matrix.matrix[6];
1459 adjust.temperature_matrix[6] =
1460 pipe_ctx->stream->
1461 public.gamut_remap_matrix.matrix[8];
1462 adjust.temperature_matrix[7] =
1463 pipe_ctx->stream->
1464 public.gamut_remap_matrix.matrix[9];
1465 adjust.temperature_matrix[8] =
1466 pipe_ctx->stream->
1467 public.gamut_remap_matrix.matrix[10];
1468 }
1469
1470 pipe_ctx->xfm->funcs->transform_set_gamut_remap(pipe_ctx->xfm, &adjust);
1471
1472 pipe_ctx->scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
1473 program_scaler(dc, pipe_ctx);
1474
1475 program_blender(dc, pipe_ctx);
1476
1477 mi->funcs->mem_input_program_surface_config(
1478 mi,
1479 surface->public.format,
1480 &surface->public.tiling_info,
1481 &surface->public.plane_size,
1482 surface->public.rotation,
1483 NULL,
1484 false);
1485
1486 if (dc->public.config.gpu_vm_support)
1487 mi->funcs->mem_input_program_pte_vm(
1488 pipe_ctx->mi,
1489 surface->public.format,
1490 &surface->public.tiling_info,
1491 surface->public.rotation);
1492}
1493
1494static void update_plane_addr(const struct core_dc *dc,
1495 struct pipe_ctx *pipe_ctx)
1496{
1497 struct core_surface *surface = pipe_ctx->surface;
1498
1499 if (surface == NULL)
1500 return;
1501
1502 pipe_ctx->mi->funcs->mem_input_program_surface_flip_and_addr(
1503 pipe_ctx->mi,
1504 &surface->public.address,
1505 surface->public.flip_immediate);
1506
1507 surface->status.requested_address = surface->public.address;
1508
1509 if (surface->public.visible)
1510 pipe_ctx->tg->funcs->set_blank(pipe_ctx->tg, false);
1511}
1512
1513void dce110_update_pending_status(struct pipe_ctx *pipe_ctx)
1514{
1515 struct core_surface *surface = pipe_ctx->surface;
1516
1517 if (surface == NULL)
1518 return;
1519
1520 surface->status.is_flip_pending =
1521 pipe_ctx->mi->funcs->mem_input_is_flip_pending(
1522 pipe_ctx->mi);
1523
1524 if (surface->status.is_flip_pending && !surface->public.visible)
1525 pipe_ctx->mi->current_address = pipe_ctx->mi->request_address;
1526
1527 surface->status.current_address = pipe_ctx->mi->current_address;
1528}
1529
1530void dce110_power_down(struct core_dc *dc)
1531{
1532 power_down_all_hw_blocks(dc);
1533 disable_vga_and_power_gate_all_controllers(dc);
1534}
1535
1536static bool wait_for_reset_trigger_to_occur(
1537 struct dc_context *dc_ctx,
1538 struct timing_generator *tg)
1539{
1540 bool rc = false;
1541
1542	/* To avoid an endless loop we wait at most
1543	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1544 const uint32_t frames_to_wait_on_triggered_reset = 10;
1545 uint32_t i;
1546
1547 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1548
1549 if (!tg->funcs->is_counter_moving(tg)) {
1550 DC_ERROR("TG counter is not moving!\n");
1551 break;
1552 }
1553
1554 if (tg->funcs->did_triggered_reset_occur(tg)) {
1555 rc = true;
1556 /* usually occurs at i=1 */
1557 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1558 i);
1559 break;
1560 }
1561
1562 /* Wait for one frame. */
1563 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1564 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1565 }
1566
1567 if (false == rc)
1568 DC_ERROR("GSL: Timeout on reset trigger!\n");
1569
1570 return rc;
1571}
1572
1573/* Enable timing synchronization for a group of Timing Generators. */
1574static void dce110_enable_timing_synchronization(
1575 struct core_dc *dc,
1576 int group_index,
1577 int group_size,
1578 struct pipe_ctx *grouped_pipes[])
1579{
1580 struct dc_context *dc_ctx = dc->ctx;
1581 struct dcp_gsl_params gsl_params = { 0 };
1582 int i;
1583
1584 DC_SYNC_INFO("GSL: Setting-up...\n");
1585
1586 /* Designate a single TG in the group as a master.
1587 * Since HW doesn't care which one, we always assign
1588 * the 1st one in the group. */
1589 gsl_params.gsl_group = 0;
1590 gsl_params.gsl_master = grouped_pipes[0]->tg->inst;
1591
1592 for (i = 0; i < group_size; i++)
1593 grouped_pipes[i]->tg->funcs->setup_global_swap_lock(
1594 grouped_pipes[i]->tg, &gsl_params);
1595
1596 /* Reset slave controllers on master VSync */
1597 DC_SYNC_INFO("GSL: enabling trigger-reset\n");
1598
1599 for (i = 1 /* skip the master */; i < group_size; i++)
1600 grouped_pipes[i]->tg->funcs->enable_reset_trigger(
1601 grouped_pipes[i]->tg, gsl_params.gsl_group);
1602
1603
1604
1605 for (i = 1 /* skip the master */; i < group_size; i++) {
1606 DC_SYNC_INFO("GSL: waiting for reset to occur.\n");
1607 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->tg);
1608 /* Regardless of success of the wait above, remove the reset or
1609 * the driver will start timing out on Display requests. */
1610 DC_SYNC_INFO("GSL: disabling trigger-reset.\n");
1611 grouped_pipes[i]->tg->funcs->disable_reset_trigger(grouped_pipes[i]->tg);
1612 }
1613
1614
1615	/* GSL VBlank synchronization is a one-time sync mechanism; the assumption
1616	 * is that the synchronized displays will not drift out of sync over time. */
1617 DC_SYNC_INFO("GSL: Restoring register states.\n");
1618 for (i = 0; i < group_size; i++)
1619 grouped_pipes[i]->tg->funcs->tear_down_global_swap_lock(grouped_pipes[i]->tg);
1620
1621 DC_SYNC_INFO("GSL: Set-up complete.\n");
1622}
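/*
 * Illustrative caller (hypothetical, not part of this change): the display
 * manager gathers the pipes that must be genlocked and invokes the hwss hook,
 * e.g. for two pipes sharing one timing:
 *
 *	struct pipe_ctx *group[] = {
 *		&context->res_ctx.pipe_ctx[0],
 *		&context->res_ctx.pipe_ctx[1],
 *	};
 *
 *	dc->hwss.enable_timing_synchronization(dc, 0, ARRAY_SIZE(group), group);
 *
 * The first pipe's TG is used as the GSL master; the remaining TGs are reset
 * on its VSync and the swap-lock setup is torn down once the sync is done.
 */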
1623
1624static void init_hw(struct core_dc *dc)
1625{
1626 int i;
1627 struct dc_bios *bp;
1628 struct transform *xfm;
1629
1630 bp = dc->ctx->dc_bios;
1631 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1632 xfm = dc->res_pool->transforms[i];
1633 xfm->funcs->transform_reset(xfm);
1634
1635 dc->hwss.enable_display_power_gating(
1636 dc, i, bp,
1637 PIPE_GATING_CONTROL_INIT);
1638 dc->hwss.enable_display_power_gating(
1639 dc, i, bp,
1640 PIPE_GATING_CONTROL_DISABLE);
1641 dc->hwss.enable_display_pipe_clock_gating(
1642 dc->ctx,
1643 true);
1644 }
1645
1646	dce_clock_gating_power_up(dc->hwseq, false);
1647 /***************************************/
1648
1649 for (i = 0; i < dc->link_count; i++) {
1650 /****************************************/
1651		/* Power up AND update implementation according to the
1652		 * required signal (which may be different from the
1653		 * default signal on the connector). */
1654 struct core_link *link = dc->links[i];
1655 link->link_enc->funcs->hw_init(link->link_enc);
1656 }
1657
1658 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1659 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1660
1661 tg->funcs->disable_vga(tg);
1662
1663 /* Blank controller using driver code instead of
1664 * command table. */
1665 tg->funcs->set_blank(tg, true);
1666 }
1667
1668 for (i = 0; i < dc->res_pool->audio_count; i++) {
1669 struct audio *audio = dc->res_pool->audios[i];
1670 audio->funcs->hw_init(audio);
1671 }
1672}
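/*
 * init_hw ordering: per-pipe transform reset and power-gating init/disable
 * with pipe clock gating, ASIC-level clock gating power-up, link encoder
 * hw_init for the signal actually in use, VGA disable and CRTC blank, and
 * finally hw_init of the audio endpoints.
 */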
1673
1674/* TODO: move this into apply_ctx_to_hw somehow? */
1675static void dce110_power_on_pipe_if_needed(
1676 struct core_dc *dc,
1677 struct pipe_ctx *pipe_ctx,
1678 struct validate_context *context)
1679{
1680 struct pipe_ctx *old_pipe_ctx = &dc->current_context->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
1681 struct dc_bios *dcb = dc->ctx->dc_bios;
1682 struct tg_color black_color = {0};
1683
1684 if (!old_pipe_ctx->stream && pipe_ctx->stream) {
1685 dc->hwss.enable_display_power_gating(
1686 dc,
1687 pipe_ctx->pipe_idx,
1688 dcb, PIPE_GATING_CONTROL_DISABLE);
1689
1690 /*
1691 * This is for powering on underlay, so crtc does not
1692 * need to be enabled
1693 */
1694
1695 pipe_ctx->tg->funcs->program_timing(pipe_ctx->tg,
1696 &pipe_ctx->stream->public.timing,
1697 false);
1698
1699 pipe_ctx->tg->funcs->enable_advanced_request(
1700 pipe_ctx->tg,
1701 true,
1702 &pipe_ctx->stream->public.timing);
1703
1704 pipe_ctx->mi->funcs->allocate_mem_input(pipe_ctx->mi,
1705 pipe_ctx->stream->public.timing.h_total,
1706 pipe_ctx->stream->public.timing.v_total,
1707 pipe_ctx->stream->public.timing.pix_clk_khz,
1708 context->target_count);
1709
1710 /* TODO unhardcode*/
1711 color_space_to_black_color(dc,
1712 COLOR_SPACE_YCBCR601, &black_color);
1713 pipe_ctx->tg->funcs->set_blank_color(
1714 pipe_ctx->tg,
1715 &black_color);
1716 }
1717}
1718
1719static void dce110_increase_watermarks_for_pipe(
1720 struct core_dc *dc,
1721 struct pipe_ctx *pipe_ctx,
1722 struct validate_context *context)
1723{
1724 if (did_watermarks_increase(pipe_ctx, context, dc->current_context))
1725 program_wm_for_pipe(dc, pipe_ctx, context);
1726}
1727
1728static void dce110_set_bandwidth(struct core_dc *dc)
1729{
1730 int i;
1731
1732 for (i = 0; i < dc->current_context->res_ctx.pool->pipe_count; i++) {
1733 struct pipe_ctx *pipe_ctx = &dc->current_context->res_ctx.pipe_ctx[i];
1734
1735 if (!pipe_ctx->stream)
1736 continue;
1737
1738 program_wm_for_pipe(dc, pipe_ctx, dc->current_context);
1739 }
1740
1741 dc->hwss.set_display_clock(dc->current_context);
1742}
1743
1744static void dce110_program_front_end_for_pipe(
1745 struct core_dc *dc, struct pipe_ctx *pipe_ctx)
1746{
1747 struct mem_input *mi = pipe_ctx->mi;
1748 struct pipe_ctx *old_pipe = NULL;
1749 struct core_surface *surface = pipe_ctx->surface;
1750 struct xfm_grph_csc_adjustment adjust;
1751 struct out_csc_color_matrix tbl_entry;
1752 unsigned int i;
1753
1754 memset(&tbl_entry, 0, sizeof(tbl_entry));
1755
1756 if (dc->current_context)
1757 old_pipe = &dc->current_context->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
1758
1759 memset(&adjust, 0, sizeof(adjust));
1760 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1761
1762 dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
1763
1764 set_default_colors(pipe_ctx);
1765 if (pipe_ctx->stream->public.csc_color_matrix.enable_adjustment
1766 == true) {
1767 tbl_entry.color_space =
1768 pipe_ctx->stream->public.output_color_space;
1769
1770 for (i = 0; i < 12; i++)
1771 tbl_entry.regval[i] =
1772 pipe_ctx->stream->public.csc_color_matrix.matrix[i];
1773
1774 pipe_ctx->opp->funcs->opp_set_csc_adjustment
1775 (pipe_ctx->opp, &tbl_entry);
1776 }
1777
1778 if (pipe_ctx->stream->public.gamut_remap_matrix.enable_remap == true) {
1779 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1780 adjust.temperature_matrix[0] =
1781 pipe_ctx->stream->
1782 public.gamut_remap_matrix.matrix[0];
1783 adjust.temperature_matrix[1] =
1784 pipe_ctx->stream->
1785 public.gamut_remap_matrix.matrix[1];
1786 adjust.temperature_matrix[2] =
1787 pipe_ctx->stream->
1788 public.gamut_remap_matrix.matrix[2];
1789 adjust.temperature_matrix[3] =
1790 pipe_ctx->stream->
1791 public.gamut_remap_matrix.matrix[4];
1792 adjust.temperature_matrix[4] =
1793 pipe_ctx->stream->
1794 public.gamut_remap_matrix.matrix[5];
1795 adjust.temperature_matrix[5] =
1796 pipe_ctx->stream->
1797 public.gamut_remap_matrix.matrix[6];
1798 adjust.temperature_matrix[6] =
1799 pipe_ctx->stream->
1800 public.gamut_remap_matrix.matrix[8];
1801 adjust.temperature_matrix[7] =
1802 pipe_ctx->stream->
1803 public.gamut_remap_matrix.matrix[9];
1804 adjust.temperature_matrix[8] =
1805 pipe_ctx->stream->
1806 public.gamut_remap_matrix.matrix[10];
1807 }
1808
1809 pipe_ctx->xfm->funcs->transform_set_gamut_remap(pipe_ctx->xfm, &adjust);
1810
1811 pipe_ctx->scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
1812 if (old_pipe && memcmp(&old_pipe->scl_data,
1813 &pipe_ctx->scl_data,
1814 sizeof(struct scaler_data)) != 0)
1815 program_scaler(dc, pipe_ctx);
1816
1817 mi->funcs->mem_input_program_surface_config(
1818 mi,
1819 surface->public.format,
1820 &surface->public.tiling_info,
1821 &surface->public.plane_size,
1822 surface->public.rotation,
1823 false,
1824 false);
1825
1826 if (dc->public.config.gpu_vm_support)
1827 mi->funcs->mem_input_program_pte_vm(
1828 pipe_ctx->mi,
1829 surface->public.format,
1830 &surface->public.tiling_info,
1831 surface->public.rotation);
1832
1833 dm_logger_write(dc->ctx->logger, LOG_SURFACE,
1834 "Pipe:%d 0x%x: addr hi:0x%x, "
1835 "addr low:0x%x, "
1836 "src: %d, %d, %d,"
1837 " %d; dst: %d, %d, %d, %d;"
1838 "clip: %d, %d, %d, %d\n",
1839 pipe_ctx->pipe_idx,
1840 pipe_ctx->surface,
1841 pipe_ctx->surface->public.address.grph.addr.high_part,
1842 pipe_ctx->surface->public.address.grph.addr.low_part,
1843 pipe_ctx->surface->public.src_rect.x,
1844 pipe_ctx->surface->public.src_rect.y,
1845 pipe_ctx->surface->public.src_rect.width,
1846 pipe_ctx->surface->public.src_rect.height,
1847 pipe_ctx->surface->public.dst_rect.x,
1848 pipe_ctx->surface->public.dst_rect.y,
1849 pipe_ctx->surface->public.dst_rect.width,
1850 pipe_ctx->surface->public.dst_rect.height,
1851 pipe_ctx->surface->public.clip_rect.x,
1852 pipe_ctx->surface->public.clip_rect.y,
1853 pipe_ctx->surface->public.clip_rect.width,
1854 pipe_ctx->surface->public.clip_rect.height);
1855
1856 dm_logger_write(dc->ctx->logger, LOG_SURFACE,
1857 "Pipe %d: width, height, x, y\n"
1858 "viewport:%d, %d, %d, %d\n"
1859 "recout: %d, %d, %d, %d\n",
1860 pipe_ctx->pipe_idx,
1861 pipe_ctx->scl_data.viewport.width,
1862 pipe_ctx->scl_data.viewport.height,
1863 pipe_ctx->scl_data.viewport.x,
1864 pipe_ctx->scl_data.viewport.y,
1865 pipe_ctx->scl_data.recout.width,
1866 pipe_ctx->scl_data.recout.height,
1867 pipe_ctx->scl_data.recout.x,
1868 pipe_ctx->scl_data.recout.y);
1869}
1870
1871
1872
1873static void dce110_prepare_pipe_for_surface_commit(
1874 struct core_dc *dc,
1875 struct pipe_ctx *pipe_ctx,
1876 struct validate_context *context) {
1877 struct core_gamma *gamma = NULL;
1878
1879 dc->hwss.increase_watermarks_for_pipe(dc, pipe_ctx, context);
1880
1881 if (pipe_ctx->surface->public.gamma_correction)
1882 gamma = DC_GAMMA_TO_CORE(
1883 pipe_ctx->surface->public.gamma_correction);
1884
1885 dc->hwss.set_gamma_correction(
1886 pipe_ctx->ipp,
1887 pipe_ctx->opp,
1888 gamma, pipe_ctx->surface);
1889}
1890
1891static void dce110_prepare_pipe_for_context(
1892 struct core_dc *dc,
1893 struct pipe_ctx *pipe_ctx,
1894 struct validate_context *context)
1895{
1896 dce110_power_on_pipe_if_needed(dc, pipe_ctx, context);
1897 dce110_prepare_pipe_for_surface_commit(dc, pipe_ctx, context);
1898}
1899
1900static void dce110_apply_ctx_for_surface(
1901 struct core_dc *dc,
1902 struct core_surface *surface,
1903 struct validate_context *context)
1904{
1905 int i;
1906
1907	/* TODO: remove when removing the surface reset workaround */
1908 if (!surface)
1909 return;
1910
1911 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
1912 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1913
1914 if (pipe_ctx->surface != surface)
1915 continue;
1916
1917 dce110_program_front_end_for_pipe(dc, pipe_ctx);
1918 program_blender(dc, pipe_ctx);
1919
1920 }
1921
1922}
1923
1924static void dce110_power_down_fe(struct core_dc *dc, struct pipe_ctx *pipe)
1925{
1926 int i;
1927
1928 for (i = 0; i < dc->res_pool->pipe_count; i++)
1929 if (&dc->current_context->res_ctx.pipe_ctx[i] == pipe)
1930 break;
1931
1932 if (i == dc->res_pool->pipe_count)
1933 return;
1934
1935 dc->hwss.enable_display_power_gating(
1936 dc, i, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);
1937 if (pipe->xfm)
1938 pipe->xfm->funcs->transform_reset(pipe->xfm);
1939 memset(&pipe->scl_data, 0, sizeof(struct scaler_data));
1940}
1941
1942static const struct hw_sequencer_funcs dce110_funcs = {
1943 .init_hw = init_hw,
1944 .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
1945 .prepare_pipe_for_context = dce110_prepare_pipe_for_context,
1946 .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
1947 .set_plane_config = set_plane_config,
1948 .update_plane_addr = update_plane_addr,
1949 .update_pending_status = dce110_update_pending_status,
1950 .set_gamma_correction = set_gamma_ramp,
1951 .power_down = dce110_power_down,
1952 .enable_accelerated_mode = dce110_enable_accelerated_mode,
1953 .enable_timing_synchronization = dce110_enable_timing_synchronization,
1954 .update_info_frame = dce110_update_info_frame,
1955 .enable_stream = dce110_enable_stream,
1956 .disable_stream = dce110_disable_stream,
1957 .unblank_stream = dce110_unblank_stream,
1958 .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
1959 .enable_display_power_gating = dce110_enable_display_power_gating,
1960 .power_down_front_end = dce110_power_down_fe,
1961 .pipe_control_lock = dce_pipe_control_lock,
1962 .set_display_clock = dce110_set_display_clock,
1963 .set_displaymarks = dce110_set_displaymarks,
1964 .increase_watermarks_for_pipe = dce110_increase_watermarks_for_pipe,
1965 .set_bandwidth = dce110_set_bandwidth,
1966 .set_drr = set_drr,
1967 .set_static_screen_control = set_static_screen_control,
1968 .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
1969 .prog_pixclk_crtc_otg = prog_pixclk_crtc_otg,
1970};
1971
1972bool dce110_hw_sequencer_construct(struct core_dc *dc)
1973{
1974 dc->hwss = dce110_funcs;
1975
1976 return true;
1977}
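/*
 * Minimal usage sketch (illustrative only, assumed caller): a DCE11-family
 * resource constructor is expected to install the function table before any
 * hwss hook is used, e.g.
 *
 *	if (!dce110_hw_sequencer_construct(dc))
 *		return false;
 *
 *	dc->hwss.init_hw(dc);
 */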
1978
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
new file mode 100644
index 000000000000..a6b4d0d2429f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -0,0 +1,62 @@
1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_HWSS_DCE110_H__
27#define __DC_HWSS_DCE110_H__
28
29#include "core_types.h"
30
31#define GAMMA_HW_POINTS_NUM 256
32struct core_dc;
33
34bool dce110_hw_sequencer_construct(struct core_dc *dc);
35
36enum dc_status dce110_apply_ctx_to_hw(
37 struct core_dc *dc,
38 struct validate_context *context);
39
40void dce110_set_display_clock(struct validate_context *context);
41
42void dce110_set_displaymarks(
43 const struct core_dc *dc,
44 struct validate_context *context);
45
46void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
47
48void dce110_disable_stream(struct pipe_ctx *pipe_ctx);
49
50void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
51 struct dc_link_settings *link_settings);
52
53void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
54
55void dce110_enable_accelerated_mode(struct core_dc *dc);
56
57void dce110_power_down(struct core_dc *dc);
58
59void dce110_update_pending_status(struct pipe_ctx *pipe_ctx);
60
61#endif /* __DC_HWSS_DCE110_H__ */
62
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.c
new file mode 100644
index 000000000000..dd69f6060bb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/logger_interface.h"
28
29#include "dce/dce_11_0_d.h"
30#include "dce/dce_11_0_sh_mask.h"
31
32#include "dce110_ipp.h"
33
34static const struct ipp_funcs funcs = {
35 .ipp_cursor_set_attributes = dce110_ipp_cursor_set_attributes,
36 .ipp_cursor_set_position = dce110_ipp_cursor_set_position,
37 .ipp_program_prescale = dce110_ipp_program_prescale,
38 .ipp_set_degamma = dce110_ipp_set_degamma,
39};
40
41bool dce110_ipp_construct(
42	struct dce110_ipp *ipp,
43 struct dc_context *ctx,
44 uint32_t inst,
45 const struct dce110_ipp_reg_offsets *offset)
46{
47 ipp->base.ctx = ctx;
48
49 ipp->base.inst = inst;
50
51 ipp->offsets = *offset;
52
53 ipp->base.funcs = &funcs;
54
55 return true;
56}
57
58void dce110_ipp_destroy(struct input_pixel_processor **ipp)
59{
60 dm_free(TO_DCE110_IPP(*ipp));
61 *ipp = NULL;
62}
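/*
 * Construction sketch (assumed caller, for illustration only): a resource
 * pool allocates the wrapper and hands in its per-pipe DCP register offsets;
 * dm_alloc() and reg_offsets[] are assumptions about that surrounding code.
 *
 *	struct dce110_ipp *ipp110 = dm_alloc(sizeof(*ipp110));
 *
 *	if (ipp110 && dce110_ipp_construct(ipp110, ctx, i, &reg_offsets[i]))
 *		pool->ipps[i] = &ipp110->base;
 *
 * Teardown goes through dce110_ipp_destroy(&pool->ipps[i]).
 */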
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.h
new file mode 100644
index 000000000000..60eebdecfa10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_IPP_DCE110_H__
27#define __DC_IPP_DCE110_H__
28
29#include "ipp.h"
30
31struct gamma_parameters;
32struct dev_c_lut;
33
34#define TO_DCE110_IPP(input_pixel_processor)\
35 container_of(input_pixel_processor, struct dce110_ipp, base)
36
37struct dce110_ipp_reg_offsets {
38 uint32_t dcp_offset;
39};
40
41struct dce110_ipp {
42 struct input_pixel_processor base;
43 struct dce110_ipp_reg_offsets offsets;
44};
45
46bool dce110_ipp_construct(
47	struct dce110_ipp *ipp,
48	struct dc_context *ctx,
49	uint32_t inst,
50 const struct dce110_ipp_reg_offsets *offset);
51
52void dce110_ipp_destroy(struct input_pixel_processor **ipp);
53
54/* CURSOR RELATED */
55void dce110_ipp_cursor_set_position(
56 struct input_pixel_processor *ipp,
57 const struct dc_cursor_position *position);
58
59bool dce110_ipp_cursor_set_attributes(
60 struct input_pixel_processor *ipp,
61 const struct dc_cursor_attributes *attributes);
62
63/* DEGAMMA RELATED */
64bool dce110_ipp_set_degamma(
65 struct input_pixel_processor *ipp,
66 enum ipp_degamma_mode mode);
67
68void dce110_ipp_program_prescale(
69 struct input_pixel_processor *ipp,
70 struct ipp_prescale_params *params);
71/*
72 * Helper functions to be reused in other ASICs
73 */
74void dce110_helper_select_lut(struct dce110_ipp *ipp110);
75
76#endif /*__DC_IPP_DCE110_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_cursor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_cursor.c
new file mode 100644
index 000000000000..95f6ca3ba5df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_cursor.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/logger_interface.h"
28
29#include "dce/dce_11_0_d.h"
30#include "dce/dce_11_0_sh_mask.h"
31
32#include "dce110_ipp.h"
33
34#define CURSOR_COLOR_BLACK 0x00000000
35#define CURSOR_COLOR_WHITE 0xFFFFFFFF
36
37#define DCP_REG(reg)\
38 (reg + ipp110->offsets.dcp_offset)
39
40static void enable(
41 struct dce110_ipp *ipp110,
42 bool enable);
43
44static void lock(
45 struct dce110_ipp *ipp110,
46 bool enable);
47
48static void program_position(
49 struct dce110_ipp *ipp110,
50 uint32_t x,
51 uint32_t y);
52
53static bool program_control(
54 struct dce110_ipp *ipp110,
55 enum dc_cursor_color_format color_format,
56 bool enable_magnification,
57 bool inverse_transparent_clamping);
58
59static void program_hotspot(
60 struct dce110_ipp *ipp110,
61 uint32_t x,
62 uint32_t y);
63
64static void program_size(
65 struct dce110_ipp *ipp110,
66 uint32_t width,
67 uint32_t height);
68
69static void program_address(
70 struct dce110_ipp *ipp110,
71 PHYSICAL_ADDRESS_LOC address);
72
73void dce110_ipp_cursor_set_position(
74 struct input_pixel_processor *ipp,
75 const struct dc_cursor_position *position)
76{
77 struct dce110_ipp *ipp110 = TO_DCE110_IPP(ipp);
78
79 /* lock cursor registers */
80 lock(ipp110, true);
81
82 /* Flag passed in structure differentiates cursor enable/disable. */
83 /* Update if it differs from cached state. */
84 enable(ipp110, position->enable);
85
86 program_position(ipp110, position->x, position->y);
87
88 if (position->hot_spot_enable)
89 program_hotspot(
90 ipp110,
91 position->x_hotspot,
92 position->y_hotspot);
93
94 /* unlock cursor registers */
95 lock(ipp110, false);
96}
97
98bool dce110_ipp_cursor_set_attributes(
99 struct input_pixel_processor *ipp,
100 const struct dc_cursor_attributes *attributes)
101{
102 struct dce110_ipp *ipp110 = TO_DCE110_IPP(ipp);
103 /* Lock cursor registers */
104 lock(ipp110, true);
105
106 /* Program cursor control */
107 program_control(
108 ipp110,
109 attributes->color_format,
110 attributes->attribute_flags.bits.ENABLE_MAGNIFICATION,
111 attributes->attribute_flags.bits.INVERSE_TRANSPARENT_CLAMPING);
112
113 /* Program hot spot coordinates */
114 program_hotspot(ipp110, attributes->x_hot, attributes->y_hot);
115
116 /*
117 * Program cursor size -- NOTE: HW spec specifies that HW register
118 * stores size as (height - 1, width - 1)
119 */
120 program_size(ipp110, attributes->width-1, attributes->height-1);
121
122 /* Program cursor surface address */
123 program_address(ipp110, attributes->address);
124
125 /* Unlock Cursor registers. */
126 lock(ipp110, false);
127
128 return true;
129}
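/*
 * Typical call order (illustrative, placeholder values): attributes are
 * programmed once per cursor surface, after which only the position needs
 * updating; cursor_surface_addr and the 64x64 size below are stand-ins.
 *
 *	struct dc_cursor_attributes attr = {
 *		.address = cursor_surface_addr,
 *		.width = 64,
 *		.height = 64,
 *		.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
 *	};
 *	struct dc_cursor_position pos = { .enable = true, .x = 100, .y = 100 };
 *
 *	ipp->funcs->ipp_cursor_set_attributes(ipp, &attr);
 *	ipp->funcs->ipp_cursor_set_position(ipp, &pos);
 */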
130
131static void enable(
132 struct dce110_ipp *ipp110, bool enable)
133{
134 uint32_t value = 0;
135 uint32_t addr = DCP_REG(mmCUR_CONTROL);
136
137 value = dm_read_reg(ipp110->base.ctx, addr);
138 set_reg_field_value(value, enable, CUR_CONTROL, CURSOR_EN);
139 dm_write_reg(ipp110->base.ctx, addr, value);
140}
141
142static void lock(
143 struct dce110_ipp *ipp110, bool lock)
144{
145 uint32_t value = 0;
146 uint32_t addr = DCP_REG(mmCUR_UPDATE);
147
148 value = dm_read_reg(ipp110->base.ctx, addr);
149 set_reg_field_value(value, lock, CUR_UPDATE, CURSOR_UPDATE_LOCK);
150 dm_write_reg(ipp110->base.ctx, addr, value);
151}
152
153static void program_position(
154 struct dce110_ipp *ipp110,
155 uint32_t x,
156 uint32_t y)
157{
158 uint32_t value = 0;
159 uint32_t addr = DCP_REG(mmCUR_POSITION);
160
161 value = dm_read_reg(ipp110->base.ctx, addr);
162 set_reg_field_value(value, x, CUR_POSITION, CURSOR_X_POSITION);
163 set_reg_field_value(value, y, CUR_POSITION, CURSOR_Y_POSITION);
164 dm_write_reg(ipp110->base.ctx, addr, value);
165}
166
167static bool program_control(
168 struct dce110_ipp *ipp110,
169 enum dc_cursor_color_format color_format,
170 bool enable_magnification,
171 bool inverse_transparent_clamping)
172{
173 uint32_t value = 0;
174 uint32_t addr = DCP_REG(mmCUR_CONTROL);
175 uint32_t mode = 0;
176
177 switch (color_format) {
178 case CURSOR_MODE_MONO:
179 mode = 0;
180 break;
181 case CURSOR_MODE_COLOR_1BIT_AND:
182 mode = 1;
183 break;
184 case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
185 mode = 2;
186 break;
187 case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
188 mode = 3;
189 break;
190 default:
191 return false;
192 }
193
194 set_reg_field_value(value, mode, CUR_CONTROL, CURSOR_MODE);
195 set_reg_field_value(value, enable_magnification,
196 CUR_CONTROL, CURSOR_2X_MAGNIFY);
197 set_reg_field_value(value, inverse_transparent_clamping,
198 CUR_CONTROL, CUR_INV_TRANS_CLAMP);
199 dm_write_reg(ipp110->base.ctx, addr, value);
200
201 if (color_format == CURSOR_MODE_MONO) {
202 addr = DCP_REG(mmCUR_COLOR1);
203 dm_write_reg(ipp110->base.ctx, addr, CURSOR_COLOR_BLACK);
204 addr = DCP_REG(mmCUR_COLOR2);
205 dm_write_reg(ipp110->base.ctx, addr, CURSOR_COLOR_WHITE);
206 }
207 return true;
208}
209
210static void program_hotspot(
211 struct dce110_ipp *ipp110,
212 uint32_t x,
213 uint32_t y)
214{
215 uint32_t value = 0;
216 uint32_t addr = DCP_REG(mmCUR_HOT_SPOT);
217
218 value = dm_read_reg(ipp110->base.ctx, addr);
219 set_reg_field_value(value, x, CUR_HOT_SPOT, CURSOR_HOT_SPOT_X);
220 set_reg_field_value(value, y, CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y);
221 dm_write_reg(ipp110->base.ctx, addr, value);
222}
223
224static void program_size(
225 struct dce110_ipp *ipp110,
226 uint32_t width,
227 uint32_t height)
228{
229 uint32_t value = 0;
230 uint32_t addr = DCP_REG(mmCUR_SIZE);
231
232 value = dm_read_reg(ipp110->base.ctx, addr);
233 set_reg_field_value(value, width, CUR_SIZE, CURSOR_WIDTH);
234 set_reg_field_value(value, height, CUR_SIZE, CURSOR_HEIGHT);
235 dm_write_reg(ipp110->base.ctx, addr, value);
236}
237
238static void program_address(
239 struct dce110_ipp *ipp110,
240 PHYSICAL_ADDRESS_LOC address)
241{
242 uint32_t addr = DCP_REG(mmCUR_SURFACE_ADDRESS_HIGH);
243	/* SURFACE_ADDRESS_HIGH: higher-order bits (39:32) of the hardware cursor
244	 * surface base address in bytes; it is 4 KB aligned.
245	 * The correct way to program the cursor surface address is to write
246	 * CUR_SURFACE_ADDRESS_HIGH first and then CUR_SURFACE_ADDRESS. */
247
248 dm_write_reg(ipp110->base.ctx, addr, address.high_part);
249
250 addr = DCP_REG(mmCUR_SURFACE_ADDRESS);
251 dm_write_reg(ipp110->base.ctx, addr, address.low_part);
252}
253
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_gamma.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_gamma.c
new file mode 100644
index 000000000000..79a6a6dd72fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_ipp_gamma.c
@@ -0,0 +1,303 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/logger_interface.h"
28#include "include/fixed31_32.h"
29#include "basics/conversion.h"
30
31#include "dce/dce_11_0_d.h"
32#include "dce/dce_11_0_sh_mask.h"
33
34#include "dce110_ipp.h"
35#include "gamma_types.h"
36
37#define DCP_REG(reg)\
38 (reg + ipp110->offsets.dcp_offset)
39
40enum {
41 MAX_INPUT_LUT_ENTRY = 256
42};
43
44/*PROTOTYPE DECLARATIONS*/
45static void set_lut_inc(
46 struct dce110_ipp *ipp110,
47 uint8_t inc,
48 bool is_float,
49 bool is_signed);
50
51bool dce110_ipp_set_degamma(
52 struct input_pixel_processor *ipp,
53 enum ipp_degamma_mode mode)
54{
55 struct dce110_ipp *ipp110 = TO_DCE110_IPP(ipp);
56
57 uint32_t value = 0;
58
59 uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 1 : 0;
60
61 ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS ||
62 mode == IPP_DEGAMMA_MODE_HW_sRGB);
63
64 set_reg_field_value(
65 value,
66 degamma_type,
67 DEGAMMA_CONTROL,
68 GRPH_DEGAMMA_MODE);
69
70 set_reg_field_value(
71 value,
72 degamma_type,
73 DEGAMMA_CONTROL,
74 CURSOR_DEGAMMA_MODE);
75
76 set_reg_field_value(
77 value,
78 degamma_type,
79 DEGAMMA_CONTROL,
80 CURSOR2_DEGAMMA_MODE);
81
82 dm_write_reg(ipp110->base.ctx, DCP_REG(mmDEGAMMA_CONTROL), value);
83
84 return true;
85}
86
87void dce110_ipp_program_prescale(
88 struct input_pixel_processor *ipp,
89 struct ipp_prescale_params *params)
90{
91 struct dce110_ipp *ipp110 = TO_DCE110_IPP(ipp);
92
93 uint32_t prescale_control = 0;
94 uint32_t prescale_value = 0;
95 uint32_t legacy_lut_control = 0;
96
97 prescale_control = dm_read_reg(ipp110->base.ctx,
98 DCP_REG(mmPRESCALE_GRPH_CONTROL));
99
100 if (params->mode != IPP_PRESCALE_MODE_BYPASS) {
101
102 set_reg_field_value(
103 prescale_control,
104 0,
105 PRESCALE_GRPH_CONTROL,
106 GRPH_PRESCALE_BYPASS);
107
108 /*
109 * If prescale is in use, then legacy lut should
110 * be bypassed
111 */
112 legacy_lut_control = dm_read_reg(ipp110->base.ctx,
113 DCP_REG(mmINPUT_GAMMA_CONTROL));
114
115 set_reg_field_value(
116 legacy_lut_control,
117 1,
118 INPUT_GAMMA_CONTROL,
119 GRPH_INPUT_GAMMA_MODE);
120
121 dm_write_reg(ipp110->base.ctx,
122 DCP_REG(mmINPUT_GAMMA_CONTROL),
123 legacy_lut_control);
124 } else {
125 set_reg_field_value(
126 prescale_control,
127 1,
128 PRESCALE_GRPH_CONTROL,
129 GRPH_PRESCALE_BYPASS);
130 }
131
132 set_reg_field_value(
133 prescale_value,
134 params->scale,
135 PRESCALE_VALUES_GRPH_R,
136 GRPH_PRESCALE_SCALE_R);
137
138 set_reg_field_value(
139 prescale_value,
140 params->bias,
141 PRESCALE_VALUES_GRPH_R,
142 GRPH_PRESCALE_BIAS_R);
143
144 dm_write_reg(ipp110->base.ctx,
145 DCP_REG(mmPRESCALE_GRPH_CONTROL),
146 prescale_control);
147
148 dm_write_reg(ipp110->base.ctx,
149 DCP_REG(mmPRESCALE_VALUES_GRPH_R),
150 prescale_value);
151
152 dm_write_reg(ipp110->base.ctx,
153 DCP_REG(mmPRESCALE_VALUES_GRPH_G),
154 prescale_value);
155
156 dm_write_reg(ipp110->base.ctx,
157 DCP_REG(mmPRESCALE_VALUES_GRPH_B),
158 prescale_value);
159}
160
161static void set_lut_inc(
162 struct dce110_ipp *ipp110,
163 uint8_t inc,
164 bool is_float,
165 bool is_signed)
166{
167 const uint32_t addr = DCP_REG(mmDC_LUT_CONTROL);
168
169 uint32_t value = dm_read_reg(ipp110->base.ctx, addr);
170
171 set_reg_field_value(
172 value,
173 inc,
174 DC_LUT_CONTROL,
175 DC_LUT_INC_R);
176
177 set_reg_field_value(
178 value,
179 inc,
180 DC_LUT_CONTROL,
181 DC_LUT_INC_G);
182
183 set_reg_field_value(
184 value,
185 inc,
186 DC_LUT_CONTROL,
187 DC_LUT_INC_B);
188
189 set_reg_field_value(
190 value,
191 is_float,
192 DC_LUT_CONTROL,
193 DC_LUT_DATA_R_FLOAT_POINT_EN);
194
195 set_reg_field_value(
196 value,
197 is_float,
198 DC_LUT_CONTROL,
199 DC_LUT_DATA_G_FLOAT_POINT_EN);
200
201 set_reg_field_value(
202 value,
203 is_float,
204 DC_LUT_CONTROL,
205 DC_LUT_DATA_B_FLOAT_POINT_EN);
206
207 set_reg_field_value(
208 value,
209 is_signed,
210 DC_LUT_CONTROL,
211 DC_LUT_DATA_R_SIGNED_EN);
212
213 set_reg_field_value(
214 value,
215 is_signed,
216 DC_LUT_CONTROL,
217 DC_LUT_DATA_G_SIGNED_EN);
218
219 set_reg_field_value(
220 value,
221 is_signed,
222 DC_LUT_CONTROL,
223 DC_LUT_DATA_B_SIGNED_EN);
224
225 dm_write_reg(ipp110->base.ctx, addr, value);
226}
227
228void dce110_helper_select_lut(struct dce110_ipp *ipp110)
229{
230 uint32_t value = 0;
231
232 set_lut_inc(ipp110, 0, false, false);
233
234 {
235 const uint32_t addr = DCP_REG(mmDC_LUT_WRITE_EN_MASK);
236
237 value = dm_read_reg(ipp110->base.ctx, addr);
238
239 /* enable all */
240 set_reg_field_value(
241 value,
242 0x7,
243 DC_LUT_WRITE_EN_MASK,
244 DC_LUT_WRITE_EN_MASK);
245
246 dm_write_reg(ipp110->base.ctx, addr, value);
247 }
248
249 {
250 const uint32_t addr = DCP_REG(mmDC_LUT_RW_MODE);
251
252 value = dm_read_reg(ipp110->base.ctx, addr);
253
254 set_reg_field_value(
255 value,
256 0,
257 DC_LUT_RW_MODE,
258 DC_LUT_RW_MODE);
259
260 dm_write_reg(ipp110->base.ctx, addr, value);
261 }
262
263 {
264 const uint32_t addr = DCP_REG(mmDC_LUT_CONTROL);
265
266 value = dm_read_reg(ipp110->base.ctx, addr);
267
268 /* 00 - new u0.12 */
269 set_reg_field_value(
270 value,
271 3,
272 DC_LUT_CONTROL,
273 DC_LUT_DATA_R_FORMAT);
274
275 set_reg_field_value(
276 value,
277 3,
278 DC_LUT_CONTROL,
279 DC_LUT_DATA_G_FORMAT);
280
281 set_reg_field_value(
282 value,
283 3,
284 DC_LUT_CONTROL,
285 DC_LUT_DATA_B_FORMAT);
286
287 dm_write_reg(ipp110->base.ctx, addr, value);
288 }
289
290 {
291 const uint32_t addr = DCP_REG(mmDC_LUT_RW_INDEX);
292
293 value = dm_read_reg(ipp110->base.ctx, addr);
294
295 set_reg_field_value(
296 value,
297 0,
298 DC_LUT_RW_INDEX,
299 DC_LUT_RW_INDEX);
300
301 dm_write_reg(ipp110->base.ctx, addr, value);
302 }
303}
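/*
 * After dce110_helper_select_lut() the legacy input LUT is ready for
 * sequential CPU writes: increment 0 with integer (non-float, unsigned)
 * data, all three channel write-enables set, RW mode 0, the R/G/B data
 * format fields programmed to 3, and the RW index reset to entry 0.
 */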
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.c
new file mode 100644
index 000000000000..c0a68c6f585e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.c
@@ -0,0 +1,535 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26
27#include "dce/dce_11_0_d.h"
28#include "dce/dce_11_0_sh_mask.h"
29/* TODO: this needs to be looked at; used by Stella's workaround */
30#include "gmc/gmc_8_2_d.h"
31#include "gmc/gmc_8_2_sh_mask.h"
32
33#include "include/logger_interface.h"
34
35#include "dce110_mem_input.h"
36
37#define DCP_REG(reg) (reg + mem_input110->offsets.dcp)
38#define DMIF_REG(reg) (reg + mem_input110->offsets.dmif)
39#define PIPE_REG(reg) (reg + mem_input110->offsets.pipe)
40
41static void program_sec_addr(
42 struct dce110_mem_input *mem_input110,
43 PHYSICAL_ADDRESS_LOC address)
44{
45 uint32_t value = 0;
46 uint32_t temp;
47
48 /*high register MUST be programmed first*/
49 temp = address.high_part &
50 GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_MASK;
51 set_reg_field_value(value, temp,
52 GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
53 GRPH_SECONDARY_SURFACE_ADDRESS_HIGH);
54 dm_write_reg(mem_input110->base.ctx,
55 DCP_REG(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH), value);
56
57 value = 0;
58 temp = address.low_part >>
59 GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS__SHIFT;
60 set_reg_field_value(value, temp,
61 GRPH_SECONDARY_SURFACE_ADDRESS,
62 GRPH_SECONDARY_SURFACE_ADDRESS);
63 dm_write_reg(mem_input110->base.ctx,
64 DCP_REG(mmGRPH_SECONDARY_SURFACE_ADDRESS), value);
65}
66
67static void program_pri_addr(
68 struct dce110_mem_input *mem_input110,
69 PHYSICAL_ADDRESS_LOC address)
70{
71 uint32_t value = 0;
72 uint32_t temp;
73
74 /*high register MUST be programmed first*/
75 temp = address.high_part &
76 GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_MASK;
77 set_reg_field_value(value, temp,
78 GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
79 GRPH_PRIMARY_SURFACE_ADDRESS_HIGH);
80 dm_write_reg(mem_input110->base.ctx,
81 DCP_REG(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH), value);
82
83 value = 0;
84 temp = address.low_part >>
85 GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS__SHIFT;
86 set_reg_field_value(value, temp,
87 GRPH_PRIMARY_SURFACE_ADDRESS,
88 GRPH_PRIMARY_SURFACE_ADDRESS);
89 dm_write_reg(mem_input110->base.ctx,
90 DCP_REG(mmGRPH_PRIMARY_SURFACE_ADDRESS), value);
91}
92
93bool dce110_mem_input_is_flip_pending(struct mem_input *mem_input)
94{
95 struct dce110_mem_input *mem_input110 = TO_DCE110_MEM_INPUT(mem_input);
96 uint32_t value;
97
98 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmGRPH_UPDATE));
99
100 if (get_reg_field_value(value, GRPH_UPDATE,
101 GRPH_SURFACE_UPDATE_PENDING))
102 return true;
103
104 mem_input->current_address = mem_input->request_address;
105 return false;
106}
107
108bool dce110_mem_input_program_surface_flip_and_addr(
109 struct mem_input *mem_input,
110 const struct dc_plane_address *address,
111 bool flip_immediate)
112{
113 struct dce110_mem_input *mem_input110 = TO_DCE110_MEM_INPUT(mem_input);
114
115 uint32_t value = 0;
116
117 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmGRPH_FLIP_CONTROL));
118 if (flip_immediate) {
119 set_reg_field_value(value, 1, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_IMMEDIATE_EN);
120 set_reg_field_value(value, 1, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN);
121 } else {
122 set_reg_field_value(value, 0, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_IMMEDIATE_EN);
123 set_reg_field_value(value, 0, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN);
124 }
125 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmGRPH_FLIP_CONTROL), value);
126
127 switch (address->type) {
128 case PLN_ADDR_TYPE_GRAPHICS:
129 if (address->grph.addr.quad_part == 0)
130 break;
131 program_pri_addr(mem_input110, address->grph.addr);
132 break;
133 case PLN_ADDR_TYPE_GRPH_STEREO:
134 if (address->grph_stereo.left_addr.quad_part == 0
135 || address->grph_stereo.right_addr.quad_part == 0)
136 break;
137 program_pri_addr(mem_input110, address->grph_stereo.left_addr);
138 program_sec_addr(mem_input110, address->grph_stereo.right_addr);
139 break;
140 default:
141 /* not supported */
142 BREAK_TO_DEBUGGER();
143 break;
144 }
145
146 mem_input->request_address = *address;
147 if (flip_immediate)
148 mem_input->current_address = *address;
149
150 return true;
151}
152
153/* Scatter Gather param tables */
154static const unsigned int dvmm_Hw_Setting_2DTiling[4][9] = {
155 { 8, 64, 64, 8, 8, 1, 4, 0, 0},
156 { 16, 64, 32, 8, 16, 1, 8, 0, 0},
157 { 32, 32, 32, 16, 16, 1, 8, 0, 0},
158 { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
159};
160
161static const unsigned int dvmm_Hw_Setting_1DTiling[4][9] = {
162 { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
163 { 16, 256, 8, 2, 0, 1, 0, 0, 0},
164 { 32, 128, 8, 4, 0, 1, 0, 0, 0},
165 { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
166};
167
168static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
169 { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
170 { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
171 { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
172 { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
173};
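/*
 * Column usage in dce110_mem_input_program_pte_vm() below:
 * [0] bits per pixel, [1]/[2] page width/height (their log2 is programmed
 * into DVMM_PAGE_WIDTH/HEIGHT), [3] min PTEs before flip, [4] min PTEs
 * before flip for 90/270 rotation, [5] PTE requests per chunk; the
 * remaining columns are not consumed by this file.
 */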
174
175/* Helper to get table entry from surface info */
176static const unsigned int *get_dvmm_hw_setting(
177 union dc_tiling_info *tiling_info,
178 enum surface_pixel_format format)
179{
180 enum bits_per_pixel {
181 bpp_8 = 0,
182 bpp_16,
183 bpp_32,
184 bpp_64
185 } bpp;
186
187 if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
188 bpp = bpp_64;
189 else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888)
190 bpp = bpp_32;
191 else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB1555)
192 bpp = bpp_16;
193 else
194 bpp = bpp_8;
195
196 switch (tiling_info->gfx8.array_mode) {
197 case DC_ARRAY_1D_TILED_THIN1:
198 case DC_ARRAY_1D_TILED_THICK:
199 case DC_ARRAY_PRT_TILED_THIN1:
200 return dvmm_Hw_Setting_1DTiling[bpp];
201 case DC_ARRAY_2D_TILED_THIN1:
202 case DC_ARRAY_2D_TILED_THICK:
203 case DC_ARRAY_2D_TILED_X_THICK:
204 case DC_ARRAY_PRT_2D_TILED_THIN1:
205 case DC_ARRAY_PRT_2D_TILED_THICK:
206 return dvmm_Hw_Setting_2DTiling[bpp];
207 case DC_ARRAY_LINEAR_GENERAL:
208 case DC_ARRAY_LINEAR_ALLIGNED:
209 return dvmm_Hw_Setting_Linear[bpp];
210 default:
211 return dvmm_Hw_Setting_2DTiling[bpp];
212 }
213}
214
215bool dce110_mem_input_program_pte_vm(
216 struct mem_input *mem_input,
217 enum surface_pixel_format format,
218 union dc_tiling_info *tiling_info,
219 enum dc_rotation_angle rotation)
220{
221 struct dce110_mem_input *mem_input110 = TO_DCE110_MEM_INPUT(mem_input);
222 const unsigned int *pte = get_dvmm_hw_setting(tiling_info, format);
223
224 unsigned int page_width = 0;
225 unsigned int page_height = 0;
226 unsigned int temp_page_width = pte[1];
227 unsigned int temp_page_height = pte[2];
228 unsigned int min_pte_before_flip = 0;
229 uint32_t value = 0;
230
231 while ((temp_page_width >>= 1) != 0)
232 page_width++;
233 while ((temp_page_height >>= 1) != 0)
234 page_height++;
235
236 switch (rotation) {
237 case ROTATION_ANGLE_90:
238 case ROTATION_ANGLE_270:
239 min_pte_before_flip = pte[4];
240 break;
241 default:
242 min_pte_before_flip = pte[3];
243 break;
244 }
245
246 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmGRPH_PIPE_OUTSTANDING_REQUEST_LIMIT));
247 set_reg_field_value(value, 0xff, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT);
248 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmGRPH_PIPE_OUTSTANDING_REQUEST_LIMIT), value);
249
250 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmDVMM_PTE_CONTROL));
251 set_reg_field_value(value, page_width, DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH);
252 set_reg_field_value(value, page_height, DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT);
253 set_reg_field_value(value, min_pte_before_flip, DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP);
254 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmDVMM_PTE_CONTROL), value);
255
256 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmDVMM_PTE_ARB_CONTROL));
257 set_reg_field_value(value, pte[5], DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK);
258 set_reg_field_value(value, 0xff, DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING);
259 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmDVMM_PTE_ARB_CONTROL), value);
260
261 return true;
262}
263
264static void program_urgency_watermark(
265 const struct dc_context *ctx,
266 const uint32_t offset,
267 struct bw_watermarks marks_low,
268 uint32_t total_dest_line_time_ns)
269{
270 /* register value */
271 uint32_t urgency_cntl = 0;
272 uint32_t wm_mask_cntl = 0;
273
274 uint32_t urgency_addr = offset + mmDPG_PIPE_URGENCY_CONTROL;
275 uint32_t wm_addr = offset + mmDPG_WATERMARK_MASK_CONTROL;
276
277 /*Write mask to enable reading/writing of watermark set A*/
278 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
279 set_reg_field_value(wm_mask_cntl,
280 1,
281 DPG_WATERMARK_MASK_CONTROL,
282 URGENCY_WATERMARK_MASK);
283 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
284
285 urgency_cntl = dm_read_reg(ctx, urgency_addr);
286
287 set_reg_field_value(
288 urgency_cntl,
289 marks_low.d_mark,
290 DPG_PIPE_URGENCY_CONTROL,
291 URGENCY_LOW_WATERMARK);
292
293 set_reg_field_value(
294 urgency_cntl,
295 total_dest_line_time_ns,
296 DPG_PIPE_URGENCY_CONTROL,
297 URGENCY_HIGH_WATERMARK);
298 dm_write_reg(ctx, urgency_addr, urgency_cntl);
299
300 /*Write mask to enable reading/writing of watermark set B*/
301 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
302 set_reg_field_value(wm_mask_cntl,
303 2,
304 DPG_WATERMARK_MASK_CONTROL,
305 URGENCY_WATERMARK_MASK);
306 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
307
308 urgency_cntl = dm_read_reg(ctx, urgency_addr);
309
310 set_reg_field_value(urgency_cntl,
311 marks_low.a_mark,
312 DPG_PIPE_URGENCY_CONTROL,
313 URGENCY_LOW_WATERMARK);
314
315 set_reg_field_value(urgency_cntl,
316 total_dest_line_time_ns,
317 DPG_PIPE_URGENCY_CONTROL,
318 URGENCY_HIGH_WATERMARK);
319 dm_write_reg(ctx, urgency_addr, urgency_cntl);
320}
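/*
 * Writing 1 or 2 to the *_WATERMARK_MASK field selects whether the following
 * watermark write lands in set A or set B; the stutter and NB p-state helpers
 * below follow the same A-then-B pattern.
 */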
321
322static void program_stutter_watermark(
323 const struct dc_context *ctx,
324 const uint32_t offset,
325 struct bw_watermarks marks)
326{
327 /* register value */
328 uint32_t stutter_cntl = 0;
329 uint32_t wm_mask_cntl = 0;
330
331 uint32_t stutter_addr = offset + mmDPG_PIPE_STUTTER_CONTROL;
332 uint32_t wm_addr = offset + mmDPG_WATERMARK_MASK_CONTROL;
333
334 /*Write mask to enable reading/writing of watermark set A*/
335
336 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
337 set_reg_field_value(wm_mask_cntl,
338 1,
339 DPG_WATERMARK_MASK_CONTROL,
340 STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
341 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
342
343 stutter_cntl = dm_read_reg(ctx, stutter_addr);
344
345 if (ctx->dc->debug.disable_stutter) {
346 set_reg_field_value(stutter_cntl,
347 0,
348 DPG_PIPE_STUTTER_CONTROL,
349 STUTTER_ENABLE);
350 } else {
351 set_reg_field_value(stutter_cntl,
352 1,
353 DPG_PIPE_STUTTER_CONTROL,
354 STUTTER_ENABLE);
355 }
356
357 set_reg_field_value(stutter_cntl,
358 1,
359 DPG_PIPE_STUTTER_CONTROL,
360 STUTTER_IGNORE_FBC);
361
362 /*Write watermark set A*/
363 set_reg_field_value(stutter_cntl,
364 marks.d_mark,
365 DPG_PIPE_STUTTER_CONTROL,
366 STUTTER_EXIT_SELF_REFRESH_WATERMARK);
367 dm_write_reg(ctx, stutter_addr, stutter_cntl);
368
369 /*Write mask to enable reading/writing of watermark set B*/
370 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
371 set_reg_field_value(wm_mask_cntl,
372 2,
373 DPG_WATERMARK_MASK_CONTROL,
374 STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
375 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
376
377 stutter_cntl = dm_read_reg(ctx, stutter_addr);
378
379 /*Write watermark set B*/
380 set_reg_field_value(stutter_cntl,
381 marks.a_mark,
382 DPG_PIPE_STUTTER_CONTROL,
383 STUTTER_EXIT_SELF_REFRESH_WATERMARK);
384 dm_write_reg(ctx, stutter_addr, stutter_cntl);
385}
386
387static void program_nbp_watermark(
388 const struct dc_context *ctx,
389 const uint32_t offset,
390 struct bw_watermarks marks)
391{
392 uint32_t value;
393 uint32_t addr;
394 /* Write mask to enable reading/writing of watermark set A */
395 addr = offset + mmDPG_WATERMARK_MASK_CONTROL;
396 value = dm_read_reg(ctx, addr);
397 set_reg_field_value(
398 value,
399 1,
400 DPG_WATERMARK_MASK_CONTROL,
401 NB_PSTATE_CHANGE_WATERMARK_MASK);
402 dm_write_reg(ctx, addr, value);
403
404 addr = offset + mmDPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
405 value = dm_read_reg(ctx, addr);
406 set_reg_field_value(
407 value,
408 1,
409 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
410 NB_PSTATE_CHANGE_ENABLE);
411 set_reg_field_value(
412 value,
413 1,
414 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
415 NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
416 set_reg_field_value(
417 value,
418 1,
419 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
420 NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
421 dm_write_reg(ctx, addr, value);
422
423 /* Write watermark set A */
424 value = dm_read_reg(ctx, addr);
425 set_reg_field_value(
426 value,
427 marks.d_mark,
428 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
429 NB_PSTATE_CHANGE_WATERMARK);
430 dm_write_reg(ctx, addr, value);
431
432 /* Write mask to enable reading/writing of watermark set B */
433 addr = offset + mmDPG_WATERMARK_MASK_CONTROL;
434 value = dm_read_reg(ctx, addr);
435 set_reg_field_value(
436 value,
437 2,
438 DPG_WATERMARK_MASK_CONTROL,
439 NB_PSTATE_CHANGE_WATERMARK_MASK);
440 dm_write_reg(ctx, addr, value);
441
442 addr = offset + mmDPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
443 value = dm_read_reg(ctx, addr);
444 set_reg_field_value(
445 value,
446 1,
447 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
448 NB_PSTATE_CHANGE_ENABLE);
449 set_reg_field_value(
450 value,
451 1,
452 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
453 NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
454 set_reg_field_value(
455 value,
456 1,
457 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
458 NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
459 dm_write_reg(ctx, addr, value);
460
461 /* Write watermark set B */
462 value = dm_read_reg(ctx, addr);
463 set_reg_field_value(
464 value,
465 marks.a_mark,
466 DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
467 NB_PSTATE_CHANGE_WATERMARK);
468 dm_write_reg(ctx, addr, value);
469}
470
471void dce110_mem_input_program_display_marks(
472 struct mem_input *mem_input,
473 struct bw_watermarks nbp,
474 struct bw_watermarks stutter,
475 struct bw_watermarks urgent,
476 uint32_t total_dest_line_time_ns)
477{
478 struct dce110_mem_input *bm_dce110 = TO_DCE110_MEM_INPUT(mem_input);
479
480 program_urgency_watermark(
481 mem_input->ctx,
482 bm_dce110->offsets.dmif,
483 urgent,
484 total_dest_line_time_ns);
485
486 program_nbp_watermark(
487 mem_input->ctx,
488 bm_dce110->offsets.dmif,
489 nbp);
490
491 program_stutter_watermark(
492 mem_input->ctx,
493 bm_dce110->offsets.dmif,
494 stutter);
495}
496
497static struct mem_input_funcs dce110_mem_input_funcs = {
498 .mem_input_program_display_marks =
499 dce110_mem_input_program_display_marks,
500 .allocate_mem_input = dce_mem_input_allocate_dmif,
501 .free_mem_input = dce_mem_input_free_dmif,
502 .mem_input_program_surface_flip_and_addr =
503 dce110_mem_input_program_surface_flip_and_addr,
504 .mem_input_program_pte_vm =
505 dce110_mem_input_program_pte_vm,
506 .mem_input_program_surface_config =
507 dce_mem_input_program_surface_config,
508 .mem_input_is_flip_pending =
509 dce110_mem_input_is_flip_pending,
510 .mem_input_update_dchub = NULL
511};
512/*****************************************/
513/* Constructor, Destructor */
514/*****************************************/
515
516bool dce110_mem_input_construct(
517 struct dce110_mem_input *mem_input110,
518 struct dc_context *ctx,
519 uint32_t inst,
520 const struct dce110_mem_input_reg_offsets *offsets)
521{
522 /* supported stutter method
523 * STUTTER_MODE_ENHANCED
524 * STUTTER_MODE_QUAD_DMIF_BUFFER
525 * STUTTER_MODE_WATERMARK_NBP_STATE
526 */
527 mem_input110->base.funcs = &dce110_mem_input_funcs;
528 mem_input110->base.ctx = ctx;
529
530 mem_input110->base.inst = inst;
531
532 mem_input110->offsets = *offsets;
533
534 return true;
535}
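For orientation, here is a minimal caller sketch (hypothetical, not part of this patch; it assumes the surrounding dc headers, uses placeholder offsets, and the example_setup() helper is invented): a resource constructor builds the per-pipe mem_input once and later programs watermarks through the function-pointer table.

static struct dce110_mem_input example_mi110;
static const struct dce110_mem_input_reg_offsets example_offsets = {
	.dcp = 0, .dmif = 0, .pipe = 0,	/* placeholder per-pipe offsets */
};

static void example_setup(struct dc_context *ctx,
			  struct bw_watermarks nbp,
			  struct bw_watermarks stutter,
			  struct bw_watermarks urgent,
			  uint32_t total_dest_line_time_ns)
{
	/* construct once per pipe */
	dce110_mem_input_construct(&example_mi110, ctx, 0 /* inst */,
				   &example_offsets);

	/* after bandwidth calculation has produced the watermark sets */
	example_mi110.base.funcs->mem_input_program_display_marks(
		&example_mi110.base, nbp, stutter, urgent,
		total_dest_line_time_ns);
}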
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.h
new file mode 100644
index 000000000000..83b2df93ce49
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input.h
@@ -0,0 +1,131 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_MEM_INPUT_DCE110_H__
26#define __DC_MEM_INPUT_DCE110_H__
27
28#include "mem_input.h"
29
30#define TO_DCE110_MEM_INPUT(mi)\
31 container_of(mi, struct dce110_mem_input, base)
32
33struct dce110_mem_input_reg_offsets {
34 uint32_t dcp;
35 uint32_t dmif;
36 uint32_t pipe;
37};
38
39struct dce110_mem_input {
40 struct mem_input base;
41 struct dce110_mem_input_reg_offsets offsets;
42};
43
44bool dce110_mem_input_construct(
45 struct dce110_mem_input *mem_input110,
46 struct dc_context *ctx,
47 uint32_t inst,
48 const struct dce110_mem_input_reg_offsets *offsets);
49
50/*
51 * dce110_mem_input_program_display_marks
52 *
 53 * This function will program the NBP, stutter and urgency watermarks to the
 54 * minimum allowable values
55 */
56void dce110_mem_input_program_display_marks(
57 struct mem_input *mem_input,
58 struct bw_watermarks nbp,
59 struct bw_watermarks stutter,
60 struct bw_watermarks urgent,
61 uint32_t total_dest_line_time_ns);
62
63/*
64 * dce110_allocate_mem_input
65 *
 66 * This function will allocate a DMIF buffer and program the required
 67 * pixel duration for the pipe
68 */
69void dce110_allocate_mem_input(
70 struct mem_input *mem_input,
71 uint32_t h_total,/* for current stream */
72 uint32_t v_total,/* for current stream */
73 uint32_t pix_clk_khz,/* for current stream */
74 uint32_t total_stream_num);
75
76/*
77 * dce110_free_mem_input
78 *
 79 * This function will deallocate a DMIF buffer from the pipe
80 */
81void dce110_free_mem_input(
82 struct mem_input *mem_input,
83 uint32_t total_stream_num);
84
85/*
86 * dce110_mem_input_program_surface_flip_and_addr
87 *
88 * This function programs hsync/vsync mode and surface address
89 */
90bool dce110_mem_input_program_surface_flip_and_addr(
91 struct mem_input *mem_input,
92 const struct dc_plane_address *address,
93 bool flip_immediate);
94
95/*
96 * dce110_mem_input_program_surface_config
97 *
98 * This function will program surface tiling, size, rotation and pixel format
 99 * to the corresponding DCP registers.
100 */
101bool dce110_mem_input_program_surface_config(
102 struct mem_input *mem_input,
103 enum surface_pixel_format format,
104 union dc_tiling_info *tiling_info,
105 union plane_size *plane_size,
106 enum dc_rotation_angle rotation,
107 struct dc_plane_dcc_param *dcc,
108 bool horizontal_mirror);
109
110/*
111 * dce110_mem_input_program_pte_vm
112 *
113 * This function will program pte vm registers.
114 */
115bool dce110_mem_input_program_pte_vm(
116 struct mem_input *mem_input,
117 enum surface_pixel_format format,
118 union dc_tiling_info *tiling_info,
119 enum dc_rotation_angle rotation);
120
121/*
122 * dce110_mem_input_is_flip_pending
123 *
 124 * This function reports whether a surface update is still pending.
 125 * It is polled after a flip-immediate request, since we shouldn't
 126 * proceed until the flip has actually taken effect.
127 */
128bool dce110_mem_input_is_flip_pending(
129 struct mem_input *mem_input);
130
131#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
new file mode 100644
index 000000000000..f0310bab4030
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -0,0 +1,1081 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26
27#include "dce/dce_11_0_d.h"
28#include "dce/dce_11_0_sh_mask.h"
 29/* TODO: this needs to be looked at, used by Stella's workaround */
30#include "gmc/gmc_8_2_d.h"
31#include "gmc/gmc_8_2_sh_mask.h"
32
33#include "include/logger_interface.h"
34#include "inc/bandwidth_calcs.h"
35
36#include "dce110_mem_input.h"
37
38#define DCP_REG(reg) (reg + mem_input110->offsets.dcp)
39/*#define DMIF_REG(reg) (reg + mem_input110->offsets.dmif)*/
40/*#define PIPE_REG(reg) (reg + mem_input110->offsets.pipe)*/
41
42static const struct dce110_mem_input_reg_offsets dce110_mi_v_reg_offsets[] = {
43 {
44 .dcp = 0,
45 .dmif = 0,
46 .pipe = 0,
47 }
48};
49
50static void set_flip_control(
51 struct dce110_mem_input *mem_input110,
52 bool immediate)
53{
54 uint32_t value = 0;
55
56 value = dm_read_reg(
57 mem_input110->base.ctx,
58 DCP_REG(mmUNP_FLIP_CONTROL));
59
60 set_reg_field_value(value, 1,
61 UNP_FLIP_CONTROL,
62 GRPH_SURFACE_UPDATE_PENDING_MODE);
63
64 dm_write_reg(
65 mem_input110->base.ctx,
66 DCP_REG(mmUNP_FLIP_CONTROL),
67 value);
68}
69
70/* chroma part */
71static void program_pri_addr_c(
72 struct dce110_mem_input *mem_input110,
73 PHYSICAL_ADDRESS_LOC address)
74{
75 uint32_t value = 0;
76 uint32_t temp = 0;
77 /*high register MUST be programmed first*/
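	/* (presumably because the write to the low half is what latches the
	 * new address; this rationale is an assumption inferred from the
	 * ordering enforced here, it is not stated in the patch) */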
78 temp = address.high_part &
79UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK;
80
81 set_reg_field_value(value, temp,
82 UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
83 GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C);
84
85 dm_write_reg(
86 mem_input110->base.ctx,
87 DCP_REG(mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C),
88 value);
89
90 temp = 0;
91 value = 0;
92 temp = address.low_part >>
93 UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT;
94
95 set_reg_field_value(value, temp,
96 UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
97 GRPH_PRIMARY_SURFACE_ADDRESS_C);
98
99 dm_write_reg(
100 mem_input110->base.ctx,
101 DCP_REG(mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_C),
102 value);
103}
104
105/* luma part */
106static void program_pri_addr_l(
107 struct dce110_mem_input *mem_input110,
108 PHYSICAL_ADDRESS_LOC address)
109{
110 uint32_t value = 0;
111 uint32_t temp = 0;
112
113 /*high register MUST be programmed first*/
114 temp = address.high_part &
115UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MASK;
116
117 set_reg_field_value(value, temp,
118 UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
119 GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L);
120
121 dm_write_reg(
122 mem_input110->base.ctx,
123 DCP_REG(mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L),
124 value);
125
126 temp = 0;
127 value = 0;
128 temp = address.low_part >>
129 UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT;
130
131 set_reg_field_value(value, temp,
132 UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L,
133 GRPH_PRIMARY_SURFACE_ADDRESS_L);
134
135 dm_write_reg(
136 mem_input110->base.ctx,
137 DCP_REG(mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_L),
138 value);
139}
140
141static void program_addr(
142 struct dce110_mem_input *mem_input110,
143 const struct dc_plane_address *addr)
144{
145 switch (addr->type) {
146 case PLN_ADDR_TYPE_GRAPHICS:
147 program_pri_addr_l(
148 mem_input110,
149 addr->grph.addr);
150 break;
151 case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
152 program_pri_addr_l(
153 mem_input110,
154 addr->video_progressive.luma_addr);
155 program_pri_addr_c(
156 mem_input110,
157 addr->video_progressive.chroma_addr);
158 break;
159 default:
160 /* not supported */
161 BREAK_TO_DEBUGGER();
162 }
163}
164
165static void enable(struct dce110_mem_input *mem_input110)
166{
167 uint32_t value = 0;
168
169 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmUNP_GRPH_ENABLE));
170 set_reg_field_value(value, 1, UNP_GRPH_ENABLE, GRPH_ENABLE);
171 dm_write_reg(mem_input110->base.ctx,
172 DCP_REG(mmUNP_GRPH_ENABLE),
173 value);
174}
175
176static void program_tiling(
177 struct dce110_mem_input *mem_input110,
178 const union dc_tiling_info *info,
179 const enum surface_pixel_format pixel_format)
180{
181 uint32_t value = 0;
182
183 set_reg_field_value(value, info->gfx8.num_banks,
184 UNP_GRPH_CONTROL, GRPH_NUM_BANKS);
185
186 set_reg_field_value(value, info->gfx8.bank_width,
187 UNP_GRPH_CONTROL, GRPH_BANK_WIDTH_L);
188
189 set_reg_field_value(value, info->gfx8.bank_height,
190 UNP_GRPH_CONTROL, GRPH_BANK_HEIGHT_L);
191
192 set_reg_field_value(value, info->gfx8.tile_aspect,
193 UNP_GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT_L);
194
195 set_reg_field_value(value, info->gfx8.tile_split,
196 UNP_GRPH_CONTROL, GRPH_TILE_SPLIT_L);
197
198 set_reg_field_value(value, info->gfx8.tile_mode,
199 UNP_GRPH_CONTROL, GRPH_MICRO_TILE_MODE_L);
200
201 set_reg_field_value(value, info->gfx8.pipe_config,
202 UNP_GRPH_CONTROL, GRPH_PIPE_CONFIG);
203
204 set_reg_field_value(value, info->gfx8.array_mode,
205 UNP_GRPH_CONTROL, GRPH_ARRAY_MODE);
206
207 set_reg_field_value(value, 1,
208 UNP_GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE);
209
210 set_reg_field_value(value, 0,
211 UNP_GRPH_CONTROL, GRPH_Z);
212
213 dm_write_reg(
214 mem_input110->base.ctx,
215 mmUNP_GRPH_CONTROL,
216 value);
217
218 value = 0;
219
220 set_reg_field_value(value, info->gfx8.bank_width_c,
221 UNP_GRPH_CONTROL_C, GRPH_BANK_WIDTH_C);
222
223 set_reg_field_value(value, info->gfx8.bank_height_c,
224 UNP_GRPH_CONTROL_C, GRPH_BANK_HEIGHT_C);
225
226 set_reg_field_value(value, info->gfx8.tile_aspect_c,
227 UNP_GRPH_CONTROL_C, GRPH_MACRO_TILE_ASPECT_C);
228
229 set_reg_field_value(value, info->gfx8.tile_split_c,
230 UNP_GRPH_CONTROL_C, GRPH_TILE_SPLIT_C);
231
232 set_reg_field_value(value, info->gfx8.tile_mode_c,
233 UNP_GRPH_CONTROL_C, GRPH_MICRO_TILE_MODE_C);
234
235 dm_write_reg(
236 mem_input110->base.ctx,
237 mmUNP_GRPH_CONTROL_C,
238 value);
239}
240
241static void program_size_and_rotation(
242 struct dce110_mem_input *mem_input110,
243 enum dc_rotation_angle rotation,
244 const union plane_size *plane_size)
245{
246 uint32_t value = 0;
247 union plane_size local_size = *plane_size;
248
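	/* For 90/270 degree rotation the surface is scanned transposed, so
	 * swap x<->y and width<->height for both luma and chroma before the
	 * start/end registers are programmed below. */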
249 if (rotation == ROTATION_ANGLE_90 ||
250 rotation == ROTATION_ANGLE_270) {
251
252 uint32_t swap;
253 swap = local_size.video.luma_size.x;
254 local_size.video.luma_size.x =
255 local_size.video.luma_size.y;
256 local_size.video.luma_size.y = swap;
257
258 swap = local_size.video.luma_size.width;
259 local_size.video.luma_size.width =
260 local_size.video.luma_size.height;
261 local_size.video.luma_size.height = swap;
262
263 swap = local_size.video.chroma_size.x;
264 local_size.video.chroma_size.x =
265 local_size.video.chroma_size.y;
266 local_size.video.chroma_size.y = swap;
267
268 swap = local_size.video.chroma_size.width;
269 local_size.video.chroma_size.width =
270 local_size.video.chroma_size.height;
271 local_size.video.chroma_size.height = swap;
272 }
273
274 value = 0;
275 set_reg_field_value(value, local_size.video.luma_pitch,
276 UNP_GRPH_PITCH_L, GRPH_PITCH_L);
277
278 dm_write_reg(
279 mem_input110->base.ctx,
280 DCP_REG(mmUNP_GRPH_PITCH_L),
281 value);
282
283 value = 0;
284 set_reg_field_value(value, local_size.video.chroma_pitch,
285 UNP_GRPH_PITCH_C, GRPH_PITCH_C);
286 dm_write_reg(
287 mem_input110->base.ctx,
288 DCP_REG(mmUNP_GRPH_PITCH_C),
289 value);
290
291 value = 0;
292 set_reg_field_value(value, 0,
293 UNP_GRPH_X_START_L, GRPH_X_START_L);
294 dm_write_reg(
295 mem_input110->base.ctx,
296 DCP_REG(mmUNP_GRPH_X_START_L),
297 value);
298
299 value = 0;
300 set_reg_field_value(value, 0,
301 UNP_GRPH_X_START_C, GRPH_X_START_C);
302 dm_write_reg(
303 mem_input110->base.ctx,
304 DCP_REG(mmUNP_GRPH_X_START_C),
305 value);
306
307 value = 0;
308 set_reg_field_value(value, 0,
309 UNP_GRPH_Y_START_L, GRPH_Y_START_L);
310 dm_write_reg(
311 mem_input110->base.ctx,
312 DCP_REG(mmUNP_GRPH_Y_START_L),
313 value);
314
315 value = 0;
316 set_reg_field_value(value, 0,
317 UNP_GRPH_Y_START_C, GRPH_Y_START_C);
318 dm_write_reg(
319 mem_input110->base.ctx,
320 DCP_REG(mmUNP_GRPH_Y_START_C),
321 value);
322
323 value = 0;
324 set_reg_field_value(value, local_size.video.luma_size.x +
325 local_size.video.luma_size.width,
326 UNP_GRPH_X_END_L, GRPH_X_END_L);
327 dm_write_reg(
328 mem_input110->base.ctx,
329 DCP_REG(mmUNP_GRPH_X_END_L),
330 value);
331
332 value = 0;
333 set_reg_field_value(value, local_size.video.chroma_size.x +
334 local_size.video.chroma_size.width,
335 UNP_GRPH_X_END_C, GRPH_X_END_C);
336 dm_write_reg(
337 mem_input110->base.ctx,
338 DCP_REG(mmUNP_GRPH_X_END_C),
339 value);
340
341 value = 0;
342 set_reg_field_value(value, local_size.video.luma_size.y +
343 local_size.video.luma_size.height,
344 UNP_GRPH_Y_END_L, GRPH_Y_END_L);
345 dm_write_reg(
346 mem_input110->base.ctx,
347 DCP_REG(mmUNP_GRPH_Y_END_L),
348 value);
349
350 value = 0;
351 set_reg_field_value(value, local_size.video.chroma_size.y +
352 local_size.video.chroma_size.height,
353 UNP_GRPH_Y_END_C, GRPH_Y_END_C);
354 dm_write_reg(
355 mem_input110->base.ctx,
356 DCP_REG(mmUNP_GRPH_Y_END_C),
357 value);
358
359 value = 0;
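	/* The UNP_HW_ROTATION encoding appears to count rotation in the
	 * opposite direction from dc_rotation_angle, hence a requested 90
	 * degrees maps to register value 3 and 270 degrees to 1 (assumption
	 * inferred from this mapping, not documented here). */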
360 switch (rotation) {
361 case ROTATION_ANGLE_90:
362 set_reg_field_value(value, 3,
363 UNP_HW_ROTATION, ROTATION_ANGLE);
364 break;
365 case ROTATION_ANGLE_180:
366 set_reg_field_value(value, 2,
367 UNP_HW_ROTATION, ROTATION_ANGLE);
368 break;
369 case ROTATION_ANGLE_270:
370 set_reg_field_value(value, 1,
371 UNP_HW_ROTATION, ROTATION_ANGLE);
372 break;
373 default:
374 set_reg_field_value(value, 0,
375 UNP_HW_ROTATION, ROTATION_ANGLE);
376 break;
377 }
378
379 dm_write_reg(
380 mem_input110->base.ctx,
381 DCP_REG(mmUNP_HW_ROTATION),
382 value);
383}
384
385static void program_pixel_format(
386 struct dce110_mem_input *mem_input110,
387 enum surface_pixel_format format)
388{
389 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
390 uint32_t value;
391 uint8_t grph_depth;
392 uint8_t grph_format;
393
394 value = dm_read_reg(
395 mem_input110->base.ctx,
396 DCP_REG(mmUNP_GRPH_CONTROL));
397
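		/* GRPH_DEPTH selects the bits per pixel (0 = 8bpp, 1 = 16bpp,
		 * 2 = 32bpp, 3 = 64bpp) and GRPH_FORMAT the layout within
		 * that depth, as the cases below show. */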
398 switch (format) {
399 case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
400 grph_depth = 0;
401 grph_format = 0;
402 break;
403 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
404 grph_depth = 1;
405 grph_format = 1;
406 break;
407 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
408 case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
409 grph_depth = 2;
410 grph_format = 0;
411 break;
412 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
413 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
414 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
415 grph_depth = 2;
416 grph_format = 1;
417 break;
418 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
419 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
420 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
421 grph_depth = 3;
422 grph_format = 0;
423 break;
424 default:
425 grph_depth = 2;
426 grph_format = 0;
427 break;
428 }
429
430 set_reg_field_value(
431 value,
432 grph_depth,
433 UNP_GRPH_CONTROL,
434 GRPH_DEPTH);
435 set_reg_field_value(
436 value,
437 grph_format,
438 UNP_GRPH_CONTROL,
439 GRPH_FORMAT);
440
441 dm_write_reg(
442 mem_input110->base.ctx,
443 DCP_REG(mmUNP_GRPH_CONTROL),
444 value);
445
446 value = dm_read_reg(
447 mem_input110->base.ctx,
448 DCP_REG(mmUNP_GRPH_CONTROL_EXP));
449
450 /* VIDEO FORMAT 0 */
451 set_reg_field_value(
452 value,
453 0,
454 UNP_GRPH_CONTROL_EXP,
455 VIDEO_FORMAT);
456 dm_write_reg(
457 mem_input110->base.ctx,
458 DCP_REG(mmUNP_GRPH_CONTROL_EXP),
459 value);
460
461 } else {
 462		/* Video 422 and 420 formats need UNP_GRPH_CONTROL_EXP programmed */
463 uint32_t value;
464 uint8_t video_format;
465
466 value = dm_read_reg(
467 mem_input110->base.ctx,
468 DCP_REG(mmUNP_GRPH_CONTROL_EXP));
469
470 switch (format) {
471 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
472 video_format = 2;
473 break;
474 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
475 video_format = 3;
476 break;
477 default:
478 video_format = 0;
479 break;
480 }
481
482 set_reg_field_value(
483 value,
484 video_format,
485 UNP_GRPH_CONTROL_EXP,
486 VIDEO_FORMAT);
487
488 dm_write_reg(
489 mem_input110->base.ctx,
490 DCP_REG(mmUNP_GRPH_CONTROL_EXP),
491 value);
492 }
493}
494
495bool dce110_mem_input_v_is_surface_pending(struct mem_input *mem_input)
496{
497 struct dce110_mem_input *mem_input110 = TO_DCE110_MEM_INPUT(mem_input);
498 uint32_t value;
499
500 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmUNP_GRPH_UPDATE));
501
502 if (get_reg_field_value(value, UNP_GRPH_UPDATE,
503 GRPH_SURFACE_UPDATE_PENDING))
504 return true;
505
506 mem_input->current_address = mem_input->request_address;
507 return false;
508}
509
510bool dce110_mem_input_v_program_surface_flip_and_addr(
511 struct mem_input *mem_input,
512 const struct dc_plane_address *address,
513 bool flip_immediate)
514{
515 struct dce110_mem_input *mem_input110 = TO_DCE110_MEM_INPUT(mem_input);
516
517 set_flip_control(mem_input110, flip_immediate);
518 program_addr(mem_input110,
519 address);
520
521 mem_input->request_address = *address;
522
523 return true;
524}
525
526/* Scatter Gather param tables */
527static const unsigned int dvmm_Hw_Setting_2DTiling[4][9] = {
528 { 8, 64, 64, 8, 8, 1, 4, 0, 0},
529 { 16, 64, 32, 8, 16, 1, 8, 0, 0},
530 { 32, 32, 32, 16, 16, 1, 8, 0, 0},
531 { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
532};
533
534static const unsigned int dvmm_Hw_Setting_1DTiling[4][9] = {
535 { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
536 { 16, 256, 8, 2, 0, 1, 0, 0, 0},
537 { 32, 128, 8, 4, 0, 1, 0, 0, 0},
538 { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
539};
540
541static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
542 { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
543 { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
544 { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
545 { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
546};
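/* Column usage, as consumed by dce110_mem_input_v_program_pte_vm() below
 * (column [0] appears to be the bpp the row applies to):
 * [1] page width, [2] page height,
 * [3] min PTEs before flip (no rotation),
 * [4] min PTEs before flip (90/270 rotation),
 * [5] PTE requests per chunk; the remaining columns are not read here.
 */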
547
548/* Helper to get table entry from surface info */
549static const unsigned int *get_dvmm_hw_setting(
550 union dc_tiling_info *tiling_info,
551 enum surface_pixel_format format,
552 bool chroma)
553{
554 enum bits_per_pixel {
555 bpp_8 = 0,
556 bpp_16,
557 bpp_32,
558 bpp_64
559 } bpp;
560
561 if (format >= SURFACE_PIXEL_FORMAT_INVALID)
562 bpp = bpp_32;
563 else if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
564 bpp = chroma ? bpp_16 : bpp_8;
565 else
566 bpp = bpp_8;
567
568 switch (tiling_info->gfx8.array_mode) {
569 case DC_ARRAY_1D_TILED_THIN1:
570 case DC_ARRAY_1D_TILED_THICK:
571 case DC_ARRAY_PRT_TILED_THIN1:
572 return dvmm_Hw_Setting_1DTiling[bpp];
573 case DC_ARRAY_2D_TILED_THIN1:
574 case DC_ARRAY_2D_TILED_THICK:
575 case DC_ARRAY_2D_TILED_X_THICK:
576 case DC_ARRAY_PRT_2D_TILED_THIN1:
577 case DC_ARRAY_PRT_2D_TILED_THICK:
578 return dvmm_Hw_Setting_2DTiling[bpp];
579 case DC_ARRAY_LINEAR_GENERAL:
580 case DC_ARRAY_LINEAR_ALLIGNED:
581 return dvmm_Hw_Setting_Linear[bpp];
582 default:
583 return dvmm_Hw_Setting_2DTiling[bpp];
584 }
585}
586
587bool dce110_mem_input_v_program_pte_vm(
588 struct mem_input *mem_input,
589 enum surface_pixel_format format,
590 union dc_tiling_info *tiling_info,
591 enum dc_rotation_angle rotation)
592{
593 struct dce110_mem_input *mem_input110 = TO_DCE110_MEM_INPUT(mem_input);
594 const unsigned int *pte = get_dvmm_hw_setting(tiling_info, format, false);
595 const unsigned int *pte_chroma = get_dvmm_hw_setting(tiling_info, format, true);
596
597 unsigned int page_width = 0;
598 unsigned int page_height = 0;
599 unsigned int page_width_chroma = 0;
600 unsigned int page_height_chroma = 0;
601 unsigned int temp_page_width = pte[1];
602 unsigned int temp_page_height = pte[2];
603 unsigned int min_pte_before_flip = 0;
604 unsigned int min_pte_before_flip_chroma = 0;
605 uint32_t value = 0;
606
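	/* The loops below compute log2 of the page width/height taken from
	 * the DVMM tables; those log2 values are what is later written into
	 * DVMM_PAGE_WIDTH / DVMM_PAGE_HEIGHT (and the _C equivalents). */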
607 while ((temp_page_width >>= 1) != 0)
608 page_width++;
609 while ((temp_page_height >>= 1) != 0)
610 page_height++;
611
612 temp_page_width = pte_chroma[1];
613 temp_page_height = pte_chroma[2];
614 while ((temp_page_width >>= 1) != 0)
615 page_width_chroma++;
616 while ((temp_page_height >>= 1) != 0)
617 page_height_chroma++;
618
619 switch (rotation) {
620 case ROTATION_ANGLE_90:
621 case ROTATION_ANGLE_270:
622 min_pte_before_flip = pte[4];
623 min_pte_before_flip_chroma = pte_chroma[4];
624 break;
625 default:
626 min_pte_before_flip = pte[3];
627 min_pte_before_flip_chroma = pte_chroma[3];
628 break;
629 }
630
631 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT));
632 /* TODO: un-hardcode requestlimit */
633 set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L);
634 set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C);
635 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT), value);
636
637 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_CONTROL));
638 set_reg_field_value(value, page_width, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH);
639 set_reg_field_value(value, page_height, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT);
640 set_reg_field_value(value, min_pte_before_flip, UNP_DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP);
641 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_CONTROL), value);
642
643 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_ARB_CONTROL));
644 set_reg_field_value(value, pte[5], UNP_DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK);
645 set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING);
646 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_ARB_CONTROL), value);
647
648 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_CONTROL_C));
649 set_reg_field_value(value, page_width_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_WIDTH_C);
650 set_reg_field_value(value, page_height_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_HEIGHT_C);
651 set_reg_field_value(value, min_pte_before_flip_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_MIN_PTE_BEFORE_FLIP_C);
652 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_CONTROL_C), value);
653
654 value = dm_read_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_ARB_CONTROL_C));
655 set_reg_field_value(value, pte_chroma[5], UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_PTE_REQ_PER_CHUNK_C);
656 set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_MAX_PTE_REQ_OUTSTANDING_C);
657 dm_write_reg(mem_input110->base.ctx, DCP_REG(mmUNP_DVMM_PTE_ARB_CONTROL_C), value);
658
659 return true;
660}
661
662bool dce110_mem_input_v_program_surface_config(
663 struct mem_input *mem_input,
664 enum surface_pixel_format format,
665 union dc_tiling_info *tiling_info,
666 union plane_size *plane_size,
667 enum dc_rotation_angle rotation,
668 struct dc_plane_dcc_param *dcc,
 669	bool horizontal_mirror)
670{
671 struct dce110_mem_input *mem_input110 = TO_DCE110_MEM_INPUT(mem_input);
672
673 enable(mem_input110);
674 program_tiling(mem_input110, tiling_info, format);
675 program_size_and_rotation(mem_input110, rotation, plane_size);
676 program_pixel_format(mem_input110, format);
677
678 return true;
679}
680
681static void program_urgency_watermark(
682 const struct dc_context *ctx,
683 const uint32_t urgency_addr,
684 const uint32_t wm_addr,
685 struct bw_watermarks marks_low,
686 uint32_t total_dest_line_time_ns)
687{
688 /* register value */
689 uint32_t urgency_cntl = 0;
690 uint32_t wm_mask_cntl = 0;
691
692 /*Write mask to enable reading/writing of watermark set A*/
693 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
694 set_reg_field_value(wm_mask_cntl,
695 1,
696 DPGV0_WATERMARK_MASK_CONTROL,
697 URGENCY_WATERMARK_MASK);
698 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
699
700 urgency_cntl = dm_read_reg(ctx, urgency_addr);
701
702 set_reg_field_value(
703 urgency_cntl,
704 marks_low.a_mark,
705 DPGV0_PIPE_URGENCY_CONTROL,
706 URGENCY_LOW_WATERMARK);
707
708 set_reg_field_value(
709 urgency_cntl,
710 total_dest_line_time_ns,
711 DPGV0_PIPE_URGENCY_CONTROL,
712 URGENCY_HIGH_WATERMARK);
713 dm_write_reg(ctx, urgency_addr, urgency_cntl);
714
715 /*Write mask to enable reading/writing of watermark set B*/
716 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
717 set_reg_field_value(wm_mask_cntl,
718 2,
719 DPGV0_WATERMARK_MASK_CONTROL,
720 URGENCY_WATERMARK_MASK);
721 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
722
723 urgency_cntl = dm_read_reg(ctx, urgency_addr);
724
725 set_reg_field_value(urgency_cntl,
726 marks_low.b_mark,
727 DPGV0_PIPE_URGENCY_CONTROL,
728 URGENCY_LOW_WATERMARK);
729
730 set_reg_field_value(urgency_cntl,
731 total_dest_line_time_ns,
732 DPGV0_PIPE_URGENCY_CONTROL,
733 URGENCY_HIGH_WATERMARK);
734
735 dm_write_reg(ctx, urgency_addr, urgency_cntl);
736}
737
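/* The _l wrappers below program the luma watermarks through the DPGV0_*
 * register bank and the _c wrappers the chroma watermarks through DPGV1_*. */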
738static void program_urgency_watermark_l(
739 const struct dc_context *ctx,
740 struct bw_watermarks marks_low,
741 uint32_t total_dest_line_time_ns)
742{
743 program_urgency_watermark(
744 ctx,
745 mmDPGV0_PIPE_URGENCY_CONTROL,
746 mmDPGV0_WATERMARK_MASK_CONTROL,
747 marks_low,
748 total_dest_line_time_ns);
749}
750
751static void program_urgency_watermark_c(
752 const struct dc_context *ctx,
753 struct bw_watermarks marks_low,
754 uint32_t total_dest_line_time_ns)
755{
756 program_urgency_watermark(
757 ctx,
758 mmDPGV1_PIPE_URGENCY_CONTROL,
759 mmDPGV1_WATERMARK_MASK_CONTROL,
760 marks_low,
761 total_dest_line_time_ns);
762}
763
764static void program_stutter_watermark(
765 const struct dc_context *ctx,
766 const uint32_t stutter_addr,
767 const uint32_t wm_addr,
768 struct bw_watermarks marks)
769{
770 /* register value */
771 uint32_t stutter_cntl = 0;
772 uint32_t wm_mask_cntl = 0;
773
774 /*Write mask to enable reading/writing of watermark set A*/
775
776 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
777 set_reg_field_value(wm_mask_cntl,
778 1,
779 DPGV0_WATERMARK_MASK_CONTROL,
780 STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
781 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
782
783 stutter_cntl = dm_read_reg(ctx, stutter_addr);
784
785 if (ctx->dc->debug.disable_stutter) {
786 set_reg_field_value(stutter_cntl,
787 0,
788 DPGV0_PIPE_STUTTER_CONTROL,
789 STUTTER_ENABLE);
790 } else {
791 set_reg_field_value(stutter_cntl,
792 1,
793 DPGV0_PIPE_STUTTER_CONTROL,
794 STUTTER_ENABLE);
795 }
796
797 set_reg_field_value(stutter_cntl,
798 1,
799 DPGV0_PIPE_STUTTER_CONTROL,
800 STUTTER_IGNORE_FBC);
801
802 /*Write watermark set A*/
803 set_reg_field_value(stutter_cntl,
804 marks.a_mark,
805 DPGV0_PIPE_STUTTER_CONTROL,
806 STUTTER_EXIT_SELF_REFRESH_WATERMARK);
807 dm_write_reg(ctx, stutter_addr, stutter_cntl);
808
809 /*Write mask to enable reading/writing of watermark set B*/
810 wm_mask_cntl = dm_read_reg(ctx, wm_addr);
811 set_reg_field_value(wm_mask_cntl,
812 2,
813 DPGV0_WATERMARK_MASK_CONTROL,
814 STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
815 dm_write_reg(ctx, wm_addr, wm_mask_cntl);
816
817 stutter_cntl = dm_read_reg(ctx, stutter_addr);
818 /*Write watermark set B*/
819 set_reg_field_value(stutter_cntl,
820 marks.b_mark,
821 DPGV0_PIPE_STUTTER_CONTROL,
822 STUTTER_EXIT_SELF_REFRESH_WATERMARK);
823 dm_write_reg(ctx, stutter_addr, stutter_cntl);
824}
825
826static void program_stutter_watermark_l(
827 const struct dc_context *ctx,
828 struct bw_watermarks marks)
829{
830 program_stutter_watermark(ctx,
831 mmDPGV0_PIPE_STUTTER_CONTROL,
832 mmDPGV0_WATERMARK_MASK_CONTROL,
833 marks);
834}
835
836static void program_stutter_watermark_c(
837 const struct dc_context *ctx,
838 struct bw_watermarks marks)
839{
840 program_stutter_watermark(ctx,
841 mmDPGV1_PIPE_STUTTER_CONTROL,
842 mmDPGV1_WATERMARK_MASK_CONTROL,
843 marks);
844}
845
846static void program_nbp_watermark(
847 const struct dc_context *ctx,
848 const uint32_t wm_mask_ctrl_addr,
849 const uint32_t nbp_pstate_ctrl_addr,
850 struct bw_watermarks marks)
851{
852 uint32_t value;
853
854 /* Write mask to enable reading/writing of watermark set A */
855
856 value = dm_read_reg(ctx, wm_mask_ctrl_addr);
857
858 set_reg_field_value(
859 value,
860 1,
861 DPGV0_WATERMARK_MASK_CONTROL,
862 NB_PSTATE_CHANGE_WATERMARK_MASK);
863 dm_write_reg(ctx, wm_mask_ctrl_addr, value);
864
865 value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
866
867 set_reg_field_value(
868 value,
869 1,
870 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
871 NB_PSTATE_CHANGE_ENABLE);
872 set_reg_field_value(
873 value,
874 1,
875 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
876 NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
877 set_reg_field_value(
878 value,
879 1,
880 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
881 NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
882 dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
883
884 /* Write watermark set A */
885 value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
886 set_reg_field_value(
887 value,
888 marks.a_mark,
889 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
890 NB_PSTATE_CHANGE_WATERMARK);
891 dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
892
893 /* Write mask to enable reading/writing of watermark set B */
894 value = dm_read_reg(ctx, wm_mask_ctrl_addr);
895 set_reg_field_value(
896 value,
897 2,
898 DPGV0_WATERMARK_MASK_CONTROL,
899 NB_PSTATE_CHANGE_WATERMARK_MASK);
900 dm_write_reg(ctx, wm_mask_ctrl_addr, value);
901
902 value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
903 set_reg_field_value(
904 value,
905 1,
906 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
907 NB_PSTATE_CHANGE_ENABLE);
908 set_reg_field_value(
909 value,
910 1,
911 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
912 NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
913 set_reg_field_value(
914 value,
915 1,
916 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
917 NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
918 dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
919
920 /* Write watermark set B */
921 value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
922 set_reg_field_value(
923 value,
924 marks.b_mark,
925 DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
926 NB_PSTATE_CHANGE_WATERMARK);
927 dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
928}
929
930static void program_nbp_watermark_l(
931 const struct dc_context *ctx,
932 struct bw_watermarks marks)
933{
934 program_nbp_watermark(ctx,
935 mmDPGV0_WATERMARK_MASK_CONTROL,
936 mmDPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
937 marks);
938}
939
940static void program_nbp_watermark_c(
941 const struct dc_context *ctx,
942 struct bw_watermarks marks)
943{
944 program_nbp_watermark(ctx,
945 mmDPGV1_WATERMARK_MASK_CONTROL,
946 mmDPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL,
947 marks);
948}
949
950void dce110_mem_input_v_program_display_marks(
951 struct mem_input *mem_input,
952 struct bw_watermarks nbp,
953 struct bw_watermarks stutter,
954 struct bw_watermarks urgent,
955 uint32_t total_dest_line_time_ns)
956{
957 program_urgency_watermark_l(
958 mem_input->ctx,
959 urgent,
960 total_dest_line_time_ns);
961
962 program_nbp_watermark_l(
963 mem_input->ctx,
964 nbp);
965
966 program_stutter_watermark_l(
967 mem_input->ctx,
968 stutter);
969
970}
971
972void dce110_mem_input_program_chroma_display_marks(
973 struct mem_input *mem_input,
974 struct bw_watermarks nbp,
975 struct bw_watermarks stutter,
976 struct bw_watermarks urgent,
977 uint32_t total_dest_line_time_ns)
978{
979 program_urgency_watermark_c(
980 mem_input->ctx,
981 urgent,
982 total_dest_line_time_ns);
983
984 program_nbp_watermark_c(
985 mem_input->ctx,
986 nbp);
987
988 program_stutter_watermark_c(
989 mem_input->ctx,
990 stutter);
991}
992
993void dce110_allocate_mem_input_v(
994 struct mem_input *mi,
995 uint32_t h_total,/* for current stream */
996 uint32_t v_total,/* for current stream */
997 uint32_t pix_clk_khz,/* for current stream */
998 uint32_t total_stream_num)
999{
1000 uint32_t addr;
1001 uint32_t value;
1002 uint32_t pix_dur;
1003 if (pix_clk_khz != 0) {
1004 addr = mmDPGV0_PIPE_ARBITRATION_CONTROL1;
1005 value = dm_read_reg(mi->ctx, addr);
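		/* 10^9 / f[kHz] = 10^12 / f[Hz], i.e. the pixel period
		 * expressed in picoseconds */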
1006 pix_dur = 1000000000ULL / pix_clk_khz;
1007 set_reg_field_value(
1008 value,
1009 pix_dur,
1010 DPGV0_PIPE_ARBITRATION_CONTROL1,
1011 PIXEL_DURATION);
1012 dm_write_reg(mi->ctx, addr, value);
1013
1014 addr = mmDPGV1_PIPE_ARBITRATION_CONTROL1;
1015 value = dm_read_reg(mi->ctx, addr);
1016 pix_dur = 1000000000ULL / pix_clk_khz;
1017 set_reg_field_value(
1018 value,
1019 pix_dur,
1020 DPGV1_PIPE_ARBITRATION_CONTROL1,
1021 PIXEL_DURATION);
1022 dm_write_reg(mi->ctx, addr, value);
1023
1024 addr = mmDPGV0_PIPE_ARBITRATION_CONTROL2;
1025 value = 0x4000800;
1026 dm_write_reg(mi->ctx, addr, value);
1027
1028 addr = mmDPGV1_PIPE_ARBITRATION_CONTROL2;
1029 value = 0x4000800;
1030 dm_write_reg(mi->ctx, addr, value);
1031 }
1032
1033}
1034
1035void dce110_free_mem_input_v(
1036 struct mem_input *mi,
1037 uint32_t total_stream_num)
1038{
1039}
1040
1041static struct mem_input_funcs dce110_mem_input_v_funcs = {
1042 .mem_input_program_display_marks =
1043 dce110_mem_input_v_program_display_marks,
1044 .mem_input_program_chroma_display_marks =
1045 dce110_mem_input_program_chroma_display_marks,
1046 .allocate_mem_input = dce110_allocate_mem_input_v,
1047 .free_mem_input = dce110_free_mem_input_v,
1048 .mem_input_program_surface_flip_and_addr =
1049 dce110_mem_input_v_program_surface_flip_and_addr,
1050 .mem_input_program_pte_vm =
1051 dce110_mem_input_v_program_pte_vm,
1052 .mem_input_program_surface_config =
1053 dce110_mem_input_v_program_surface_config,
1054 .mem_input_is_flip_pending =
1055 dce110_mem_input_v_is_surface_pending
1056};
1057/*****************************************/
1058/* Constructor, Destructor */
1059/*****************************************/
1060
1061bool dce110_mem_input_v_construct(
1062 struct dce110_mem_input *mem_input110,
1063 struct dc_context *ctx)
1064{
1065 mem_input110->base.funcs = &dce110_mem_input_v_funcs;
1066 mem_input110->base.ctx = ctx;
1067
1068 mem_input110->base.inst = 0;
1069
1070 mem_input110->offsets = dce110_mi_v_reg_offsets[0];
1071
1072 return true;
1073}
1074
1075#if 0
1076void dce110_mem_input_v_destroy(struct mem_input **mem_input)
1077{
1078 dm_free(TO_DCE110_MEM_INPUT(*mem_input));
1079 *mem_input = NULL;
1080}
1081#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
new file mode 100644
index 000000000000..5b1796ccefc0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
@@ -0,0 +1,94 @@
1/* Copyright 2012-16 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_MEM_INPUT_V_DCE110_H__
26#define __DC_MEM_INPUT_V_DCE110_H__
27
28#include "mem_input.h"
29#include "dce110_mem_input.h"
30
31bool dce110_mem_input_v_construct(
32 struct dce110_mem_input *mem_input110,
33 struct dc_context *ctx);
34
35/*
 36 * This function will program the NBP, stutter and urgency watermarks to the
 37 * minimum allowable values
38 */
39void dce110_mem_input_v_program_display_marks(
40 struct mem_input *mem_input,
41 struct bw_watermarks nbp,
42 struct bw_watermarks stutter,
43 struct bw_watermarks urgent,
44 uint32_t total_dest_line_time_ns);
45
46/*
 47 * This function will allocate a DMIF buffer and program the required
 48 * pixel duration for the pipe
49 */
 50void dce110_allocate_mem_input_v(
51 struct mem_input *mem_input,
52 uint32_t h_total,/* for current stream */
53 uint32_t v_total,/* for current stream */
54 uint32_t pix_clk_khz,/* for current stream */
55 uint32_t total_stream_num);
56
57/*
 58 * This function will deallocate a DMIF buffer from the pipe
59 */
 60void dce110_free_mem_input_v(
61 struct mem_input *mem_input,
62 uint32_t total_stream_num);
63
64/*
65 * This function programs hsync/vsync mode and surface address
66 */
67bool dce110_mem_input_v_program_surface_flip_and_addr(
68 struct mem_input *mem_input,
69 const struct dc_plane_address *address,
70 bool flip_immediate);
71
72/*
73 * dce110_mem_input_v_program_scatter_gather
74 *
75 * This function will program scatter gather registers.
76 */
77bool dce110_mem_input_v_program_pte_vm(
78 struct mem_input *mem_input,
79 enum surface_pixel_format format,
80 union dc_tiling_info *tiling_info,
81 enum dc_rotation_angle rotation);
82
83/*
84 * This function will program surface tiling, size, rotation and pixel format
 85 * to the corresponding DCP registers.
86 */
87bool dce110_mem_input_v_program_surface_config(
88 struct mem_input *mem_input,
89 enum surface_pixel_format format,
90 union dc_tiling_info *tiling_info,
91 union plane_size *plane_size,
 92	enum dc_rotation_angle rotation,
	struct dc_plane_dcc_param *dcc,
	bool horizontal_mirror);
93
94#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.c
new file mode 100644
index 000000000000..698ec2f55d0f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE11 register header files */
29#include "dce/dce_11_0_d.h"
30#include "dce/dce_11_0_sh_mask.h"
31
32#include "dce110_opp.h"
33
34#include "gamma_types.h"
35
36enum {
37 MAX_LUT_ENTRY = 256,
38 MAX_NUMBER_OF_ENTRIES = 256
39};
40
41/*****************************************/
42/* Constructor, Destructor */
43/*****************************************/
44
45static const struct opp_funcs funcs = {
46 .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut,
47 .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment,
48 .opp_set_csc_default = dce110_opp_set_csc_default,
49 .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
50 .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl,
51 .opp_set_regamma_mode = dce110_opp_set_regamma_mode,
52 .opp_destroy = dce110_opp_destroy,
53 .opp_program_fmt = dce110_opp_program_fmt,
54};
55
56bool dce110_opp_construct(struct dce110_opp *opp110,
57 struct dc_context *ctx,
58 uint32_t inst,
59 const struct dce110_opp_reg_offsets *offsets)
60{
61 opp110->base.funcs = &funcs;
62
63 opp110->base.ctx = ctx;
64
65 opp110->base.inst = inst;
66
67 opp110->offsets = *offsets;
68
69 return true;
70}
71
72void dce110_opp_destroy(struct output_pixel_processor **opp)
73{
74 dm_free(FROM_DCE11_OPP(*opp));
75 *opp = NULL;
76}
77
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.h
new file mode 100644
index 000000000000..2fbb2415cc97
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp.h
@@ -0,0 +1,149 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_OPP_DCE110_H__
26#define __DC_OPP_DCE110_H__
27
28#include "dc_types.h"
29#include "opp.h"
30#include "core_types.h"
31
32#include "gamma_types.h" /* decprecated */
33
34struct gamma_parameters;
35
36#define FROM_DCE11_OPP(opp)\
37 container_of(opp, struct dce110_opp, base)
38
39enum dce110_opp_reg_type {
40 DCE110_OPP_REG_DCP = 0,
41 DCE110_OPP_REG_DCFE,
42 DCE110_OPP_REG_FMT,
43
44 DCE110_OPP_REG_MAX
45};
46
47struct dce110_regamma {
48 struct gamma_curve arr_curve_points[16];
49 struct curve_points arr_points[3];
50 uint32_t hw_points_num;
51 struct hw_x_point *coordinates_x;
52 struct pwl_result_data *rgb_resulted;
53
54 /* re-gamma curve */
55 struct pwl_float_data_ex *rgb_regamma;
56 /* coeff used to map user evenly distributed points
57 * to our hardware points (predefined) for gamma 256 */
58 struct pixel_gamma_point *coeff128;
59 struct pixel_gamma_point *coeff128_oem;
60 /* coeff used to map user evenly distributed points
61 * to our hardware points (predefined) for gamma 1025 */
62 struct pixel_gamma_point *coeff128_dx;
63 /* evenly distributed points, gamma 256 software points 0-255 */
64 struct gamma_pixel *axis_x_256;
65 /* evenly distributed points, gamma 1025 software points 0-1025 */
66 struct gamma_pixel *axis_x_1025;
67 /* OEM supplied gamma for regamma LUT */
68 struct pwl_float_data *rgb_oem;
69 /* user supplied gamma */
70 struct pwl_float_data *rgb_user;
71 uint32_t extra_points;
72 bool use_half_points;
73 struct fixed31_32 x_max1;
74 struct fixed31_32 x_max2;
75 struct fixed31_32 x_min;
76 struct fixed31_32 divider1;
77 struct fixed31_32 divider2;
78 struct fixed31_32 divider3;
79};
80
81/* OPP RELATED */
82#define TO_DCE110_OPP(opp)\
83 container_of(opp, struct dce110_opp, base)
84
85struct dce110_opp_reg_offsets {
86 uint32_t fmt_offset;
87 uint32_t fmt_mem_offset;
88 uint32_t dcp_offset;
89 uint32_t dcfe_offset;
90};
91
92struct dce110_opp {
93 struct output_pixel_processor base;
94 struct dce110_opp_reg_offsets offsets;
95 struct dce110_regamma regamma;
96};
97
98bool dce110_opp_construct(struct dce110_opp *opp110,
99 struct dc_context *ctx,
100 uint32_t inst,
101 const struct dce110_opp_reg_offsets *offsets);
102
103void dce110_opp_destroy(struct output_pixel_processor **opp);
104
105/* REGAMMA RELATED */
106void dce110_opp_power_on_regamma_lut(
107 struct output_pixel_processor *opp,
108 bool power_on);
109
110bool dce110_opp_program_regamma_pwl(
111 struct output_pixel_processor *opp,
112 const struct pwl_params *params);
113
114void dce110_opp_set_regamma_mode(struct output_pixel_processor *opp,
115 enum opp_regamma mode);
116
117void dce110_opp_set_csc_adjustment(
118 struct output_pixel_processor *opp,
119 const struct out_csc_color_matrix *tbl_entry);
120
121void dce110_opp_set_csc_default(
122 struct output_pixel_processor *opp,
123 const struct default_adjustment *default_adjust);
124
125/* FORMATTER RELATED */
126void dce110_opp_program_bit_depth_reduction(
127 struct output_pixel_processor *opp,
128 const struct bit_depth_reduction_params *params);
129
130void dce110_opp_program_clamping_and_pixel_encoding(
131 struct output_pixel_processor *opp,
132 const struct clamping_and_pixel_encoding_params *params);
133
134void dce110_opp_set_dyn_expansion(
135 struct output_pixel_processor *opp,
136 enum dc_color_space color_sp,
137 enum dc_color_depth color_dpth,
138 enum signal_type signal);
139
140void dce110_opp_program_fmt(
141 struct output_pixel_processor *opp,
142 struct bit_depth_reduction_params *fmt_bit_depth,
143 struct clamping_and_pixel_encoding_params *clamping);
144
145void dce110_opp_set_clamping(
146 struct dce110_opp *opp110,
147 const struct clamping_and_pixel_encoding_params *params);
148
149#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc.c
new file mode 100644
index 000000000000..b46db202ddbb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc.c
@@ -0,0 +1,363 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dce110_opp.h"
28#include "basics/conversion.h"
29
30/* include DCE11 register header files */
31#include "dce/dce_11_0_d.h"
32#include "dce/dce_11_0_sh_mask.h"
33
34#define DCP_REG(reg)\
35 (reg + opp110->offsets.dcp_offset)
36
37enum {
38 OUTPUT_CSC_MATRIX_SIZE = 12
39};
40
41static const struct out_csc_color_matrix global_color_matrix[] = {
42{ COLOR_SPACE_SRGB,
43 { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
44{ COLOR_SPACE_SRGB_LIMITED,
45 { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
46{ COLOR_SPACE_YCBCR601,
47 { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
48 0xF6B9, 0xE00, 0x1000} },
49{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
50 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
51/* TODO: correct values below */
52{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
53 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
54{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
55 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
56};
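/* The coefficients are in the S2.13 fixed-point format used by the OUTPUT_CSC
 * registers (13 fractional bits), so 0x2000 = 8192/8192 = 1.0. For example,
 * the limited-range entries use 0x1B60 = 7008/8192 ~= 0.855 (219/256) as the
 * scale and 0x200 = 512/8192 = 0.0625 (16/256) as the offset. */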
57
58enum csc_color_mode {
59 /* 00 - BITS2:0 Bypass */
60 CSC_COLOR_MODE_GRAPHICS_BYPASS,
61 /* 01 - hard coded coefficient TV RGB */
62 CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
63 /* 04 - programmable OUTPUT CSC coefficient */
64 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
65};
66
67static void program_color_matrix(
68 struct dce110_opp *opp110,
69 const struct out_csc_color_matrix *tbl_entry,
70 enum grph_color_adjust_option options)
71{
72 struct dc_context *ctx = opp110->base.ctx;
73 {
74 uint32_t value = 0;
75 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C11_C12);
76 /* fixed S2.13 format */
77 set_reg_field_value(
78 value,
79 tbl_entry->regval[0],
80 OUTPUT_CSC_C11_C12,
81 OUTPUT_CSC_C11);
82
83 set_reg_field_value(
84 value,
85 tbl_entry->regval[1],
86 OUTPUT_CSC_C11_C12,
87 OUTPUT_CSC_C12);
88
89 dm_write_reg(ctx, addr, value);
90 }
91 {
92 uint32_t value = 0;
93 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C13_C14);
94 /* fixed S2.13 format */
95 set_reg_field_value(
96 value,
97 tbl_entry->regval[2],
98 OUTPUT_CSC_C13_C14,
99 OUTPUT_CSC_C13);
100 /* fixed S0.13 format */
101 set_reg_field_value(
102 value,
103 tbl_entry->regval[3],
104 OUTPUT_CSC_C13_C14,
105 OUTPUT_CSC_C14);
106
107 dm_write_reg(ctx, addr, value);
108 }
109 {
110 uint32_t value = 0;
111 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C21_C22);
112 /* fixed S2.13 format */
113 set_reg_field_value(
114 value,
115 tbl_entry->regval[4],
116 OUTPUT_CSC_C21_C22,
117 OUTPUT_CSC_C21);
118 /* fixed S2.13 format */
119 set_reg_field_value(
120 value,
121 tbl_entry->regval[5],
122 OUTPUT_CSC_C21_C22,
123 OUTPUT_CSC_C22);
124
125 dm_write_reg(ctx, addr, value);
126 }
127 {
128 uint32_t value = 0;
129 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C23_C24);
130 /* fixed S2.13 format */
131 set_reg_field_value(
132 value,
133 tbl_entry->regval[6],
134 OUTPUT_CSC_C23_C24,
135 OUTPUT_CSC_C23);
136 /* fixed S0.13 format */
137 set_reg_field_value(
138 value,
139 tbl_entry->regval[7],
140 OUTPUT_CSC_C23_C24,
141 OUTPUT_CSC_C24);
142
143 dm_write_reg(ctx, addr, value);
144 }
145 {
146 uint32_t value = 0;
147 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C31_C32);
148 /* fixed S2.13 format */
149 set_reg_field_value(
150 value,
151 tbl_entry->regval[8],
152 OUTPUT_CSC_C31_C32,
153 OUTPUT_CSC_C31);
154 /* fixed S0.13 format */
155 set_reg_field_value(
156 value,
157 tbl_entry->regval[9],
158 OUTPUT_CSC_C31_C32,
159 OUTPUT_CSC_C32);
160
161 dm_write_reg(ctx, addr, value);
162 }
163 {
164 uint32_t value = 0;
165 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C33_C34);
166 /* fixed S2.13 format */
167 set_reg_field_value(
168 value,
169 tbl_entry->regval[10],
170 OUTPUT_CSC_C33_C34,
171 OUTPUT_CSC_C33);
172 /* fixed S0.13 format */
173 set_reg_field_value(
174 value,
175 tbl_entry->regval[11],
176 OUTPUT_CSC_C33_C34,
177 OUTPUT_CSC_C34);
178
179 dm_write_reg(ctx, addr, value);
180 }
181}
182
183static bool configure_graphics_mode(
184 struct dce110_opp *opp110,
185 enum csc_color_mode config,
186 enum graphics_csc_adjust_type csc_adjust_type,
187 enum dc_color_space color_space)
188{
189 struct dc_context *ctx = opp110->base.ctx;
190 uint32_t addr = DCP_REG(mmOUTPUT_CSC_CONTROL);
191 uint32_t value = dm_read_reg(ctx, addr);
192
193 set_reg_field_value(
194 value,
195 0,
196 OUTPUT_CSC_CONTROL,
197 OUTPUT_CSC_GRPH_MODE);
198
199 if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
200 if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC) {
201 set_reg_field_value(
202 value,
203 4,
204 OUTPUT_CSC_CONTROL,
205 OUTPUT_CSC_GRPH_MODE);
206 } else {
207
208 switch (color_space) {
209 case COLOR_SPACE_SRGB:
210 /* by pass */
211 set_reg_field_value(
212 value,
213 0,
214 OUTPUT_CSC_CONTROL,
215 OUTPUT_CSC_GRPH_MODE);
216 break;
217 case COLOR_SPACE_SRGB_LIMITED:
218 /* TV RGB */
219 set_reg_field_value(
220 value,
221 1,
222 OUTPUT_CSC_CONTROL,
223 OUTPUT_CSC_GRPH_MODE);
224 break;
225 case COLOR_SPACE_YCBCR601:
226 case COLOR_SPACE_YPBPR601:
227 case COLOR_SPACE_YCBCR601_LIMITED:
228 /* YCbCr601 */
229 set_reg_field_value(
230 value,
231 2,
232 OUTPUT_CSC_CONTROL,
233 OUTPUT_CSC_GRPH_MODE);
234 break;
235 case COLOR_SPACE_YCBCR709:
236 case COLOR_SPACE_YPBPR709:
237 case COLOR_SPACE_YCBCR709_LIMITED:
238 /* YCbCr709 */
239 set_reg_field_value(
240 value,
241 3,
242 OUTPUT_CSC_CONTROL,
243 OUTPUT_CSC_GRPH_MODE);
244 break;
245 default:
246 return false;
247 }
248 }
249 } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
250 switch (color_space) {
251 case COLOR_SPACE_SRGB:
252 /* by pass */
253 set_reg_field_value(
254 value,
255 0,
256 OUTPUT_CSC_CONTROL,
257 OUTPUT_CSC_GRPH_MODE);
258 break;
259 case COLOR_SPACE_SRGB_LIMITED:
260 /* TV RGB */
261 set_reg_field_value(
262 value,
263 1,
264 OUTPUT_CSC_CONTROL,
265 OUTPUT_CSC_GRPH_MODE);
266 break;
267 case COLOR_SPACE_YCBCR601:
268 case COLOR_SPACE_YPBPR601:
269 case COLOR_SPACE_YCBCR601_LIMITED:
270 /* YCbCr601 */
271 set_reg_field_value(
272 value,
273 2,
274 OUTPUT_CSC_CONTROL,
275 OUTPUT_CSC_GRPH_MODE);
276 break;
277 case COLOR_SPACE_YCBCR709:
278 case COLOR_SPACE_YPBPR709:
279 case COLOR_SPACE_YCBCR709_LIMITED:
280 /* YCbCr709 */
281 set_reg_field_value(
282 value,
283 3,
284 OUTPUT_CSC_CONTROL,
285 OUTPUT_CSC_GRPH_MODE);
286 break;
287 default:
288 return false;
289 }
290
291 } else
292 /* bypass */
293 set_reg_field_value(
294 value,
295 0,
296 OUTPUT_CSC_CONTROL,
297 OUTPUT_CSC_GRPH_MODE);
298
299 addr = DCP_REG(mmOUTPUT_CSC_CONTROL);
300 dm_write_reg(ctx, addr, value);
301
302 return true;
303}
304
305void dce110_opp_set_csc_adjustment(
306 struct output_pixel_processor *opp,
307 const struct out_csc_color_matrix *tbl_entry)
308{
309 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
310 enum csc_color_mode config =
311 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
312
313 program_color_matrix(
314 opp110, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW);
315
316 /* We did everything, now program DxOUTPUT_CSC_CONTROL */
317 configure_graphics_mode(opp110, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
318 tbl_entry->color_space);
319}
320
321void dce110_opp_set_csc_default(
322 struct output_pixel_processor *opp,
323 const struct default_adjustment *default_adjust)
324{
325 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
326 enum csc_color_mode config =
327 CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
328
329 if (default_adjust->force_hw_default == false) {
330 const struct out_csc_color_matrix *elm;
331 /* parameter currently not in use */
332 enum grph_color_adjust_option option =
333 GRPH_COLOR_MATRIX_HW_DEFAULT;
334 uint32_t i;
335 /*
336 * If force_hw_default is false we program a locally defined matrix.
337 * If it is true we use the predefined HW matrix and do not need to
338 * program one; the OEM requests the HW default via a runtime
339 * parameter.
340 */
341 option = GRPH_COLOR_MATRIX_SW;
342
343 for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
344 elm = &global_color_matrix[i];
345 if (elm->color_space != default_adjust->out_color_space)
346 continue;
347 /* program the matrix with default values from this
348 * file */
349 program_color_matrix(opp110, elm, option);
350 config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
351 break;
352 }
353 }
354
355 /* Configure what we programmed:
356 * 1. Default values from this file
357 * 2. Hardware default from ROM_A, in which case we do not need to
358 * program the matrix */
359
360 configure_graphics_mode(opp110, config,
361 default_adjust->csc_adjust_type,
362 default_adjust->out_color_space);
363}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
new file mode 100644
index 000000000000..975466f6e424
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -0,0 +1,738 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dce110_opp.h"
28#include "basics/conversion.h"
29
30/* include DCE11 register header files */
31#include "dce/dce_11_0_d.h"
32#include "dce/dce_11_0_sh_mask.h"
33#include "dce/dce_11_0_enum.h"
34
35enum {
36 OUTPUT_CSC_MATRIX_SIZE = 12
37};
38
39/* contrast: 0 - 2.0, default 1.0 */
40#define UNDERLAY_CONTRAST_DEFAULT 100
41#define UNDERLAY_CONTRAST_MAX 200
42#define UNDERLAY_CONTRAST_MIN 0
43#define UNDERLAY_CONTRAST_STEP 1
44#define UNDERLAY_CONTRAST_DIVIDER 100
45
46/* Saturation: 0 - 2.0; default 1.0 */
47#define UNDERLAY_SATURATION_DEFAULT 100 /*1.00*/
48#define UNDERLAY_SATURATION_MIN 0
49#define UNDERLAY_SATURATION_MAX 200 /* 2.00 */
50#define UNDERLAY_SATURATION_STEP 1 /* 0.01 */
51/* actual max underlay saturation
52 * value = UNDERLAY_SATURATION_MAX / UNDERLAY_SATURATION_DIVIDER
53 */
54
55/* Hue */
56#define UNDERLAY_HUE_DEFAULT 0
57#define UNDERLAY_HUE_MIN -300
58#define UNDERLAY_HUE_MAX 300
59#define UNDERLAY_HUE_STEP 5
60#define UNDERLAY_HUE_DIVIDER 10 /* HW range: -30 ~ +30 */
61#define UNDERLAY_SATURATION_DIVIDER 100
62
63/* Brightness: in DAL usually -0.25 ~ +0.25.
64 * In MMD it is -100 to +100 in the 16-235 range, which when scaled to full
65 * range is ~-116 to +116; normalized this is about 0.4566.
66 * With a divider of 100 this becomes 46, but another divider may give better
67 * precision; the ideal one is 100/219 ((100/255)*(255/219)),
68 * i.e. min/max = +-100, divider = 219.
69 * Default is 0.0.
70 */
71#define UNDERLAY_BRIGHTNESS_DEFAULT 0
72#define UNDERLAY_BRIGHTNESS_MIN -46 /* ~116/255 */
73#define UNDERLAY_BRIGHTNESS_MAX 46
74#define UNDERLAY_BRIGHTNESS_STEP 1 /* .01 */
75#define UNDERLAY_BRIGHTNESS_DIVIDER 100
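/*
 * Worked example of the divider convention above: a runtime brightness of
 * +23 with UNDERLAY_BRIGHTNESS_DIVIDER 100 encodes +23/100 = +0.23 of full
 * range; with the "ideal" divider of 219 the same +0.23 would instead be
 * passed as round(0.23 * 219) = 50.
 */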
76
77static const struct out_csc_color_matrix global_color_matrix[] = {
78{ COLOR_SPACE_SRGB,
79 { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
80{ COLOR_SPACE_SRGB_LIMITED,
81 { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
82{ COLOR_SPACE_YCBCR601,
83 { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
84 0xF6B9, 0xE00, 0x1000} },
85{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
86 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
87/* TODO: correct values below */
88{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
89 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
90{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
91 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
92};
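/*
 * The regval entries above are "fixed S2.13": 13 fractional bits in a
 * 16-bit field, so 0x2000 (8192) is exactly 1.0 and, read as two's
 * complement, 0xF447 is -3001/8192 ~= -0.366. A minimal sketch of the
 * packing (helper name and rounding are illustrative only, the real
 * tables may have been generated differently):
 *
 *   static uint16_t to_s2_13(double coeff)
 *   {
 *       return (uint16_t)((int32_t)(coeff * 8192.0) & 0xFFFF);
 *   }
 */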
93
94enum csc_color_mode {
95 /* 00 - BITS2:0 Bypass */
96 CSC_COLOR_MODE_GRAPHICS_BYPASS,
97 /* 01 - hard coded coefficient TV RGB */
98 CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
99 /* 04 - programmable OUTPUT CSC coefficient */
100 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
101};
102
103static void program_color_matrix_v(
104 struct dce110_opp *opp110,
105 const struct out_csc_color_matrix *tbl_entry,
106 enum grph_color_adjust_option options)
107{
108 struct dc_context *ctx = opp110->base.ctx;
109 uint32_t cntl_value = dm_read_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL);
110 bool use_set_a = (get_reg_field_value(cntl_value,
111 COL_MAN_OUTPUT_CSC_CONTROL,
112 OUTPUT_CSC_MODE) != 4);
113
114 set_reg_field_value(
115 cntl_value,
116 0,
117 COL_MAN_OUTPUT_CSC_CONTROL,
118 OUTPUT_CSC_MODE);
119
120 if (use_set_a) {
121 {
122 uint32_t value = 0;
123 uint32_t addr = mmOUTPUT_CSC_C11_C12_A;
124 /* fixed S2.13 format */
125 set_reg_field_value(
126 value,
127 tbl_entry->regval[0],
128 OUTPUT_CSC_C11_C12_A,
129 OUTPUT_CSC_C11_A);
130
131 set_reg_field_value(
132 value,
133 tbl_entry->regval[1],
134 OUTPUT_CSC_C11_C12_A,
135 OUTPUT_CSC_C12_A);
136
137 dm_write_reg(ctx, addr, value);
138 }
139 {
140 uint32_t value = 0;
141 uint32_t addr = mmOUTPUT_CSC_C13_C14_A;
142 /* fixed S2.13 format */
143 set_reg_field_value(
144 value,
145 tbl_entry->regval[2],
146 OUTPUT_CSC_C13_C14_A,
147 OUTPUT_CSC_C13_A);
148 /* fixed S0.13 format */
149 set_reg_field_value(
150 value,
151 tbl_entry->regval[3],
152 OUTPUT_CSC_C13_C14_A,
153 OUTPUT_CSC_C14_A);
154
155 dm_write_reg(ctx, addr, value);
156 }
157 {
158 uint32_t value = 0;
159 uint32_t addr = mmOUTPUT_CSC_C21_C22_A;
160 /* fixed S2.13 format */
161 set_reg_field_value(
162 value,
163 tbl_entry->regval[4],
164 OUTPUT_CSC_C21_C22_A,
165 OUTPUT_CSC_C21_A);
166 /* fixed S2.13 format */
167 set_reg_field_value(
168 value,
169 tbl_entry->regval[5],
170 OUTPUT_CSC_C21_C22_A,
171 OUTPUT_CSC_C22_A);
172
173 dm_write_reg(ctx, addr, value);
174 }
175 {
176 uint32_t value = 0;
177 uint32_t addr = mmOUTPUT_CSC_C23_C24_A;
178 /* fixed S2.13 format */
179 set_reg_field_value(
180 value,
181 tbl_entry->regval[6],
182 OUTPUT_CSC_C23_C24_A,
183 OUTPUT_CSC_C23_A);
184 /* fixed S0.13 format */
185 set_reg_field_value(
186 value,
187 tbl_entry->regval[7],
188 OUTPUT_CSC_C23_C24_A,
189 OUTPUT_CSC_C24_A);
190
191 dm_write_reg(ctx, addr, value);
192 }
193 {
194 uint32_t value = 0;
195 uint32_t addr = mmOUTPUT_CSC_C31_C32_A;
196 /* fixed S2.13 format */
197 set_reg_field_value(
198 value,
199 tbl_entry->regval[8],
200 OUTPUT_CSC_C31_C32_A,
201 OUTPUT_CSC_C31_A);
202 /* fixed S0.13 format */
203 set_reg_field_value(
204 value,
205 tbl_entry->regval[9],
206 OUTPUT_CSC_C31_C32_A,
207 OUTPUT_CSC_C32_A);
208
209 dm_write_reg(ctx, addr, value);
210 }
211 {
212 uint32_t value = 0;
213 uint32_t addr = mmOUTPUT_CSC_C33_C34_A;
214 /* fixed S2.13 format */
215 set_reg_field_value(
216 value,
217 tbl_entry->regval[10],
218 OUTPUT_CSC_C33_C34_A,
219 OUTPUT_CSC_C33_A);
220 /* fixed S0.13 format */
221 set_reg_field_value(
222 value,
223 tbl_entry->regval[11],
224 OUTPUT_CSC_C33_C34_A,
225 OUTPUT_CSC_C34_A);
226
227 dm_write_reg(ctx, addr, value);
228 }
229 set_reg_field_value(
230 cntl_value,
231 4,
232 COL_MAN_OUTPUT_CSC_CONTROL,
233 OUTPUT_CSC_MODE);
234 } else {
235 {
236 uint32_t value = 0;
237 uint32_t addr = mmOUTPUT_CSC_C11_C12_B;
238 /* fixed S2.13 format */
239 set_reg_field_value(
240 value,
241 tbl_entry->regval[0],
242 OUTPUT_CSC_C11_C12_B,
243 OUTPUT_CSC_C11_B);
244
245 set_reg_field_value(
246 value,
247 tbl_entry->regval[1],
248 OUTPUT_CSC_C11_C12_B,
249 OUTPUT_CSC_C12_B);
250
251 dm_write_reg(ctx, addr, value);
252 }
253 {
254 uint32_t value = 0;
255 uint32_t addr = mmOUTPUT_CSC_C13_C14_B;
256 /* fixed S2.13 format */
257 set_reg_field_value(
258 value,
259 tbl_entry->regval[2],
260 OUTPUT_CSC_C13_C14_B,
261 OUTPUT_CSC_C13_B);
262 /* fixed S0.13 format */
263 set_reg_field_value(
264 value,
265 tbl_entry->regval[3],
266 OUTPUT_CSC_C13_C14_B,
267 OUTPUT_CSC_C14_B);
268
269 dm_write_reg(ctx, addr, value);
270 }
271 {
272 uint32_t value = 0;
273 uint32_t addr = mmOUTPUT_CSC_C21_C22_B;
274 /* fixed S2.13 format */
275 set_reg_field_value(
276 value,
277 tbl_entry->regval[4],
278 OUTPUT_CSC_C21_C22_B,
279 OUTPUT_CSC_C21_B);
280 /* fixed S2.13 format */
281 set_reg_field_value(
282 value,
283 tbl_entry->regval[5],
284 OUTPUT_CSC_C21_C22_B,
285 OUTPUT_CSC_C22_B);
286
287 dm_write_reg(ctx, addr, value);
288 }
289 {
290 uint32_t value = 0;
291 uint32_t addr = mmOUTPUT_CSC_C23_C24_B;
292 /* fixed S2.13 format */
293 set_reg_field_value(
294 value,
295 tbl_entry->regval[6],
296 OUTPUT_CSC_C23_C24_B,
297 OUTPUT_CSC_C23_B);
298 /* fixed S0.13 format */
299 set_reg_field_value(
300 value,
301 tbl_entry->regval[7],
302 OUTPUT_CSC_C23_C24_B,
303 OUTPUT_CSC_C24_B);
304
305 dm_write_reg(ctx, addr, value);
306 }
307 {
308 uint32_t value = 0;
309 uint32_t addr = mmOUTPUT_CSC_C31_C32_B;
310 /* fixed S2.13 format */
311 set_reg_field_value(
312 value,
313 tbl_entry->regval[8],
314 OUTPUT_CSC_C31_C32_B,
315 OUTPUT_CSC_C31_B);
316 /* fixed S0.13 format */
317 set_reg_field_value(
318 value,
319 tbl_entry->regval[9],
320 OUTPUT_CSC_C31_C32_B,
321 OUTPUT_CSC_C32_B);
322
323 dm_write_reg(ctx, addr, value);
324 }
325 {
326 uint32_t value = 0;
327 uint32_t addr = mmOUTPUT_CSC_C33_C34_B;
328 /* fixed S2.13 format */
329 set_reg_field_value(
330 value,
331 tbl_entry->regval[10],
332 OUTPUT_CSC_C33_C34_B,
333 OUTPUT_CSC_C33_B);
334 /* fixed S0.13 format */
335 set_reg_field_value(
336 value,
337 tbl_entry->regval[11],
338 OUTPUT_CSC_C33_C34_B,
339 OUTPUT_CSC_C34_B);
340
341 dm_write_reg(ctx, addr, value);
342 }
343 set_reg_field_value(
344 cntl_value,
345 5,
346 COL_MAN_OUTPUT_CSC_CONTROL,
347 OUTPUT_CSC_MODE);
348 }
349
350 dm_write_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL, cntl_value);
351}
352
353static bool configure_graphics_mode_v(
354 struct dce110_opp *opp110,
355 enum csc_color_mode config,
356 enum graphics_csc_adjust_type csc_adjust_type,
357 enum dc_color_space color_space)
358{
359 struct dc_context *ctx = opp110->base.ctx;
360 uint32_t addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
361 uint32_t value = dm_read_reg(ctx, addr);
362
363 set_reg_field_value(
364 value,
365 0,
366 COL_MAN_OUTPUT_CSC_CONTROL,
367 OUTPUT_CSC_MODE);
368
369 if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
370 if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC)
371 return true;
372
373 switch (color_space) {
374 case COLOR_SPACE_SRGB:
375 /* bypass */
376 set_reg_field_value(
377 value,
378 0,
379 COL_MAN_OUTPUT_CSC_CONTROL,
380 OUTPUT_CSC_MODE);
381 break;
382 case COLOR_SPACE_SRGB_LIMITED:
383 /* not supported for underlay on CZ */
384 return false;
385
386 case COLOR_SPACE_YCBCR601:
387 case COLOR_SPACE_YPBPR601:
388 case COLOR_SPACE_YCBCR601_LIMITED:
389 /* YCbCr601 */
390 set_reg_field_value(
391 value,
392 2,
393 COL_MAN_OUTPUT_CSC_CONTROL,
394 OUTPUT_CSC_MODE);
395 break;
396 case COLOR_SPACE_YCBCR709:
397 case COLOR_SPACE_YPBPR709:
398 case COLOR_SPACE_YCBCR709_LIMITED:
399 /* YCbCr709 */
400 set_reg_field_value(
401 value,
402 3,
403 COL_MAN_OUTPUT_CSC_CONTROL,
404 OUTPUT_CSC_MODE);
405 break;
406 default:
407 return false;
408 }
409
410 } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
411 switch (color_space) {
412 case COLOR_SPACE_SRGB:
413 /* bypass */
414 set_reg_field_value(
415 value,
416 0,
417 COL_MAN_OUTPUT_CSC_CONTROL,
418 OUTPUT_CSC_MODE);
419 break;
420 case COLOR_SPACE_SRGB_LIMITED:
421 /* not supported for underlay on CZ */
422 return false;
423 case COLOR_SPACE_YCBCR601:
424 case COLOR_SPACE_YPBPR601:
425 case COLOR_SPACE_YCBCR601_LIMITED:
426 /* YCbCr601 */
427 set_reg_field_value(
428 value,
429 2,
430 COL_MAN_OUTPUT_CSC_CONTROL,
431 OUTPUT_CSC_MODE);
432 break;
433 case COLOR_SPACE_YCBCR709:
434 case COLOR_SPACE_YPBPR709:
435 case COLOR_SPACE_YCBCR709_LIMITED:
436 /* YCbCr709 */
437 set_reg_field_value(
438 value,
439 3,
440 COL_MAN_OUTPUT_CSC_CONTROL,
441 OUTPUT_CSC_MODE);
442 break;
443 default:
444 return false;
445 }
446
447 } else
448 /* bypass */
449 set_reg_field_value(
450 value,
451 0,
452 COL_MAN_OUTPUT_CSC_CONTROL,
453 OUTPUT_CSC_MODE);
454
455 addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
456 dm_write_reg(ctx, addr, value);
457
458 return true;
459}
460
461/*TODO: color depth is not correct when this is called*/
462static void set_Denormalization(struct output_pixel_processor *opp,
463 enum dc_color_depth color_depth)
464{
465 uint32_t value = dm_read_reg(opp->ctx, mmDENORM_CLAMP_CONTROL);
466
467 switch (color_depth) {
468 case COLOR_DEPTH_888:
469 /* 255/256 for 8 bit output color depth */
470 set_reg_field_value(
471 value,
472 1,
473 DENORM_CLAMP_CONTROL,
474 DENORM_MODE);
475 break;
476 case COLOR_DEPTH_101010:
477 /* 1023/1024 for 10 bit output color depth */
478 set_reg_field_value(
479 value,
480 2,
481 DENORM_CLAMP_CONTROL,
482 DENORM_MODE);
483 break;
484 case COLOR_DEPTH_121212:
485 /* 4095/4096 for 12 bit output color depth */
486 set_reg_field_value(
487 value,
488 3,
489 DENORM_CLAMP_CONTROL,
490 DENORM_MODE);
491 break;
492 default:
493 /* not valid case */
494 break;
495 }
496
497 set_reg_field_value(
498 value,
499 1,
500 DENORM_CLAMP_CONTROL,
501 DENORM_10BIT_OUT);
502
503 dm_write_reg(opp->ctx, mmDENORM_CLAMP_CONTROL, value);
504}
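/*
 * The DENORM_MODE factors above presumably rescale the pipe's internal
 * full-scale value onto the output code maximum, e.g. for 8-bit output
 * 256 * (255/256) = 255 (0xFF), and for 10-bit 1024 * (1023/1024) = 1023.
 */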
505
506struct input_csc_matrix {
507 enum dc_color_space color_space;
508 uint32_t regval[12];
509};
510
511static const struct input_csc_matrix input_csc_matrix[] = {
512 {COLOR_SPACE_SRGB,
513/*1_1 1_2 1_3 1_4 2_1 2_2 2_3 2_4 3_1 3_2 3_3 3_4 */
514 {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
515 {COLOR_SPACE_SRGB_LIMITED,
516 {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
517 {COLOR_SPACE_YCBCR601,
518 {0x2cdd, 0x2000, 0x0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
519 0x0, 0x2000, 0x38b4, 0xe3a6} },
520 {COLOR_SPACE_YCBCR601_LIMITED,
521 {0x3353, 0x2568, 0x0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
522 0x0, 0x2568, 0x40de, 0xdd3a} },
523 {COLOR_SPACE_YCBCR709,
524 {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
525 0x2000, 0x3b61, 0xe24f} },
526 {COLOR_SPACE_YCBCR709_LIMITED,
527 {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
528 0x2568, 0x43ee, 0xdbb2} }
529};
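/*
 * Reading the COLOR_SPACE_YCBCR601 row in S2.13 (13 fractional bits):
 * 0x2000 is exactly 1.0 and 0x2cdd is 11485/8192 ~= 1.402, the familiar
 * Rec.601 Cr-to-R' term, so these appear to be YCbCr-to-RGB (input)
 * matrices in the same fixed-point format as the output CSC tables.
 */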
530
531static void program_input_csc(
532 struct output_pixel_processor *opp, enum dc_color_space color_space)
533{
534 int arr_size = ARRAY_SIZE(input_csc_matrix);
535 struct dc_context *ctx = opp->ctx;
536 const uint32_t *regval = NULL;
537 bool use_set_a;
538 uint32_t value;
539 int i;
540
541 for (i = 0; i < arr_size; i++)
542 if (input_csc_matrix[i].color_space == color_space) {
543 regval = input_csc_matrix[i].regval;
544 break;
545 }
546 if (regval == NULL) {
547 BREAK_TO_DEBUGGER();
548 return;
549 }
550
551 /*
552 * INPUT_CSC_MODE == 1 means set A is in use. The logic: if we are not
553 * currently using set A, program and switch to set A, otherwise set B.
554 */
555 value = dm_read_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL);
556 use_set_a = get_reg_field_value(
557 value, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_MODE) != 1;
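	/*
	 * Example of the ping-pong above: if INPUT_CSC_MODE currently reads 1
	 * (set A active), use_set_a is false, the new coefficients land in the
	 * B bank and INPUT_CSC_MODE is written as 2 at the end of this
	 * function, presumably so the inactive bank is updated while the
	 * active one keeps driving the pipe.
	 */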
558
559 if (use_set_a) {
560 /* fixed S2.13 format */
561 value = 0;
562 set_reg_field_value(
563 value, regval[0], INPUT_CSC_C11_C12_A, INPUT_CSC_C11_A);
564 set_reg_field_value(
565 value, regval[1], INPUT_CSC_C11_C12_A, INPUT_CSC_C12_A);
566 dm_write_reg(ctx, mmINPUT_CSC_C11_C12_A, value);
567
568 value = 0;
569 set_reg_field_value(
570 value, regval[2], INPUT_CSC_C13_C14_A, INPUT_CSC_C13_A);
571 set_reg_field_value(
572 value, regval[3], INPUT_CSC_C13_C14_A, INPUT_CSC_C14_A);
573 dm_write_reg(ctx, mmINPUT_CSC_C13_C14_A, value);
574
575 value = 0;
576 set_reg_field_value(
577 value, regval[4], INPUT_CSC_C21_C22_A, INPUT_CSC_C21_A);
578 set_reg_field_value(
579 value, regval[5], INPUT_CSC_C21_C22_A, INPUT_CSC_C22_A);
580 dm_write_reg(ctx, mmINPUT_CSC_C21_C22_A, value);
581
582 value = 0;
583 set_reg_field_value(
584 value, regval[6], INPUT_CSC_C23_C24_A, INPUT_CSC_C23_A);
585 set_reg_field_value(
586 value, regval[7], INPUT_CSC_C23_C24_A, INPUT_CSC_C24_A);
587 dm_write_reg(ctx, mmINPUT_CSC_C23_C24_A, value);
588
589 value = 0;
590 set_reg_field_value(
591 value, regval[8], INPUT_CSC_C31_C32_A, INPUT_CSC_C31_A);
592 set_reg_field_value(
593 value, regval[9], INPUT_CSC_C31_C32_A, INPUT_CSC_C32_A);
594 dm_write_reg(ctx, mmINPUT_CSC_C31_C32_A, value);
595
596 value = 0;
597 set_reg_field_value(
598 value, regval[10], INPUT_CSC_C33_C34_A, INPUT_CSC_C33_A);
599 set_reg_field_value(
600 value, regval[11], INPUT_CSC_C33_C34_A, INPUT_CSC_C34_A);
601 dm_write_reg(ctx, mmINPUT_CSC_C33_C34_A, value);
602 } else {
603 /* fixed S2.13 format */
604 value = 0;
605 set_reg_field_value(
606 value, regval[0], INPUT_CSC_C11_C12_B, INPUT_CSC_C11_B);
607 set_reg_field_value(
608 value, regval[1], INPUT_CSC_C11_C12_B, INPUT_CSC_C12_B);
609 dm_write_reg(ctx, mmINPUT_CSC_C11_C12_B, value);
610
611 value = 0;
612 set_reg_field_value(
613 value, regval[2], INPUT_CSC_C13_C14_B, INPUT_CSC_C13_B);
614 set_reg_field_value(
615 value, regval[3], INPUT_CSC_C13_C14_B, INPUT_CSC_C14_B);
616 dm_write_reg(ctx, mmINPUT_CSC_C13_C14_B, value);
617
618 value = 0;
619 set_reg_field_value(
620 value, regval[4], INPUT_CSC_C21_C22_B, INPUT_CSC_C21_B);
621 set_reg_field_value(
622 value, regval[5], INPUT_CSC_C21_C22_B, INPUT_CSC_C22_B);
623 dm_write_reg(ctx, mmINPUT_CSC_C21_C22_B, value);
624
625 value = 0;
626 set_reg_field_value(
627 value, regval[6], INPUT_CSC_C23_C24_B, INPUT_CSC_C23_B);
628 set_reg_field_value(
629 value, regval[7], INPUT_CSC_C23_C24_B, INPUT_CSC_C24_B);
630 dm_write_reg(ctx, mmINPUT_CSC_C23_C24_B, value);
631
632 value = 0;
633 set_reg_field_value(
634 value, regval[8], INPUT_CSC_C31_C32_B, INPUT_CSC_C31_B);
635 set_reg_field_value(
636 value, regval[9], INPUT_CSC_C31_C32_B, INPUT_CSC_C32_B);
637 dm_write_reg(ctx, mmINPUT_CSC_C31_C32_B, value);
638
639 value = 0;
640 set_reg_field_value(
641 value, regval[10], INPUT_CSC_C33_C34_B, INPUT_CSC_C33_B);
642 set_reg_field_value(
643 value, regval[11], INPUT_CSC_C33_C34_B, INPUT_CSC_C34_B);
644 dm_write_reg(ctx, mmINPUT_CSC_C33_C34_B, value);
645 }
646
647 /* KK: leave INPUT_CSC_CONVERSION_MODE at default */
648 value = 0;
649 /*
650 * Select the 8.4 input type instead of the default 12.0. Per the HW
651 * team this format depends on the UNP surface format: for 8-bit we
652 * should select 8.4 (4 bits truncated), for 10-bit it should be 10.2.
653 * Carrizo only supports 8-bit surfaces on the underlay pipe, so we
654 * can always keep this at 8.4 (input_type = 2). If later ASICs start
655 * supporting 10+ bits we will have a problem: surface programming,
656 * including UNP_GRPH*, is done in DalISR after this, so we either
657 * pass the surface format here or move this logic to the ISR.
658 */
659
660 set_reg_field_value(
661 value, 2, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_INPUT_TYPE);
662 set_reg_field_value(
663 value,
664 use_set_a ? 1 : 2,
665 COL_MAN_INPUT_CSC_CONTROL,
666 INPUT_CSC_MODE);
667
668 dm_write_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL, value);
669}
670
671void dce110_opp_v_set_csc_default(
672 struct output_pixel_processor *opp,
673 const struct default_adjustment *default_adjust)
674{
675 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
676 enum csc_color_mode config =
677 CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
678
679 if (default_adjust->force_hw_default == false) {
680 const struct out_csc_color_matrix *elm;
681 /* parameter currently not in use */
682 enum grph_color_adjust_option option =
683 GRPH_COLOR_MATRIX_HW_DEFAULT;
684 uint32_t i;
685 /*
686 * If force_hw_default is false we program a locally defined matrix.
687 * If it is true we use the predefined HW matrix and do not need to
688 * program one; the OEM requests the HW default via a runtime
689 * parameter.
690 */
691 option = GRPH_COLOR_MATRIX_SW;
692
693 for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
694 elm = &global_color_matrix[i];
695 if (elm->color_space != default_adjust->out_color_space)
696 continue;
697 /* program the matrix with default values from this
698 * file
699 */
700 program_color_matrix_v(opp110, elm, option);
701 config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
702 break;
703 }
704 }
705
706 program_input_csc(opp, default_adjust->in_color_space);
707
708 /* Configure what we programmed:
709 * 1. Default values from this file
710 * 2. Hardware default from ROM_A, in which case we do not need to
711 * program the matrix
712 */
713
714 configure_graphics_mode_v(opp110, config,
715 default_adjust->csc_adjust_type,
716 default_adjust->out_color_space);
717
718 set_Denormalization(opp, default_adjust->color_depth);
719}
720
721void dce110_opp_v_set_csc_adjustment(
722 struct output_pixel_processor *opp,
723 const struct out_csc_color_matrix *tbl_entry)
724{
725 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
726 enum csc_color_mode config =
727 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
728
729 program_color_matrix_v(
730 opp110, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW);
731
732 /* We did everything, now program DxOUTPUT_CSC_CONTROL */
733 configure_graphics_mode_v(opp110, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
734 tbl_entry->color_space);
735
736 /*TODO: Check if denormalization is needed*/
737 /*set_Denormalization(opp, adjust->color_depth);*/
738}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_formatter.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_formatter.c
new file mode 100644
index 000000000000..eac20775c23f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_formatter.c
@@ -0,0 +1,627 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_11_0_d.h"
29#include "dce/dce_11_0_sh_mask.h"
30
31#include "dce110_opp.h"
32
33#define FMT_REG(reg)\
34 (reg + opp110->offsets.fmt_offset)
35/**
36 * set_truncation
37 * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
38 * 2) enable truncation
39 * 3) HW remove 12bit FMT support for DCE11 power saving reason.
40 */
41static void set_truncation(
42 struct dce110_opp *opp110,
43 const struct bit_depth_reduction_params *params)
44{
45 uint32_t value = 0;
46 uint32_t addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
47
48 /*Disable truncation*/
49 value = dm_read_reg(opp110->base.ctx, addr);
50 set_reg_field_value(value, 0,
51 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN);
52 set_reg_field_value(value, 0,
53 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH);
54 set_reg_field_value(value, 0,
55 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE);
56
57 dm_write_reg(opp110->base.ctx, addr, value);
58
59 /* no 10bpc trunc on DCE11*/
60 if (params->flags.TRUNCATE_ENABLED == 0 ||
61 params->flags.TRUNCATE_DEPTH == 2)
62 return;
63
64 /*Set truncation depth and Enable truncation*/
65 set_reg_field_value(value, 1,
66 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN);
67 set_reg_field_value(value, params->flags.TRUNCATE_MODE,
68 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE);
69 set_reg_field_value(value, params->flags.TRUNCATE_DEPTH,
70 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH);
71
72 dm_write_reg(opp110->base.ctx, addr, value);
73
74}
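/*
 * The depths map to total pixel sizes as 0 -> 6 bpc * 3 = 18 bpp,
 * 1 -> 8 bpc * 3 = 24 bpp, 2 -> 10 bpc * 3 = 30 bpp; the early return
 * above means a 10 bpc request simply leaves truncation disabled on
 * DCE11 instead of failing.
 */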
75
76/**
77 * set_spatial_dither
78 * 1) set spatial dithering mode: pattern of seed
79 * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
80 * 3) set random seed
81 * 4) set random mode
82 * lfsr is reset every frame or not reset
83 * RGB dithering method
84 * 0: RGB data are all dithered with x^28+x^3+1
85 * 1: R data is dithered with x^28+x^3+1
86 * G data is dithered with x^28+x^9+1
87 * B data is dithered with x^28+x^13+1
88 * enable high pass filter or not
89 * 5) enable spatial dithering
90 */
91static void set_spatial_dither(
92 struct dce110_opp *opp110,
93 const struct bit_depth_reduction_params *params)
94{
95 uint32_t addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
96 uint32_t depth_cntl_value = 0;
97 uint32_t fmt_cntl_value = 0;
98 uint32_t dither_r_value = 0;
99 uint32_t dither_g_value = 0;
100 uint32_t dither_b_value = 0;
101
102 /*Disable spatial (random) dithering*/
103 depth_cntl_value = dm_read_reg(opp110->base.ctx, addr);
104 set_reg_field_value(depth_cntl_value, 0,
105 FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN);
106 set_reg_field_value(depth_cntl_value, 0,
107 FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE);
108 set_reg_field_value(depth_cntl_value, 0,
109 FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH);
110 set_reg_field_value(depth_cntl_value, 0,
111 FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN);
112 set_reg_field_value(depth_cntl_value, 0,
113 FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE);
114 set_reg_field_value(depth_cntl_value, 0,
115 FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE);
116 set_reg_field_value(depth_cntl_value, 0,
117 FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE);
118
119 dm_write_reg(opp110->base.ctx, addr, depth_cntl_value);
120
121 /* no 10bpc on DCE11*/
122 if (params->flags.SPATIAL_DITHER_ENABLED == 0 ||
123 params->flags.SPATIAL_DITHER_DEPTH == 2)
124 return;
125
126 addr = FMT_REG(mmFMT_CONTROL);
127 fmt_cntl_value = dm_read_reg(opp110->base.ctx, addr);
128 /* only use FRAME_COUNTER_MAX if frameRandom == 1*/
129 if (params->flags.FRAME_RANDOM == 1) {
130 if (params->flags.SPATIAL_DITHER_DEPTH == 0 ||
131 params->flags.SPATIAL_DITHER_DEPTH == 1) {
132 set_reg_field_value(fmt_cntl_value, 15,
133 FMT_CONTROL,
134 FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX);
135 set_reg_field_value(fmt_cntl_value, 2,
136 FMT_CONTROL,
137 FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP);
138 } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
139 set_reg_field_value(fmt_cntl_value, 3,
140 FMT_CONTROL,
141 FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX);
142 set_reg_field_value(fmt_cntl_value, 1,
143 FMT_CONTROL,
144 FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP);
145 } else
146 return;
147 } else {
148 set_reg_field_value(fmt_cntl_value, 0,
149 FMT_CONTROL,
150 FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX);
151 set_reg_field_value(fmt_cntl_value, 0,
152 FMT_CONTROL,
153 FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP);
154 }
155
156 dm_write_reg(opp110->base.ctx, addr, fmt_cntl_value);
157
158 /*Set seed for random values for
159 * spatial dithering for R,G,B channels*/
160 addr = FMT_REG(mmFMT_DITHER_RAND_R_SEED);
161 set_reg_field_value(dither_r_value, params->r_seed_value,
162 FMT_DITHER_RAND_R_SEED,
163 FMT_RAND_R_SEED);
164 dm_write_reg(opp110->base.ctx, addr, dither_r_value);
165
166 addr = FMT_REG(mmFMT_DITHER_RAND_G_SEED);
167 set_reg_field_value(dither_g_value,
168 params->g_seed_value,
169 FMT_DITHER_RAND_G_SEED,
170 FMT_RAND_G_SEED);
171 dm_write_reg(opp110->base.ctx, addr, dither_g_value);
172
173 addr = FMT_REG(mmFMT_DITHER_RAND_B_SEED);
174 set_reg_field_value(dither_b_value, params->b_seed_value,
175 FMT_DITHER_RAND_B_SEED,
176 FMT_RAND_B_SEED);
177 dm_write_reg(opp110->base.ctx, addr, dither_b_value);
178
179 /* FMT_OFFSET_R_Cr 31:16 0x0 Sets the zero
180 * offset for the R/Cr channel; the lower 4 LSBs
181 * are forced to zeros. Typically set to 0 for
182 * RGB and 0x80000 for YCbCr.
183 */
184 /* FMT_OFFSET_G_Y 31:16 0x0 Sets the zero
185 * offset for the G/Y channel; the lower 4 LSBs
186 * are forced to zeros. Typically set to 0 for
187 * RGB and 0x80000 for YCbCr.
188 */
189 /* FMT_OFFSET_B_Cb 31:16 0x0 Sets the zero
190 * offset for the B/Cb channel; the lower 4 LSBs
191 * are forced to zeros. Typically set to 0 for
192 * RGB and 0x80000 for YCbCr.
193 */
194
195 /*Set spatial dithering bit depth*/
196 set_reg_field_value(depth_cntl_value,
197 params->flags.SPATIAL_DITHER_DEPTH,
198 FMT_BIT_DEPTH_CONTROL,
199 FMT_SPATIAL_DITHER_DEPTH);
200
201 /* Set spatial dithering mode
202 * (default is seed pattern AAAA...)
203 */
204 set_reg_field_value(depth_cntl_value,
205 params->flags.SPATIAL_DITHER_MODE,
206 FMT_BIT_DEPTH_CONTROL,
207 FMT_SPATIAL_DITHER_MODE);
208
209 /*Reset only at startup*/
210 set_reg_field_value(depth_cntl_value,
211 params->flags.FRAME_RANDOM,
212 FMT_BIT_DEPTH_CONTROL,
213 FMT_FRAME_RANDOM_ENABLE);
214
215 /*Set RGB data dithered with x^28+x^3+1*/
216 set_reg_field_value(depth_cntl_value,
217 params->flags.RGB_RANDOM,
218 FMT_BIT_DEPTH_CONTROL,
219 FMT_RGB_RANDOM_ENABLE);
220
221 /*Disable High pass filter*/
222 set_reg_field_value(depth_cntl_value,
223 params->flags.HIGHPASS_RANDOM,
224 FMT_BIT_DEPTH_CONTROL,
225 FMT_HIGHPASS_RANDOM_ENABLE);
226
227 /*Enable spatial dithering*/
228 set_reg_field_value(depth_cntl_value,
229 1,
230 FMT_BIT_DEPTH_CONTROL,
231 FMT_SPATIAL_DITHER_EN);
232
233 addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
234 dm_write_reg(opp110->base.ctx, addr, depth_cntl_value);
235
236}
237
238/**
239 * SetTemporalDither (Frame Modulation)
240 * 1) set temporal dither depth
241 * 2) select pattern: from hard-coded pattern or programmable pattern
242 * 3) select optimized strips for BGR or RGB LCD sub-pixel
243 * 4) set s matrix
244 * 5) set t matrix
245 * 6) set grey level for 0.25, 0.5, 0.75
246 * 7) enable temporal dithering
247 */
248static void set_temporal_dither(
249 struct dce110_opp *opp110,
250 const struct bit_depth_reduction_params *params)
251{
252 uint32_t addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
253 uint32_t value;
254
255 /*Disable temporal (frame modulation) dithering first*/
256 value = dm_read_reg(opp110->base.ctx, addr);
257
258 set_reg_field_value(value,
259 0,
260 FMT_BIT_DEPTH_CONTROL,
261 FMT_TEMPORAL_DITHER_EN);
262
263 set_reg_field_value(value,
264 0,
265 FMT_BIT_DEPTH_CONTROL,
266 FMT_TEMPORAL_DITHER_RESET);
267 set_reg_field_value(value,
268 0,
269 FMT_BIT_DEPTH_CONTROL,
270 FMT_TEMPORAL_DITHER_OFFSET);
271 set_reg_field_value(value,
272 0,
273 FMT_BIT_DEPTH_CONTROL,
274 FMT_TEMPORAL_DITHER_DEPTH);
275 set_reg_field_value(value,
276 0,
277 FMT_BIT_DEPTH_CONTROL,
278 FMT_TEMPORAL_LEVEL);
279 set_reg_field_value(value,
280 0,
281 FMT_BIT_DEPTH_CONTROL,
282 FMT_25FRC_SEL);
283
284 set_reg_field_value(value,
285 0,
286 FMT_BIT_DEPTH_CONTROL,
287 FMT_50FRC_SEL);
288
289 set_reg_field_value(value,
290 0,
291 FMT_BIT_DEPTH_CONTROL,
292 FMT_75FRC_SEL);
293
294 dm_write_reg(opp110->base.ctx, addr, value);
295
296 /* no 10bpc dither on DCE11*/
297 if (params->flags.FRAME_MODULATION_ENABLED == 0 ||
298 params->flags.FRAME_MODULATION_DEPTH == 2)
299 return;
300
301 /* Set temporal dithering depth*/
302 set_reg_field_value(value,
303 params->flags.FRAME_MODULATION_DEPTH,
304 FMT_BIT_DEPTH_CONTROL,
305 FMT_TEMPORAL_DITHER_DEPTH);
306
307 set_reg_field_value(value,
308 0,
309 FMT_BIT_DEPTH_CONTROL,
310 FMT_TEMPORAL_DITHER_RESET);
311
312 set_reg_field_value(value,
313 0,
314 FMT_BIT_DEPTH_CONTROL,
315 FMT_TEMPORAL_DITHER_OFFSET);
316
317 /*Select legacy pattern based on FRC and Temporal level*/
318 addr = FMT_REG(mmFMT_TEMPORAL_DITHER_PATTERN_CONTROL);
319 dm_write_reg(opp110->base.ctx, addr, 0);
320 /*Set s matrix*/
321 addr = FMT_REG(
322 mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX);
323 dm_write_reg(opp110->base.ctx, addr, 0);
324 /*Set t matrix*/
325 addr = FMT_REG(
326 mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX);
327 dm_write_reg(opp110->base.ctx, addr, 0);
328
329 /*Select patterns for 0.25, 0.5 and 0.75 grey level*/
330 set_reg_field_value(value,
331 params->flags.TEMPORAL_LEVEL,
332 FMT_BIT_DEPTH_CONTROL,
333 FMT_TEMPORAL_LEVEL);
334
335 set_reg_field_value(value,
336 params->flags.FRC25,
337 FMT_BIT_DEPTH_CONTROL,
338 FMT_25FRC_SEL);
339
340 set_reg_field_value(value,
341 params->flags.FRC50,
342 FMT_BIT_DEPTH_CONTROL,
343 FMT_50FRC_SEL);
344
345 set_reg_field_value(value,
346 params->flags.FRC75,
347 FMT_BIT_DEPTH_CONTROL,
348 FMT_75FRC_SEL);
349
350 /*Enable bit reduction by temporal (frame modulation) dithering*/
351 set_reg_field_value(value,
352 1,
353 FMT_BIT_DEPTH_CONTROL,
354 FMT_TEMPORAL_DITHER_EN);
355
356 addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
357 dm_write_reg(opp110->base.ctx, addr, value);
358
359}
360
361/**
362 * Set Clamping
363 * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
364 * 1 for 8 bpc
365 * 2 for 10 bpc
366 * 3 for 12 bpc
367 * 7 for programmable
368 * 2) Enable clamp if Limited range requested
369 */
370void dce110_opp_set_clamping(
371 struct dce110_opp *opp110,
372 const struct clamping_and_pixel_encoding_params *params)
373{
374 uint32_t clamp_cntl_value = 0;
375 uint32_t red_clamp_value = 0;
376 uint32_t green_clamp_value = 0;
377 uint32_t blue_clamp_value = 0;
378 uint32_t addr = FMT_REG(mmFMT_CLAMP_CNTL);
379
380 clamp_cntl_value = dm_read_reg(opp110->base.ctx, addr);
381
382 set_reg_field_value(clamp_cntl_value,
383 0,
384 FMT_CLAMP_CNTL,
385 FMT_CLAMP_DATA_EN);
386
387 set_reg_field_value(clamp_cntl_value,
388 0,
389 FMT_CLAMP_CNTL,
390 FMT_CLAMP_COLOR_FORMAT);
391
392 switch (params->clamping_level) {
393 case CLAMPING_FULL_RANGE:
394 break;
395
396 case CLAMPING_LIMITED_RANGE_8BPC:
397 set_reg_field_value(clamp_cntl_value,
398 1,
399 FMT_CLAMP_CNTL,
400 FMT_CLAMP_DATA_EN);
401
402 set_reg_field_value(clamp_cntl_value,
403 1,
404 FMT_CLAMP_CNTL,
405 FMT_CLAMP_COLOR_FORMAT);
406
407 break;
408
409 case CLAMPING_LIMITED_RANGE_10BPC:
410 set_reg_field_value(clamp_cntl_value,
411 1,
412 FMT_CLAMP_CNTL,
413 FMT_CLAMP_DATA_EN);
414
415 set_reg_field_value(clamp_cntl_value,
416 2,
417 FMT_CLAMP_CNTL,
418 FMT_CLAMP_COLOR_FORMAT);
419
420 break;
421 case CLAMPING_LIMITED_RANGE_12BPC:
422 set_reg_field_value(clamp_cntl_value,
423 1,
424 FMT_CLAMP_CNTL,
425 FMT_CLAMP_DATA_EN);
426
427 set_reg_field_value(clamp_cntl_value,
428 3,
429 FMT_CLAMP_CNTL,
430 FMT_CLAMP_COLOR_FORMAT);
431
432 break;
433 case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
434 set_reg_field_value(clamp_cntl_value,
435 1,
436 FMT_CLAMP_CNTL,
437 FMT_CLAMP_DATA_EN);
438
439 set_reg_field_value(clamp_cntl_value,
440 7,
441 FMT_CLAMP_CNTL,
442 FMT_CLAMP_COLOR_FORMAT);
443
444 /*set the defaults*/
445 set_reg_field_value(red_clamp_value,
446 0x10,
447 FMT_CLAMP_COMPONENT_R,
448 FMT_CLAMP_LOWER_R);
449
450 set_reg_field_value(red_clamp_value,
451 0xFEF,
452 FMT_CLAMP_COMPONENT_R,
453 FMT_CLAMP_UPPER_R);
454
455 addr = FMT_REG(mmFMT_CLAMP_COMPONENT_R);
456 dm_write_reg(opp110->base.ctx, addr, red_clamp_value);
457
458 set_reg_field_value(green_clamp_value,
459 0x10,
460 FMT_CLAMP_COMPONENT_G,
461 FMT_CLAMP_LOWER_G);
462
463 set_reg_field_value(green_clamp_value,
464 0xFEF,
465 FMT_CLAMP_COMPONENT_G,
466 FMT_CLAMP_UPPER_G);
467
468 addr = FMT_REG(mmFMT_CLAMP_COMPONENT_G);
469 dm_write_reg(opp110->base.ctx, addr, green_clamp_value);
470
471 set_reg_field_value(blue_clamp_value,
472 0x10,
473 FMT_CLAMP_COMPONENT_B,
474 FMT_CLAMP_LOWER_B);
475
476 set_reg_field_value(blue_clamp_value,
477 0xFEF,
478 FMT_CLAMP_COMPONENT_B,
479 FMT_CLAMP_UPPER_B);
480
481 addr = FMT_REG(mmFMT_CLAMP_COMPONENT_B);
482 dm_write_reg(opp110->base.ctx, addr, blue_clamp_value);
483
484 break;
485
486 default:
487 break;
488 }
489
490 addr = FMT_REG(mmFMT_CLAMP_CNTL);
491 /*Set clamp control*/
492 dm_write_reg(opp110->base.ctx, addr, clamp_cntl_value);
493
494}
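/*
 * For reference, the programmable defaults above clamp each 12-bit
 * component to [0x10, 0xFEF], i.e. 16..4079 out of 0..4095, which
 * presumably keeps the reserved codes at both ends of the range from
 * reaching the wire.
 */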
495
496/**
497 * set_pixel_encoding
498 *
499 * Set Pixel Encoding
500 * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
501 * 1: YCbCr 4:2:2
502 */
503static void set_pixel_encoding(
504 struct dce110_opp *opp110,
505 const struct clamping_and_pixel_encoding_params *params)
506{
507 uint32_t fmt_cntl_value;
508 uint32_t addr = FMT_REG(mmFMT_CONTROL);
509
510 /*RGB 4:4:4 or YCbCr 4:4:4 - 0; YCbCr 4:2:2 -1.*/
511 fmt_cntl_value = dm_read_reg(opp110->base.ctx, addr);
512
513 set_reg_field_value(fmt_cntl_value,
514 0,
515 FMT_CONTROL,
516 FMT_PIXEL_ENCODING);
517
518 if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
519 set_reg_field_value(fmt_cntl_value,
520 1,
521 FMT_CONTROL,
522 FMT_PIXEL_ENCODING);
523
524 /* 00 - pixel drop mode, 01 - pixel average mode */
525 set_reg_field_value(fmt_cntl_value,
526 0,
527 FMT_CONTROL,
528 FMT_SUBSAMPLING_MODE);
529
530 /* 00 - Cb before Cr, 01 - Cr before Cb */
531 set_reg_field_value(fmt_cntl_value,
532 0,
533 FMT_CONTROL,
534 FMT_SUBSAMPLING_ORDER);
535 }
536 dm_write_reg(opp110->base.ctx, addr, fmt_cntl_value);
537
538}
539
540void dce110_opp_program_bit_depth_reduction(
541 struct output_pixel_processor *opp,
542 const struct bit_depth_reduction_params *params)
543{
544 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
545
546 set_truncation(opp110, params);
547 set_spatial_dither(opp110, params);
548 set_temporal_dither(opp110, params);
549}
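/*
 * A minimal usage sketch (flag and field names are the ones used by the
 * helpers above; the concrete values and the zero-initializer are
 * illustrative only): requesting 8 bpc truncation plus spatial dithering
 * might look like
 *
 *   struct bit_depth_reduction_params p = { 0 };
 *
 *   p.flags.TRUNCATE_ENABLED       = 1;
 *   p.flags.TRUNCATE_DEPTH         = 1;   // 24 bpp
 *   p.flags.SPATIAL_DITHER_ENABLED = 1;
 *   p.flags.SPATIAL_DITHER_DEPTH   = 1;
 *   p.flags.FRAME_RANDOM           = 1;
 *   p.r_seed_value = p.g_seed_value = p.b_seed_value = 0xAAAA;
 *   dce110_opp_program_bit_depth_reduction(opp, &p);
 */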
550
551void dce110_opp_program_clamping_and_pixel_encoding(
552 struct output_pixel_processor *opp,
553 const struct clamping_and_pixel_encoding_params *params)
554{
555 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
556
557 dce110_opp_set_clamping(opp110, params);
558 set_pixel_encoding(opp110, params);
559}
560
561void dce110_opp_set_dyn_expansion(
562 struct output_pixel_processor *opp,
563 enum dc_color_space color_sp,
564 enum dc_color_depth color_dpth,
565 enum signal_type signal)
566{
567 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
568 uint32_t value;
569 bool enable_dyn_exp = false;
570 uint32_t addr = FMT_REG(mmFMT_DYNAMIC_EXP_CNTL);
571
572 value = dm_read_reg(opp->ctx, addr);
573
574 set_reg_field_value(value, 0,
575 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN);
576 set_reg_field_value(value, 0,
577 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE);
578
579 /* From HW programming guide:
580 FMT_DYNAMIC_EXP_EN = 0 for limited RGB or YCbCr output
581 FMT_DYNAMIC_EXP_EN = 1 for RGB full range only*/
582 if (color_sp == COLOR_SPACE_SRGB)
583 enable_dyn_exp = true;
584
585 /*00 - 10-bit -> 12-bit dynamic expansion*/
586 /*01 - 8-bit -> 12-bit dynamic expansion*/
587 if (signal == SIGNAL_TYPE_HDMI_TYPE_A) {
588 switch (color_dpth) {
589 case COLOR_DEPTH_888:
590 set_reg_field_value(value, enable_dyn_exp ? 1:0,
591 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN);
592 set_reg_field_value(value, 1,
593 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE);
594 break;
595 case COLOR_DEPTH_101010:
596 set_reg_field_value(value, enable_dyn_exp ? 1:0,
597 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN);
598 set_reg_field_value(value, 0,
599 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE);
600 break;
601 case COLOR_DEPTH_121212:
602 break;
603 default:
604 break;
605 }
606 }
607
608 dm_write_reg(opp->ctx, addr, value);
609}
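/*
 * Worked example, assuming the usual MSB-replication scheme for dynamic
 * expansion (the exact HW behaviour is not spelled out here): expanding an
 * 8-bit value v to 12 bits as (v << 4) | (v >> 4) maps 0x00 -> 0x000 and
 * 0xFF -> 0xFFF, preserving full range; that is why the mode field selects
 * 8->12 (mode 1) or 10->12 (mode 0) based on the output color depth.
 */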
610
611void dce110_opp_program_fmt(
612 struct output_pixel_processor *opp,
613 struct bit_depth_reduction_params *fmt_bit_depth,
614 struct clamping_and_pixel_encoding_params *clamping)
615{
616 /* dithering is affected by <CrtcSourceSelect>, hence should be
617 * programmed afterwards */
618 dce110_opp_program_bit_depth_reduction(
619 opp,
620 fmt_bit_depth);
621
622 dce110_opp_program_clamping_and_pixel_encoding(
623 opp,
624 clamping);
625
626 return;
627}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma.c
new file mode 100644
index 000000000000..62051abcfe2f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma.c
@@ -0,0 +1,537 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE11 register header files */
29#include "dce/dce_11_0_d.h"
30#include "dce/dce_11_0_sh_mask.h"
31
32#include "dce110_opp.h"
33#include "gamma_types.h"
34
35#define DCP_REG(reg)\
36 (reg + opp110->offsets.dcp_offset)
37
38#define DCFE_REG(reg)\
39 (reg + opp110->offsets.dcfe_offset)
40
41enum {
42 MAX_PWL_ENTRY = 128,
43 MAX_REGIONS_NUMBER = 16
44
45};
46
47/*
48 *****************************************************************************
49 * Function: regamma_config_regions_and_segments
50 *
51 * Build the regamma curve using predefined HW points.
52 * Uses interface parameters, like EDID coefficients.
53 *
54 * @param : parameters interface parameters
55 * @return void
56 *
57 * @note
58 *
59 * @see
60 *
61 *****************************************************************************
62 */
63static void regamma_config_regions_and_segments(
64 struct dce110_opp *opp110,
65 const struct pwl_params *params)
66{
67 const struct gamma_curve *curve;
68 uint32_t value = 0;
69
70 {
71 set_reg_field_value(
72 value,
73 params->arr_points[0].custom_float_x,
74 REGAMMA_CNTLA_START_CNTL,
75 REGAMMA_CNTLA_EXP_REGION_START);
76
77 set_reg_field_value(
78 value,
79 0,
80 REGAMMA_CNTLA_START_CNTL,
81 REGAMMA_CNTLA_EXP_REGION_START_SEGMENT);
82
83 dm_write_reg(opp110->base.ctx,
84 DCP_REG(mmREGAMMA_CNTLA_START_CNTL),
85 value);
86 }
87 {
88 value = 0;
89 set_reg_field_value(
90 value,
91 params->arr_points[0].custom_float_slope,
92 REGAMMA_CNTLA_SLOPE_CNTL,
93 REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE);
94
95 dm_write_reg(opp110->base.ctx,
96 DCP_REG(mmREGAMMA_CNTLA_SLOPE_CNTL), value);
97 }
98 {
99 value = 0;
100 set_reg_field_value(
101 value,
102 params->arr_points[1].custom_float_x,
103 REGAMMA_CNTLA_END_CNTL1,
104 REGAMMA_CNTLA_EXP_REGION_END);
105
106 dm_write_reg(opp110->base.ctx,
107 DCP_REG(mmREGAMMA_CNTLA_END_CNTL1), value);
108 }
109 {
110 value = 0;
111 set_reg_field_value(
112 value,
113 params->arr_points[2].custom_float_slope,
114 REGAMMA_CNTLA_END_CNTL2,
115 REGAMMA_CNTLA_EXP_REGION_END_BASE);
116
117 set_reg_field_value(
118 value,
119 params->arr_points[1].custom_float_y,
120 REGAMMA_CNTLA_END_CNTL2,
121 REGAMMA_CNTLA_EXP_REGION_END_SLOPE);
122
123 dm_write_reg(opp110->base.ctx,
124 DCP_REG(mmREGAMMA_CNTLA_END_CNTL2), value);
125 }
126
127 curve = params->arr_curve_points;
128
129 {
130 value = 0;
131 set_reg_field_value(
132 value,
133 curve[0].offset,
134 REGAMMA_CNTLA_REGION_0_1,
135 REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET);
136
137 set_reg_field_value(
138 value,
139 curve[0].segments_num,
140 REGAMMA_CNTLA_REGION_0_1,
141 REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS);
142
143 set_reg_field_value(
144 value,
145 curve[1].offset,
146 REGAMMA_CNTLA_REGION_0_1,
147 REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET);
148
149 set_reg_field_value(
150 value,
151 curve[1].segments_num,
152 REGAMMA_CNTLA_REGION_0_1,
153 REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS);
154
155 dm_write_reg(
156 opp110->base.ctx,
157 DCP_REG(mmREGAMMA_CNTLA_REGION_0_1),
158 value);
159 }
160
161 curve += 2;
162 {
163 value = 0;
164 set_reg_field_value(
165 value,
166 curve[0].offset,
167 REGAMMA_CNTLA_REGION_2_3,
168 REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET);
169
170 set_reg_field_value(
171 value,
172 curve[0].segments_num,
173 REGAMMA_CNTLA_REGION_2_3,
174 REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS);
175
176 set_reg_field_value(
177 value,
178 curve[1].offset,
179 REGAMMA_CNTLA_REGION_2_3,
180 REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET);
181
182 set_reg_field_value(
183 value,
184 curve[1].segments_num,
185 REGAMMA_CNTLA_REGION_2_3,
186 REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS);
187
188 dm_write_reg(opp110->base.ctx,
189 DCP_REG(mmREGAMMA_CNTLA_REGION_2_3),
190 value);
191 }
192
193 curve += 2;
194 {
195 value = 0;
196 set_reg_field_value(
197 value,
198 curve[0].offset,
199 REGAMMA_CNTLA_REGION_4_5,
200 REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET);
201
202 set_reg_field_value(
203 value,
204 curve[0].segments_num,
205 REGAMMA_CNTLA_REGION_4_5,
206 REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS);
207
208 set_reg_field_value(
209 value,
210 curve[1].offset,
211 REGAMMA_CNTLA_REGION_4_5,
212 REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET);
213
214 set_reg_field_value(
215 value,
216 curve[1].segments_num,
217 REGAMMA_CNTLA_REGION_4_5,
218 REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS);
219
220 dm_write_reg(opp110->base.ctx,
221 DCP_REG(mmREGAMMA_CNTLA_REGION_4_5),
222 value);
223 }
224
225 curve += 2;
226 {
227 value = 0;
228 set_reg_field_value(
229 value,
230 curve[0].offset,
231 REGAMMA_CNTLA_REGION_6_7,
232 REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET);
233
234 set_reg_field_value(
235 value,
236 curve[0].segments_num,
237 REGAMMA_CNTLA_REGION_6_7,
238 REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS);
239
240 set_reg_field_value(
241 value,
242 curve[1].offset,
243 REGAMMA_CNTLA_REGION_6_7,
244 REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET);
245
246 set_reg_field_value(
247 value,
248 curve[1].segments_num,
249 REGAMMA_CNTLA_REGION_6_7,
250 REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS);
251
252 dm_write_reg(opp110->base.ctx,
253 DCP_REG(mmREGAMMA_CNTLA_REGION_6_7),
254 value);
255 }
256
257 curve += 2;
258 {
259 value = 0;
260 set_reg_field_value(
261 value,
262 curve[0].offset,
263 REGAMMA_CNTLA_REGION_8_9,
264 REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET);
265
266 set_reg_field_value(
267 value,
268 curve[0].segments_num,
269 REGAMMA_CNTLA_REGION_8_9,
270 REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS);
271
272 set_reg_field_value(
273 value,
274 curve[1].offset,
275 REGAMMA_CNTLA_REGION_8_9,
276 REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET);
277
278 set_reg_field_value(
279 value,
280 curve[1].segments_num,
281 REGAMMA_CNTLA_REGION_8_9,
282 REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS);
283
284 dm_write_reg(opp110->base.ctx,
285 DCP_REG(mmREGAMMA_CNTLA_REGION_8_9),
286 value);
287 }
288
289 curve += 2;
290 {
291 value = 0;
292 set_reg_field_value(
293 value,
294 curve[0].offset,
295 REGAMMA_CNTLA_REGION_10_11,
296 REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET);
297
298 set_reg_field_value(
299 value,
300 curve[0].segments_num,
301 REGAMMA_CNTLA_REGION_10_11,
302 REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS);
303
304 set_reg_field_value(
305 value,
306 curve[1].offset,
307 REGAMMA_CNTLA_REGION_10_11,
308 REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET);
309
310 set_reg_field_value(
311 value,
312 curve[1].segments_num,
313 REGAMMA_CNTLA_REGION_10_11,
314 REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS);
315
316 dm_write_reg(opp110->base.ctx,
317 DCP_REG(mmREGAMMA_CNTLA_REGION_10_11),
318 value);
319 }
320
321 curve += 2;
322 {
323 value = 0;
324 set_reg_field_value(
325 value,
326 curve[0].offset,
327 REGAMMA_CNTLA_REGION_12_13,
328 REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET);
329
330 set_reg_field_value(
331 value,
332 curve[0].segments_num,
333 REGAMMA_CNTLA_REGION_12_13,
334 REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS);
335
336 set_reg_field_value(
337 value,
338 curve[1].offset,
339 REGAMMA_CNTLA_REGION_12_13,
340 REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET);
341
342 set_reg_field_value(
343 value,
344 curve[1].segments_num,
345 REGAMMA_CNTLA_REGION_12_13,
346 REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS);
347
348 dm_write_reg(opp110->base.ctx,
349 DCP_REG(mmREGAMMA_CNTLA_REGION_12_13),
350 value);
351 }
352
353 curve += 2;
354 {
355 value = 0;
356 set_reg_field_value(
357 value,
358 curve[0].offset,
359 REGAMMA_CNTLA_REGION_14_15,
360 REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET);
361
362 set_reg_field_value(
363 value,
364 curve[0].segments_num,
365 REGAMMA_CNTLA_REGION_14_15,
366 REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS);
367
368 set_reg_field_value(
369 value,
370 curve[1].offset,
371 REGAMMA_CNTLA_REGION_14_15,
372 REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET);
373
374 set_reg_field_value(
375 value,
376 curve[1].segments_num,
377 REGAMMA_CNTLA_REGION_14_15,
378 REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS);
379
380 dm_write_reg(opp110->base.ctx,
381 DCP_REG(mmREGAMMA_CNTLA_REGION_14_15),
382 value);
383 }
384}
385
386static void program_pwl(
387 struct dce110_opp *opp110,
388 const struct pwl_params *params)
389{
390 uint32_t value;
391
392 {
393 uint8_t max_tries = 10;
394 uint8_t counter = 0;
395
396 /* Power on LUT memory */
397 value = dm_read_reg(opp110->base.ctx,
398 DCFE_REG(mmDCFE_MEM_PWR_CTRL));
399
400 set_reg_field_value(
401 value,
402 1,
403 DCFE_MEM_PWR_CTRL,
404 DCP_REGAMMA_MEM_PWR_DIS);
405
406 dm_write_reg(opp110->base.ctx,
407 DCFE_REG(mmDCFE_MEM_PWR_CTRL), value);
408
409 while (counter < max_tries) {
410 value =
411 dm_read_reg(
412 opp110->base.ctx,
413 DCFE_REG(mmDCFE_MEM_PWR_STATUS));
414
415 if (get_reg_field_value(
416 value,
417 DCFE_MEM_PWR_STATUS,
418 DCP_REGAMMA_MEM_PWR_STATE) == 0)
419 break;
420
421 ++counter;
422 }
423
424 if (counter == max_tries) {
425 dm_logger_write(opp110->base.ctx->logger, LOG_WARNING,
426 "%s: regamma lut was not powered on "
427 "in a timely manner,"
428 " programming still proceeds\n",
429 __func__);
430 }
431 }
432
433 value = 0;
434
435 set_reg_field_value(
436 value,
437 7,
438 REGAMMA_LUT_WRITE_EN_MASK,
439 REGAMMA_LUT_WRITE_EN_MASK);
440
441 dm_write_reg(opp110->base.ctx,
442 DCP_REG(mmREGAMMA_LUT_WRITE_EN_MASK), value);
443 dm_write_reg(opp110->base.ctx,
444 DCP_REG(mmREGAMMA_LUT_INDEX), 0);
445
446 /* Program REGAMMA_LUT_DATA */
447 {
448 const uint32_t addr = DCP_REG(mmREGAMMA_LUT_DATA);
449
450 uint32_t i = 0;
451
452 const struct pwl_result_data *rgb = params->rgb_resulted;
453
454 while (i != params->hw_points_num) {
455 dm_write_reg(opp110->base.ctx, addr, rgb->red_reg);
456 dm_write_reg(opp110->base.ctx, addr, rgb->green_reg);
457 dm_write_reg(opp110->base.ctx, addr, rgb->blue_reg);
458
459 dm_write_reg(opp110->base.ctx, addr,
460 rgb->delta_red_reg);
461 dm_write_reg(opp110->base.ctx, addr,
462 rgb->delta_green_reg);
463 dm_write_reg(opp110->base.ctx, addr,
464 rgb->delta_blue_reg);
465
466 ++rgb;
467 ++i;
468 }
469 }
470
471 /* we are done with DCP LUT memory; re-enable low power mode */
472 value = dm_read_reg(opp110->base.ctx, DCFE_REG(mmDCFE_MEM_PWR_CTRL));
473
474 set_reg_field_value(
475 value,
476 0,
477 DCFE_MEM_PWR_CTRL,
478 DCP_REGAMMA_MEM_PWR_DIS);
479
480 dm_write_reg(opp110->base.ctx, DCFE_REG(mmDCFE_MEM_PWR_CTRL), value);
481}
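/*
 * Each PWL point above is pushed with six consecutive writes to the
 * REGAMMA_LUT_DATA register (R, G, B bases followed by the three deltas),
 * presumably relying on the LUT index auto-incrementing after it was reset
 * to 0; for the maximum of MAX_PWL_ENTRY (128) points that is 768 data
 * writes per programming pass.
 */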
482
483bool dce110_opp_program_regamma_pwl(
484 struct output_pixel_processor *opp,
485 const struct pwl_params *params)
486{
487 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
488
489 /* Setup regions */
490 regamma_config_regions_and_segments(opp110, params);
491
492 /* Program PWL */
493 program_pwl(opp110, params);
494
495 return true;
496}
497
498void dce110_opp_power_on_regamma_lut(
499 struct output_pixel_processor *opp,
500 bool power_on)
501{
502 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
503
504 uint32_t value =
505 dm_read_reg(opp->ctx, DCFE_REG(mmDCFE_MEM_PWR_CTRL));
506
507 set_reg_field_value(
508 value,
509 power_on,
510 DCFE_MEM_PWR_CTRL,
511 DCP_REGAMMA_MEM_PWR_DIS);
512
513 set_reg_field_value(
514 value,
515 power_on,
516 DCFE_MEM_PWR_CTRL,
517 DCP_LUT_MEM_PWR_DIS);
518
519 dm_write_reg(opp->ctx, DCFE_REG(mmDCFE_MEM_PWR_CTRL), value);
520}
521
522void dce110_opp_set_regamma_mode(struct output_pixel_processor *opp,
523 enum opp_regamma mode)
524{
525 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
526 uint32_t value = dm_read_reg(opp110->base.ctx,
527 DCP_REG(mmREGAMMA_CONTROL));
528
529 set_reg_field_value(
530 value,
531 mode,
532 REGAMMA_CONTROL,
533 GRPH_REGAMMA_MODE);
534
535 dm_write_reg(opp110->base.ctx, DCP_REG(mmREGAMMA_CONTROL),
536 value);
537}
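/*
 * A hypothetical call sequence for a hw-sequencer using these helpers
 * (the ordering and the OPP_REGAMMA_USER mode name are assumptions, not
 * something this file defines):
 *
 *   dce110_opp_power_on_regamma_lut(opp, true);
 *   dce110_opp_program_regamma_pwl(opp, params);
 *   dce110_opp_set_regamma_mode(opp, OPP_REGAMMA_USER);
 *   dce110_opp_power_on_regamma_lut(opp, false);
 */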
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
new file mode 100644
index 000000000000..3b3a9175b2c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
@@ -0,0 +1,551 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE11 register header files */
29#include "dce/dce_11_0_d.h"
30#include "dce/dce_11_0_sh_mask.h"
31
32#include "dce110_opp.h"
33#include "gamma_types.h"
34
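/*
 * Take the COL_MAN input gamma and/or gamma correction LUT memories out of
 * (or return them to) low-power mode by toggling the *_MEM_PWR_DIS bits,
 * then poll DCFEV_MEM_PWR_CTRL up to three times (2 us apart) for both
 * disable bits to read back as set.
 */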
35static void power_on_lut(struct output_pixel_processor *opp,
36 bool power_on, bool inputgamma, bool regamma)
37{
38 uint32_t value = dm_read_reg(opp->ctx, mmDCFEV_MEM_PWR_CTRL);
39 int i;
40
41 if (power_on) {
42 if (inputgamma)
43 set_reg_field_value(
44 value,
45 1,
46 DCFEV_MEM_PWR_CTRL,
47 COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
48 if (regamma)
49 set_reg_field_value(
50 value,
51 1,
52 DCFEV_MEM_PWR_CTRL,
53 COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
54 } else {
55 if (inputgamma)
56 set_reg_field_value(
57 value,
58 0,
59 DCFEV_MEM_PWR_CTRL,
60 COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
61 if (regamma)
62 set_reg_field_value(
63 value,
64 0,
65 DCFEV_MEM_PWR_CTRL,
66 COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
67 }
68
69 dm_write_reg(opp->ctx, mmDCFEV_MEM_PWR_CTRL, value);
70
71 for (i = 0; i < 3; i++) {
72 value = dm_read_reg(opp->ctx, mmDCFEV_MEM_PWR_CTRL);
73 if (get_reg_field_value(value,
74 DCFEV_MEM_PWR_CTRL,
75 COL_MAN_INPUT_GAMMA_MEM_PWR_DIS) &&
76 get_reg_field_value(value,
77 DCFEV_MEM_PWR_CTRL,
78 COL_MAN_GAMMA_CORR_MEM_PWR_DIS))
79 break;
80
81 udelay(2);
82 }
83}
84
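/* Program INPUT_GAMMA_MODE = 0, i.e. leave the COL_MAN input gamma in bypass. */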
85static void set_bypass_input_gamma(struct dce110_opp *opp110)
86{
87 uint32_t value;
88
89 value = dm_read_reg(opp110->base.ctx,
90 mmCOL_MAN_INPUT_GAMMA_CONTROL1);
91
92 set_reg_field_value(
93 value,
94 0,
95 COL_MAN_INPUT_GAMMA_CONTROL1,
96 INPUT_GAMMA_MODE);
97
98 dm_write_reg(opp110->base.ctx,
99 mmCOL_MAN_INPUT_GAMMA_CONTROL1, value);
100}
101
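/* Write the requested GAMMA_CORR_MODE; the PWL path below selects mode 1. */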
102static void configure_regamma_mode(struct dce110_opp *opp110, uint32_t mode)
103{
104 uint32_t value = 0;
105
106 set_reg_field_value(
107 value,
108 mode,
109 GAMMA_CORR_CONTROL,
110 GAMMA_CORR_MODE);
111
112	dm_write_reg(opp110->base.ctx, mmGAMMA_CORR_CONTROL, value);
113}
114
115/*
116 *****************************************************************************
117 * Function: regamma_config_regions_and_segments
118 *
119 * Build the regamma curve from predefined hardware points,
120 * using interface parameters such as the EDID coefficients.
121 *
122 * @param params: interface parameters
123 * @return void
124 *
125 * @note
126 *
127 * @see
128 *
129 *****************************************************************************
130 */
131static void regamma_config_regions_and_segments(
132 struct dce110_opp *opp110, const struct pwl_params *params)
133{
134 const struct gamma_curve *curve;
135 uint32_t value = 0;
136
137 {
138 set_reg_field_value(
139 value,
140 params->arr_points[0].custom_float_x,
141 GAMMA_CORR_CNTLA_START_CNTL,
142 GAMMA_CORR_CNTLA_EXP_REGION_START);
143
144 set_reg_field_value(
145 value,
146 0,
147 GAMMA_CORR_CNTLA_START_CNTL,
148 GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT);
149
150 dm_write_reg(opp110->base.ctx, mmGAMMA_CORR_CNTLA_START_CNTL,
151 value);
152 }
153 {
154 value = 0;
155 set_reg_field_value(
156 value,
157 params->arr_points[0].custom_float_slope,
158 GAMMA_CORR_CNTLA_SLOPE_CNTL,
159 GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE);
160
161 dm_write_reg(opp110->base.ctx,
162 mmGAMMA_CORR_CNTLA_SLOPE_CNTL, value);
163 }
164 {
165 value = 0;
166 set_reg_field_value(
167 value,
168 params->arr_points[1].custom_float_x,
169 GAMMA_CORR_CNTLA_END_CNTL1,
170 GAMMA_CORR_CNTLA_EXP_REGION_END);
171
172 dm_write_reg(opp110->base.ctx,
173 mmGAMMA_CORR_CNTLA_END_CNTL1, value);
174 }
175 {
176 value = 0;
177 set_reg_field_value(
178 value,
179 params->arr_points[2].custom_float_slope,
180 GAMMA_CORR_CNTLA_END_CNTL2,
181 GAMMA_CORR_CNTLA_EXP_REGION_END_BASE);
182
183 set_reg_field_value(
184 value,
185 params->arr_points[1].custom_float_y,
186 GAMMA_CORR_CNTLA_END_CNTL2,
187 GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE);
188
189 dm_write_reg(opp110->base.ctx,
190 mmGAMMA_CORR_CNTLA_END_CNTL2, value);
191 }
192
193 curve = params->arr_curve_points;
194
195 {
196 value = 0;
197 set_reg_field_value(
198 value,
199 curve[0].offset,
200 GAMMA_CORR_CNTLA_REGION_0_1,
201 GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET);
202
203 set_reg_field_value(
204 value,
205 curve[0].segments_num,
206 GAMMA_CORR_CNTLA_REGION_0_1,
207 GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS);
208
209 set_reg_field_value(
210 value,
211 curve[1].offset,
212 GAMMA_CORR_CNTLA_REGION_0_1,
213 GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET);
214
215 set_reg_field_value(
216 value,
217 curve[1].segments_num,
218 GAMMA_CORR_CNTLA_REGION_0_1,
219 GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS);
220
221 dm_write_reg(
222 opp110->base.ctx,
223 mmGAMMA_CORR_CNTLA_REGION_0_1,
224 value);
225 }
226
227 curve += 2;
228 {
229 value = 0;
230 set_reg_field_value(
231 value,
232 curve[0].offset,
233 GAMMA_CORR_CNTLA_REGION_2_3,
234 GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET);
235
236 set_reg_field_value(
237 value,
238 curve[0].segments_num,
239 GAMMA_CORR_CNTLA_REGION_2_3,
240 GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS);
241
242 set_reg_field_value(
243 value,
244 curve[1].offset,
245 GAMMA_CORR_CNTLA_REGION_2_3,
246 GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET);
247
248 set_reg_field_value(
249 value,
250 curve[1].segments_num,
251 GAMMA_CORR_CNTLA_REGION_2_3,
252 GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS);
253
254 dm_write_reg(opp110->base.ctx,
255 mmGAMMA_CORR_CNTLA_REGION_2_3,
256 value);
257 }
258
259 curve += 2;
260 {
261 value = 0;
262 set_reg_field_value(
263 value,
264 curve[0].offset,
265 GAMMA_CORR_CNTLA_REGION_4_5,
266 GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET);
267
268 set_reg_field_value(
269 value,
270 curve[0].segments_num,
271 GAMMA_CORR_CNTLA_REGION_4_5,
272 GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS);
273
274 set_reg_field_value(
275 value,
276 curve[1].offset,
277 GAMMA_CORR_CNTLA_REGION_4_5,
278 GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET);
279
280 set_reg_field_value(
281 value,
282 curve[1].segments_num,
283 GAMMA_CORR_CNTLA_REGION_4_5,
284 GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS);
285
286 dm_write_reg(opp110->base.ctx,
287 mmGAMMA_CORR_CNTLA_REGION_4_5,
288 value);
289 }
290
291 curve += 2;
292 {
293 value = 0;
294 set_reg_field_value(
295 value,
296 curve[0].offset,
297 GAMMA_CORR_CNTLA_REGION_6_7,
298 GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET);
299
300 set_reg_field_value(
301 value,
302 curve[0].segments_num,
303 GAMMA_CORR_CNTLA_REGION_6_7,
304 GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS);
305
306 set_reg_field_value(
307 value,
308 curve[1].offset,
309 GAMMA_CORR_CNTLA_REGION_6_7,
310 GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET);
311
312 set_reg_field_value(
313 value,
314 curve[1].segments_num,
315 GAMMA_CORR_CNTLA_REGION_6_7,
316 GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS);
317
318 dm_write_reg(opp110->base.ctx,
319 mmGAMMA_CORR_CNTLA_REGION_6_7,
320 value);
321 }
322
323 curve += 2;
324 {
325 value = 0;
326 set_reg_field_value(
327 value,
328 curve[0].offset,
329 GAMMA_CORR_CNTLA_REGION_8_9,
330 GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET);
331
332 set_reg_field_value(
333 value,
334 curve[0].segments_num,
335 GAMMA_CORR_CNTLA_REGION_8_9,
336 GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS);
337
338 set_reg_field_value(
339 value,
340 curve[1].offset,
341 GAMMA_CORR_CNTLA_REGION_8_9,
342 GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET);
343
344 set_reg_field_value(
345 value,
346 curve[1].segments_num,
347 GAMMA_CORR_CNTLA_REGION_8_9,
348 GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS);
349
350 dm_write_reg(opp110->base.ctx,
351 mmGAMMA_CORR_CNTLA_REGION_8_9,
352 value);
353 }
354
355 curve += 2;
356 {
357 value = 0;
358 set_reg_field_value(
359 value,
360 curve[0].offset,
361 GAMMA_CORR_CNTLA_REGION_10_11,
362 GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET);
363
364 set_reg_field_value(
365 value,
366 curve[0].segments_num,
367 GAMMA_CORR_CNTLA_REGION_10_11,
368 GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS);
369
370 set_reg_field_value(
371 value,
372 curve[1].offset,
373 GAMMA_CORR_CNTLA_REGION_10_11,
374 GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET);
375
376 set_reg_field_value(
377 value,
378 curve[1].segments_num,
379 GAMMA_CORR_CNTLA_REGION_10_11,
380 GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS);
381
382 dm_write_reg(opp110->base.ctx,
383 mmGAMMA_CORR_CNTLA_REGION_10_11,
384 value);
385 }
386
387 curve += 2;
388 {
389 value = 0;
390 set_reg_field_value(
391 value,
392 curve[0].offset,
393 GAMMA_CORR_CNTLA_REGION_12_13,
394 GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET);
395
396 set_reg_field_value(
397 value,
398 curve[0].segments_num,
399 GAMMA_CORR_CNTLA_REGION_12_13,
400 GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS);
401
402 set_reg_field_value(
403 value,
404 curve[1].offset,
405 GAMMA_CORR_CNTLA_REGION_12_13,
406 GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET);
407
408 set_reg_field_value(
409 value,
410 curve[1].segments_num,
411 GAMMA_CORR_CNTLA_REGION_12_13,
412 GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS);
413
414 dm_write_reg(opp110->base.ctx,
415 mmGAMMA_CORR_CNTLA_REGION_12_13,
416 value);
417 }
418
419 curve += 2;
420 {
421 value = 0;
422 set_reg_field_value(
423 value,
424 curve[0].offset,
425 GAMMA_CORR_CNTLA_REGION_14_15,
426 GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET);
427
428 set_reg_field_value(
429 value,
430 curve[0].segments_num,
431 GAMMA_CORR_CNTLA_REGION_14_15,
432 GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS);
433
434 set_reg_field_value(
435 value,
436 curve[1].offset,
437 GAMMA_CORR_CNTLA_REGION_14_15,
438 GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET);
439
440 set_reg_field_value(
441 value,
442 curve[1].segments_num,
443 GAMMA_CORR_CNTLA_REGION_14_15,
444 GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS);
445
446 dm_write_reg(opp110->base.ctx,
447 mmGAMMA_CORR_CNTLA_REGION_14_15,
448 value);
449 }
450}
451
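/*
 * Stream the computed PWL into the gamma correction LUT: unlock all three
 * colour write masks (0x7), reset GAMMA_CORR_LUT_INDEX, then write the base
 * and delta values for R, G and B through GAMMA_CORR_LUT_DATA for every
 * hardware point.
 */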
452static void program_pwl(struct dce110_opp *opp110,
453 const struct pwl_params *params)
454{
455 uint32_t value = 0;
456
457 set_reg_field_value(
458 value,
459 7,
460 GAMMA_CORR_LUT_WRITE_EN_MASK,
461 GAMMA_CORR_LUT_WRITE_EN_MASK);
462
463 dm_write_reg(opp110->base.ctx,
464 mmGAMMA_CORR_LUT_WRITE_EN_MASK, value);
465
466 dm_write_reg(opp110->base.ctx,
467 mmGAMMA_CORR_LUT_INDEX, 0);
468
469 /* Program REGAMMA_LUT_DATA */
470 {
471 const uint32_t addr = mmGAMMA_CORR_LUT_DATA;
472 uint32_t i = 0;
473 const struct pwl_result_data *rgb =
474 params->rgb_resulted;
475
476 while (i != params->hw_points_num) {
477 dm_write_reg(opp110->base.ctx, addr, rgb->red_reg);
478 dm_write_reg(opp110->base.ctx, addr, rgb->green_reg);
479 dm_write_reg(opp110->base.ctx, addr, rgb->blue_reg);
480
481 dm_write_reg(opp110->base.ctx, addr,
482 rgb->delta_red_reg);
483 dm_write_reg(opp110->base.ctx, addr,
484 rgb->delta_green_reg);
485 dm_write_reg(opp110->base.ctx, addr,
486 rgb->delta_blue_reg);
487
488 ++rgb;
489 ++i;
490 }
491 }
492}
493
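/*
 * Full regamma programming sequence for the underlay OPP: configure the
 * region/segment registers, bypass input gamma, power the gamma LUT memory
 * on, program the PWL, enable gamma correction mode, then hand LUT memory
 * power control back to automatic gating.
 */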
494bool dce110_opp_program_regamma_pwl_v(
495 struct output_pixel_processor *opp,
496 const struct pwl_params *params)
497{
498 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
499
500 /* Setup regions */
501 regamma_config_regions_and_segments(opp110, params);
502
503 set_bypass_input_gamma(opp110);
504
505 /* Power on gamma LUT memory */
506 power_on_lut(opp, true, false, true);
507
508 /* Program PWL */
509 program_pwl(opp110, params);
510
511 /* program regamma config */
512 configure_regamma_mode(opp110, 1);
513
514	/* Return LUT memory power control to automatic */
515 power_on_lut(opp, false, false, true);
516
517 return true;
518}
519
520void dce110_opp_power_on_regamma_lut_v(
521 struct output_pixel_processor *opp,
522 bool power_on)
523{
524 uint32_t value = dm_read_reg(opp->ctx, mmDCFEV_MEM_PWR_CTRL);
525
526 set_reg_field_value(
527 value,
528 0,
529 DCFEV_MEM_PWR_CTRL,
530 COL_MAN_GAMMA_CORR_MEM_PWR_FORCE);
531
532 set_reg_field_value(
533 value,
534 power_on,
535 DCFEV_MEM_PWR_CTRL,
536 COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
537
538 set_reg_field_value(
539 value,
540 0,
541 DCFEV_MEM_PWR_CTRL,
542 COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE);
543
544 set_reg_field_value(
545 value,
546 power_on,
547 DCFEV_MEM_PWR_CTRL,
548 COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
549
550 dm_write_reg(opp->ctx, mmDCFEV_MEM_PWR_CTRL, value);
551}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
new file mode 100644
index 000000000000..4b32397529ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE11 register header files */
29#include "dce/dce_11_0_d.h"
30#include "dce/dce_11_0_sh_mask.h"
31
32#include "dce110_opp.h"
33#include "dce110_opp_v.h"
34
35#include "gamma_types.h"
36
37/*****************************************/
38/* Constructor, Destructor */
39/*****************************************/
40
41static const struct opp_funcs funcs = {
42 .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut_v,
43 .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl_v,
44 .opp_set_csc_default = dce110_opp_v_set_csc_default,
45 .opp_set_csc_adjustment = dce110_opp_v_set_csc_adjustment,
46 .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
47 .opp_set_regamma_mode = dce110_opp_set_regamma_mode,
48 .opp_destroy = dce110_opp_destroy,
49 .opp_program_fmt = dce110_opp_program_fmt,
50 .opp_program_bit_depth_reduction =
51 dce110_opp_program_bit_depth_reduction
52};
53
54bool dce110_opp_v_construct(struct dce110_opp *opp110,
55 struct dc_context *ctx)
56{
57 opp110->base.funcs = &funcs;
58
59 opp110->base.ctx = ctx;
60
61 return true;
62}
63
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
new file mode 100644
index 000000000000..dcdbf86fccc1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
@@ -0,0 +1,56 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_OPP_DCE110_V_H__
26#define __DC_OPP_DCE110_V_H__
27
28#include "dc_types.h"
29#include "opp.h"
30#include "core_types.h"
31
32#include "gamma_types.h" /* deprecated */
33
34struct gamma_parameters;
35
36bool dce110_opp_v_construct(struct dce110_opp *opp110,
37 struct dc_context *ctx);
38
39/* underlay callbacks */
40void dce110_opp_v_set_csc_default(
41 struct output_pixel_processor *opp,
42 const struct default_adjustment *default_adjust);
43
44void dce110_opp_v_set_csc_adjustment(
45 struct output_pixel_processor *opp,
46 const struct out_csc_color_matrix *tbl_entry);
47
48bool dce110_opp_program_regamma_pwl_v(
49 struct output_pixel_processor *opp,
50 const struct pwl_params *params);
51
52void dce110_opp_power_on_regamma_lut_v(
53 struct output_pixel_processor *opp,
54 bool power_on);
55
56#endif /* __DC_OPP_DCE110_V_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
new file mode 100644
index 000000000000..959467fa421e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -0,0 +1,1413 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "link_encoder.h"
29#include "stream_encoder.h"
30
31#include "resource.h"
32#include "dce110/dce110_resource.h"
33
34#include "include/irq_service_interface.h"
35#include "dce/dce_audio.h"
36#include "dce110/dce110_timing_generator.h"
37#include "irq/dce110/irq_service_dce110.h"
38#include "dce110/dce110_timing_generator_v.h"
39#include "dce/dce_link_encoder.h"
40#include "dce/dce_stream_encoder.h"
41#include "dce110/dce110_mem_input.h"
42#include "dce110/dce110_mem_input_v.h"
43#include "dce110/dce110_ipp.h"
44#include "dce/dce_transform.h"
45#include "dce110/dce110_transform_v.h"
46#include "dce110/dce110_opp.h"
47#include "dce110/dce110_opp_v.h"
48#include "dce/dce_clock_source.h"
49#include "dce/dce_hwseq.h"
50#include "dce110/dce110_hw_sequencer.h"
51
52#include "reg_helper.h"
53
54#include "dce/dce_11_0_d.h"
55#include "dce/dce_11_0_sh_mask.h"
56
57#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
58#include "gmc/gmc_8_2_d.h"
59#include "gmc/gmc_8_2_sh_mask.h"
60#endif
61
62#ifndef mmDP_DPHY_INTERNAL_CTRL
63 #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
64 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
65 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
66 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
67 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
68 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
69 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
70 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
71 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
72 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
73#endif
74
75#ifndef mmBIOS_SCRATCH_2
76 #define mmBIOS_SCRATCH_2 0x05CB
77 #define mmBIOS_SCRATCH_6 0x05CF
78#endif
79
80#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
81 #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
82 #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
83 #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
84 #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
85 #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
86 #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
87 #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
88 #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
89#endif
90
91#ifndef mmDP_DPHY_FAST_TRAINING
92 #define mmDP_DPHY_FAST_TRAINING 0x4ABC
93 #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
94 #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
95 #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
96 #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
97 #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
98 #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
99 #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
100#endif
101
102#ifndef DPHY_RX_FAST_TRAINING_CAPABLE
103 #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1
104#endif
105
106static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
107 {
108 .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
109 .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
110 },
111 {
112 .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
113 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
114 },
115 {
116 .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
117 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
118 },
119 {
120 .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
121 .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
122 },
123 {
124 .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
125 .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
126 },
127 {
128 .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
129 .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
130 }
131};
132
133static const struct dce110_mem_input_reg_offsets dce110_mi_reg_offsets[] = {
134 {
135 .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
136 .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
137 - mmDPG_WATERMARK_MASK_CONTROL),
138 .pipe = (mmPIPE0_DMIF_BUFFER_CONTROL
139 - mmPIPE0_DMIF_BUFFER_CONTROL),
140 },
141 {
142 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
143 .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
144 - mmDPG_WATERMARK_MASK_CONTROL),
145 .pipe = (mmPIPE1_DMIF_BUFFER_CONTROL
146 - mmPIPE0_DMIF_BUFFER_CONTROL),
147 },
148 {
149 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
150 .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
151 - mmDPG_WATERMARK_MASK_CONTROL),
152 .pipe = (mmPIPE2_DMIF_BUFFER_CONTROL
153 - mmPIPE0_DMIF_BUFFER_CONTROL),
154 }
155};
156
157
158static const struct dce110_ipp_reg_offsets dce110_ipp_reg_offsets[] = {
159{
160 .dcp_offset = (mmDCP0_CUR_CONTROL - mmCUR_CONTROL),
161},
162{
163 .dcp_offset = (mmDCP1_CUR_CONTROL - mmCUR_CONTROL),
164},
165{
166 .dcp_offset = (mmDCP2_CUR_CONTROL - mmCUR_CONTROL),
167},
168{
169 .dcp_offset = (mmDCP3_CUR_CONTROL - mmCUR_CONTROL),
170},
171{
172 .dcp_offset = (mmDCP4_CUR_CONTROL - mmCUR_CONTROL),
173},
174{
175 .dcp_offset = (mmDCP5_CUR_CONTROL - mmCUR_CONTROL),
176}
177};
178
179
180
181
182/* set register offset */
183#define SR(reg_name)\
184 .reg_name = mm ## reg_name
185
186/* set register offset with instance */
187#define SRI(reg_name, block, id)\
188 .reg_name = mm ## block ## id ## _ ## reg_name
189
190
191#define transform_regs(id)\
192[id] = {\
193 XFM_COMMON_REG_LIST_DCE110(id)\
194}
195
196static const struct dce_transform_registers xfm_regs[] = {
197 transform_regs(0),
198 transform_regs(1),
199 transform_regs(2)
200};
201
202static const struct dce_transform_shift xfm_shift = {
203 XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
204};
205
206static const struct dce_transform_mask xfm_mask = {
207 XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
208};
209
210#define aux_regs(id)\
211[id] = {\
212 AUX_REG_LIST(id)\
213}
214
215static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
216 aux_regs(0),
217 aux_regs(1),
218 aux_regs(2),
219 aux_regs(3),
220 aux_regs(4),
221 aux_regs(5)
222};
223
224#define hpd_regs(id)\
225[id] = {\
226 HPD_REG_LIST(id)\
227}
228
229static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
230 hpd_regs(0),
231 hpd_regs(1),
232 hpd_regs(2),
233 hpd_regs(3),
234 hpd_regs(4),
235 hpd_regs(5)
236};
237
238
239#define link_regs(id)\
240[id] = {\
241 LE_DCE110_REG_LIST(id)\
242}
243
244static const struct dce110_link_enc_registers link_enc_regs[] = {
245 link_regs(0),
246 link_regs(1),
247 link_regs(2),
248 link_regs(3),
249 link_regs(4),
250 link_regs(5),
251 link_regs(6),
252};
253
254#define stream_enc_regs(id)\
255[id] = {\
256 SE_COMMON_REG_LIST(id),\
257 .TMDS_CNTL = 0,\
258}
259
260static const struct dce110_stream_enc_registers stream_enc_regs[] = {
261 stream_enc_regs(0),
262 stream_enc_regs(1),
263 stream_enc_regs(2)
264};
265
266static const struct dce_stream_encoder_shift se_shift = {
267 SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
268};
269
270static const struct dce_stream_encoder_mask se_mask = {
271 SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
272};
273
274#define audio_regs(id)\
275[id] = {\
276 AUD_COMMON_REG_LIST(id)\
277}
278
279static const struct dce_audio_registers audio_regs[] = {
280 audio_regs(0),
281 audio_regs(1),
282 audio_regs(2),
283 audio_regs(3),
284 audio_regs(4),
285 audio_regs(5),
286 audio_regs(6),
287};
288
289static const struct dce_audio_shift audio_shift = {
290 AUD_COMMON_MASK_SH_LIST(__SHIFT)
291};
292
293static const struct dce_aduio_mask audio_mask = {
294 AUD_COMMON_MASK_SH_LIST(_MASK)
295};
296
297/* AG TBD Needs to be reduced back to 3 pipes once the dce10 hw sequencer is implemented. */
298static const struct dce110_opp_reg_offsets dce110_opp_reg_offsets[] = {
299{
300 .fmt_offset = (mmFMT0_FMT_CONTROL - mmFMT0_FMT_CONTROL),
301 .dcfe_offset = (mmDCFE0_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
302 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
303},
304{ .fmt_offset = (mmFMT1_FMT_CONTROL - mmFMT0_FMT_CONTROL),
305 .dcfe_offset = (mmDCFE1_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
306 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
307},
308{ .fmt_offset = (mmFMT2_FMT_CONTROL - mmFMT0_FMT_CONTROL),
309 .dcfe_offset = (mmDCFE2_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
310 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
311},
312{
313 .fmt_offset = (mmFMT3_FMT_CONTROL - mmFMT0_FMT_CONTROL),
314 .dcfe_offset = (mmDCFE3_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
315 .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
316},
317{ .fmt_offset = (mmFMT4_FMT_CONTROL - mmFMT0_FMT_CONTROL),
318 .dcfe_offset = (mmDCFE4_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
319 .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
320},
321{ .fmt_offset = (mmFMT5_FMT_CONTROL - mmFMT0_FMT_CONTROL),
322 .dcfe_offset = (mmDCFE5_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
323 .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
324}
325};
326
327#define clk_src_regs(id)\
328[id] = {\
329 CS_COMMON_REG_LIST_DCE_100_110(id),\
330}
331
332static const struct dce110_clk_src_regs clk_src_regs[] = {
333 clk_src_regs(0),
334 clk_src_regs(1),
335 clk_src_regs(2)
336};
337
338static const struct dce110_clk_src_shift cs_shift = {
339 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
340};
341
342static const struct dce110_clk_src_mask cs_mask = {
343 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
344};
345
346static const struct bios_registers bios_regs = {
347 .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
348};
349
350static const struct resource_caps carrizo_resource_cap = {
351 .num_timing_generator = 3,
352 .num_video_plane = 1,
353 .num_audio = 3,
354 .num_stream_encoder = 3,
355 .num_pll = 2,
356};
357
358static const struct resource_caps stoney_resource_cap = {
359 .num_timing_generator = 2,
360 .num_video_plane = 1,
361 .num_audio = 3,
362 .num_stream_encoder = 3,
363 .num_pll = 2,
364};
365
366#define CTX ctx
367#define REG(reg) mm ## reg
368
369#ifndef mmCC_DC_HDMI_STRAPS
370#define mmCC_DC_HDMI_STRAPS 0x4819
371#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
372#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
373#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
374#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
375#endif
376
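/* Read the HDMI-disable and audio-stream-number board straps plus the audio pinstraps. */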
377static void read_dce_straps(
378 struct dc_context *ctx,
379 struct resource_straps *straps)
380{
381 REG_GET_2(CC_DC_HDMI_STRAPS,
382 HDMI_DISABLE, &straps->hdmi_disable,
383 AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
384
385 REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
386}
387
388static struct audio *create_audio(
389 struct dc_context *ctx, unsigned int inst)
390{
391 return dce_audio_create(ctx, inst,
392 &audio_regs[inst], &audio_shift, &audio_mask);
393}
394
395static struct timing_generator *dce110_timing_generator_create(
396 struct dc_context *ctx,
397 uint32_t instance,
398 const struct dce110_timing_generator_offsets *offsets)
399{
400 struct dce110_timing_generator *tg110 =
401 dm_alloc(sizeof(struct dce110_timing_generator));
402
403 if (!tg110)
404 return NULL;
405
406 if (dce110_timing_generator_construct(tg110, ctx, instance, offsets))
407 return &tg110->base;
408
409 BREAK_TO_DEBUGGER();
410 dm_free(tg110);
411 return NULL;
412}
413
414static struct stream_encoder *dce110_stream_encoder_create(
415 enum engine_id eng_id,
416 struct dc_context *ctx)
417{
418 struct dce110_stream_encoder *enc110 =
419 dm_alloc(sizeof(struct dce110_stream_encoder));
420
421 if (!enc110)
422 return NULL;
423
424 if (dce110_stream_encoder_construct(
425 enc110, ctx, ctx->dc_bios, eng_id,
426 &stream_enc_regs[eng_id], &se_shift, &se_mask))
427 return &enc110->base;
428
429 BREAK_TO_DEBUGGER();
430 dm_free(enc110);
431 return NULL;
432}
433
434#define SRII(reg_name, block, id)\
435 .reg_name[id] = mm ## block ## id ## _ ## reg_name
436
437static const struct dce_hwseq_registers hwseq_stoney_reg = {
438 HWSEQ_ST_REG_LIST()
439};
440
441static const struct dce_hwseq_registers hwseq_cz_reg = {
442 HWSEQ_CZ_REG_LIST()
443};
444
445static const struct dce_hwseq_shift hwseq_shift = {
446 HWSEQ_DCE11_MASK_SH_LIST(__SHIFT),
447};
448
449static const struct dce_hwseq_mask hwseq_mask = {
450 HWSEQ_DCE11_MASK_SH_LIST(_MASK),
451};
452
453static struct dce_hwseq *dce110_hwseq_create(
454 struct dc_context *ctx)
455{
456 struct dce_hwseq *hws = dm_alloc(sizeof(struct dce_hwseq));
457
458 if (hws) {
459 hws->ctx = ctx;
460 hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ?
461 &hwseq_stoney_reg : &hwseq_cz_reg;
462 hws->shifts = &hwseq_shift;
463 hws->masks = &hwseq_mask;
464 hws->wa.blnd_crtc_trigger = true;
465 }
466 return hws;
467}
468
469static const struct resource_create_funcs res_create_funcs = {
470 .read_dce_straps = read_dce_straps,
471 .create_audio = create_audio,
472 .create_stream_encoder = dce110_stream_encoder_create,
473 .create_hwseq = dce110_hwseq_create,
474};
475
476#define mi_inst_regs(id) { \
477 MI_REG_LIST(id), \
478 .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
479}
480static const struct dce_mem_input_registers mi_regs[] = {
481 mi_inst_regs(0),
482 mi_inst_regs(1),
483 mi_inst_regs(2),
484};
485
486static const struct dce_mem_input_shift mi_shifts = {
487 MI_DCE_MASK_SH_LIST(__SHIFT),
488 .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
489};
490
491static const struct dce_mem_input_mask mi_masks = {
492 MI_DCE_MASK_SH_LIST(_MASK),
493 .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
494};
495
496static struct mem_input *dce110_mem_input_create(
497 struct dc_context *ctx,
498 uint32_t inst,
499 const struct dce110_mem_input_reg_offsets *offset)
500{
501 struct dce110_mem_input *mem_input110 =
502 dm_alloc(sizeof(struct dce110_mem_input));
503
504 if (!mem_input110)
505 return NULL;
506
507 if (dce110_mem_input_construct(mem_input110, ctx, inst, offset)) {
508 struct mem_input *mi = &mem_input110->base;
509
510 mi->regs = &mi_regs[inst];
511 mi->shifts = &mi_shifts;
512 mi->masks = &mi_masks;
513 mi->wa.single_head_rdreq_dmif_limit = 3;
514 return mi;
515 }
516
517 BREAK_TO_DEBUGGER();
518 dm_free(mem_input110);
519 return NULL;
520}
521
522static void dce110_transform_destroy(struct transform **xfm)
523{
524 dm_free(TO_DCE_TRANSFORM(*xfm));
525 *xfm = NULL;
526}
527
528static struct transform *dce110_transform_create(
529 struct dc_context *ctx,
530 uint32_t inst)
531{
532 struct dce_transform *transform =
533 dm_alloc(sizeof(struct dce_transform));
534
535 if (!transform)
536 return NULL;
537
538 if (dce_transform_construct(transform, ctx, inst,
539 &xfm_regs[inst], &xfm_shift, &xfm_mask))
540 return &transform->base;
541
542 BREAK_TO_DEBUGGER();
543 dm_free(transform);
544 return NULL;
545}
546
547static struct input_pixel_processor *dce110_ipp_create(
548 struct dc_context *ctx,
549 uint32_t inst,
550 const struct dce110_ipp_reg_offsets *offsets)
551{
552 struct dce110_ipp *ipp =
553 dm_alloc(sizeof(struct dce110_ipp));
554
555 if (!ipp)
556 return NULL;
557
558 if (dce110_ipp_construct(ipp, ctx, inst, offsets))
559 return &ipp->base;
560
561 BREAK_TO_DEBUGGER();
562 dm_free(ipp);
563 return NULL;
564}
565
566struct link_encoder *dce110_link_encoder_create(
567 const struct encoder_init_data *enc_init_data)
568{
569 struct dce110_link_encoder *enc110 =
570 dm_alloc(sizeof(struct dce110_link_encoder));
571
572 if (!enc110)
573 return NULL;
574
575 if (dce110_link_encoder_construct(
576 enc110,
577 enc_init_data,
578 &link_enc_regs[enc_init_data->transmitter],
579 &link_enc_aux_regs[enc_init_data->channel - 1],
580 &link_enc_hpd_regs[enc_init_data->hpd_source])) {
581
582 enc110->base.features.ycbcr420_supported = false;
583 enc110->base.features.max_hdmi_pixel_clock = 594000;
584 return &enc110->base;
585 }
586
587 BREAK_TO_DEBUGGER();
588 dm_free(enc110);
589 return NULL;
590}
591
592static struct output_pixel_processor *dce110_opp_create(
593 struct dc_context *ctx,
594 uint32_t inst,
595 const struct dce110_opp_reg_offsets *offsets)
596{
597 struct dce110_opp *opp =
598 dm_alloc(sizeof(struct dce110_opp));
599
600 if (!opp)
601 return NULL;
602
603 if (dce110_opp_construct(opp,
604 ctx, inst, offsets))
605 return &opp->base;
606
607 BREAK_TO_DEBUGGER();
608 dm_free(opp);
609 return NULL;
610}
611
612struct clock_source *dce110_clock_source_create(
613 struct dc_context *ctx,
614 struct dc_bios *bios,
615 enum clock_source_id id,
616 const struct dce110_clk_src_regs *regs,
617 bool dp_clk_src)
618{
619 struct dce110_clk_src *clk_src =
620 dm_alloc(sizeof(struct dce110_clk_src));
621
622 if (!clk_src)
623 return NULL;
624
625 if (dce110_clk_src_construct(clk_src, ctx, bios, id,
626 regs, &cs_shift, &cs_mask)) {
627 clk_src->base.dp_clk_src = dp_clk_src;
628 return &clk_src->base;
629 }
630
631 BREAK_TO_DEBUGGER();
632 return NULL;
633}
634
635void dce110_clock_source_destroy(struct clock_source **clk_src)
636{
637 struct dce110_clk_src *dce110_clk_src;
638
639 if (!clk_src)
640 return;
641
642 dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src);
643
644 if (dce110_clk_src->dp_ss_params)
645 dm_free(dce110_clk_src->dp_ss_params);
646
647 if (dce110_clk_src->hdmi_ss_params)
648 dm_free(dce110_clk_src->hdmi_ss_params);
649
650 if (dce110_clk_src->dvi_ss_params)
651 dm_free(dce110_clk_src->dvi_ss_params);
652
653 dm_free(dce110_clk_src);
654 *clk_src = NULL;
655}
656
657static void destruct(struct dce110_resource_pool *pool)
658{
659 unsigned int i;
660
661 for (i = 0; i < pool->base.pipe_count; i++) {
662 if (pool->base.opps[i] != NULL)
663 dce110_opp_destroy(&pool->base.opps[i]);
664
665 if (pool->base.transforms[i] != NULL)
666 dce110_transform_destroy(&pool->base.transforms[i]);
667
668 if (pool->base.ipps[i] != NULL)
669 dce110_ipp_destroy(&pool->base.ipps[i]);
670
671 if (pool->base.mis[i] != NULL) {
672 dm_free(TO_DCE110_MEM_INPUT(pool->base.mis[i]));
673 pool->base.mis[i] = NULL;
674 }
675
676 if (pool->base.timing_generators[i] != NULL) {
677 dm_free(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
678 pool->base.timing_generators[i] = NULL;
679 }
680 }
681
682 for (i = 0; i < pool->base.stream_enc_count; i++) {
683 if (pool->base.stream_enc[i] != NULL)
684 dm_free(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
685 }
686
687 for (i = 0; i < pool->base.clk_src_count; i++) {
688 if (pool->base.clock_sources[i] != NULL) {
689 dce110_clock_source_destroy(&pool->base.clock_sources[i]);
690 }
691 }
692
693 if (pool->base.dp_clock_source != NULL)
694 dce110_clock_source_destroy(&pool->base.dp_clock_source);
695
696 for (i = 0; i < pool->base.audio_count; i++) {
697 if (pool->base.audios[i] != NULL) {
698 dce_aud_destroy(&pool->base.audios[i]);
699 }
700 }
701
702 if (pool->base.display_clock != NULL) {
703 dal_display_clock_destroy(&pool->base.display_clock);
704 }
705
706 if (pool->base.irqs != NULL) {
707 dal_irq_service_destroy(&pool->base.irqs);
708 }
709}
710
711
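/*
 * Derive pixel clock parameters from the stream timing. The symbol clock is
 * currently hard-coded to RBR (LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ);
 * see the TODOs below.
 */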
712static void get_pixel_clock_parameters(
713 const struct pipe_ctx *pipe_ctx,
714 struct pixel_clk_params *pixel_clk_params)
715{
716 const struct core_stream *stream = pipe_ctx->stream;
717
718 /*TODO: is this halved for YCbCr 420? in that case we might want to move
719 * the pixel clock normalization for hdmi up to here instead of doing it
720 * in pll_adjust_pix_clk
721 */
722 pixel_clk_params->requested_pix_clk = stream->public.timing.pix_clk_khz;
723 pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
724 pixel_clk_params->signal_type = pipe_ctx->stream->signal;
725 pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
726 /* TODO: un-hardcode*/
727 pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
728 LINK_RATE_REF_FREQ_IN_KHZ;
729 pixel_clk_params->flags.ENABLE_SS = 0;
730 pixel_clk_params->color_depth =
731 stream->public.timing.display_color_depth;
732 pixel_clk_params->flags.DISPLAY_BLANKED = 1;
733 pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->public.timing.pixel_encoding ==
734 PIXEL_ENCODING_YCBCR420);
735}
736
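/*
 * Pick spatial dither depth from the stream colour depth (6/8/10 bpc ->
 * depth 0/1/2) and enable truncation, except in diagnostics environments
 * where dithering would make image CRCs non-deterministic.
 */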
737void dce110_resource_build_bit_depth_reduction_params(
738 const struct core_stream *stream,
739 struct bit_depth_reduction_params *fmt_bit_depth)
740{
741 memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
742
743 /*TODO: Need to un-hardcode, refer to function with same name
744 * in dal2 hw_sequencer*/
745
746 fmt_bit_depth->flags.TRUNCATE_ENABLED = 0;
747 fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 0;
748 fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 0;
749
750	/* Diagnostics need a consistent CRC of the image, which means
751	 * dithering must not be enabled for Diagnostics. */
752 if (IS_DIAG_DC(stream->ctx->dce_environment) == false) {
753 switch (stream->public.timing.display_color_depth) {
754 case COLOR_DEPTH_666:
755 fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
756 fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
757 break;
758 case COLOR_DEPTH_888:
759 fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
760 fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
761 break;
762 case COLOR_DEPTH_101010:
763 fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
764 fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
765 break;
766 default:
767 break;
768 }
769 fmt_bit_depth->flags.RGB_RANDOM = 1;
770 fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
771 fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
772 fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
773 }
774
775 return;
776}
777
778enum dc_status dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
779{
780 get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->pix_clk_params);
781 pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
782 pipe_ctx->clock_source,
783 &pipe_ctx->pix_clk_params,
784 &pipe_ctx->pll_settings);
785 dce110_resource_build_bit_depth_reduction_params(pipe_ctx->stream,
786 &pipe_ctx->stream->bit_depth_params);
787 pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->public.timing.pixel_encoding;
788
789 return DC_OK;
790}
791
792static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx)
793{
794 if (pipe_ctx->pipe_idx != underlay_idx)
795 return true;
796 if (!pipe_ctx->surface)
797 return false;
798 if (pipe_ctx->surface->public.format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
799 return false;
800 return true;
801}
802
803static enum dc_status validate_mapped_resource(
804 const struct core_dc *dc,
805 struct validate_context *context)
806{
807 enum dc_status status = DC_OK;
808 uint8_t i, j, k;
809
810 for (i = 0; i < context->target_count; i++) {
811 struct core_target *target = context->targets[i];
812
813 for (j = 0; j < target->public.stream_count; j++) {
814 struct core_stream *stream =
815 DC_STREAM_TO_CORE(target->public.streams[j]);
816 struct core_link *link = stream->sink->link;
817
818 if (resource_is_stream_unchanged(dc->current_context, stream))
819 continue;
820
821 for (k = 0; k < MAX_PIPES; k++) {
822 struct pipe_ctx *pipe_ctx =
823 &context->res_ctx.pipe_ctx[k];
824
825 if (context->res_ctx.pipe_ctx[k].stream != stream)
826 continue;
827
828 if (!is_surface_pixel_format_supported(pipe_ctx,
829 context->res_ctx.pool->underlay_pipe_index))
830 return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED;
831
832 if (!pipe_ctx->tg->funcs->validate_timing(
833 pipe_ctx->tg, &stream->public.timing))
834 return DC_FAIL_CONTROLLER_VALIDATE;
835
836 status = dce110_resource_build_pipe_hw_param(pipe_ctx);
837
838 if (status != DC_OK)
839 return status;
840
841 if (!link->link_enc->funcs->validate_output_with_stream(
842 link->link_enc,
843 pipe_ctx))
844 return DC_FAIL_ENC_VALIDATE;
845
846 /* TODO: validate audio ASIC caps, encoder */
847
848 status = dc_link_validate_mode_timing(stream,
849 link,
850 &stream->public.timing);
851
852 if (status != DC_OK)
853 return status;
854
855 resource_build_info_frame(pipe_ctx);
856
857 /* do not need to validate non root pipes */
858 break;
859 }
860 }
861 }
862
863 return DC_OK;
864}
865
866enum dc_status dce110_validate_bandwidth(
867 const struct core_dc *dc,
868 struct validate_context *context)
869{
870 enum dc_status result = DC_ERROR_UNEXPECTED;
871
872 dm_logger_write(
873 dc->ctx->logger, LOG_BANDWIDTH_CALCS,
874 "%s: start",
875 __func__);
876
877 if (!bw_calcs(
878 dc->ctx,
879 &dc->bw_dceip,
880 &dc->bw_vbios,
881 context->res_ctx.pipe_ctx,
882 context->res_ctx.pool->pipe_count,
883 &context->bw_results))
884 result = DC_FAIL_BANDWIDTH_VALIDATE;
885 else
886 result = DC_OK;
887
888 if (result == DC_FAIL_BANDWIDTH_VALIDATE)
889 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
890 "%s: %dx%d@%d Bandwidth validation failed!\n",
891 __func__,
892 context->targets[0]->public.streams[0]->timing.h_addressable,
893 context->targets[0]->public.streams[0]->timing.v_addressable,
894 context->targets[0]->public.streams[0]->timing.pix_clk_khz);
895
896 if (memcmp(&dc->current_context->bw_results,
897 &context->bw_results, sizeof(context->bw_results))) {
898 struct log_entry log_entry;
899 dm_logger_open(
900 dc->ctx->logger,
901 &log_entry,
902 LOG_BANDWIDTH_CALCS);
903 dm_logger_append(&log_entry, "%s: finish,\n"
904 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
905 "stutMark_b: %d stutMark_a: %d\n",
906 __func__,
907 context->bw_results.nbp_state_change_wm_ns[0].b_mark,
908 context->bw_results.nbp_state_change_wm_ns[0].a_mark,
909 context->bw_results.urgent_wm_ns[0].b_mark,
910 context->bw_results.urgent_wm_ns[0].a_mark,
911 context->bw_results.stutter_exit_wm_ns[0].b_mark,
912 context->bw_results.stutter_exit_wm_ns[0].a_mark);
913 dm_logger_append(&log_entry,
914 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
915 "stutMark_b: %d stutMark_a: %d\n",
916 context->bw_results.nbp_state_change_wm_ns[1].b_mark,
917 context->bw_results.nbp_state_change_wm_ns[1].a_mark,
918 context->bw_results.urgent_wm_ns[1].b_mark,
919 context->bw_results.urgent_wm_ns[1].a_mark,
920 context->bw_results.stutter_exit_wm_ns[1].b_mark,
921 context->bw_results.stutter_exit_wm_ns[1].a_mark);
922 dm_logger_append(&log_entry,
923 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
924 "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
925 context->bw_results.nbp_state_change_wm_ns[2].b_mark,
926 context->bw_results.nbp_state_change_wm_ns[2].a_mark,
927 context->bw_results.urgent_wm_ns[2].b_mark,
928 context->bw_results.urgent_wm_ns[2].a_mark,
929 context->bw_results.stutter_exit_wm_ns[2].b_mark,
930 context->bw_results.stutter_exit_wm_ns[2].a_mark,
931 context->bw_results.stutter_mode_enable);
932 dm_logger_append(&log_entry,
933 "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
934 "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
935 context->bw_results.cpuc_state_change_enable,
936 context->bw_results.cpup_state_change_enable,
937 context->bw_results.nbp_state_change_enable,
938 context->bw_results.all_displays_in_sync,
939 context->bw_results.dispclk_khz,
940 context->bw_results.required_sclk,
941 context->bw_results.required_sclk_deep_sleep,
942 context->bw_results.required_yclk,
943 context->bw_results.blackout_recovery_time_us);
944 dm_logger_close(&log_entry);
945 }
946 return result;
947}
948
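/*
 * A validation set may carry at most two surfaces: surface 0 must match the
 * stream source size and be a non-video format, while an optional second
 * (underlay) surface must be a video format no larger than 1920x1080 on an
 * RGB stream.
 */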
949static bool dce110_validate_surface_sets(
950 const struct dc_validation_set set[],
951 int set_count)
952{
953 int i;
954
955 for (i = 0; i < set_count; i++) {
956 if (set[i].surface_count == 0)
957 continue;
958
959 if (set[i].surface_count > 2)
960 return false;
961
962 if (set[i].surfaces[0]->src_rect.width
963 != set[i].target->streams[0]->src.width
964 || set[i].surfaces[0]->src_rect.height
965 != set[i].target->streams[0]->src.height)
966 return false;
967 if (set[i].surfaces[0]->format
968 >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
969 return false;
970
971 if (set[i].surface_count == 2) {
972 if (set[i].surfaces[1]->format
973 < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
974 return false;
975 if (set[i].surfaces[1]->src_rect.width > 1920
976 || set[i].surfaces[1]->src_rect.height > 1080)
977 return false;
978
979 if (set[i].target->streams[0]->timing.pixel_encoding != PIXEL_ENCODING_RGB)
980 return false;
981 }
982 }
983
984 return true;
985}
986
987enum dc_status dce110_validate_with_context(
988 const struct core_dc *dc,
989 const struct dc_validation_set set[],
990 int set_count,
991 struct validate_context *context)
992{
993 struct dc_context *dc_ctx = dc->ctx;
994 enum dc_status result = DC_ERROR_UNEXPECTED;
995 int i;
996
997 if (!dce110_validate_surface_sets(set, set_count))
998 return DC_FAIL_SURFACE_VALIDATE;
999
1000 context->res_ctx.pool = dc->res_pool;
1001
1002 for (i = 0; i < set_count; i++) {
1003 context->targets[i] = DC_TARGET_TO_CORE(set[i].target);
1004 dc_target_retain(&context->targets[i]->public);
1005 context->target_count++;
1006 }
1007
1008 result = resource_map_pool_resources(dc, context);
1009
1010 if (result == DC_OK)
1011 result = resource_map_clock_resources(dc, context);
1012
1013 if (!resource_validate_attach_surfaces(
1014 set, set_count, dc->current_context, context)) {
1015 DC_ERROR("Failed to attach surface to target!\n");
1016 return DC_FAIL_ATTACH_SURFACES;
1017 }
1018
1019 if (result == DC_OK)
1020 result = validate_mapped_resource(dc, context);
1021
1022 if (result == DC_OK)
1023 result = resource_build_scaling_params_for_context(dc, context);
1024
1025 if (result == DC_OK)
1026 result = dce110_validate_bandwidth(dc, context);
1027
1028 return result;
1029}
1030
1031enum dc_status dce110_validate_guaranteed(
1032 const struct core_dc *dc,
1033 const struct dc_target *dc_target,
1034 struct validate_context *context)
1035{
1036 enum dc_status result = DC_ERROR_UNEXPECTED;
1037
1038 context->res_ctx.pool = dc->res_pool;
1039
1040 context->targets[0] = DC_TARGET_TO_CORE(dc_target);
1041 dc_target_retain(&context->targets[0]->public);
1042 context->target_count++;
1043
1044 result = resource_map_pool_resources(dc, context);
1045
1046 if (result == DC_OK)
1047 result = resource_map_clock_resources(dc, context);
1048
1049 if (result == DC_OK)
1050 result = validate_mapped_resource(dc, context);
1051
1052 if (result == DC_OK) {
1053 validate_guaranteed_copy_target(
1054 context, dc->public.caps.max_targets);
1055 result = resource_build_scaling_params_for_context(dc, context);
1056 }
1057
1058 if (result == DC_OK)
1059 result = dce110_validate_bandwidth(dc, context);
1060
1061 return result;
1062}
1063
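/*
 * The underlay is a single dedicated pipe at a fixed index; it can only be
 * acquired when no stream already owns it.
 */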
1064static struct pipe_ctx *dce110_acquire_idle_pipe_for_layer(
1065 struct resource_context *res_ctx,
1066 struct core_stream *stream)
1067{
1068 unsigned int underlay_idx = res_ctx->pool->underlay_pipe_index;
1069 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
1070
1071 if (res_ctx->pipe_ctx[underlay_idx].stream) {
1072 return NULL;
1073 }
1074
1075 pipe_ctx->tg = res_ctx->pool->timing_generators[underlay_idx];
1076 pipe_ctx->mi = res_ctx->pool->mis[underlay_idx];
1077 /*pipe_ctx->ipp = res_ctx->pool->ipps[underlay_idx];*/
1078 pipe_ctx->xfm = res_ctx->pool->transforms[underlay_idx];
1079 pipe_ctx->opp = res_ctx->pool->opps[underlay_idx];
1080 pipe_ctx->dis_clk = res_ctx->pool->display_clock;
1081 pipe_ctx->pipe_idx = underlay_idx;
1082
1083 pipe_ctx->stream = stream;
1084
1085 return pipe_ctx;
1086
1087}
1088
1089static void dce110_destroy_resource_pool(struct resource_pool **pool)
1090{
1091 struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
1092
1093 destruct(dce110_pool);
1094 dm_free(dce110_pool);
1095 *pool = NULL;
1096}
1097
1098
1099static const struct resource_funcs dce110_res_pool_funcs = {
1100 .destroy = dce110_destroy_resource_pool,
1101 .link_enc_create = dce110_link_encoder_create,
1102 .validate_with_context = dce110_validate_with_context,
1103 .validate_guaranteed = dce110_validate_guaranteed,
1104 .validate_bandwidth = dce110_validate_bandwidth,
1105 .acquire_idle_pipe_for_layer = dce110_acquire_idle_pipe_for_layer,
1106 .build_bit_depth_reduction_params =
1107 dce110_resource_build_bit_depth_reduction_params
1108};
1109
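/*
 * Append the DCE11 underlay (video) pipe to the pool: construct the _v
 * variants of OPP, timing generator, mem input and transform, register them
 * at the current pipe_count index and advertise one slave plane.
 */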
1110static void underlay_create(struct dc_context *ctx, struct resource_pool *pool)
1111{
1112 struct dce110_timing_generator *dce110_tgv = dm_alloc(sizeof (*dce110_tgv));
1113 struct dce_transform *dce110_xfmv = dm_alloc(sizeof (*dce110_xfmv));
1114 struct dce110_mem_input *dce110_miv = dm_alloc(sizeof (*dce110_miv));
1115 struct dce110_opp *dce110_oppv = dm_alloc(sizeof (*dce110_oppv));
1116
1117 dce110_opp_v_construct(dce110_oppv, ctx);
1118 dce110_timing_generator_v_construct(dce110_tgv, ctx);
1119 dce110_mem_input_v_construct(dce110_miv, ctx);
1120 dce110_transform_v_construct(dce110_xfmv, ctx);
1121
1122 pool->opps[pool->pipe_count] = &dce110_oppv->base;
1123 pool->timing_generators[pool->pipe_count] = &dce110_tgv->base;
1124 pool->mis[pool->pipe_count] = &dce110_miv->base;
1125 pool->transforms[pool->pipe_count] = &dce110_xfmv->base;
1126 pool->pipe_count++;
1127
1128 /* update the public caps to indicate an underlay is available */
1129	ctx->dc->caps.max_slave_planes = 1;
1131}
1132
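/*
 * Pull engine, display and memory clock levels from PPLib and convert them
 * from kHz integers to the fixed-point values used by the bandwidth
 * formulas; memory clocks are additionally scaled by MEMORY_TYPE_MULTIPLIER.
 */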
1133static void bw_calcs_data_update_from_pplib(struct core_dc *dc)
1134{
1135 struct dm_pp_clock_levels clks = {0};
1136
1137 /*do system clock*/
1138 dm_pp_get_clock_levels_by_type(
1139 dc->ctx,
1140 DM_PP_CLOCK_TYPE_ENGINE_CLK,
1141 &clks);
1142	/* convert all the clocks from kHz to fixed-point MHz */
1143 dc->bw_vbios.high_sclk = bw_frc_to_fixed(
1144 clks.clocks_in_khz[clks.num_levels-1], 1000);
1145 dc->bw_vbios.mid1_sclk = bw_frc_to_fixed(
1146 clks.clocks_in_khz[clks.num_levels/8], 1000);
1147 dc->bw_vbios.mid2_sclk = bw_frc_to_fixed(
1148 clks.clocks_in_khz[clks.num_levels*2/8], 1000);
1149 dc->bw_vbios.mid3_sclk = bw_frc_to_fixed(
1150 clks.clocks_in_khz[clks.num_levels*3/8], 1000);
1151 dc->bw_vbios.mid4_sclk = bw_frc_to_fixed(
1152 clks.clocks_in_khz[clks.num_levels*4/8], 1000);
1153 dc->bw_vbios.mid5_sclk = bw_frc_to_fixed(
1154 clks.clocks_in_khz[clks.num_levels*5/8], 1000);
1155 dc->bw_vbios.mid6_sclk = bw_frc_to_fixed(
1156 clks.clocks_in_khz[clks.num_levels*6/8], 1000);
1157 dc->bw_vbios.low_sclk = bw_frc_to_fixed(
1158 clks.clocks_in_khz[0], 1000);
1159 dc->sclk_lvls = clks;
1160
1161 /*do display clock*/
1162 dm_pp_get_clock_levels_by_type(
1163 dc->ctx,
1164 DM_PP_CLOCK_TYPE_DISPLAY_CLK,
1165 &clks);
1166 dc->bw_vbios.high_voltage_max_dispclk = bw_frc_to_fixed(
1167 clks.clocks_in_khz[clks.num_levels-1], 1000);
1168 dc->bw_vbios.mid_voltage_max_dispclk = bw_frc_to_fixed(
1169 clks.clocks_in_khz[clks.num_levels>>1], 1000);
1170 dc->bw_vbios.low_voltage_max_dispclk = bw_frc_to_fixed(
1171 clks.clocks_in_khz[0], 1000);
1172
1173 /*do memory clock*/
1174 dm_pp_get_clock_levels_by_type(
1175 dc->ctx,
1176 DM_PP_CLOCK_TYPE_MEMORY_CLK,
1177 &clks);
1178
1179 dc->bw_vbios.low_yclk = bw_frc_to_fixed(
1180 clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
1181 dc->bw_vbios.mid_yclk = bw_frc_to_fixed(
1182 clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
1183 1000);
1184 dc->bw_vbios.high_yclk = bw_frc_to_fixed(
1185 clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
1186 1000);
1187}
1188
1189enum clocks_state dce110_resource_convert_clock_state_pp_to_dc(
1190 enum dm_pp_clocks_state pp_clock_state)
1191{
1192 enum clocks_state dc_clocks_state = CLOCKS_STATE_INVALID;
1193
1194 switch (pp_clock_state) {
1195 case DM_PP_CLOCKS_STATE_INVALID:
1196 dc_clocks_state = CLOCKS_STATE_INVALID;
1197 break;
1198 case DM_PP_CLOCKS_STATE_ULTRA_LOW:
1199 dc_clocks_state = CLOCKS_STATE_ULTRA_LOW;
1200 break;
1201 case DM_PP_CLOCKS_STATE_LOW:
1202 dc_clocks_state = CLOCKS_STATE_LOW;
1203 break;
1204 case DM_PP_CLOCKS_STATE_NOMINAL:
1205 dc_clocks_state = CLOCKS_STATE_NOMINAL;
1206 break;
1207 case DM_PP_CLOCKS_STATE_PERFORMANCE:
1208 dc_clocks_state = CLOCKS_STATE_PERFORMANCE;
1209 break;
1210 case DM_PP_CLOCKS_DPM_STATE_LEVEL_4:
1211 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_4;
1212 break;
1213 case DM_PP_CLOCKS_DPM_STATE_LEVEL_5:
1214 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_5;
1215 break;
1216 case DM_PP_CLOCKS_DPM_STATE_LEVEL_6:
1217 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_6;
1218 break;
1219 case DM_PP_CLOCKS_DPM_STATE_LEVEL_7:
1220 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_7;
1221 break;
1222 default:
1223 dc_clocks_state = CLOCKS_STATE_INVALID;
1224 break;
1225 }
1226
1227 return dc_clocks_state;
1228}
1229
1230const struct resource_caps *dce110_resource_cap(
1231 struct hw_asic_id *asic_id)
1232{
1233 if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
1234 return &stoney_resource_cap;
1235 else
1236 return &carrizo_resource_cap;
1237}
1238
1239static bool construct(
1240 uint8_t num_virtual_links,
1241 struct core_dc *dc,
1242 struct dce110_resource_pool *pool,
1243 struct hw_asic_id asic_id)
1244{
1245 unsigned int i;
1246 struct dc_context *ctx = dc->ctx;
1247 struct firmware_info info;
1248 struct dc_bios *bp;
1249 struct dm_pp_static_clock_info static_clk_info = {0};
1250
1251 ctx->dc_bios->regs = &bios_regs;
1252
1253 pool->base.res_cap = dce110_resource_cap(&ctx->asic_id);
1254 pool->base.funcs = &dce110_res_pool_funcs;
1255
1256 /*************************************************
1257	 *  Resource + asic cap hardcoding		*
1258 *************************************************/
1259
1260 pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
1261 pool->base.underlay_pipe_index = pool->base.pipe_count;
1262
1263 dc->public.caps.max_downscale_ratio = 150;
1264 dc->public.caps.i2c_speed_in_khz = 100;
1265
1266 /*************************************************
1267 * Create resources *
1268 *************************************************/
1269
1270 bp = ctx->dc_bios;
1271
1272 if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
1273 info.external_clock_source_frequency_for_dp != 0) {
1274 pool->base.dp_clock_source =
1275 dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
1276
1277 pool->base.clock_sources[0] =
1278 dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0,
1279 &clk_src_regs[0], false);
1280 pool->base.clock_sources[1] =
1281 dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1,
1282 &clk_src_regs[1], false);
1283
1284 pool->base.clk_src_count = 2;
1285
1286 /* TODO: find out if CZ support 3 PLLs */
1287 }
1288
1289 if (pool->base.dp_clock_source == NULL) {
1290 dm_error("DC: failed to create dp clock source!\n");
1291 BREAK_TO_DEBUGGER();
1292 goto res_create_fail;
1293 }
1294
1295 for (i = 0; i < pool->base.clk_src_count; i++) {
1296 if (pool->base.clock_sources[i] == NULL) {
1297 dm_error("DC: failed to create clock sources!\n");
1298 BREAK_TO_DEBUGGER();
1299 goto res_create_fail;
1300 }
1301 }
1302
1303 pool->base.display_clock = dal_display_clock_dce110_create(ctx);
1304 if (pool->base.display_clock == NULL) {
1305 dm_error("DC: failed to create display clock!\n");
1306 BREAK_TO_DEBUGGER();
1307 goto res_create_fail;
1308 }
1309
1310 /* get static clock information for PPLIB or firmware, save
1311 * max_clock_state
1312 */
1313 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) {
1314 enum clocks_state max_clocks_state =
1315 dce110_resource_convert_clock_state_pp_to_dc(
1316 static_clk_info.max_clocks_state);
1317
1318 dal_display_clock_store_max_clocks_state(
1319 pool->base.display_clock, max_clocks_state);
1320 }
1321
1322 {
1323 struct irq_service_init_data init_data;
1324 init_data.ctx = dc->ctx;
1325 pool->base.irqs = dal_irq_service_dce110_create(&init_data);
1326 if (!pool->base.irqs)
1327 goto res_create_fail;
1328 }
1329
1330 for (i = 0; i < pool->base.pipe_count; i++) {
1331 pool->base.timing_generators[i] = dce110_timing_generator_create(
1332 ctx, i, &dce110_tg_offsets[i]);
1333 if (pool->base.timing_generators[i] == NULL) {
1334 BREAK_TO_DEBUGGER();
1335 dm_error("DC: failed to create tg!\n");
1336 goto res_create_fail;
1337 }
1338
1339 pool->base.mis[i] = dce110_mem_input_create(ctx, i,
1340 &dce110_mi_reg_offsets[i]);
1341 if (pool->base.mis[i] == NULL) {
1342 BREAK_TO_DEBUGGER();
1343 dm_error(
1344 "DC: failed to create memory input!\n");
1345 goto res_create_fail;
1346 }
1347
1348 pool->base.ipps[i] = dce110_ipp_create(ctx, i, &dce110_ipp_reg_offsets[i]);
1349 if (pool->base.ipps[i] == NULL) {
1350 BREAK_TO_DEBUGGER();
1351 dm_error(
1352 "DC: failed to create input pixel processor!\n");
1353 goto res_create_fail;
1354 }
1355
1356 pool->base.transforms[i] = dce110_transform_create(ctx, i);
1357 if (pool->base.transforms[i] == NULL) {
1358 BREAK_TO_DEBUGGER();
1359 dm_error(
1360 "DC: failed to create transform!\n");
1361 goto res_create_fail;
1362 }
1363
1364 pool->base.opps[i] = dce110_opp_create(ctx, i, &dce110_opp_reg_offsets[i]);
1365 if (pool->base.opps[i] == NULL) {
1366 BREAK_TO_DEBUGGER();
1367 dm_error(
1368 "DC: failed to create output pixel processor!\n");
1369 goto res_create_fail;
1370 }
1371 }
1372
1373 underlay_create(ctx, &pool->base);
1374
1375 if (!resource_construct(num_virtual_links, dc, &pool->base,
1376 &res_create_funcs))
1377 goto res_create_fail;
1378
1379 /* Create hardware sequencer */
1380 if (!dce110_hw_sequencer_construct(dc))
1381 goto res_create_fail;
1382
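	/* Select the bandwidth calculation parameter set matching the ASIC revision. */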
1383 if (ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev))
1384 bw_calcs_init(&dc->bw_dceip, &dc->bw_vbios, BW_CALCS_VERSION_STONEY);
1385 else
1386 bw_calcs_init(&dc->bw_dceip, &dc->bw_vbios, BW_CALCS_VERSION_CARRIZO);
1387
1388 bw_calcs_data_update_from_pplib(dc);
1389
1390 return true;
1391
1392res_create_fail:
1393 destruct(pool);
1394 return false;
1395}
1396
1397struct resource_pool *dce110_create_resource_pool(
1398 uint8_t num_virtual_links,
1399 struct core_dc *dc,
1400 struct hw_asic_id asic_id)
1401{
1402 struct dce110_resource_pool *pool =
1403 dm_alloc(sizeof(struct dce110_resource_pool));
1404
1405 if (!pool)
1406 return NULL;
1407
1408 if (construct(num_virtual_links, dc, pool, asic_id))
1409 return &pool->base;
1410
1411 BREAK_TO_DEBUGGER();
1412 return NULL;
1413}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
new file mode 100644
index 000000000000..535623aa0052
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
@@ -0,0 +1,56 @@
1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_RESOURCE_DCE110_H__
27#define __DC_RESOURCE_DCE110_H__
28
29#include "core_types.h"
30
31struct core_dc;
32struct resource_pool;
33
34#define TO_DCE110_RES_POOL(pool)\
35 container_of(pool, struct dce110_resource_pool, base)
36
37struct dce110_resource_pool {
38 struct resource_pool base;
39};
40
41enum dc_status dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx);
42
43enum clocks_state dce110_resource_convert_clock_state_pp_to_dc(
44 enum dm_pp_clocks_state pp_clock_state);
45
46void dce110_resource_build_bit_depth_reduction_params(
47 const struct core_stream *stream,
48 struct bit_depth_reduction_params *fmt_bit_depth);
49
50struct resource_pool *dce110_create_resource_pool(
51 uint8_t num_virtual_links,
52 struct core_dc *dc,
53 struct hw_asic_id asic_id);
54
55#endif /* __DC_RESOURCE_DCE110_H__ */
56
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
new file mode 100644
index 000000000000..b1c97125f6fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -0,0 +1,1953 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE11 register header files */
29#include "dce/dce_11_0_d.h"
30#include "dce/dce_11_0_sh_mask.h"
31
32#include "dc_types.h"
33#include "dc_bios_types.h"
34#include "dc.h"
35
36#include "include/grph_object_id.h"
37#include "include/logger_interface.h"
38#include "dce110_timing_generator.h"
39
40#include "timing_generator.h"
41
42
43#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
44
45#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
46#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)
47
48#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
49#define DCP_REG(reg) (reg + tg110->offsets.dcp)
50
51/* The following register offsets are the same in
52 * dce/dce_11_0_d.h
53 * dce/vi_polaris10_p/vi_polaris10_d.h
54 *
55 * so we can reuse the dce110 timing generator for both.
56 */
57
58
59/*
60* apply_front_porch_workaround
61*
62* This is a workaround for a bug that has existed since R5xx and has not been
63* fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
64*/
65static void dce110_timing_generator_apply_front_porch_workaround(
66 struct timing_generator *tg,
67 struct dc_crtc_timing *timing)
68{
69 if (timing->flags.INTERLACE == 1) {
70 if (timing->v_front_porch < 2)
71 timing->v_front_porch = 2;
72 } else {
73 if (timing->v_front_porch < 1)
74 timing->v_front_porch = 1;
75 }
76}
77
78/**
79 *****************************************************************************
80 * Function: is_in_vertical_blank
81 *
82 * @brief
83 *  Check the current CRTC status to determine whether we are in the
84 *  vertical blank region
85 *
86 * @return
87 * true if currently in blank region, false otherwise
88 *
89 *****************************************************************************
90 */
91static bool dce110_timing_generator_is_in_vertical_blank(
92 struct timing_generator *tg)
93{
94 uint32_t addr = 0;
95 uint32_t value = 0;
96 uint32_t field = 0;
97 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
98
99 addr = CRTC_REG(mmCRTC_STATUS);
100 value = dm_read_reg(tg->ctx, addr);
101 field = get_reg_field_value(value, CRTC_STATUS, CRTC_V_BLANK);
102 return field == 1;
103}
104
105void dce110_timing_generator_set_early_control(
106 struct timing_generator *tg,
107 uint32_t early_cntl)
108{
109 uint32_t regval;
110 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
111 uint32_t address = CRTC_REG(mmCRTC_CONTROL);
112
113 regval = dm_read_reg(tg->ctx, address);
114 set_reg_field_value(regval, early_cntl,
115 CRTC_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
116 dm_write_reg(tg->ctx, address, regval);
117}
118
119/**
120 * Enable CRTC
121 * Enable CRTC - call ASIC Control Object to enable Timing generator.
122 */
123bool dce110_timing_generator_enable_crtc(struct timing_generator *tg)
124{
125 enum bp_result result;
126
127 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
128 uint32_t value = 0;
129
130 /*
131 * 3 is used to make sure V_UPDATE occurs at the beginning of the first
132 * line of vertical front porch
133 */
134 set_reg_field_value(
135 value,
136 0,
137 CRTC_MASTER_UPDATE_MODE,
138 MASTER_UPDATE_MODE);
139
140 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE), value);
141
142 /* TODO: may want this on to catch underflow */
143 value = 0;
144 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK), value);
145
146 result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true);
147
148 return result == BP_RESULT_OK;
149}
150
151void dce110_timing_generator_program_blank_color(
152 struct timing_generator *tg,
153 const struct tg_color *black_color)
154{
155 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
156 uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
157 uint32_t value = dm_read_reg(tg->ctx, addr);
158
159 set_reg_field_value(
160 value,
161 black_color->color_b_cb,
162 CRTC_BLACK_COLOR,
163 CRTC_BLACK_COLOR_B_CB);
164 set_reg_field_value(
165 value,
166 black_color->color_g_y,
167 CRTC_BLACK_COLOR,
168 CRTC_BLACK_COLOR_G_Y);
169 set_reg_field_value(
170 value,
171 black_color->color_r_cr,
172 CRTC_BLACK_COLOR,
173 CRTC_BLACK_COLOR_R_CR);
174
175 dm_write_reg(tg->ctx, addr, value);
176}
177
178/**
179 *****************************************************************************
180 * Function: disable_stereo
181 *
182 * @brief
183 * Disables active stereo on controller
184 *  Frame packing needs to be disabled in vblank or when the CRTC is not running
185 *****************************************************************************
186 */
187#if 0
188@TODOSTEREO
189static void disable_stereo(struct timing_generator *tg)
190{
191 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
192 uint32_t addr = CRTC_REG(mmCRTC_3D_STRUCTURE_CONTROL);
193 uint32_t value = 0;
194 uint32_t test = 0;
195 uint32_t field = 0;
196 uint32_t struc_en = 0;
197 uint32_t struc_stereo_sel_ovr = 0;
198
199 value = dm_read_reg(tg->ctx, addr);
200 struc_en = get_reg_field_value(
201 value,
202 CRTC_3D_STRUCTURE_CONTROL,
203 CRTC_3D_STRUCTURE_EN);
204
205 struc_stereo_sel_ovr = get_reg_field_value(
206 value,
207 CRTC_3D_STRUCTURE_CONTROL,
208 CRTC_3D_STRUCTURE_STEREO_SEL_OVR);
209
210 /*
211	 * When disabling Frame Packing in 2-step mode, we need to program both
212	 * registers in the same frame.
213	 * Programming them at the beginning of VActive makes sure we are ok.
214 */
215
216 if (struc_en != 0 && struc_stereo_sel_ovr == 0) {
217 tg->funcs->wait_for_vblank(tg);
218 tg->funcs->wait_for_vactive(tg);
219 }
220
221 value = 0;
222 dm_write_reg(tg->ctx, addr, value);
223
224 addr = tg->regs[IDX_CRTC_STEREO_CONTROL];
225 dm_write_reg(tg->ctx, addr, value);
226}
227#endif
228
229/**
230 * disable_crtc - call ASIC Control Object to disable Timing generator.
231 */
232bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
233{
234 enum bp_result result;
235
236 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
237
238 result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, false);
239
240 /* Need to make sure stereo is disabled according to the DCE5.0 spec */
241
242 /*
243 * @TODOSTEREO call this when adding stereo support
244 * tg->funcs->disable_stereo(tg);
245 */
246
247 return result == BP_RESULT_OK;
248}
249
250/**
251* program_horz_count_by_2
252* Programs DxCRTC_HORZ_COUNT_BY2_EN: 1 for DVI 30bpp mode, 0 otherwise
253*
254*/
255static void program_horz_count_by_2(
256 struct timing_generator *tg,
257 const struct dc_crtc_timing *timing)
258{
259 uint32_t regval;
260 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
261
262 regval = dm_read_reg(tg->ctx,
263 CRTC_REG(mmCRTC_COUNT_CONTROL));
264
265 set_reg_field_value(regval, 0, CRTC_COUNT_CONTROL,
266 CRTC_HORZ_COUNT_BY2_EN);
267
268 if (timing->flags.HORZ_COUNT_BY_TWO)
269 set_reg_field_value(regval, 1, CRTC_COUNT_CONTROL,
270 CRTC_HORZ_COUNT_BY2_EN);
271
272 dm_write_reg(tg->ctx,
273 CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
274}
275
276/**
277 * program_timing_generator
278 * Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
279 * Call ASIC Control Object to program Timings.
280 */
281bool dce110_timing_generator_program_timing_generator(
282 struct timing_generator *tg,
283 const struct dc_crtc_timing *dc_crtc_timing)
284{
285 enum bp_result result;
286 struct bp_hw_crtc_timing_parameters bp_params;
287 struct dc_crtc_timing patched_crtc_timing;
288 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
289
290 uint32_t vsync_offset = dc_crtc_timing->v_border_bottom +
291 dc_crtc_timing->v_front_porch;
292	uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset;
293
294 uint32_t hsync_offset = dc_crtc_timing->h_border_right +
295 dc_crtc_timing->h_front_porch;
296 uint32_t h_sync_start = dc_crtc_timing->h_addressable + hsync_offset;
297
298 memset(&bp_params, 0, sizeof(struct bp_hw_crtc_timing_parameters));
299
300 /* Due to an asic bug we need to apply the Front Porch workaround prior
301 * to programming the timing.
302 */
303
304 patched_crtc_timing = *dc_crtc_timing;
305
306 dce110_timing_generator_apply_front_porch_workaround(tg, &patched_crtc_timing);
307
308 bp_params.controller_id = tg110->controller_id;
309
310 bp_params.h_total = patched_crtc_timing.h_total;
311 bp_params.h_addressable =
312 patched_crtc_timing.h_addressable;
313 bp_params.v_total = patched_crtc_timing.v_total;
314 bp_params.v_addressable = patched_crtc_timing.v_addressable;
315
316 bp_params.h_sync_start = h_sync_start;
317 bp_params.h_sync_width = patched_crtc_timing.h_sync_width;
318 bp_params.v_sync_start = v_sync_start;
319 bp_params.v_sync_width = patched_crtc_timing.v_sync_width;
320
321 /* Set overscan */
322 bp_params.h_overscan_left =
323 patched_crtc_timing.h_border_left;
324 bp_params.h_overscan_right =
325 patched_crtc_timing.h_border_right;
326 bp_params.v_overscan_top = patched_crtc_timing.v_border_top;
327 bp_params.v_overscan_bottom =
328 patched_crtc_timing.v_border_bottom;
329
330 /* Set flags */
331 if (patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY == 1)
332 bp_params.flags.HSYNC_POSITIVE_POLARITY = 1;
333
334 if (patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY == 1)
335 bp_params.flags.VSYNC_POSITIVE_POLARITY = 1;
336
337 if (patched_crtc_timing.flags.INTERLACE == 1)
338 bp_params.flags.INTERLACE = 1;
339
340 if (patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1)
341 bp_params.flags.HORZ_COUNT_BY_TWO = 1;
342
343 result = tg->bp->funcs->program_crtc_timing(tg->bp, &bp_params);
344
345 program_horz_count_by_2(tg, &patched_crtc_timing);
346
347 tg110->base.funcs->enable_advanced_request(tg, true, &patched_crtc_timing);
348
349 /* Enable stereo - only when we need to pack 3D frame. Other types
350 * of stereo handled in explicit call */
351
352 return result == BP_RESULT_OK;
353}
354
355/**
356 *****************************************************************************
357 * Function: set_drr
358 *
359 * @brief
360 * Program dynamic refresh rate registers m_DxCRTC_V_TOTAL_*.
361 *
362 * @param [in] params: pointer to the drr_params struct
364 *****************************************************************************
365 */
366void dce110_timing_generator_set_drr(
367 struct timing_generator *tg,
368 const struct drr_params *params)
369{
370 /* register values */
371 uint32_t v_total_min = 0;
372 uint32_t v_total_max = 0;
373 uint32_t v_total_cntl = 0;
374 uint32_t static_screen_cntl = 0;
375 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
376
377 uint32_t addr = 0;
378
379 addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
380 v_total_min = dm_read_reg(tg->ctx, addr);
381
382 addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
383 v_total_max = dm_read_reg(tg->ctx, addr);
384
385 addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
386 v_total_cntl = dm_read_reg(tg->ctx, addr);
387
388 addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
389 static_screen_cntl = dm_read_reg(tg->ctx, addr);
390
391 if (params != NULL &&
392 params->vertical_total_max > 0 &&
393 params->vertical_total_min > 0) {
394
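		/* The V_TOTAL_MIN/MAX registers hold the line count minus one,
		 * consistent with how CRTC_V_TOTAL itself is programmed.
		 */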
395 set_reg_field_value(v_total_max,
396 params->vertical_total_max - 1,
397 CRTC_V_TOTAL_MAX,
398 CRTC_V_TOTAL_MAX);
399
400 set_reg_field_value(v_total_min,
401 params->vertical_total_min - 1,
402 CRTC_V_TOTAL_MIN,
403 CRTC_V_TOTAL_MIN);
404
405 set_reg_field_value(v_total_cntl,
406 1,
407 CRTC_V_TOTAL_CONTROL,
408 CRTC_V_TOTAL_MIN_SEL);
409
410 set_reg_field_value(v_total_cntl,
411 1,
412 CRTC_V_TOTAL_CONTROL,
413 CRTC_V_TOTAL_MAX_SEL);
414
415 set_reg_field_value(v_total_cntl,
416 0,
417 CRTC_V_TOTAL_CONTROL,
418 CRTC_FORCE_LOCK_ON_EVENT);
419 set_reg_field_value(v_total_cntl,
420 0,
421 CRTC_V_TOTAL_CONTROL,
422 CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
423
424 set_reg_field_value(v_total_cntl,
425 0,
426 CRTC_V_TOTAL_CONTROL,
427 CRTC_SET_V_TOTAL_MIN_MASK_EN);
428
429 set_reg_field_value(v_total_cntl,
430 0,
431 CRTC_V_TOTAL_CONTROL,
432 CRTC_SET_V_TOTAL_MIN_MASK);
433
434 set_reg_field_value(static_screen_cntl,
435 0x180,
436 CRTC_STATIC_SCREEN_CONTROL,
437 CRTC_STATIC_SCREEN_EVENT_MASK);
438 } else {
439 set_reg_field_value(v_total_cntl,
440 0,
441 CRTC_V_TOTAL_CONTROL,
442 CRTC_SET_V_TOTAL_MIN_MASK);
443 set_reg_field_value(static_screen_cntl,
444 0,
445 CRTC_STATIC_SCREEN_CONTROL,
446 CRTC_STATIC_SCREEN_EVENT_MASK);
447 set_reg_field_value(v_total_min,
448 0,
449 CRTC_V_TOTAL_MIN,
450 CRTC_V_TOTAL_MIN);
451 set_reg_field_value(v_total_max,
452 0,
453 CRTC_V_TOTAL_MAX,
454 CRTC_V_TOTAL_MAX);
455 set_reg_field_value(v_total_cntl,
456 0,
457 CRTC_V_TOTAL_CONTROL,
458 CRTC_V_TOTAL_MIN_SEL);
459 set_reg_field_value(v_total_cntl,
460 0,
461 CRTC_V_TOTAL_CONTROL,
462 CRTC_V_TOTAL_MAX_SEL);
463 set_reg_field_value(v_total_cntl,
464 0,
465 CRTC_V_TOTAL_CONTROL,
466 CRTC_FORCE_LOCK_ON_EVENT);
467 set_reg_field_value(v_total_cntl,
468 0,
469 CRTC_V_TOTAL_CONTROL,
470 CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
471 }
472
473 addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
474 dm_write_reg(tg->ctx, addr, v_total_min);
475
476 addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
477 dm_write_reg(tg->ctx, addr, v_total_max);
478
479 addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
480 dm_write_reg(tg->ctx, addr, v_total_cntl);
481
482 addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
483 dm_write_reg(tg->ctx, addr, static_screen_cntl);
484}
485
486void dce110_timing_generator_set_static_screen_control(
487 struct timing_generator *tg,
488 uint32_t value)
489{
490 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
491 uint32_t static_screen_cntl = 0;
492 uint32_t addr = 0;
493
494 addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
495 static_screen_cntl = dm_read_reg(tg->ctx, addr);
496
497 set_reg_field_value(static_screen_cntl,
498 value,
499 CRTC_STATIC_SCREEN_CONTROL,
500 CRTC_STATIC_SCREEN_EVENT_MASK);
501
502 set_reg_field_value(static_screen_cntl,
503 2,
504 CRTC_STATIC_SCREEN_CONTROL,
505 CRTC_STATIC_SCREEN_FRAME_COUNT);
506
507 dm_write_reg(tg->ctx, addr, static_screen_cntl);
508}
509
510/*
511 * get_vblank_counter
512 *
513 * @brief
514 *  Get the counter for vertical blanks. Uses register CRTC_STATUS_FRAME_COUNT, which
515 * holds the counter of frames.
516 *
517 * @param
518 * struct timing_generator *tg - [in] timing generator which controls the
519 * desired CRTC
520 *
521 * @return
522 *  Counter of frames, which should equal the number of vblanks.
523 */
524uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
525{
526 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
527 uint32_t addr = CRTC_REG(mmCRTC_STATUS_FRAME_COUNT);
528 uint32_t value = dm_read_reg(tg->ctx, addr);
529 uint32_t field = get_reg_field_value(
530 value, CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
531
532 return field;
533}
534
535/**
536 *****************************************************************************
537 * Function: dce110_get_crtc_positions
538 *
539 * @brief
540 * Returns CRTC vertical/horizontal counters
541 *
542 * @param [out] v_position, h_position
543 *****************************************************************************
544 */
545
546void dce110_timing_generator_get_crtc_positions(
547 struct timing_generator *tg,
548 int32_t *h_position,
549 int32_t *v_position)
550{
551 uint32_t value;
552 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
553
554 value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_STATUS_POSITION));
555
556 *h_position = get_reg_field_value(
557 value,
558 CRTC_STATUS_POSITION,
559 CRTC_HORZ_COUNT);
560
561 *v_position = get_reg_field_value(
562 value,
563 CRTC_STATUS_POSITION,
564 CRTC_VERT_COUNT);
565}
566
567/**
568 *****************************************************************************
569 * Function: get_crtc_scanoutpos
570 *
571 * @brief
572 * Returns CRTC vertical/horizontal counters
573 *
574 * @param [out] vpos, hpos
575 *****************************************************************************
576 */
577uint32_t dce110_timing_generator_get_crtc_scanoutpos(
578 struct timing_generator *tg,
579 uint32_t *vbl,
580 uint32_t *position)
581{
582 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
583 /* TODO 1: Update the implementation once caller is updated
584 * WARNING!! This function is returning the whole register value
585 * because the caller is expecting it instead of proper vertical and
586 * horizontal position. This should be a temporary implementation
587 * until the caller is updated. */
588
589 /* TODO 2: re-use dce110_timing_generator_get_crtc_positions() */
590
591 *vbl = dm_read_reg(tg->ctx,
592 CRTC_REG(mmCRTC_V_BLANK_START_END));
593
594 *position = dm_read_reg(tg->ctx,
595 CRTC_REG(mmCRTC_STATUS_POSITION));
596
597 /* @TODO: return value should indicate if current
598 * crtc is inside vblank*/
599 return 0;
600}
601
602/* TODO: is it safe to assume that mask/shift of Primary and Underlay
603 * are the same?
604 * For example: today CRTC_H_TOTAL == CRTCV_H_TOTAL but is it always
605 * guaranteed? */
606void dce110_timing_generator_program_blanking(
607 struct timing_generator *tg,
608 const struct dc_crtc_timing *timing)
609{
610 uint32_t vsync_offset = timing->v_border_bottom +
611 timing->v_front_porch;
612	uint32_t v_sync_start = timing->v_addressable + vsync_offset;
613
614 uint32_t hsync_offset = timing->h_border_right +
615 timing->h_front_porch;
616 uint32_t h_sync_start = timing->h_addressable + hsync_offset;
617 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
618
619 struct dc_context *ctx = tg->ctx;
620 uint32_t value = 0;
621 uint32_t addr = 0;
622 uint32_t tmp = 0;
623
624 addr = CRTC_REG(mmCRTC_H_TOTAL);
625 value = dm_read_reg(ctx, addr);
626 set_reg_field_value(
627 value,
628 timing->h_total - 1,
629 CRTC_H_TOTAL,
630 CRTC_H_TOTAL);
631 dm_write_reg(ctx, addr, value);
632
633 addr = CRTC_REG(mmCRTC_V_TOTAL);
634 value = dm_read_reg(ctx, addr);
635 set_reg_field_value(
636 value,
637 timing->v_total - 1,
638 CRTC_V_TOTAL,
639 CRTC_V_TOTAL);
640 dm_write_reg(ctx, addr, value);
641
642 addr = CRTC_REG(mmCRTC_H_BLANK_START_END);
643 value = dm_read_reg(ctx, addr);
644
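	/* Derive the blank positions from the timing: blank end is
	 * h_total - (h_sync_start + left border), and blank start is
	 * blank end + active width + both borders.
	 */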
645 tmp = timing->h_total -
646 (h_sync_start + timing->h_border_left);
647
648 set_reg_field_value(
649 value,
650 tmp,
651 CRTC_H_BLANK_START_END,
652 CRTC_H_BLANK_END);
653
654 tmp = tmp + timing->h_addressable +
655 timing->h_border_left + timing->h_border_right;
656
657 set_reg_field_value(
658 value,
659 tmp,
660 CRTC_H_BLANK_START_END,
661 CRTC_H_BLANK_START);
662
663 dm_write_reg(ctx, addr, value);
664
665 addr = CRTC_REG(mmCRTC_V_BLANK_START_END);
666 value = dm_read_reg(ctx, addr);
667
668 tmp = timing->v_total - (v_sync_start + timing->v_border_top);
669
670 set_reg_field_value(
671 value,
672 tmp,
673 CRTC_V_BLANK_START_END,
674 CRTC_V_BLANK_END);
675
676 tmp = tmp + timing->v_addressable + timing->v_border_top +
677 timing->v_border_bottom;
678
679 set_reg_field_value(
680 value,
681 tmp,
682 CRTC_V_BLANK_START_END,
683 CRTC_V_BLANK_START);
684
685 dm_write_reg(ctx, addr, value);
686}
687
688void dce110_timing_generator_set_test_pattern(
689 struct timing_generator *tg,
690 /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
691 * because this is not DP-specific (which is probably somewhere in DP
692 * encoder) */
693 enum controller_dp_test_pattern test_pattern,
694 enum dc_color_depth color_depth)
695{
696 struct dc_context *ctx = tg->ctx;
697 uint32_t value;
698 uint32_t addr;
699 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
700 enum test_pattern_color_format bit_depth;
701 enum test_pattern_dyn_range dyn_range;
702 enum test_pattern_mode mode;
703	/* color ramp generator mixes 16-bit color */
704 uint32_t src_bpc = 16;
705 /* requested bpc */
706 uint32_t dst_bpc;
707 uint32_t index;
708 /* RGB values of the color bars.
709 * Produce two RGB colors: RGB0 - white (all Fs)
710 * and RGB1 - black (all 0s)
711 * (three RGB components for two colors)
712 */
713 uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
714 0x0000, 0x0000};
715 /* dest color (converted to the specified color format) */
716 uint16_t dst_color[6];
717 uint32_t inc_base;
718
719 /* translate to bit depth */
720 switch (color_depth) {
721 case COLOR_DEPTH_666:
722 bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
723 break;
724 case COLOR_DEPTH_888:
725 bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
726 break;
727 case COLOR_DEPTH_101010:
728 bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
729 break;
730 case COLOR_DEPTH_121212:
731 bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
732 break;
733 default:
734 bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
735 break;
736 }
737
738 switch (test_pattern) {
739 case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
740 case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
741 {
742 dyn_range = (test_pattern ==
743 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
744 TEST_PATTERN_DYN_RANGE_CEA :
745 TEST_PATTERN_DYN_RANGE_VESA);
746 mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
747 value = 0;
748 addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
749
750 set_reg_field_value(
751 value,
752 6,
753 CRTC_TEST_PATTERN_PARAMETERS,
754 CRTC_TEST_PATTERN_VRES);
755 set_reg_field_value(
756 value,
757 6,
758 CRTC_TEST_PATTERN_PARAMETERS,
759 CRTC_TEST_PATTERN_HRES);
760
761 dm_write_reg(ctx, addr, value);
762
763 addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
764 value = 0;
765
766 set_reg_field_value(
767 value,
768 1,
769 CRTC_TEST_PATTERN_CONTROL,
770 CRTC_TEST_PATTERN_EN);
771
772 set_reg_field_value(
773 value,
774 mode,
775 CRTC_TEST_PATTERN_CONTROL,
776 CRTC_TEST_PATTERN_MODE);
777
778 set_reg_field_value(
779 value,
780 dyn_range,
781 CRTC_TEST_PATTERN_CONTROL,
782 CRTC_TEST_PATTERN_DYNAMIC_RANGE);
783 set_reg_field_value(
784 value,
785 bit_depth,
786 CRTC_TEST_PATTERN_CONTROL,
787 CRTC_TEST_PATTERN_COLOR_FORMAT);
788 dm_write_reg(ctx, addr, value);
789 }
790 break;
791
792 case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
793 case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
794 {
795 mode = (test_pattern ==
796 CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
797 TEST_PATTERN_MODE_VERTICALBARS :
798 TEST_PATTERN_MODE_HORIZONTALBARS);
799
800 switch (bit_depth) {
801 case TEST_PATTERN_COLOR_FORMAT_BPC_6:
802 dst_bpc = 6;
803 break;
804 case TEST_PATTERN_COLOR_FORMAT_BPC_8:
805 dst_bpc = 8;
806 break;
807 case TEST_PATTERN_COLOR_FORMAT_BPC_10:
808 dst_bpc = 10;
809 break;
810 default:
811 dst_bpc = 8;
812 break;
813 }
814
815 /* adjust color to the required colorFormat */
816 for (index = 0; index < 6; index++) {
817 /* dst = 2^dstBpc * src / 2^srcBpc = src >>
818 * (srcBpc - dstBpc);
819 */
820 dst_color[index] =
821 src_color[index] >> (src_bpc - dst_bpc);
822 /* CRTC_TEST_PATTERN_DATA has 16 bits,
823 * lowest 6 are hardwired to ZERO
824		 * color bits should be left aligned to the MSB:
825		 * XXXXXXXXXX000000 for 10 bit,
826		 * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit
827 */
828 dst_color[index] <<= (16 - dst_bpc);
829 }
830
831 value = 0;
832 addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
833 dm_write_reg(ctx, addr, value);
834
835 /* We have to write the mask before data, similar to pipeline.
836 * For example, for 8 bpc, if we want RGB0 to be magenta,
837 * and RGB1 to be cyan,
838 * we need to make 7 writes:
839 * MASK DATA
840 * 000001 00000000 00000000 set mask to R0
841 * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
842 * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
843 * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
844 * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
845 * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
846 * 100000 11111111 00000000 B1 255, 0xFF00
847 *
848		 * We make a loop of 6 iterations in which we prepare the mask,
849		 * then write, then prepare the color for the next write.
850		 * The first iteration writes the mask only,
851		 * but in each subsequent iteration the color prepared in the
852		 * previous iteration is written within the new mask.
853		 * The last component is written separately:
854		 * the mask does not change between the 6th and 7th writes,
855		 * and its color is prepared by the last iteration.
856 */
857
858 /* write color, color values mask in CRTC_TEST_PATTERN_MASK
859 * is B1, G1, R1, B0, G0, R0
860 */
861 value = 0;
862 addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
863 for (index = 0; index < 6; index++) {
864 /* prepare color mask, first write PATTERN_DATA
865 * will have all zeros
866 */
867 set_reg_field_value(
868 value,
869 (1 << index),
870 CRTC_TEST_PATTERN_COLOR,
871 CRTC_TEST_PATTERN_MASK);
872 /* write color component */
873 dm_write_reg(ctx, addr, value);
874 /* prepare next color component,
875 * will be written in the next iteration
876 */
877 set_reg_field_value(
878 value,
879 dst_color[index],
880 CRTC_TEST_PATTERN_COLOR,
881 CRTC_TEST_PATTERN_DATA);
882 }
883 /* write last color component,
884 * it's been already prepared in the loop
885 */
886 dm_write_reg(ctx, addr, value);
887
888 /* enable test pattern */
889 addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
890 value = 0;
891
892 set_reg_field_value(
893 value,
894 1,
895 CRTC_TEST_PATTERN_CONTROL,
896 CRTC_TEST_PATTERN_EN);
897
898 set_reg_field_value(
899 value,
900 mode,
901 CRTC_TEST_PATTERN_CONTROL,
902 CRTC_TEST_PATTERN_MODE);
903
904 set_reg_field_value(
905 value,
906 0,
907 CRTC_TEST_PATTERN_CONTROL,
908 CRTC_TEST_PATTERN_DYNAMIC_RANGE);
909
910 set_reg_field_value(
911 value,
912 bit_depth,
913 CRTC_TEST_PATTERN_CONTROL,
914 CRTC_TEST_PATTERN_COLOR_FORMAT);
915
916 dm_write_reg(ctx, addr, value);
917 }
918 break;
919
920 case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
921 {
922 mode = (bit_depth ==
923 TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
924 TEST_PATTERN_MODE_DUALRAMP_RGB :
925 TEST_PATTERN_MODE_SINGLERAMP_RGB);
926
927 switch (bit_depth) {
928 case TEST_PATTERN_COLOR_FORMAT_BPC_6:
929 dst_bpc = 6;
930 break;
931 case TEST_PATTERN_COLOR_FORMAT_BPC_8:
932 dst_bpc = 8;
933 break;
934 case TEST_PATTERN_COLOR_FORMAT_BPC_10:
935 dst_bpc = 10;
936 break;
937 default:
938 dst_bpc = 8;
939 break;
940 }
941
942		/* increment for the first ramp for one color gradation:
943		 * one gradation of 6-bit color corresponds to 2^10
944		 * gradations in 16-bit color
945 */
946 inc_base = (src_bpc - dst_bpc);
947
948 value = 0;
949 addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
950
951 switch (bit_depth) {
952 case TEST_PATTERN_COLOR_FORMAT_BPC_6:
953 {
954 set_reg_field_value(
955 value,
956 inc_base,
957 CRTC_TEST_PATTERN_PARAMETERS,
958 CRTC_TEST_PATTERN_INC0);
959 set_reg_field_value(
960 value,
961 0,
962 CRTC_TEST_PATTERN_PARAMETERS,
963 CRTC_TEST_PATTERN_INC1);
964 set_reg_field_value(
965 value,
966 6,
967 CRTC_TEST_PATTERN_PARAMETERS,
968 CRTC_TEST_PATTERN_HRES);
969 set_reg_field_value(
970 value,
971 6,
972 CRTC_TEST_PATTERN_PARAMETERS,
973 CRTC_TEST_PATTERN_VRES);
974 set_reg_field_value(
975 value,
976 0,
977 CRTC_TEST_PATTERN_PARAMETERS,
978 CRTC_TEST_PATTERN_RAMP0_OFFSET);
979 }
980 break;
981 case TEST_PATTERN_COLOR_FORMAT_BPC_8:
982 {
983 set_reg_field_value(
984 value,
985 inc_base,
986 CRTC_TEST_PATTERN_PARAMETERS,
987 CRTC_TEST_PATTERN_INC0);
988 set_reg_field_value(
989 value,
990 0,
991 CRTC_TEST_PATTERN_PARAMETERS,
992 CRTC_TEST_PATTERN_INC1);
993 set_reg_field_value(
994 value,
995 8,
996 CRTC_TEST_PATTERN_PARAMETERS,
997 CRTC_TEST_PATTERN_HRES);
998 set_reg_field_value(
999 value,
1000 6,
1001 CRTC_TEST_PATTERN_PARAMETERS,
1002 CRTC_TEST_PATTERN_VRES);
1003 set_reg_field_value(
1004 value,
1005 0,
1006 CRTC_TEST_PATTERN_PARAMETERS,
1007 CRTC_TEST_PATTERN_RAMP0_OFFSET);
1008 }
1009 break;
1010 case TEST_PATTERN_COLOR_FORMAT_BPC_10:
1011 {
1012 set_reg_field_value(
1013 value,
1014 inc_base,
1015 CRTC_TEST_PATTERN_PARAMETERS,
1016 CRTC_TEST_PATTERN_INC0);
1017 set_reg_field_value(
1018 value,
1019 inc_base + 2,
1020 CRTC_TEST_PATTERN_PARAMETERS,
1021 CRTC_TEST_PATTERN_INC1);
1022 set_reg_field_value(
1023 value,
1024 8,
1025 CRTC_TEST_PATTERN_PARAMETERS,
1026 CRTC_TEST_PATTERN_HRES);
1027 set_reg_field_value(
1028 value,
1029 5,
1030 CRTC_TEST_PATTERN_PARAMETERS,
1031 CRTC_TEST_PATTERN_VRES);
1032 set_reg_field_value(
1033 value,
1034 384 << 6,
1035 CRTC_TEST_PATTERN_PARAMETERS,
1036 CRTC_TEST_PATTERN_RAMP0_OFFSET);
1037 }
1038 break;
1039 default:
1040 break;
1041 }
1042 dm_write_reg(ctx, addr, value);
1043
1044 value = 0;
1045 addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
1046 dm_write_reg(ctx, addr, value);
1047
1048 /* enable test pattern */
1049 addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
1050 value = 0;
1051
1052 set_reg_field_value(
1053 value,
1054 1,
1055 CRTC_TEST_PATTERN_CONTROL,
1056 CRTC_TEST_PATTERN_EN);
1057
1058 set_reg_field_value(
1059 value,
1060 mode,
1061 CRTC_TEST_PATTERN_CONTROL,
1062 CRTC_TEST_PATTERN_MODE);
1063
1064 set_reg_field_value(
1065 value,
1066 0,
1067 CRTC_TEST_PATTERN_CONTROL,
1068 CRTC_TEST_PATTERN_DYNAMIC_RANGE);
1069 /* add color depth translation here */
1070 set_reg_field_value(
1071 value,
1072 bit_depth,
1073 CRTC_TEST_PATTERN_CONTROL,
1074 CRTC_TEST_PATTERN_COLOR_FORMAT);
1075
1076 dm_write_reg(ctx, addr, value);
1077 }
1078 break;
1079 case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
1080 {
1081 value = 0;
1082 dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL), value);
1083 dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_COLOR), value);
1084 dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS),
1085 value);
1086 }
1087 break;
1088 default:
1089 break;
1090 }
1091}
1092
1093/**
1094* dce110_timing_generator_validate_timing
1095* The timing generators support a maximum display size of 8192 x 8192 pixels,
1096* including both active display and blanking periods. Check H Total and V Total.
1097*/
1098bool dce110_timing_generator_validate_timing(
1099 struct timing_generator *tg,
1100 const struct dc_crtc_timing *timing,
1101 enum signal_type signal)
1102{
1103 uint32_t h_blank;
1104 uint32_t h_back_porch;
1105 uint32_t hsync_offset = timing->h_border_right +
1106 timing->h_front_porch;
1107 uint32_t h_sync_start = timing->h_addressable + hsync_offset;
1108
1109 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1110
1111 ASSERT(timing != NULL);
1112
1113 if (!timing)
1114 return false;
1115
1116	/* Check maximum number of pixels supported by the Timing Generator
1117	 * (currently this will never fail; to fail it would need a display with
1118	 * more than 8192 horizontal or
1119	 * more than 8192 vertical total pixels)
1120 */
1121 if (timing->h_total > tg110->max_h_total ||
1122 timing->v_total > tg110->max_v_total)
1123 return false;
1124
1125 h_blank = (timing->h_total - timing->h_addressable -
1126 timing->h_border_right -
1127 timing->h_border_left);
1128
1129 if (h_blank < tg110->min_h_blank)
1130 return false;
1131
1132 if (timing->h_front_porch < tg110->min_h_front_porch)
1133 return false;
1134
1135 h_back_porch = h_blank - (h_sync_start -
1136 timing->h_addressable -
1137 timing->h_border_right -
1138 timing->h_sync_width);
1139
1140 if (h_back_porch < tg110->min_h_back_porch)
1141 return false;
1142
1143 return true;
1144}
1145
1146/**
1147* Wait till we are at the beginning of VBlank.
1148*/
1149void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
1150{
1151	/* We want to catch the beginning of VBlank here, so if we are already
1152	 * in VBlank on the first try, we might be very close to Active; in that
1153	 * case wait for another frame
1154 */
1155 while (dce110_timing_generator_is_in_vertical_blank(tg)) {
1156 if (!dce110_timing_generator_is_counter_moving(tg)) {
1157 /* error - no point to wait if counter is not moving */
1158 break;
1159 }
1160 }
1161
1162 while (!dce110_timing_generator_is_in_vertical_blank(tg)) {
1163 if (!dce110_timing_generator_is_counter_moving(tg)) {
1164 /* error - no point to wait if counter is not moving */
1165 break;
1166 }
1167 }
1168}
1169
1170/**
1171* Wait till we are in VActive (anywhere in VActive)
1172*/
1173void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
1174{
1175 while (dce110_timing_generator_is_in_vertical_blank(tg)) {
1176 if (!dce110_timing_generator_is_counter_moving(tg)) {
1177 /* error - no point to wait if counter is not moving */
1178 break;
1179 }
1180 }
1181}
1182
1183/**
1184 *****************************************************************************
1185 * Function: dce110_timing_generator_setup_global_swap_lock
1186 *
1187 * @brief
1188 *  Sets up the Global Swap Lock group for the current pipe.
1189 *  The pipe can join or leave a GSL group and become a TimingServer or TimingClient.
1190 *
1191 * @param [in] gsl_params: setup data
1192 *****************************************************************************
1193 */
1194
1195void dce110_timing_generator_setup_global_swap_lock(
1196 struct timing_generator *tg,
1197 const struct dcp_gsl_params *gsl_params)
1198{
1199 uint32_t value;
1200 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1201 uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
1202 uint32_t check_point = FLIP_READY_BACK_LOOKUP;
1203
1204 value = dm_read_reg(tg->ctx, address);
1205
1206 /* This pipe will belong to GSL Group zero. */
1207 set_reg_field_value(value,
1208 1,
1209 DCP_GSL_CONTROL,
1210 DCP_GSL0_EN);
1211
1212 set_reg_field_value(value,
1213 gsl_params->gsl_master == tg->inst,
1214 DCP_GSL_CONTROL,
1215 DCP_GSL_MASTER_EN);
1216
1217 set_reg_field_value(value,
1218 HFLIP_READY_DELAY,
1219 DCP_GSL_CONTROL,
1220 DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
1221
1222	/* Keep the signal low (pending high) for 6 lines.
1223	 * This also defines the minimum interval before re-checking the signal. */
1224 set_reg_field_value(value,
1225 HFLIP_CHECK_DELAY,
1226 DCP_GSL_CONTROL,
1227 DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
1228
1229
1230 {
1231 uint32_t value_crtc_vtotal;
1232
1233 value_crtc_vtotal = dm_read_reg(tg->ctx,
1234 CRTC_REG(mmCRTC_V_TOTAL));
1235
1236 set_reg_field_value(value,
1237 0,/* DCP_GSL_PURPOSE_SURFACE_FLIP */
1238 DCP_GSL_CONTROL,
1239 DCP_GSL_SYNC_SOURCE);
1240
1241 /* Checkpoint relative to end of frame */
1242 check_point = get_reg_field_value(value_crtc_vtotal,
1243 CRTC_V_TOTAL,
1244 CRTC_V_TOTAL);
1245
1246 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_GSL_WINDOW), 0);
1247 }
1248
1249 set_reg_field_value(value,
1250 1,
1251 DCP_GSL_CONTROL,
1252 DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
1253
1254 dm_write_reg(tg->ctx, address, value);
1255
1256 /********************************************************************/
1257 address = CRTC_REG(mmCRTC_GSL_CONTROL);
1258
1259 value = 0;
1260 set_reg_field_value(value,
1261 check_point - FLIP_READY_BACK_LOOKUP,
1262 CRTC_GSL_CONTROL,
1263 CRTC_GSL_CHECK_LINE_NUM);
1264
1265 set_reg_field_value(value,
1266 VFLIP_READY_DELAY,
1267 CRTC_GSL_CONTROL,
1268 CRTC_GSL_FORCE_DELAY);
1269
1270 dm_write_reg(tg->ctx, address, value);
1271}
1272
1273void dce110_timing_generator_tear_down_global_swap_lock(
1274 struct timing_generator *tg)
1275{
1276 /* Clear all the register writes done by
1277 * dce110_timing_generator_setup_global_swap_lock
1278 */
1279
1280 uint32_t value;
1281 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1282 uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
1283
1284 value = 0;
1285
1286 /* This pipe will belong to GSL Group zero. */
1287	/* Setting HW default values from the register specs */
1288 set_reg_field_value(value,
1289 0,
1290 DCP_GSL_CONTROL,
1291 DCP_GSL0_EN);
1292
1293 set_reg_field_value(value,
1294 0,
1295 DCP_GSL_CONTROL,
1296 DCP_GSL_MASTER_EN);
1297
1298 set_reg_field_value(value,
1299 0x2,
1300 DCP_GSL_CONTROL,
1301 DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
1302
1303 set_reg_field_value(value,
1304 0x6,
1305 DCP_GSL_CONTROL,
1306 DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
1307
1308 /* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
1309 {
1310 uint32_t value_crtc_vtotal;
1311
1312 value_crtc_vtotal = dm_read_reg(tg->ctx,
1313 CRTC_REG(mmCRTC_V_TOTAL));
1314
1315 set_reg_field_value(value,
1316 0,
1317 DCP_GSL_CONTROL,
1318 DCP_GSL_SYNC_SOURCE);
1319 }
1320
1321 set_reg_field_value(value,
1322 0,
1323 DCP_GSL_CONTROL,
1324 DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
1325
1326 dm_write_reg(tg->ctx, address, value);
1327
1328 /********************************************************************/
1329 address = CRTC_REG(mmCRTC_GSL_CONTROL);
1330
1331 value = 0;
1332 set_reg_field_value(value,
1333 0,
1334 CRTC_GSL_CONTROL,
1335 CRTC_GSL_CHECK_LINE_NUM);
1336
1337 set_reg_field_value(value,
1338 0x2,
1339 CRTC_GSL_CONTROL,
1340 CRTC_GSL_FORCE_DELAY);
1341
1342 dm_write_reg(tg->ctx, address, value);
1343}
1344/**
1345 *****************************************************************************
1346 * Function: is_counter_moving
1347 *
1348 * @brief
1349 *  Check whether the timing generator is currently running
1350 *
1351 *  @return
1352 *  true if currently running, false if currently paused or stopped.
1353 *
1354 *****************************************************************************
1355 */
1356bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg)
1357{
1358 uint32_t h1 = 0;
1359 uint32_t h2 = 0;
1360 uint32_t v1 = 0;
1361 uint32_t v2 = 0;
1362
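	/* Sample the CRTC position twice; if it has not changed between reads,
	 * the counter is not moving.
	 */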
1363 tg->funcs->get_position(tg, &h1, &v1);
1364 tg->funcs->get_position(tg, &h2, &v2);
1365
1366 if (h1 == h2 && v1 == v2)
1367 return false;
1368 else
1369 return true;
1370}
1371
1372void dce110_timing_generator_enable_advanced_request(
1373 struct timing_generator *tg,
1374 bool enable,
1375 const struct dc_crtc_timing *timing)
1376{
1377 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1378 uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
1379 uint32_t value = dm_read_reg(tg->ctx, addr);
1380
1381 if (enable) {
1382 set_reg_field_value(
1383 value,
1384 0,
1385 CRTC_START_LINE_CONTROL,
1386 CRTC_LEGACY_REQUESTOR_EN);
1387 } else {
1388 set_reg_field_value(
1389 value,
1390 1,
1391 CRTC_START_LINE_CONTROL,
1392 CRTC_LEGACY_REQUESTOR_EN);
1393 }
1394
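	/* Pick the advanced start line: if VSYNC width plus front porch is
	 * 3 lines or fewer, start at line 3 with prefetch disabled; otherwise
	 * start at line 4 with prefetch enabled.
	 */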
1395 if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
1396 set_reg_field_value(
1397 value,
1398 3,
1399 CRTC_START_LINE_CONTROL,
1400 CRTC_ADVANCED_START_LINE_POSITION);
1401 set_reg_field_value(
1402 value,
1403 0,
1404 CRTC_START_LINE_CONTROL,
1405 CRTC_PREFETCH_EN);
1406 } else {
1407 set_reg_field_value(
1408 value,
1409 4,
1410 CRTC_START_LINE_CONTROL,
1411 CRTC_ADVANCED_START_LINE_POSITION);
1412 set_reg_field_value(
1413 value,
1414 1,
1415 CRTC_START_LINE_CONTROL,
1416 CRTC_PREFETCH_EN);
1417 }
1418
1419 set_reg_field_value(
1420 value,
1421 1,
1422 CRTC_START_LINE_CONTROL,
1423 CRTC_PROGRESSIVE_START_LINE_EARLY);
1424
1425 set_reg_field_value(
1426 value,
1427 1,
1428 CRTC_START_LINE_CONTROL,
1429 CRTC_INTERLACE_START_LINE_EARLY);
1430
1431 dm_write_reg(tg->ctx, addr, value);
1432}
1433
1434/*TODO: Figure out if we need this function. */
1435void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
1436 bool lock)
1437{
1438 struct dc_context *ctx = tg->ctx;
1439 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1440 uint32_t addr = CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK);
1441 uint32_t value = dm_read_reg(ctx, addr);
1442
1443 set_reg_field_value(
1444 value,
1445 lock ? 1 : 0,
1446 CRTC_MASTER_UPDATE_LOCK,
1447 MASTER_UPDATE_LOCK);
1448
1449 dm_write_reg(ctx, addr, value);
1450}
1451
1452void dce110_timing_generator_enable_reset_trigger(
1453 struct timing_generator *tg,
1454 int source_tg_inst)
1455{
1456 uint32_t value;
1457 uint32_t rising_edge = 0;
1458 uint32_t falling_edge = 0;
1459 enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
1460 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1461
1462 /* Setup trigger edge */
1463 {
1464 uint32_t pol_value = dm_read_reg(tg->ctx,
1465 CRTC_REG(mmCRTC_V_SYNC_A_CNTL));
1466
1467 /* Register spec has reversed definition:
1468 * 0 for positive, 1 for negative */
1469 if (get_reg_field_value(pol_value,
1470 CRTC_V_SYNC_A_CNTL,
1471 CRTC_V_SYNC_A_POL) == 0) {
1472 rising_edge = 1;
1473 } else {
1474 falling_edge = 1;
1475 }
1476 }
1477
1478 value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
1479
1480 trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;
1481
1482 set_reg_field_value(value,
1483 trig_src_select,
1484 CRTC_TRIGB_CNTL,
1485 CRTC_TRIGB_SOURCE_SELECT);
1486
1487 set_reg_field_value(value,
1488 TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
1489 CRTC_TRIGB_CNTL,
1490 CRTC_TRIGB_POLARITY_SELECT);
1491
1492 set_reg_field_value(value,
1493 rising_edge,
1494 CRTC_TRIGB_CNTL,
1495 CRTC_TRIGB_RISING_EDGE_DETECT_CNTL);
1496
1497 set_reg_field_value(value,
1498 falling_edge,
1499 CRTC_TRIGB_CNTL,
1500 CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL);
1501
1502 set_reg_field_value(value,
1503 0, /* send every signal */
1504 CRTC_TRIGB_CNTL,
1505 CRTC_TRIGB_FREQUENCY_SELECT);
1506
1507 set_reg_field_value(value,
1508 0, /* no delay */
1509 CRTC_TRIGB_CNTL,
1510 CRTC_TRIGB_DELAY);
1511
1512 set_reg_field_value(value,
1513 1, /* clear trigger status */
1514 CRTC_TRIGB_CNTL,
1515 CRTC_TRIGB_CLEAR);
1516
1517 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
1518
1519 /**************************************************************/
1520
1521 value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
1522
1523 set_reg_field_value(value,
1524 2, /* force H count to H_TOTAL and V count to V_TOTAL */
1525 CRTC_FORCE_COUNT_NOW_CNTL,
1526 CRTC_FORCE_COUNT_NOW_MODE);
1527
1528 set_reg_field_value(value,
1529 1, /* TriggerB - we never use TriggerA */
1530 CRTC_FORCE_COUNT_NOW_CNTL,
1531 CRTC_FORCE_COUNT_NOW_TRIG_SEL);
1532
1533 set_reg_field_value(value,
1534 1, /* clear trigger status */
1535 CRTC_FORCE_COUNT_NOW_CNTL,
1536 CRTC_FORCE_COUNT_NOW_CLEAR);
1537
1538 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
1539}
1540
1541void dce110_timing_generator_disable_reset_trigger(
1542 struct timing_generator *tg)
1543{
1544 uint32_t value;
1545 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1546
1547 value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
1548
1549 set_reg_field_value(value,
1550 0, /* force counter now mode is disabled */
1551 CRTC_FORCE_COUNT_NOW_CNTL,
1552 CRTC_FORCE_COUNT_NOW_MODE);
1553
1554 set_reg_field_value(value,
1555 1, /* clear trigger status */
1556 CRTC_FORCE_COUNT_NOW_CNTL,
1557 CRTC_FORCE_COUNT_NOW_CLEAR);
1558
1559 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
1560
1561 /********************************************************************/
1562 value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
1563
1564 set_reg_field_value(value,
1565 TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
1566 CRTC_TRIGB_CNTL,
1567 CRTC_TRIGB_SOURCE_SELECT);
1568
1569 set_reg_field_value(value,
1570 TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
1571 CRTC_TRIGB_CNTL,
1572 CRTC_TRIGB_POLARITY_SELECT);
1573
1574 set_reg_field_value(value,
1575 1, /* clear trigger status */
1576 CRTC_TRIGB_CNTL,
1577 CRTC_TRIGB_CLEAR);
1578
1579 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
1580}
1581
1582/**
1583 *****************************************************************************
1584 * @brief
1585 * Checks whether CRTC triggered reset occurred
1586 *
1587 * @return
1588 * true if triggered reset occurred, false otherwise
1589 *****************************************************************************
1590 */
1591bool dce110_timing_generator_did_triggered_reset_occur(
1592 struct timing_generator *tg)
1593{
1594 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1595 uint32_t value = dm_read_reg(tg->ctx,
1596 CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
1597
1598 return get_reg_field_value(value,
1599 CRTC_FORCE_COUNT_NOW_CNTL,
1600 CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
1601}
1602
1603/**
1604 * dce110_timing_generator_disable_vga
1605 * Turn OFF VGA Mode and Timing - DxVGA_CONTROL
1606 * VGA Mode and VGA Timing are used by the VBIOS on CRT monitors.
1607 */
1608void dce110_timing_generator_disable_vga(
1609 struct timing_generator *tg)
1610{
1611 uint32_t addr = 0;
1612 uint32_t value = 0;
1613
1614 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1615
1616 switch (tg110->controller_id) {
1617 case CONTROLLER_ID_D0:
1618 addr = mmD1VGA_CONTROL;
1619 break;
1620 case CONTROLLER_ID_D1:
1621 addr = mmD2VGA_CONTROL;
1622 break;
1623 case CONTROLLER_ID_D2:
1624 addr = mmD3VGA_CONTROL;
1625 break;
1626 case CONTROLLER_ID_D3:
1627 addr = mmD4VGA_CONTROL;
1628 break;
1629 case CONTROLLER_ID_D4:
1630 addr = mmD5VGA_CONTROL;
1631 break;
1632 case CONTROLLER_ID_D5:
1633 addr = mmD6VGA_CONTROL;
1634 break;
1635 default:
1636 break;
1637 }
1638 value = dm_read_reg(tg->ctx, addr);
1639
1640 set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
1641 set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
1642 set_reg_field_value(
1643 value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
1644 set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);
1645
1646 dm_write_reg(tg->ctx, addr, value);
1647}
1648
1649/**
1650* set_overscan_color_black
1651*
1652* @param color: black color for the current color space;
1653*  this routine sets the overscan black color according to the color space.
1654* @return none
1655*/
1656
1657void dce110_timing_generator_set_overscan_color_black(
1658 struct timing_generator *tg,
1659 const struct tg_color *color)
1660{
1661 struct dc_context *ctx = tg->ctx;
1662 uint32_t addr;
1663 uint32_t value = 0;
1664 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1665
1666 set_reg_field_value(
1667 value,
1668 color->color_b_cb,
1669 CRTC_OVERSCAN_COLOR,
1670 CRTC_OVERSCAN_COLOR_BLUE);
1671
1672 set_reg_field_value(
1673 value,
1674 color->color_r_cr,
1675 CRTC_OVERSCAN_COLOR,
1676 CRTC_OVERSCAN_COLOR_RED);
1677
1678 set_reg_field_value(
1679 value,
1680 color->color_g_y,
1681 CRTC_OVERSCAN_COLOR,
1682 CRTC_OVERSCAN_COLOR_GREEN);
1683
1684 addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
1685 dm_write_reg(ctx, addr, value);
1686 addr = CRTC_REG(mmCRTC_BLACK_COLOR);
1687 dm_write_reg(ctx, addr, value);
1688	/* It is desirable to have a constant DAC output voltage during the
1689 * blank time that is higher than the 0 volt reference level that the
1690 * DAC outputs when the NBLANK signal
1691 * is asserted low, such as for output to an analog TV. */
1692 addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
1693 dm_write_reg(ctx, addr, value);
1694
1695	/* TODO: we have to program the EXT registers, and we need to know the LB DATA
1696	 * format because it is used with more than 10 (i.e. 12) bits per color
1697 *
1698 * m_mmDxCRTC_OVERSCAN_COLOR_EXT
1699 * m_mmDxCRTC_BLACK_COLOR_EXT
1700 * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
1701 */
1702
1703}
1704
1705void dce110_tg_program_blank_color(struct timing_generator *tg,
1706 const struct tg_color *black_color)
1707{
1708 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1709 uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
1710 uint32_t value = dm_read_reg(tg->ctx, addr);
1711
1712 set_reg_field_value(
1713 value,
1714 black_color->color_b_cb,
1715 CRTC_BLACK_COLOR,
1716 CRTC_BLACK_COLOR_B_CB);
1717 set_reg_field_value(
1718 value,
1719 black_color->color_g_y,
1720 CRTC_BLACK_COLOR,
1721 CRTC_BLACK_COLOR_G_Y);
1722 set_reg_field_value(
1723 value,
1724 black_color->color_r_cr,
1725 CRTC_BLACK_COLOR,
1726 CRTC_BLACK_COLOR_R_CR);
1727
1728 dm_write_reg(tg->ctx, addr, value);
1729
1730 addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
1731 dm_write_reg(tg->ctx, addr, value);
1732}
1733
1734void dce110_tg_set_overscan_color(struct timing_generator *tg,
1735 const struct tg_color *overscan_color)
1736{
1737 struct dc_context *ctx = tg->ctx;
1738 uint32_t value = 0;
1739 uint32_t addr;
1740 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1741
1742 set_reg_field_value(
1743 value,
1744 overscan_color->color_b_cb,
1745 CRTC_OVERSCAN_COLOR,
1746 CRTC_OVERSCAN_COLOR_BLUE);
1747
1748 set_reg_field_value(
1749 value,
1750 overscan_color->color_g_y,
1751 CRTC_OVERSCAN_COLOR,
1752 CRTC_OVERSCAN_COLOR_GREEN);
1753
1754 set_reg_field_value(
1755 value,
1756 overscan_color->color_r_cr,
1757 CRTC_OVERSCAN_COLOR,
1758 CRTC_OVERSCAN_COLOR_RED);
1759
1760 addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
1761 dm_write_reg(ctx, addr, value);
1762}
1763
1764void dce110_tg_get_position(struct timing_generator *tg,
1765 struct crtc_position *position)
1766{
1767 int32_t h_position;
1768 int32_t v_position;
1769
1770 dce110_timing_generator_get_crtc_positions(tg, &h_position, &v_position);
1771
1772 position->horizontal_count = (uint32_t)h_position;
1773 position->vertical_count = (uint32_t)v_position;
1774}
1775
1776void dce110_tg_program_timing(struct timing_generator *tg,
1777 const struct dc_crtc_timing *timing,
1778 bool use_vbios)
1779{
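	/* With use_vbios the timing is programmed through the VBIOS command
	 * table; otherwise the CRTC blanking registers are programmed directly.
	 */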
1780 if (use_vbios)
1781 dce110_timing_generator_program_timing_generator(tg, timing);
1782 else
1783 dce110_timing_generator_program_blanking(tg, timing);
1784}
1785
1786bool dce110_tg_is_blanked(struct timing_generator *tg)
1787{
1788 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1789 uint32_t value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL));
1790
1791 if (get_reg_field_value(
1792 value,
1793 CRTC_BLANK_CONTROL,
1794 CRTC_BLANK_DATA_EN) == 1 &&
1795 get_reg_field_value(
1796 value,
1797 CRTC_BLANK_CONTROL,
1798 CRTC_CURRENT_BLANK_STATE) == 1)
1799 return true;
1800 return false;
1801}
1802
1803bool dce110_tg_set_blank(struct timing_generator *tg,
1804 bool enable_blanking)
1805{
1806 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
1807 uint32_t value = 0;
1808
1809 set_reg_field_value(
1810 value,
1811 1,
1812 CRTC_DOUBLE_BUFFER_CONTROL,
1813 CRTC_BLANK_DATA_DOUBLE_BUFFER_EN);
1814
1815 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_DOUBLE_BUFFER_CONTROL), value);
1816 value = 0;
1817
1818 if (enable_blanking) {
1819 int counter;
1820
1821 set_reg_field_value(
1822 value,
1823 1,
1824 CRTC_BLANK_CONTROL,
1825 CRTC_BLANK_DATA_EN);
1826
1827 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), value);
1828
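		/* Poll for up to roughly 100 ms for the blank state to take effect. */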
1829 for (counter = 0; counter < 100; counter++) {
1830 value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL));
1831
1832 if (get_reg_field_value(
1833 value,
1834 CRTC_BLANK_CONTROL,
1835 CRTC_BLANK_DATA_EN) == 1 &&
1836 get_reg_field_value(
1837 value,
1838 CRTC_BLANK_CONTROL,
1839 CRTC_CURRENT_BLANK_STATE) == 1)
1840 break;
1841
1842 msleep(1);
1843 }
1844
1845 if (counter == 100) {
1846 dm_logger_write(tg->ctx->logger, LOG_ERROR,
1847					"timing generator %d blank timed out.\n",
1848 tg110->controller_id);
1849 return false;
1850 }
1851 } else
1852 dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), 0);
1853
1854 return true;
1855}
1856
1857bool dce110_tg_validate_timing(struct timing_generator *tg,
1858 const struct dc_crtc_timing *timing)
1859{
1860 return dce110_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
1861}
1862
1863void dce110_tg_wait_for_state(struct timing_generator *tg,
1864 enum crtc_state state)
1865{
1866 switch (state) {
1867 case CRTC_STATE_VBLANK:
1868 dce110_timing_generator_wait_for_vblank(tg);
1869 break;
1870
1871 case CRTC_STATE_VACTIVE:
1872 dce110_timing_generator_wait_for_vactive(tg);
1873 break;
1874
1875 default:
1876 break;
1877 }
1878}
1879
1880void dce110_tg_set_colors(struct timing_generator *tg,
1881 const struct tg_color *blank_color,
1882 const struct tg_color *overscan_color)
1883{
1884 if (blank_color != NULL)
1885 dce110_tg_program_blank_color(tg, blank_color);
1886 if (overscan_color != NULL)
1887 dce110_tg_set_overscan_color(tg, overscan_color);
1888}
1889
1890static const struct timing_generator_funcs dce110_tg_funcs = {
1891 .validate_timing = dce110_tg_validate_timing,
1892 .program_timing = dce110_tg_program_timing,
1893 .enable_crtc = dce110_timing_generator_enable_crtc,
1894 .disable_crtc = dce110_timing_generator_disable_crtc,
1895 .is_counter_moving = dce110_timing_generator_is_counter_moving,
1896 .get_position = dce110_timing_generator_get_crtc_positions,
1897 .get_frame_count = dce110_timing_generator_get_vblank_counter,
1898 .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
1899 .set_early_control = dce110_timing_generator_set_early_control,
1900 .wait_for_state = dce110_tg_wait_for_state,
1901 .set_blank = dce110_tg_set_blank,
1902 .is_blanked = dce110_tg_is_blanked,
1903 .set_colors = dce110_tg_set_colors,
1904 .set_overscan_blank_color =
1905 dce110_timing_generator_set_overscan_color_black,
1906 .set_blank_color = dce110_timing_generator_program_blank_color,
1907 .disable_vga = dce110_timing_generator_disable_vga,
1908 .did_triggered_reset_occur =
1909 dce110_timing_generator_did_triggered_reset_occur,
1910 .setup_global_swap_lock =
1911 dce110_timing_generator_setup_global_swap_lock,
1912 .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
1913 .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
1914 .tear_down_global_swap_lock =
1915 dce110_timing_generator_tear_down_global_swap_lock,
1916 .enable_advanced_request =
1917 dce110_timing_generator_enable_advanced_request,
1918 .set_drr =
1919 dce110_timing_generator_set_drr,
1920 .set_static_screen_control =
1921 dce110_timing_generator_set_static_screen_control,
1922 .set_test_pattern = dce110_timing_generator_set_test_pattern
1923
1924};
1925
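The funcs table above is the interface the rest of DC uses: callers hold a struct timing_generator pointer and dispatch through the vtable rather than calling the dce110_* functions directly. An illustrative, hypothetical call site:

/* Hypothetical helper: blank the CRTC, program a new timing, then unblank.
 * 'timing' is assumed to be a populated struct dc_crtc_timing. */
static void example_reprogram_timing(struct timing_generator *tg,
				     const struct dc_crtc_timing *timing)
{
	if (!tg->funcs->is_blanked(tg))
		tg->funcs->set_blank(tg, true);

	tg->funcs->program_timing(tg, timing, false);
	tg->funcs->set_blank(tg, false);
}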
1926bool dce110_timing_generator_construct(
1927 struct dce110_timing_generator *tg110,
1928 struct dc_context *ctx,
1929 uint32_t instance,
1930 const struct dce110_timing_generator_offsets *offsets)
1931{
1932 if (!tg110)
1933 return false;
1934
1935 tg110->controller_id = CONTROLLER_ID_D0 + instance;
1936 tg110->base.inst = instance;
1937
1938 tg110->offsets = *offsets;
1939
1940 tg110->base.funcs = &dce110_tg_funcs;
1941
1942 tg110->base.ctx = ctx;
1943 tg110->base.bp = ctx->dc_bios;
1944
1945 tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
1946 tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
1947
1948 tg110->min_h_blank = 56;
1949 tg110->min_h_front_porch = 4;
1950 tg110->min_h_back_porch = 4;
1951
1952 return true;
1953}
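For context, a resource file would normally wrap this constructor in a small create helper that allocates the wrapper and hands back the embedded base. The sketch below is hypothetical: the helper name and the per-instance dce110_tg_offsets[] table are assumptions, and it relies on the kernel slab allocator (linux/slab.h).

/* Hypothetical create helper built on the constructor above. */
static struct timing_generator *example_dce110_tg_create(struct dc_context *ctx,
							 uint32_t instance)
{
	struct dce110_timing_generator *tg110 =
		kzalloc(sizeof(*tg110), GFP_KERNEL);

	if (!tg110)
		return NULL;

	if (!dce110_timing_generator_construct(tg110, ctx, instance,
					       &dce110_tg_offsets[instance])) {
		kfree(tg110);
		return NULL;
	}

	return &tg110->base;
}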
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
new file mode 100644
index 000000000000..39906502ad5c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -0,0 +1,273 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_TIMING_GENERATOR_DCE110_H__
27#define __DC_TIMING_GENERATOR_DCE110_H__
28
29#include "timing_generator.h"
30#include "../include/grph_object_id.h"
31#include "../include/hw_sequencer_types.h"
32
33/* GSL Sync related values */
34
35/* In VSync mode, after 4 units of time, the master pipe will generate
36 * the flip_ready signal */
37#define VFLIP_READY_DELAY 4
38/* In HSync mode, after 2 units of time, the master pipe will generate
39 * the flip_ready signal */
40#define HFLIP_READY_DELAY 2
41/* 6 lines delay between forcing flip and checking all pipes ready */
42#define HFLIP_CHECK_DELAY 6
43/* 3 lines before end of frame */
44#define FLIP_READY_BACK_LOOKUP 3
45
46/* Trigger Source Select - ASIC-dependent, actual values for the
47 * register programming */
48enum trigger_source_select {
49 TRIGGER_SOURCE_SELECT_LOGIC_ZERO = 0,
50 TRIGGER_SOURCE_SELECT_CRTC_VSYNCA = 1,
51 TRIGGER_SOURCE_SELECT_CRTC_HSYNCA = 2,
52 TRIGGER_SOURCE_SELECT_CRTC_VSYNCB = 3,
53 TRIGGER_SOURCE_SELECT_CRTC_HSYNCB = 4,
54 TRIGGER_SOURCE_SELECT_GENERICF = 5,
55 TRIGGER_SOURCE_SELECT_GENERICE = 6,
56 TRIGGER_SOURCE_SELECT_VSYNCA = 7,
57 TRIGGER_SOURCE_SELECT_HSYNCA = 8,
58 TRIGGER_SOURCE_SELECT_VSYNCB = 9,
59 TRIGGER_SOURCE_SELECT_HSYNCB = 10,
60 TRIGGER_SOURCE_SELECT_HPD1 = 11,
61 TRIGGER_SOURCE_SELECT_HPD2 = 12,
62 TRIGGER_SOURCE_SELECT_GENERICD = 13,
63 TRIGGER_SOURCE_SELECT_GENERICC = 14,
64 TRIGGER_SOURCE_SELECT_VIDEO_CAPTURE = 15,
65 TRIGGER_SOURCE_SELECT_GSL_GROUP0 = 16,
66 TRIGGER_SOURCE_SELECT_GSL_GROUP1 = 17,
67 TRIGGER_SOURCE_SELECT_GSL_GROUP2 = 18,
68 TRIGGER_SOURCE_SELECT_BLONY = 19,
69 TRIGGER_SOURCE_SELECT_GENERICA = 20,
70 TRIGGER_SOURCE_SELECT_GENERICB = 21,
71 TRIGGER_SOURCE_SELECT_GSL_ALLOW_FLIP = 22,
72 TRIGGER_SOURCE_SELECT_MANUAL_TRIGGER = 23
73};
74
75/* Trigger Polarity Select - ASIC-dependent, actual values for the
76 * register programming */
77enum trigger_polarity_select {
78 TRIGGER_POLARITY_SELECT_LOGIC_ZERO = 0,
79 TRIGGER_POLARITY_SELECT_CRTC = 1,
80 TRIGGER_POLARITY_SELECT_GENERICA = 2,
81 TRIGGER_POLARITY_SELECT_GENERICB = 3,
82 TRIGGER_POLARITY_SELECT_HSYNCA = 4,
83 TRIGGER_POLARITY_SELECT_HSYNCB = 5,
84 TRIGGER_POLARITY_SELECT_VIDEO_CAPTURE = 6,
85 TRIGGER_POLARITY_SELECT_GENERICC = 7
86};
87
88
89struct dce110_timing_generator_offsets {
90 int32_t crtc;
91 int32_t dcp;
92
93 /* DCE80 use only */
94 int32_t dmif;
95};
96
97struct dce110_timing_generator {
98 struct timing_generator base;
99 struct dce110_timing_generator_offsets offsets;
100 struct dce110_timing_generator_offsets derived_offsets;
101
102 enum controller_id controller_id;
103
104 uint32_t max_h_total;
105 uint32_t max_v_total;
106
107 uint32_t min_h_blank;
108 uint32_t min_h_front_porch;
109 uint32_t min_h_back_porch;
110
111 uint32_t min_h_sync_width;
112 uint32_t min_v_sync_width;
113 uint32_t min_v_blank;
114
115};
116
117#define DCE110TG_FROM_TG(tg)\
118 container_of(tg, struct dce110_timing_generator, base)
119
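DCE110TG_FROM_TG() is plain container_of(): a callback that only receives the base struct timing_generator can still recover the enclosing dce110_timing_generator and read its private fields. A small illustrative accessor (the function name is made up):

/* Illustrative only: reach a wrapper field from a base pointer. */
static uint32_t example_tg_max_h_total(struct timing_generator *tg)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	return tg110->max_h_total;
}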
120bool dce110_timing_generator_construct(
121 struct dce110_timing_generator *tg,
122 struct dc_context *ctx,
123 uint32_t instance,
124 const struct dce110_timing_generator_offsets *offsets);
125
126/* determine if given timing can be supported by TG */
127bool dce110_timing_generator_validate_timing(
128 struct timing_generator *tg,
129 const struct dc_crtc_timing *timing,
130 enum signal_type signal);
131
132/******** HW programming ************/
133
134/* Program timing generator with given timing */
135bool dce110_timing_generator_program_timing_generator(
136 struct timing_generator *tg,
137 const struct dc_crtc_timing *dc_crtc_timing);
138
139/* Disable/Enable Timing Generator */
140bool dce110_timing_generator_enable_crtc(struct timing_generator *tg);
141bool dce110_timing_generator_disable_crtc(struct timing_generator *tg);
142
143void dce110_timing_generator_set_early_control(
144 struct timing_generator *tg,
145 uint32_t early_cntl);
146
147/**************** TG current status ******************/
148
149/* return the current frame counter. Used by Linux kernel DRM */
150uint32_t dce110_timing_generator_get_vblank_counter(
151 struct timing_generator *tg);
152
153/* Get current H and V position */
154void dce110_timing_generator_get_crtc_positions(
155 struct timing_generator *tg,
156 int32_t *h_position,
157 int32_t *v_position);
158
159/* return true if TG counter is moving. false if TG is stopped */
160bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg);
161
162/* wait until TG is in beginning of vertical blank region */
163void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg);
164
165/* wait until TG is in beginning of active region */
166void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg);
167
168/*********** Timing Generator Synchronization routines ****/
169
170/* Sets up Global Swap Lock group, TimingServer or TimingClient */
171void dce110_timing_generator_setup_global_swap_lock(
172 struct timing_generator *tg,
173 const struct dcp_gsl_params *gsl_params);
174
175/* Clear all the register writes done by setup_global_swap_lock */
176void dce110_timing_generator_tear_down_global_swap_lock(
177 struct timing_generator *tg);
178
179/* Reset slave controllers on master VSync */
180void dce110_timing_generator_enable_reset_trigger(
181 struct timing_generator *tg,
182 int source);
183
184/* disabling trigger-reset */
185void dce110_timing_generator_disable_reset_trigger(
186 struct timing_generator *tg);
187
188/* Checks whether CRTC triggered reset occurred */
189bool dce110_timing_generator_did_triggered_reset_occur(
190 struct timing_generator *tg);
191
192/******** Stuff to move to other virtual HW objects *****************/
193/* Move to enable accelerated mode */
194void dce110_timing_generator_disable_vga(struct timing_generator *tg);
195/* TODO: Should we move it to transform */
196/* Fully program CRTC timing in timing generator */
197void dce110_timing_generator_program_blanking(
198 struct timing_generator *tg,
199 const struct dc_crtc_timing *timing);
200
201/* TODO: Should we move it to opp? */
202/* Combine with below and move YUV/RGB color conversion to SW layer */
203void dce110_timing_generator_program_blank_color(
204 struct timing_generator *tg,
205 const struct tg_color *black_color);
206/* Combine with above and move YUV/RGB color conversion to SW layer */
207void dce110_timing_generator_set_overscan_color_black(
208 struct timing_generator *tg,
209 const struct tg_color *color);
210void dce110_timing_generator_color_space_to_black_color(
211 enum dc_color_space colorspace,
212 struct tg_color *black_color);
213/*************** End-of-move ********************/
214
215/* Not called yet */
216void dce110_timing_generator_set_test_pattern(
217 struct timing_generator *tg,
218	/* TODO: replace 'controller_dp_test_pattern' with 'test_pattern_mode',
219	 * because this is not DP-specific (DP-specific patterns probably belong
220	 * in the DP encoder) */
221 enum controller_dp_test_pattern test_pattern,
222 enum dc_color_depth color_depth);
223
224void dce110_timing_generator_set_drr(
225 struct timing_generator *tg,
226 const struct drr_params *params);
227
228void dce110_timing_generator_set_static_screen_control(
229 struct timing_generator *tg,
230 uint32_t value);
231
232uint32_t dce110_timing_generator_get_crtc_scanoutpos(
233 struct timing_generator *tg,
234 uint32_t *vbl,
235 uint32_t *position);
236
237void dce110_timing_generator_enable_advanced_request(
238 struct timing_generator *tg,
239 bool enable,
240 const struct dc_crtc_timing *timing);
241
242void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
243 bool lock);
244
245void dce110_tg_program_blank_color(struct timing_generator *tg,
246 const struct tg_color *black_color);
247
248void dce110_tg_set_overscan_color(struct timing_generator *tg,
249 const struct tg_color *overscan_color);
250
251void dce110_tg_get_position(struct timing_generator *tg,
252 struct crtc_position *position);
253
254void dce110_tg_program_timing(struct timing_generator *tg,
255 const struct dc_crtc_timing *timing,
256 bool use_vbios);
257
258bool dce110_tg_is_blanked(struct timing_generator *tg);
259
260bool dce110_tg_set_blank(struct timing_generator *tg,
261 bool enable_blanking);
262
263bool dce110_tg_validate_timing(struct timing_generator *tg,
264 const struct dc_crtc_timing *timing);
265
266void dce110_tg_wait_for_state(struct timing_generator *tg,
267 enum crtc_state state);
268
269void dce110_tg_set_colors(struct timing_generator *tg,
270 const struct tg_color *blank_color,
271 const struct tg_color *overscan_color);
272
273#endif /* __DC_TIMING_GENERATOR_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
new file mode 100644
index 000000000000..3bf3179e07c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -0,0 +1,743 @@
1#include "dm_services.h"
2
3/* include DCE11 register header files */
4#include "dce/dce_11_0_d.h"
5#include "dce/dce_11_0_sh_mask.h"
6
7#include "dc_types.h"
8#include "dc_bios_types.h"
9#include "dc.h"
10
11#include "include/grph_object_id.h"
12#include "include/logger_interface.h"
13#include "dce110_timing_generator.h"
14#include "dce110_timing_generator_v.h"
15
16#include "timing_generator.h"
17
18/** ********************************************************************************
19 *
20 * DCE11 Timing Generator Implementation
21 *
22 **********************************************************************************/
23
24/**
25* Enable CRTCV
26*/
27
28static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
29{
30/*
31* Set MASTER_UPDATE_MODE to 0
32* This is needed for DRR, and is also suggested as the default value by Syed.
33*/
34
35 uint32_t value;
36
37 value = 0;
38 set_reg_field_value(value, 0,
39 CRTCV_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE);
40 dm_write_reg(tg->ctx,
41 mmCRTCV_MASTER_UPDATE_MODE, value);
42
43	/* TODO: may want this on when looking for underflow */
44 value = 0;
45 dm_write_reg(tg->ctx, mmCRTCV_MASTER_UPDATE_MODE, value);
46
47 value = 0;
48 set_reg_field_value(value, 1,
49 CRTCV_MASTER_EN, CRTC_MASTER_EN);
50 dm_write_reg(tg->ctx,
51 mmCRTCV_MASTER_EN, value);
52
53 return true;
54}
55
56static bool dce110_timing_generator_v_disable_crtc(struct timing_generator *tg)
57{
58 uint32_t value;
59
60 value = dm_read_reg(tg->ctx,
61 mmCRTCV_CONTROL);
62 set_reg_field_value(value, 0,
63 CRTCV_CONTROL, CRTC_DISABLE_POINT_CNTL);
64 set_reg_field_value(value, 0,
65 CRTCV_CONTROL, CRTC_MASTER_EN);
66 dm_write_reg(tg->ctx,
67 mmCRTCV_CONTROL, value);
68 /*
69 * TODO: call this when adding stereo support
70 * tg->funcs->disable_stereo(tg);
71 */
72 return true;
73}
74
75static bool dce110_timing_generator_v_blank_crtc(struct timing_generator *tg)
76{
77 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
78 uint32_t addr = mmCRTCV_BLANK_CONTROL;
79 uint32_t value = dm_read_reg(tg->ctx, addr);
80 uint8_t counter = 100;
81
82 set_reg_field_value(
83 value,
84 1,
85 CRTCV_BLANK_CONTROL,
86 CRTC_BLANK_DATA_EN);
87
88 set_reg_field_value(
89 value,
90 0,
91 CRTCV_BLANK_CONTROL,
92 CRTC_BLANK_DE_MODE);
93
94 dm_write_reg(tg->ctx, addr, value);
95
96 while (counter > 0) {
97 value = dm_read_reg(tg->ctx, addr);
98
99 if (get_reg_field_value(
100 value,
101 CRTCV_BLANK_CONTROL,
102 CRTC_BLANK_DATA_EN) == 1 &&
103 get_reg_field_value(
104 value,
105 CRTCV_BLANK_CONTROL,
106 CRTC_CURRENT_BLANK_STATE) == 1)
107 break;
108
109 msleep(1);
110 counter--;
111 }
112
113 if (!counter) {
114 dm_logger_write(tg->ctx->logger, LOG_ERROR,
115 "timing generator %d blank timing out.\n",
116 tg110->controller_id);
117 return false;
118 }
119
120 return true;
121}
122
123static bool dce110_timing_generator_v_unblank_crtc(struct timing_generator *tg)
124{
125 uint32_t addr = mmCRTCV_BLANK_CONTROL;
126 uint32_t value = dm_read_reg(tg->ctx, addr);
127
128 set_reg_field_value(
129 value,
130 0,
131 CRTCV_BLANK_CONTROL,
132 CRTC_BLANK_DATA_EN);
133
134 set_reg_field_value(
135 value,
136 0,
137 CRTCV_BLANK_CONTROL,
138 CRTC_BLANK_DE_MODE);
139
140 dm_write_reg(tg->ctx, addr, value);
141
142 return true;
143}
144
145static bool dce110_timing_generator_v_is_in_vertical_blank(
146 struct timing_generator *tg)
147{
148 uint32_t addr = 0;
149 uint32_t value = 0;
150 uint32_t field = 0;
151
152 addr = mmCRTCV_STATUS;
153 value = dm_read_reg(tg->ctx, addr);
154 field = get_reg_field_value(value, CRTCV_STATUS, CRTC_V_BLANK);
155 return field == 1;
156}
157
158static bool dce110_timing_generator_v_is_counter_moving(struct timing_generator *tg)
159{
160 uint32_t value;
161 uint32_t h1 = 0;
162 uint32_t h2 = 0;
163 uint32_t v1 = 0;
164 uint32_t v2 = 0;
165
166 value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
167
168 h1 = get_reg_field_value(
169 value,
170 CRTCV_STATUS_POSITION,
171 CRTC_HORZ_COUNT);
172
173 v1 = get_reg_field_value(
174 value,
175 CRTCV_STATUS_POSITION,
176 CRTC_VERT_COUNT);
177
178 value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
179
180 h2 = get_reg_field_value(
181 value,
182 CRTCV_STATUS_POSITION,
183 CRTC_HORZ_COUNT);
184
185 v2 = get_reg_field_value(
186 value,
187 CRTCV_STATUS_POSITION,
188 CRTC_VERT_COUNT);
189
190 if (h1 == h2 && v1 == v2)
191 return false;
192 else
193 return true;
194}
195
196static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *tg)
197{
198	/* We want to catch the beginning of VBlank here, so if we are already
199	 * in VBlank on the first check we might be very close to Active; in
200	 * that case, wait through to the next VBlank.
201 */
202 while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
203 if (!dce110_timing_generator_v_is_counter_moving(tg)) {
204 /* error - no point to wait if counter is not moving */
205 break;
206 }
207 }
208
209 while (!dce110_timing_generator_v_is_in_vertical_blank(tg)) {
210 if (!dce110_timing_generator_v_is_counter_moving(tg)) {
211 /* error - no point to wait if counter is not moving */
212 break;
213 }
214 }
215}
216
217/**
218* Wait till we are in VActive (anywhere in VActive)
219*/
220static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
221{
222 while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
223 if (!dce110_timing_generator_v_is_counter_moving(tg)) {
224 /* error - no point to wait if counter is not moving */
225 break;
226 }
227 }
228}
229
230static void dce110_timing_generator_v_wait_for_state(struct timing_generator *tg,
231 enum crtc_state state)
232{
233 switch (state) {
234 case CRTC_STATE_VBLANK:
235 dce110_timing_generator_v_wait_for_vblank(tg);
236 break;
237
238 case CRTC_STATE_VACTIVE:
239 dce110_timing_generator_v_wait_for_vactive(tg);
240 break;
241
242 default:
243 break;
244 }
245}
246
247static void dce110_timing_generator_v_program_blanking(
248 struct timing_generator *tg,
249 const struct dc_crtc_timing *timing)
250{
251 uint32_t vsync_offset = timing->v_border_bottom +
252 timing->v_front_porch;
253 uint32_t v_sync_start = timing->v_addressable + vsync_offset;
254
255 uint32_t hsync_offset = timing->h_border_right +
256 timing->h_front_porch;
257 uint32_t h_sync_start = timing->h_addressable + hsync_offset;
258
259 struct dc_context *ctx = tg->ctx;
260 uint32_t value = 0;
261 uint32_t addr = 0;
262 uint32_t tmp = 0;
263
264 addr = mmCRTCV_H_TOTAL;
265 value = dm_read_reg(ctx, addr);
266 set_reg_field_value(
267 value,
268 timing->h_total - 1,
269 CRTCV_H_TOTAL,
270 CRTC_H_TOTAL);
271 dm_write_reg(ctx, addr, value);
272
273 addr = mmCRTCV_V_TOTAL;
274 value = dm_read_reg(ctx, addr);
275 set_reg_field_value(
276 value,
277 timing->v_total - 1,
278 CRTCV_V_TOTAL,
279 CRTC_V_TOTAL);
280 dm_write_reg(ctx, addr, value);
281
282 addr = mmCRTCV_H_BLANK_START_END;
283 value = dm_read_reg(ctx, addr);
284
285 tmp = timing->h_total -
286 (h_sync_start + timing->h_border_left);
287
288 set_reg_field_value(
289 value,
290 tmp,
291 CRTCV_H_BLANK_START_END,
292 CRTC_H_BLANK_END);
293
294 tmp = tmp + timing->h_addressable +
295 timing->h_border_left + timing->h_border_right;
296
297 set_reg_field_value(
298 value,
299 tmp,
300 CRTCV_H_BLANK_START_END,
301 CRTC_H_BLANK_START);
302
303 dm_write_reg(ctx, addr, value);
304
305 addr = mmCRTCV_V_BLANK_START_END;
306 value = dm_read_reg(ctx, addr);
307
308 tmp = timing->v_total - (v_sync_start + timing->v_border_top);
309
310 set_reg_field_value(
311 value,
312 tmp,
313 CRTCV_V_BLANK_START_END,
314 CRTC_V_BLANK_END);
315
316 tmp = tmp + timing->v_addressable + timing->v_border_top +
317 timing->v_border_bottom;
318
319 set_reg_field_value(
320 value,
321 tmp,
322 CRTCV_V_BLANK_START_END,
323 CRTC_V_BLANK_START);
324
325 dm_write_reg(ctx, addr, value);
326
327 addr = mmCRTCV_H_SYNC_A;
328 value = 0;
329 set_reg_field_value(
330 value,
331 timing->h_sync_width,
332 CRTCV_H_SYNC_A,
333 CRTC_H_SYNC_A_END);
334 dm_write_reg(ctx, addr, value);
335
336 addr = mmCRTCV_H_SYNC_A_CNTL;
337 value = dm_read_reg(ctx, addr);
338 if (timing->flags.HSYNC_POSITIVE_POLARITY) {
339 set_reg_field_value(
340 value,
341 0,
342 CRTCV_H_SYNC_A_CNTL,
343 CRTC_H_SYNC_A_POL);
344 } else {
345 set_reg_field_value(
346 value,
347 1,
348 CRTCV_H_SYNC_A_CNTL,
349 CRTC_H_SYNC_A_POL);
350 }
351 dm_write_reg(ctx, addr, value);
352
353 addr = mmCRTCV_V_SYNC_A;
354 value = 0;
355 set_reg_field_value(
356 value,
357 timing->v_sync_width,
358 CRTCV_V_SYNC_A,
359 CRTC_V_SYNC_A_END);
360 dm_write_reg(ctx, addr, value);
361
362 addr = mmCRTCV_V_SYNC_A_CNTL;
363 value = dm_read_reg(ctx, addr);
364 if (timing->flags.VSYNC_POSITIVE_POLARITY) {
365 set_reg_field_value(
366 value,
367 0,
368 CRTCV_V_SYNC_A_CNTL,
369 CRTC_V_SYNC_A_POL);
370 } else {
371 set_reg_field_value(
372 value,
373 1,
374 CRTCV_V_SYNC_A_CNTL,
375 CRTC_V_SYNC_A_POL);
376 }
377 dm_write_reg(ctx, addr, value);
378
379 addr = mmCRTCV_INTERLACE_CONTROL;
380 value = dm_read_reg(ctx, addr);
381 set_reg_field_value(
382 value,
383 timing->flags.INTERLACE,
384 CRTCV_INTERLACE_CONTROL,
385 CRTC_INTERLACE_ENABLE);
386 dm_write_reg(ctx, addr, value);
387}
388
389static void dce110_timing_generator_v_enable_advanced_request(
390 struct timing_generator *tg,
391 bool enable,
392 const struct dc_crtc_timing *timing)
393{
394 uint32_t addr = mmCRTCV_START_LINE_CONTROL;
395 uint32_t value = dm_read_reg(tg->ctx, addr);
396
397 if (enable) {
398 if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
399 set_reg_field_value(
400 value,
401 3,
402 CRTCV_START_LINE_CONTROL,
403 CRTC_ADVANCED_START_LINE_POSITION);
404 } else {
405 set_reg_field_value(
406 value,
407 4,
408 CRTCV_START_LINE_CONTROL,
409 CRTC_ADVANCED_START_LINE_POSITION);
410 }
411 set_reg_field_value(
412 value,
413 0,
414 CRTCV_START_LINE_CONTROL,
415 CRTC_LEGACY_REQUESTOR_EN);
416 } else {
417 set_reg_field_value(
418 value,
419 2,
420 CRTCV_START_LINE_CONTROL,
421 CRTC_ADVANCED_START_LINE_POSITION);
422 set_reg_field_value(
423 value,
424 1,
425 CRTCV_START_LINE_CONTROL,
426 CRTC_LEGACY_REQUESTOR_EN);
427 }
428
429 dm_write_reg(tg->ctx, addr, value);
430}
431
432static bool dce110_timing_generator_v_set_blank(struct timing_generator *tg,
433 bool enable_blanking)
434{
435 if (enable_blanking)
436 return dce110_timing_generator_v_blank_crtc(tg);
437 else
438 return dce110_timing_generator_v_unblank_crtc(tg);
439}
440
441static void dce110_timing_generator_v_program_timing(struct timing_generator *tg,
442 const struct dc_crtc_timing *timing,
443 bool use_vbios)
444{
445 if (use_vbios)
446 dce110_timing_generator_program_timing_generator(tg, timing);
447 else
448 dce110_timing_generator_v_program_blanking(tg, timing);
449}
450
451static void dce110_timing_generator_v_program_blank_color(
452 struct timing_generator *tg,
453 const struct tg_color *black_color)
454{
455 uint32_t addr = mmCRTCV_BLACK_COLOR;
456 uint32_t value = dm_read_reg(tg->ctx, addr);
457
458 set_reg_field_value(
459 value,
460 black_color->color_b_cb,
461 CRTCV_BLACK_COLOR,
462 CRTC_BLACK_COLOR_B_CB);
463 set_reg_field_value(
464 value,
465 black_color->color_g_y,
466 CRTCV_BLACK_COLOR,
467 CRTC_BLACK_COLOR_G_Y);
468 set_reg_field_value(
469 value,
470 black_color->color_r_cr,
471 CRTCV_BLACK_COLOR,
472 CRTC_BLACK_COLOR_R_CR);
473
474 dm_write_reg(tg->ctx, addr, value);
475}
476
477static void dce110_timing_generator_v_set_overscan_color_black(
478 struct timing_generator *tg,
479 const struct tg_color *color)
480{
481 struct dc_context *ctx = tg->ctx;
482 uint32_t addr;
483 uint32_t value = 0;
484
485 set_reg_field_value(
486 value,
487 color->color_b_cb,
488 CRTC_OVERSCAN_COLOR,
489 CRTC_OVERSCAN_COLOR_BLUE);
490
491 set_reg_field_value(
492 value,
493 color->color_r_cr,
494 CRTC_OVERSCAN_COLOR,
495 CRTC_OVERSCAN_COLOR_RED);
496
497 set_reg_field_value(
498 value,
499 color->color_g_y,
500 CRTC_OVERSCAN_COLOR,
501 CRTC_OVERSCAN_COLOR_GREEN);
502
503 addr = mmCRTCV_OVERSCAN_COLOR;
504 dm_write_reg(ctx, addr, value);
505 addr = mmCRTCV_BLACK_COLOR;
506 dm_write_reg(ctx, addr, value);
507	/* It is desirable to have a constant DAC output voltage during the
508	 * blank time that is higher than the 0 volt reference level that the
509	 * DAC outputs when the NBLANK signal is asserted low, such as for
510	 * output to an analog TV. */
511 addr = mmCRTCV_BLANK_DATA_COLOR;
512 dm_write_reg(ctx, addr, value);
513
514	/* TODO: we still have to program the EXT registers, and we need to know
515	 * the LB DATA format because it is used for more than 10 (i.e. 12) bits per color
516 *
517 * m_mmDxCRTC_OVERSCAN_COLOR_EXT
518 * m_mmDxCRTC_BLACK_COLOR_EXT
519 * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
520 */
521}
522
523static void dce110_tg_v_program_blank_color(struct timing_generator *tg,
524 const struct tg_color *black_color)
525{
526 uint32_t addr = mmCRTCV_BLACK_COLOR;
527 uint32_t value = dm_read_reg(tg->ctx, addr);
528
529 set_reg_field_value(
530 value,
531 black_color->color_b_cb,
532 CRTCV_BLACK_COLOR,
533 CRTC_BLACK_COLOR_B_CB);
534 set_reg_field_value(
535 value,
536 black_color->color_g_y,
537 CRTCV_BLACK_COLOR,
538 CRTC_BLACK_COLOR_G_Y);
539 set_reg_field_value(
540 value,
541 black_color->color_r_cr,
542 CRTCV_BLACK_COLOR,
543 CRTC_BLACK_COLOR_R_CR);
544
545 dm_write_reg(tg->ctx, addr, value);
546
547 addr = mmCRTCV_BLANK_DATA_COLOR;
548 dm_write_reg(tg->ctx, addr, value);
549}
550
551static void dce110_timing_generator_v_set_overscan_color(struct timing_generator *tg,
552 const struct tg_color *overscan_color)
553{
554 struct dc_context *ctx = tg->ctx;
555 uint32_t value = 0;
556 uint32_t addr;
557
558 set_reg_field_value(
559 value,
560 overscan_color->color_b_cb,
561 CRTCV_OVERSCAN_COLOR,
562 CRTC_OVERSCAN_COLOR_BLUE);
563
564 set_reg_field_value(
565 value,
566 overscan_color->color_g_y,
567 CRTCV_OVERSCAN_COLOR,
568 CRTC_OVERSCAN_COLOR_GREEN);
569
570 set_reg_field_value(
571 value,
572 overscan_color->color_r_cr,
573 CRTCV_OVERSCAN_COLOR,
574 CRTC_OVERSCAN_COLOR_RED);
575
576 addr = mmCRTCV_OVERSCAN_COLOR;
577 dm_write_reg(ctx, addr, value);
578}
579
580static void dce110_timing_generator_v_set_colors(struct timing_generator *tg,
581 const struct tg_color *blank_color,
582 const struct tg_color *overscan_color)
583{
584 if (blank_color != NULL)
585 dce110_tg_v_program_blank_color(tg, blank_color);
586 if (overscan_color != NULL)
587 dce110_timing_generator_v_set_overscan_color(tg, overscan_color);
588}
589
590static void dce110_timing_generator_v_set_early_control(
591 struct timing_generator *tg,
592 uint32_t early_cntl)
593{
594 uint32_t regval;
595	uint32_t address = mmCRTCV_CONTROL;
596
597 regval = dm_read_reg(tg->ctx, address);
598 set_reg_field_value(regval, early_cntl,
599 CRTCV_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
600 dm_write_reg(tg->ctx, address, regval);
601}
602
603static void dce110_timing_generator_v_get_crtc_positions(
604 struct timing_generator *tg,
605 int32_t *h_position,
606 int32_t *v_position)
607{
608 uint32_t value;
609
610 value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
611
612 *h_position = get_reg_field_value(
613 value,
614 CRTCV_STATUS_POSITION,
615 CRTC_HORZ_COUNT);
616
617 *v_position = get_reg_field_value(
618 value,
619 CRTCV_STATUS_POSITION,
620 CRTC_VERT_COUNT);
621}
622
623static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_generator *tg)
624{
625 uint32_t addr = mmCRTCV_STATUS_FRAME_COUNT;
626 uint32_t value = dm_read_reg(tg->ctx, addr);
627 uint32_t field = get_reg_field_value(
628 value, CRTCV_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
629
630 return field;
631}
632
633static bool dce110_timing_generator_v_did_triggered_reset_occur(
634 struct timing_generator *tg)
635{
636 dm_logger_write(tg->ctx->logger, LOG_ERROR,
637 "Timing Sync not supported on underlay pipe\n");
638 return false;
639}
640
641static void dce110_timing_generator_v_setup_global_swap_lock(
642 struct timing_generator *tg,
643 const struct dcp_gsl_params *gsl_params)
644{
645 dm_logger_write(tg->ctx->logger, LOG_ERROR,
646 "Timing Sync not supported on underlay pipe\n");
647 return;
648}
649
650static void dce110_timing_generator_v_enable_reset_trigger(
651 struct timing_generator *tg,
652 int source_tg_inst)
653{
654 dm_logger_write(tg->ctx->logger, LOG_ERROR,
655 "Timing Sync not supported on underlay pipe\n");
656 return;
657}
658
659static void dce110_timing_generator_v_disable_reset_trigger(
660 struct timing_generator *tg)
661{
662 dm_logger_write(tg->ctx->logger, LOG_ERROR,
663 "Timing Sync not supported on underlay pipe\n");
664 return;
665}
666
667static void dce110_timing_generator_v_tear_down_global_swap_lock(
668 struct timing_generator *tg)
669{
670 dm_logger_write(tg->ctx->logger, LOG_ERROR,
671 "Timing Sync not supported on underlay pipe\n");
672 return;
673}
674
675static void dce110_timing_generator_v_disable_vga(
676 struct timing_generator *tg)
677{
678 return;
679}
680
681static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
682{
683 /* Signal comes from the primary pipe, underlay is never blanked. */
684 return false;
685}
686
687/** ********************************************************************************************
688 *
689 * DCE11 Timing Generator Constructor / Destructor
690 *
691 *********************************************************************************************/
692static const struct timing_generator_funcs dce110_tg_v_funcs = {
693 .validate_timing = dce110_tg_validate_timing,
694 .program_timing = dce110_timing_generator_v_program_timing,
695 .enable_crtc = dce110_timing_generator_v_enable_crtc,
696 .disable_crtc = dce110_timing_generator_v_disable_crtc,
697 .is_counter_moving = dce110_timing_generator_v_is_counter_moving,
698 .get_position = dce110_timing_generator_v_get_crtc_positions,
699 .get_frame_count = dce110_timing_generator_v_get_vblank_counter,
700 .set_early_control = dce110_timing_generator_v_set_early_control,
701 .wait_for_state = dce110_timing_generator_v_wait_for_state,
702 .set_blank = dce110_timing_generator_v_set_blank,
703 .is_blanked = dce110_tg_v_is_blanked,
704 .set_colors = dce110_timing_generator_v_set_colors,
705 .set_overscan_blank_color =
706 dce110_timing_generator_v_set_overscan_color_black,
707 .set_blank_color = dce110_timing_generator_v_program_blank_color,
708 .disable_vga = dce110_timing_generator_v_disable_vga,
709 .did_triggered_reset_occur =
710 dce110_timing_generator_v_did_triggered_reset_occur,
711 .setup_global_swap_lock =
712 dce110_timing_generator_v_setup_global_swap_lock,
713 .enable_reset_trigger = dce110_timing_generator_v_enable_reset_trigger,
714 .disable_reset_trigger = dce110_timing_generator_v_disable_reset_trigger,
715 .tear_down_global_swap_lock =
716 dce110_timing_generator_v_tear_down_global_swap_lock,
717 .enable_advanced_request =
718 dce110_timing_generator_v_enable_advanced_request
719};
720
721bool dce110_timing_generator_v_construct(
722 struct dce110_timing_generator *tg110,
723 struct dc_context *ctx)
724{
725 if (!tg110)
726 return false;
727
728 tg110->controller_id = CONTROLLER_ID_UNDERLAY0;
729
730 tg110->base.funcs = &dce110_tg_v_funcs;
731
732 tg110->base.ctx = ctx;
733 tg110->base.bp = ctx->dc_bios;
734
735 tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
736 tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
737
738 tg110->min_h_blank = 56;
739 tg110->min_h_front_porch = 4;
740 tg110->min_h_back_porch = 4;
741
742 return true;
743}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
new file mode 100644
index 000000000000..7e49ca8e26ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_TIMING_GENERATOR_V_DCE110_H__
27#define __DC_TIMING_GENERATOR_V_DCE110_H__
28
29bool dce110_timing_generator_v_construct(
30 struct dce110_timing_generator *tg110,
31 struct dc_context *ctx);
32
33#endif /* __DC_TIMING_GENERATOR_V_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
new file mode 100644
index 000000000000..7d8cf7a58f46
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -0,0 +1,704 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dce110_transform_v.h"
27#include "dm_services.h"
28#include "dce/dce_11_0_d.h"
29#include "dce/dce_11_0_sh_mask.h"
30
31#define SCLV_PHASES 64
32
33struct sclv_ratios_inits {
34 uint32_t h_int_scale_ratio_luma;
35 uint32_t h_int_scale_ratio_chroma;
36 uint32_t v_int_scale_ratio_luma;
37 uint32_t v_int_scale_ratio_chroma;
38 struct init_int_and_frac h_init_luma;
39 struct init_int_and_frac h_init_chroma;
40 struct init_int_and_frac v_init_luma;
41 struct init_int_and_frac v_init_chroma;
42};
43
44static void calculate_viewport(
45 const struct scaler_data *scl_data,
46 struct rect *luma_viewport,
47 struct rect *chroma_viewport)
48{
49 /*Do not set chroma vp for rgb444 pixel format*/
50 luma_viewport->x = scl_data->viewport.x - scl_data->viewport.x % 2;
51 luma_viewport->y = scl_data->viewport.y - scl_data->viewport.y % 2;
52 luma_viewport->width =
53 scl_data->viewport.width - scl_data->viewport.width % 2;
54 luma_viewport->height =
55 scl_data->viewport.height - scl_data->viewport.height % 2;
56 chroma_viewport->x = luma_viewport->x;
57 chroma_viewport->y = luma_viewport->y;
58 chroma_viewport->height = luma_viewport->height;
59 chroma_viewport->width = luma_viewport->width;
60
61 if (scl_data->format == PIXEL_FORMAT_420BPP12) {
62 luma_viewport->height += luma_viewport->height % 2;
63 luma_viewport->width += luma_viewport->width % 2;
64		/* For 4:2:0 video, chroma is 1/4 the area of luma, scaled down
65		 * both vertically and horizontally
66 */
67 chroma_viewport->x = luma_viewport->x / 2;
68 chroma_viewport->y = luma_viewport->y / 2;
69 chroma_viewport->height = luma_viewport->height / 2;
70 chroma_viewport->width = luma_viewport->width / 2;
71 }
72}
73
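To make the rounding and halving above concrete, here is a standalone check with made-up viewport sizes; it mirrors only the x - x % 2 even alignment and the /2 chroma scaling used for 4:2:0.

#include <assert.h>

/* Standalone sketch with hypothetical numbers: a 1919x1079 request is
 * aligned down to 1918x1078 for luma, and the 4:2:0 chroma viewport is
 * half of that in each direction (959x539). */
int main(void)
{
	int luma_w = 1919 - 1919 % 2;   /* 1918 */
	int luma_h = 1079 - 1079 % 2;   /* 1078 */

	assert(luma_w / 2 == 959 && luma_h / 2 == 539);
	assert(1920 / 2 == 960 && 1080 / 2 == 540);
	return 0;
}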
74static void program_viewport(
75 struct dce_transform *xfm_dce,
76 struct rect *luma_view_port,
77 struct rect *chroma_view_port)
78{
79 struct dc_context *ctx = xfm_dce->base.ctx;
80 uint32_t value = 0;
81 uint32_t addr = 0;
82
83 if (luma_view_port->width != 0 && luma_view_port->height != 0) {
84 addr = mmSCLV_VIEWPORT_START;
85 value = 0;
86 set_reg_field_value(
87 value,
88 luma_view_port->x,
89 SCLV_VIEWPORT_START,
90 VIEWPORT_X_START);
91 set_reg_field_value(
92 value,
93 luma_view_port->y,
94 SCLV_VIEWPORT_START,
95 VIEWPORT_Y_START);
96 dm_write_reg(ctx, addr, value);
97
98 addr = mmSCLV_VIEWPORT_SIZE;
99 value = 0;
100 set_reg_field_value(
101 value,
102 luma_view_port->height,
103 SCLV_VIEWPORT_SIZE,
104 VIEWPORT_HEIGHT);
105 set_reg_field_value(
106 value,
107 luma_view_port->width,
108 SCLV_VIEWPORT_SIZE,
109 VIEWPORT_WIDTH);
110 dm_write_reg(ctx, addr, value);
111 }
112
113 if (chroma_view_port->width != 0 && chroma_view_port->height != 0) {
114 addr = mmSCLV_VIEWPORT_START_C;
115 value = 0;
116 set_reg_field_value(
117 value,
118 chroma_view_port->x,
119 SCLV_VIEWPORT_START_C,
120 VIEWPORT_X_START_C);
121 set_reg_field_value(
122 value,
123 chroma_view_port->y,
124 SCLV_VIEWPORT_START_C,
125 VIEWPORT_Y_START_C);
126 dm_write_reg(ctx, addr, value);
127
128 addr = mmSCLV_VIEWPORT_SIZE_C;
129 value = 0;
130 set_reg_field_value(
131 value,
132 chroma_view_port->height,
133 SCLV_VIEWPORT_SIZE_C,
134 VIEWPORT_HEIGHT_C);
135 set_reg_field_value(
136 value,
137 chroma_view_port->width,
138 SCLV_VIEWPORT_SIZE_C,
139 VIEWPORT_WIDTH_C);
140 dm_write_reg(ctx, addr, value);
141 }
142}
143
144/*
145 * Function:
146 * void setup_scaling_configuration
147 *
148 * Purpose: set up scaling mode: bypass, RGB, YCbCr, and number of taps
149 * Input: data
150 *
151 * Output:
152 * void
153 */
154static bool setup_scaling_configuration(
155 struct dce_transform *xfm_dce,
156 const struct scaler_data *data)
157{
158 bool is_scaling_needed = false;
159 struct dc_context *ctx = xfm_dce->base.ctx;
160 uint32_t value = 0;
161
162 set_reg_field_value(value, data->taps.h_taps - 1,
163 SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS);
164 set_reg_field_value(value, data->taps.v_taps - 1,
165 SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS);
166 set_reg_field_value(value, data->taps.h_taps_c - 1,
167 SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS_C);
168 set_reg_field_value(value, data->taps.v_taps_c - 1,
169 SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS_C);
170 dm_write_reg(ctx, mmSCLV_TAP_CONTROL, value);
171
172 value = 0;
173 if (data->taps.h_taps + data->taps.v_taps > 2) {
174 set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE);
175 set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN);
176 is_scaling_needed = true;
177 } else {
178 set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE);
179 set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN);
180 }
181
182 if (data->taps.h_taps_c + data->taps.v_taps_c > 2) {
183 set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE_C);
184 set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN_C);
185 is_scaling_needed = true;
186 } else if (data->format != PIXEL_FORMAT_420BPP12) {
187 set_reg_field_value(
188 value,
189 get_reg_field_value(value, SCLV_MODE, SCL_MODE),
190 SCLV_MODE,
191 SCL_MODE_C);
192 set_reg_field_value(
193 value,
194 get_reg_field_value(value, SCLV_MODE, SCL_PSCL_EN),
195 SCLV_MODE,
196 SCL_PSCL_EN_C);
197 } else {
198 set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE_C);
199 set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN_C);
200 }
201 dm_write_reg(ctx, mmSCLV_MODE, value);
202
203 value = 0;
204 /*
205	 * 0 - Replace out-of-bound pixels with a black pixel
206	 *     (or any other required color)
207	 * 1 - Replace out-of-bound pixels with the edge pixel
208 */
209 set_reg_field_value(value, 1, SCLV_CONTROL, SCL_BOUNDARY_MODE);
210 dm_write_reg(ctx, mmSCLV_CONTROL, value);
211
212 return is_scaling_needed;
213}
214
215/**
216* Function:
217* void program_overscan
218*
219* Purpose: Programs overscan border
220* Input: overscan
221*
222* Output:
223* void
224*/
225static void program_overscan(
226 struct dce_transform *xfm_dce,
227 const struct scaler_data *data)
228{
229 uint32_t overscan_left_right = 0;
230 uint32_t overscan_top_bottom = 0;
231
232 int overscan_right = data->h_active - data->recout.x - data->recout.width;
233 int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
234
235 if (overscan_right < 0) {
236 BREAK_TO_DEBUGGER();
237 overscan_right = 0;
238 }
239 if (overscan_bottom < 0) {
240 BREAK_TO_DEBUGGER();
241 overscan_bottom = 0;
242 }
243
244 set_reg_field_value(overscan_left_right, data->recout.x,
245 EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT);
246
247 set_reg_field_value(overscan_left_right, overscan_right,
248 EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT);
249
250 set_reg_field_value(overscan_top_bottom, data->recout.y,
251 EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP);
252
253 set_reg_field_value(overscan_top_bottom, overscan_bottom,
254 EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM);
255
256 dm_write_reg(xfm_dce->base.ctx,
257 mmSCLV_EXT_OVERSCAN_LEFT_RIGHT,
258 overscan_left_right);
259
260 dm_write_reg(xfm_dce->base.ctx,
261 mmSCLV_EXT_OVERSCAN_TOP_BOTTOM,
262 overscan_top_bottom);
263}
264
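The right and bottom overscan computed above are simply whatever active area remains once the recout rectangle has been placed. A standalone example with hypothetical numbers:

#include <stdio.h>

/* Sketch: a 1280x720 recout centered in a 1920x1080 active area leaves
 * 320 pixels of overscan on the left and right and 180 on top and bottom. */
int main(void)
{
	int h_active = 1920, v_active = 1080;
	int recout_x = 320, recout_y = 180, recout_w = 1280, recout_h = 720;

	int overscan_right = h_active - recout_x - recout_w;   /* 320 */
	int overscan_bottom = v_active - recout_y - recout_h;  /* 180 */

	printf("right=%d bottom=%d\n", overscan_right, overscan_bottom);
	return 0;
}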
265static void set_coeff_update_complete(
266 struct dce_transform *xfm_dce)
267{
268 uint32_t value;
269
270 value = dm_read_reg(xfm_dce->base.ctx, mmSCLV_UPDATE);
271 set_reg_field_value(value, 1, SCLV_UPDATE, SCL_COEF_UPDATE_COMPLETE);
272 dm_write_reg(xfm_dce->base.ctx, mmSCLV_UPDATE, value);
273}
274
275static void program_multi_taps_filter(
276 struct dce_transform *xfm_dce,
277 int taps,
278 const uint16_t *coeffs,
279 enum ram_filter_type filter_type)
280{
281 struct dc_context *ctx = xfm_dce->base.ctx;
282 int i, phase, pair;
283 int array_idx = 0;
284 int taps_pairs = (taps + 1) / 2;
285 int phases_to_program = SCLV_PHASES / 2 + 1;
286
287 uint32_t select = 0;
288 uint32_t power_ctl, power_ctl_off;
289
290 if (!coeffs)
291 return;
292
293 /*We need to disable power gating on coeff memory to do programming*/
294 power_ctl = dm_read_reg(ctx, mmDCFEV_MEM_PWR_CTRL);
295 power_ctl_off = power_ctl;
296 set_reg_field_value(power_ctl_off, 1, DCFEV_MEM_PWR_CTRL, SCLV_COEFF_MEM_PWR_DIS);
297 dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl_off);
298
299 /*Wait to disable gating:*/
300 for (i = 0; i < 10; i++) {
301 if (get_reg_field_value(
302 dm_read_reg(ctx, mmDCFEV_MEM_PWR_STATUS),
303 DCFEV_MEM_PWR_STATUS,
304 SCLV_COEFF_MEM_PWR_STATE) == 0)
305 break;
306
307 udelay(1);
308 }
309
310 set_reg_field_value(select, filter_type, SCLV_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE);
311
312 for (phase = 0; phase < phases_to_program; phase++) {
313		/* We always program N/2 + 1 of the N total phases; the remaining N/2 - 1
314		 * are mirrors. Phase 0 is unique, and phase N/2 is unique if N is even. */
315 set_reg_field_value(select, phase, SCLV_COEF_RAM_SELECT, SCL_C_RAM_PHASE);
316 for (pair = 0; pair < taps_pairs; pair++) {
317 uint32_t data = 0;
318
319 set_reg_field_value(select, pair,
320 SCLV_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX);
321
322 dm_write_reg(ctx, mmSCLV_COEF_RAM_SELECT, select);
323
324 set_reg_field_value(
325 data, 1,
326 SCLV_COEF_RAM_TAP_DATA,
327 SCL_C_RAM_EVEN_TAP_COEF_EN);
328 set_reg_field_value(
329 data, coeffs[array_idx],
330 SCLV_COEF_RAM_TAP_DATA,
331 SCL_C_RAM_EVEN_TAP_COEF);
332
333 if (taps % 2 && pair == taps_pairs - 1) {
334 set_reg_field_value(
335 data, 0,
336 SCLV_COEF_RAM_TAP_DATA,
337 SCL_C_RAM_ODD_TAP_COEF_EN);
338 array_idx++;
339 } else {
340 set_reg_field_value(
341 data, 1,
342 SCLV_COEF_RAM_TAP_DATA,
343 SCL_C_RAM_ODD_TAP_COEF_EN);
344 set_reg_field_value(
345 data, coeffs[array_idx + 1],
346 SCLV_COEF_RAM_TAP_DATA,
347 SCL_C_RAM_ODD_TAP_COEF);
348
349 array_idx += 2;
350 }
351
352 dm_write_reg(ctx, mmSCLV_COEF_RAM_TAP_DATA, data);
353 }
354 }
355
356 /*We need to restore power gating on coeff memory to initial state*/
357 dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl);
358}
359
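Since only the non-mirrored half of the phases is written, the loop above issues far fewer coefficient writes than taps * phases would suggest. A quick standalone calculation for the 64-phase, 4-tap case used by this scaler:

#include <stdio.h>

/* Sketch of the loop bounds in program_multi_taps_filter(): 64 phases and
 * 4 taps give 33 programmed phases (N/2 + 1) and 2 even/odd tap pairs,
 * i.e. 66 writes to SCLV_COEF_RAM_TAP_DATA per filter. */
int main(void)
{
	int phases = 64, taps = 4;
	int phases_to_program = phases / 2 + 1;  /* 33 */
	int taps_pairs = (taps + 1) / 2;         /* 2  */

	printf("%d tap-data writes\n", phases_to_program * taps_pairs);
	return 0;
}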
360static void calculate_inits(
361 struct dce_transform *xfm_dce,
362 const struct scaler_data *data,
363 struct sclv_ratios_inits *inits,
364 struct rect *luma_viewport,
365 struct rect *chroma_viewport)
366{
367 inits->h_int_scale_ratio_luma =
368 dal_fixed31_32_u2d19(data->ratios.horz) << 5;
369 inits->v_int_scale_ratio_luma =
370 dal_fixed31_32_u2d19(data->ratios.vert) << 5;
371 inits->h_int_scale_ratio_chroma =
372 dal_fixed31_32_u2d19(data->ratios.horz_c) << 5;
373 inits->v_int_scale_ratio_chroma =
374 dal_fixed31_32_u2d19(data->ratios.vert_c) << 5;
375
376 inits->h_init_luma.integer = 1;
377 inits->v_init_luma.integer = 1;
378 inits->h_init_chroma.integer = 1;
379 inits->v_init_chroma.integer = 1;
380}
381
382static void program_scl_ratios_inits(
383 struct dce_transform *xfm_dce,
384 struct sclv_ratios_inits *inits)
385{
386 struct dc_context *ctx = xfm_dce->base.ctx;
387 uint32_t addr = mmSCLV_HORZ_FILTER_SCALE_RATIO;
388 uint32_t value = 0;
389
390 set_reg_field_value(
391 value,
392 inits->h_int_scale_ratio_luma,
393 SCLV_HORZ_FILTER_SCALE_RATIO,
394 SCL_H_SCALE_RATIO);
395 dm_write_reg(ctx, addr, value);
396
397 addr = mmSCLV_VERT_FILTER_SCALE_RATIO;
398 value = 0;
399 set_reg_field_value(
400 value,
401 inits->v_int_scale_ratio_luma,
402 SCLV_VERT_FILTER_SCALE_RATIO,
403 SCL_V_SCALE_RATIO);
404 dm_write_reg(ctx, addr, value);
405
406 addr = mmSCLV_HORZ_FILTER_SCALE_RATIO_C;
407 value = 0;
408 set_reg_field_value(
409 value,
410 inits->h_int_scale_ratio_chroma,
411 SCLV_HORZ_FILTER_SCALE_RATIO_C,
412 SCL_H_SCALE_RATIO_C);
413 dm_write_reg(ctx, addr, value);
414
415 addr = mmSCLV_VERT_FILTER_SCALE_RATIO_C;
416 value = 0;
417 set_reg_field_value(
418 value,
419 inits->v_int_scale_ratio_chroma,
420 SCLV_VERT_FILTER_SCALE_RATIO_C,
421 SCL_V_SCALE_RATIO_C);
422 dm_write_reg(ctx, addr, value);
423
424 addr = mmSCLV_HORZ_FILTER_INIT;
425 value = 0;
426 set_reg_field_value(
427 value,
428 inits->h_init_luma.fraction,
429 SCLV_HORZ_FILTER_INIT,
430 SCL_H_INIT_FRAC);
431 set_reg_field_value(
432 value,
433 inits->h_init_luma.integer,
434 SCLV_HORZ_FILTER_INIT,
435 SCL_H_INIT_INT);
436 dm_write_reg(ctx, addr, value);
437
438 addr = mmSCLV_VERT_FILTER_INIT;
439 value = 0;
440 set_reg_field_value(
441 value,
442 inits->v_init_luma.fraction,
443 SCLV_VERT_FILTER_INIT,
444 SCL_V_INIT_FRAC);
445 set_reg_field_value(
446 value,
447 inits->v_init_luma.integer,
448 SCLV_VERT_FILTER_INIT,
449 SCL_V_INIT_INT);
450 dm_write_reg(ctx, addr, value);
451
452 addr = mmSCLV_HORZ_FILTER_INIT_C;
453 value = 0;
454 set_reg_field_value(
455 value,
456 inits->h_init_chroma.fraction,
457 SCLV_HORZ_FILTER_INIT_C,
458 SCL_H_INIT_FRAC_C);
459 set_reg_field_value(
460 value,
461 inits->h_init_chroma.integer,
462 SCLV_HORZ_FILTER_INIT_C,
463 SCL_H_INIT_INT_C);
464 dm_write_reg(ctx, addr, value);
465
466 addr = mmSCLV_VERT_FILTER_INIT_C;
467 value = 0;
468 set_reg_field_value(
469 value,
470 inits->v_init_chroma.fraction,
471 SCLV_VERT_FILTER_INIT_C,
472 SCL_V_INIT_FRAC_C);
473 set_reg_field_value(
474 value,
475 inits->v_init_chroma.integer,
476 SCLV_VERT_FILTER_INIT_C,
477 SCL_V_INIT_INT_C);
478 dm_write_reg(ctx, addr, value);
479}
480
481static const uint16_t *get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
482{
483 if (taps == 4)
484 return get_filter_4tap_64p(ratio);
485 else if (taps == 2)
486 return filter_2tap_64p;
487 else if (taps == 1)
488 return NULL;
489 else {
490 /* should never happen, bug */
491 BREAK_TO_DEBUGGER();
492 return NULL;
493 }
494}
495
496static bool dce110_xfmv_power_up_line_buffer(struct transform *xfm)
497{
498 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
499 uint32_t value;
500
501 value = dm_read_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL);
502
503 /*Use all three pieces of memory always*/
504 set_reg_field_value(value, 0, LBV_MEMORY_CTRL, LB_MEMORY_CONFIG);
505	/* Hard-coded for DCE11: 1712 (0x6B0) entries; partitions: 720/960/1712 */
506 set_reg_field_value(value, xfm_dce->lb_memory_size, LBV_MEMORY_CTRL,
507 LB_MEMORY_SIZE);
508
509 dm_write_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL, value);
510
511 return true;
512}
513
514static void dce110_xfmv_set_scaler(
515 struct transform *xfm,
516 const struct scaler_data *data)
517{
518 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
519 bool is_scaling_required = false;
520 bool filter_updated = false;
521 const uint16_t *coeffs_v, *coeffs_h, *coeffs_h_c, *coeffs_v_c;
522 struct rect luma_viewport = {0};
523 struct rect chroma_viewport = {0};
524
525 dce110_xfmv_power_up_line_buffer(xfm);
526	/* 1. Calculate the viewport; viewport programming should happen after the
527	 * init calculations, as they may require adjusting the viewport.
528 */
529
530 calculate_viewport(data, &luma_viewport, &chroma_viewport);
531
532 /* 2. Program overscan */
533 program_overscan(xfm_dce, data);
534
535 /* 3. Program taps and configuration */
536 is_scaling_required = setup_scaling_configuration(xfm_dce, data);
537
538 if (is_scaling_required) {
539 /* 4. Calculate and program ratio, filter initialization */
540
541 struct sclv_ratios_inits inits = { 0 };
542
543 calculate_inits(
544 xfm_dce,
545 data,
546 &inits,
547 &luma_viewport,
548 &chroma_viewport);
549
550 program_scl_ratios_inits(xfm_dce, &inits);
551
552 coeffs_v = get_filter_coeffs_64p(data->taps.v_taps, data->ratios.vert);
553 coeffs_h = get_filter_coeffs_64p(data->taps.h_taps, data->ratios.horz);
554 coeffs_v_c = get_filter_coeffs_64p(data->taps.v_taps_c, data->ratios.vert_c);
555 coeffs_h_c = get_filter_coeffs_64p(data->taps.h_taps_c, data->ratios.horz_c);
556
557 if (coeffs_v != xfm_dce->filter_v
558 || coeffs_v_c != xfm_dce->filter_v_c
559 || coeffs_h != xfm_dce->filter_h
560 || coeffs_h_c != xfm_dce->filter_h_c) {
561 /* 5. Program vertical filters */
562 program_multi_taps_filter(
563 xfm_dce,
564 data->taps.v_taps,
565 coeffs_v,
566 FILTER_TYPE_RGB_Y_VERTICAL);
567 program_multi_taps_filter(
568 xfm_dce,
569 data->taps.v_taps_c,
570 coeffs_v_c,
571 FILTER_TYPE_CBCR_VERTICAL);
572
573 /* 6. Program horizontal filters */
574 program_multi_taps_filter(
575 xfm_dce,
576 data->taps.h_taps,
577 coeffs_h,
578 FILTER_TYPE_RGB_Y_HORIZONTAL);
579 program_multi_taps_filter(
580 xfm_dce,
581 data->taps.h_taps_c,
582 coeffs_h_c,
583 FILTER_TYPE_CBCR_HORIZONTAL);
584
585 xfm_dce->filter_v = coeffs_v;
586 xfm_dce->filter_v_c = coeffs_v_c;
587 xfm_dce->filter_h = coeffs_h;
588 xfm_dce->filter_h_c = coeffs_h_c;
589 filter_updated = true;
590 }
591 }
592
593 /* 7. Program the viewport */
594 program_viewport(xfm_dce, &luma_viewport, &chroma_viewport);
595
596 /* 8. Set bit to flip to new coefficient memory */
597 if (filter_updated)
598 set_coeff_update_complete(xfm_dce);
599}
600
601static void dce110_xfmv_reset(struct transform *xfm)
602{
603 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
604
605 xfm_dce->filter_h = NULL;
606 xfm_dce->filter_v = NULL;
607 xfm_dce->filter_h_c = NULL;
608 xfm_dce->filter_v_c = NULL;
609}
610
611static void dce110_xfmv_set_gamut_remap(
612 struct transform *xfm,
613 const struct xfm_grph_csc_adjustment *adjust)
614{
615 /* DO NOTHING*/
616}
617
618static void dce110_xfmv_set_pixel_storage_depth(
619 struct transform *xfm,
620 enum lb_pixel_depth depth,
621 const struct bit_depth_reduction_params *bit_depth_params)
622{
623 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
624 int pixel_depth, expan_mode;
625 uint32_t reg_data = 0;
626
627 switch (depth) {
628 case LB_PIXEL_DEPTH_18BPP:
629 pixel_depth = 2;
630 expan_mode = 1;
631 break;
632 case LB_PIXEL_DEPTH_24BPP:
633 pixel_depth = 1;
634 expan_mode = 1;
635 break;
636 case LB_PIXEL_DEPTH_30BPP:
637 pixel_depth = 0;
638 expan_mode = 1;
639 break;
640 case LB_PIXEL_DEPTH_36BPP:
641 pixel_depth = 3;
642 expan_mode = 0;
643 break;
644	default:
645		/* Keep pixel_depth/expan_mode initialized on the error path */
646		pixel_depth = 0;
647		expan_mode = 1;
648		BREAK_TO_DEBUGGER();
649		break;
647 }
648
649 set_reg_field_value(
650 reg_data,
651 expan_mode,
652 LBV_DATA_FORMAT,
653 PIXEL_EXPAN_MODE);
654
655 set_reg_field_value(
656 reg_data,
657 pixel_depth,
658 LBV_DATA_FORMAT,
659 PIXEL_DEPTH);
660
661 dm_write_reg(xfm->ctx, mmLBV_DATA_FORMAT, reg_data);
662
663 if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
664		/* We should not use unsupported capabilities
665		 * unless required by a workaround. */
666 dm_logger_write(xfm->ctx->logger, LOG_WARNING,
667 "%s: Capability not supported",
668 __func__);
669 }
670}
671
672static const struct transform_funcs dce110_xfmv_funcs = {
673 .transform_reset = dce110_xfmv_reset,
674 .transform_set_scaler = dce110_xfmv_set_scaler,
675 .transform_set_gamut_remap =
676 dce110_xfmv_set_gamut_remap,
677 .transform_set_pixel_storage_depth =
678 dce110_xfmv_set_pixel_storage_depth,
679 .transform_get_optimal_number_of_taps =
680 dce_transform_get_optimal_number_of_taps
681};
682/*****************************************/
683/* Constructor, Destructor */
684/*****************************************/
685
686bool dce110_transform_v_construct(
687 struct dce_transform *xfm_dce,
688 struct dc_context *ctx)
689{
690 xfm_dce->base.ctx = ctx;
691
692 xfm_dce->base.funcs = &dce110_xfmv_funcs;
693
694 xfm_dce->lb_pixel_depth_supported =
695 LB_PIXEL_DEPTH_18BPP |
696 LB_PIXEL_DEPTH_24BPP |
697 LB_PIXEL_DEPTH_30BPP;
698
699 xfm_dce->prescaler_on = true;
700 xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
701 xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
702
703 return true;
704}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
new file mode 100644
index 000000000000..267af34db3e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
@@ -0,0 +1,37 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DAL_TRANSFORM_V_DCE110_H__
26#define __DAL_TRANSFORM_V_DCE110_H__
27
28#include "../dce/dce_transform.h"
29
30#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
31#define LB_BITS_PER_ENTRY 144
32
33bool dce110_transform_v_construct(
34 struct dce_transform *xfm110,
35 struct dc_context *ctx);
36
37#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_types.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_types.h
new file mode 100644
index 000000000000..55f52382ddfb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_types.h
@@ -0,0 +1,30 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DCE110_TYPES_H__
26#define __DCE110_TYPES_H__
27
28#define GAMMA_SEGMENTS_NUM 16
29
30#endif /* __DCE110_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
new file mode 100644
index 000000000000..34fba0730bed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for the DCE 11.2 sub-component of DAL.
3# It provides the compressor, HW sequencer, resource, mem input and OPP blocks.
4
5DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
6dce112_resource.o dce112_mem_input.o dce112_opp_formatter.o \
7dce112_opp.o
8
9AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112))
10
11AMD_DISPLAY_FILES += $(AMD_DAL_DCE112)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
new file mode 100644
index 000000000000..22a5aba073ca
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -0,0 +1,859 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_11_2_d.h"
29#include "dce/dce_11_2_sh_mask.h"
30#include "gmc/gmc_8_1_sh_mask.h"
31#include "gmc/gmc_8_1_d.h"
32
33#include "include/logger_interface.h"
34
35#include "dce112_compressor.h"
36
37#define DCP_REG(reg)\
38 (reg + cp110->offsets.dcp_offset)
39#define DMIF_REG(reg)\
40 (reg + cp110->offsets.dmif_offset)
41
42static const struct dce112_compressor_reg_offsets reg_offsets[] = {
43{
44 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
45 .dmif_offset =
46 (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
47 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
48},
49{
50 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
51 .dmif_offset =
52 (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
53 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
54},
55{
56 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
57 .dmif_offset =
58 (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
59 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
60}
61};
62
63static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
64
65enum fbc_idle_force {
66 /* Bit 0 - Display registers updated */
67 FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
68
69 /* Bit 2 - FBC_GRPH_COMP_EN register updated */
70 FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
71 /* Bit 3 - FBC_SRC_SEL register updated */
72 FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
73 /* Bit 4 - FBC_MIN_COMPRESSION register updated */
74 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
75 /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
76 FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
77 /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
78 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
79 /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
80 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
81
82 /* Bit 24 - Memory write to region 0 defined by MC registers. */
83 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
84 /* Bit 25 - Memory write to region 1 defined by MC registers */
85 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
86 /* Bit 26 - Memory write to region 2 defined by MC registers */
87 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
88 /* Bit 27 - Memory write to region 3 defined by MC registers. */
89 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
90
91 /* Bit 28 - Memory write from any client other than MCIF */
92 FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
93 /* Bit 29 - CG statics screen signal is inactive */
94 FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
95};
96
97static uint32_t lpt_size_alignment(struct dce112_compressor *cp110)
98{
99 /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
100 return cp110->base.raw_size * cp110->base.banks_num *
101 cp110->base.dram_channels_num;
102}
103
104static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
105 uint32_t lpt_control)
106{
107 /*LPT MC Config */
108 if (cp110->base.options.bits.LPT_MC_CONFIG == 1) {
109 /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
110 * 00 - 1 CHANNEL
111 * 01 - 2 CHANNELS
112 * 02 - 4 OR 6 CHANNELS
113 * (Only for discrete GPU, N/A for CZ)
114 * 03 - 8 OR 12 CHANNELS
115 * (Only for discrete GPU, N/A for CZ) */
116 switch (cp110->base.dram_channels_num) {
117 case 2:
118 set_reg_field_value(
119 lpt_control,
120 1,
121 LOW_POWER_TILING_CONTROL,
122 LOW_POWER_TILING_NUM_PIPES);
123 break;
124 case 1:
125 set_reg_field_value(
126 lpt_control,
127 0,
128 LOW_POWER_TILING_CONTROL,
129 LOW_POWER_TILING_NUM_PIPES);
130 break;
131 default:
132 dm_logger_write(
133 cp110->base.ctx->logger, LOG_WARNING,
134 "%s: Invalid LPT NUM_PIPES!!!",
135 __func__);
136 break;
137 }
138
139 /* The mapping for LPT NUM_BANKS is in
140 * GRPH_CONTROL.GRPH_NUM_BANKS register field
141 * Specifies the number of memory banks for tiling
142 * purposes. Only applies to 2D and 3D tiling modes.
143 * POSSIBLE VALUES:
144 * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
145 * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
146 * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
147 * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
148 switch (cp110->base.banks_num) {
149 case 16:
150 set_reg_field_value(
151 lpt_control,
152 3,
153 LOW_POWER_TILING_CONTROL,
154 LOW_POWER_TILING_NUM_BANKS);
155 break;
156 case 8:
157 set_reg_field_value(
158 lpt_control,
159 2,
160 LOW_POWER_TILING_CONTROL,
161 LOW_POWER_TILING_NUM_BANKS);
162 break;
163 case 4:
164 set_reg_field_value(
165 lpt_control,
166 1,
167 LOW_POWER_TILING_CONTROL,
168 LOW_POWER_TILING_NUM_BANKS);
169 break;
170 case 2:
171 set_reg_field_value(
172 lpt_control,
173 0,
174 LOW_POWER_TILING_CONTROL,
175 LOW_POWER_TILING_NUM_BANKS);
176 break;
177 default:
178 dm_logger_write(
179 cp110->base.ctx->logger, LOG_WARNING,
180 "%s: Invalid LPT NUM_BANKS!!!",
181 __func__);
182 break;
183 }
184
185 /* The mapping is in DMIF_ADDR_CALC.
186 * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
187 * Carrizo specifies the memory interleave per pipe.
188 * It effectively specifies the location of pipe bits in
189 * the memory address.
190 * POSSIBLE VALUES:
191 * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
192 * interleave
193 * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
194 * interleave
195 */
196 switch (cp110->base.channel_interleave_size) {
197 case 256: /*256B */
198 set_reg_field_value(
199 lpt_control,
200 0,
201 LOW_POWER_TILING_CONTROL,
202 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
203 break;
204 case 512: /*512B */
205 set_reg_field_value(
206 lpt_control,
207 1,
208 LOW_POWER_TILING_CONTROL,
209 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
210 break;
211 default:
212 dm_logger_write(
213 cp110->base.ctx->logger, LOG_WARNING,
214 "%s: Invalid LPT INTERLEAVE_SIZE!!!",
215 __func__);
216 break;
217 }
218
219 /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
220 * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
221 * for Carrizo. Specifies the size of dram row in bytes.
222 * This should match up with NOOFCOLS field in
223	 * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2^NOOFCOLS).
224 * This register DMIF_ADDR_CALC is not used by the
225 * hardware as it is only used for addrlib assertions.
226 * POSSIBLE VALUES:
227 * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
228 * boundary
229 * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
230 * boundary
231 * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
232 * boundary */
233 switch (cp110->base.raw_size) {
234 case 4096: /*4 KB */
235 set_reg_field_value(
236 lpt_control,
237 2,
238 LOW_POWER_TILING_CONTROL,
239 LOW_POWER_TILING_ROW_SIZE);
240 break;
241 case 2048:
242 set_reg_field_value(
243 lpt_control,
244 1,
245 LOW_POWER_TILING_CONTROL,
246 LOW_POWER_TILING_ROW_SIZE);
247 break;
248 case 1024:
249 set_reg_field_value(
250 lpt_control,
251 0,
252 LOW_POWER_TILING_CONTROL,
253 LOW_POWER_TILING_ROW_SIZE);
254 break;
255 default:
256 dm_logger_write(
257 cp110->base.ctx->logger, LOG_WARNING,
258 "%s: Invalid LPT ROW_SIZE!!!",
259 __func__);
260 break;
261 }
262 } else {
263 dm_logger_write(
264 cp110->base.ctx->logger, LOG_WARNING,
265 "%s: LPT MC Configuration is not provided",
266 __func__);
267 }
268
269 return lpt_control;
270}
271
272static bool is_source_bigger_than_epanel_size(
273 struct dce112_compressor *cp110,
274 uint32_t source_view_width,
275 uint32_t source_view_height)
276{
277 if (cp110->base.embedded_panel_h_size != 0 &&
278 cp110->base.embedded_panel_v_size != 0 &&
279 ((source_view_width * source_view_height) >
280 (cp110->base.embedded_panel_h_size *
281 cp110->base.embedded_panel_v_size)))
282 return true;
283
284 return false;
285}
286
287static uint32_t align_to_chunks_number_per_line(
288 struct dce112_compressor *cp110,
289 uint32_t pixels)
290{
291 return 256 * ((pixels + 255) / 256);
292}
293
294static void wait_for_fbc_state_changed(
295 struct dce112_compressor *cp110,
296 bool enabled)
297{
298 uint8_t counter = 0;
299 uint32_t addr = mmFBC_STATUS;
300 uint32_t value;
301
302 while (counter < 10) {
303 value = dm_read_reg(cp110->base.ctx, addr);
304 if (get_reg_field_value(
305 value,
306 FBC_STATUS,
307 FBC_ENABLE_STATUS) == enabled)
308 break;
309 udelay(10);
310 counter++;
311 }
312
313 if (counter == 10) {
314 dm_logger_write(
315 cp110->base.ctx->logger, LOG_WARNING,
316 "%s: wait counter exceeded, changes to HW not applied",
317 __func__);
318 }
319}
320
321void dce112_compressor_power_up_fbc(struct compressor *compressor)
322{
323 uint32_t value;
324 uint32_t addr;
325
326 addr = mmFBC_CNTL;
327 value = dm_read_reg(compressor->ctx, addr);
328 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
329 set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
330 set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
331 if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
332 /* HW needs to do power measurement comparison. */
333 set_reg_field_value(
334 value,
335 0,
336 FBC_CNTL,
337 FBC_COMP_CLK_GATE_EN);
338 }
339 dm_write_reg(compressor->ctx, addr, value);
340
341 addr = mmFBC_COMP_MODE;
342 value = dm_read_reg(compressor->ctx, addr);
343 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
344 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
345 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
346 dm_write_reg(compressor->ctx, addr, value);
347
348 addr = mmFBC_COMP_CNTL;
349 value = dm_read_reg(compressor->ctx, addr);
350 set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
351 dm_write_reg(compressor->ctx, addr, value);
352 /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
353 /* 1 ==> 4:1 */
354 /* 2 ==> 8:1 */
355 /* 0xF ==> 1:1 */
356 set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
357 dm_write_reg(compressor->ctx, addr, value);
358 compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
359
360 value = 0;
361 dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
362
363 value = 0xFFFFFF;
364 dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
365}
366
367void dce112_compressor_enable_fbc(
368 struct compressor *compressor,
369 uint32_t paths_num,
370 struct compr_addr_and_pitch_params *params)
371{
372 struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
373
374 if (compressor->options.bits.FBC_SUPPORT &&
375 (compressor->options.bits.DUMMY_BACKEND == 0) &&
376 (!dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
377 (!is_source_bigger_than_epanel_size(
378 cp110,
379 params->source_view_width,
380 params->source_view_height))) {
381
382 uint32_t addr;
383 uint32_t value;
384
385		/* Before enabling FBC, LPT must be enabled first if applicable.
386		 * LPT state should always be changed (enabled/disabled) while
387		 * FBC is disabled. */
388 if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
389 (params->source_view_width *
390 params->source_view_height <=
391 dce11_one_lpt_channel_max_resolution)) {
392 dce112_compressor_enable_lpt(compressor);
393 }
394
395 addr = mmFBC_CNTL;
396 value = dm_read_reg(compressor->ctx, addr);
397 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
398 set_reg_field_value(
399 value,
400 params->inst,
401 FBC_CNTL, FBC_SRC_SEL);
402 dm_write_reg(compressor->ctx, addr, value);
403
404 /* Keep track of enum controller_id FBC is attached to */
405 compressor->is_enabled = true;
406 compressor->attached_inst = params->inst;
407 cp110->offsets = reg_offsets[params->inst - 1];
408
409		/* Toggle it as there is a bug in HW */
410 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
411 dm_write_reg(compressor->ctx, addr, value);
412 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
413 dm_write_reg(compressor->ctx, addr, value);
414
415 wait_for_fbc_state_changed(cp110, true);
416 }
417}
418
419void dce112_compressor_disable_fbc(struct compressor *compressor)
420{
421 struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
422
423 if (compressor->options.bits.FBC_SUPPORT &&
424 dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
425 uint32_t reg_data;
426 /* Turn off compression */
427 reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
428 set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
429 dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
430
431 /* Reset enum controller_id to undefined */
432 compressor->attached_inst = 0;
433 compressor->is_enabled = false;
434
435 /* Whenever disabling FBC make sure LPT is disabled if LPT
436 * supported */
437 if (compressor->options.bits.LPT_SUPPORT)
438 dce112_compressor_disable_lpt(compressor);
439
440 wait_for_fbc_state_changed(cp110, false);
441 }
442}
443
444bool dce112_compressor_is_fbc_enabled_in_hw(
445 struct compressor *compressor,
446 uint32_t *inst)
447{
448 /* Check the hardware register */
449 uint32_t value;
450
451 value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
452 if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
453 if (inst != NULL)
454 *inst = compressor->attached_inst;
455 return true;
456 }
457
458 value = dm_read_reg(compressor->ctx, mmFBC_MISC);
459 if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
460 value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
461
462 if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
463 if (inst != NULL)
464 *inst =
465 compressor->attached_inst;
466 return true;
467 }
468 }
469 return false;
470}
471
472bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
473{
474 /* Check the hardware register */
475 uint32_t value = dm_read_reg(compressor->ctx,
476 mmLOW_POWER_TILING_CONTROL);
477
478 return get_reg_field_value(
479 value,
480 LOW_POWER_TILING_CONTROL,
481 LOW_POWER_TILING_ENABLE);
482}
483
484void dce112_compressor_program_compressed_surface_address_and_pitch(
485 struct compressor *compressor,
486 struct compr_addr_and_pitch_params *params)
487{
488 struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
489 uint32_t value = 0;
490 uint32_t fbc_pitch = 0;
491 uint32_t compressed_surf_address_low_part =
492 compressor->compr_surface_address.addr.low_part;
493
494 /* Clear content first. */
495 dm_write_reg(
496 compressor->ctx,
497 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
498 0);
499 dm_write_reg(compressor->ctx,
500 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
501
502 if (compressor->options.bits.LPT_SUPPORT) {
503 uint32_t lpt_alignment = lpt_size_alignment(cp110);
504
505 if (lpt_alignment != 0) {
506 compressed_surf_address_low_part =
507 ((compressed_surf_address_low_part
508 + (lpt_alignment - 1)) / lpt_alignment)
509 * lpt_alignment;
510 }
511 }
512
513 /* Write address, HIGH has to be first. */
514 dm_write_reg(compressor->ctx,
515 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
516 compressor->compr_surface_address.addr.high_part);
517 dm_write_reg(compressor->ctx,
518 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
519 compressed_surf_address_low_part);
520
521 fbc_pitch = align_to_chunks_number_per_line(
522 cp110,
523 params->source_view_width);
524
525 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
526 fbc_pitch = fbc_pitch / 8;
527 else
528 dm_logger_write(
529 compressor->ctx->logger, LOG_WARNING,
530 "%s: Unexpected DCE11 compression ratio",
531 __func__);
532
533 /* Clear content first. */
534 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
535
536 /* Write FBC Pitch. */
537 set_reg_field_value(
538 value,
539 fbc_pitch,
540 GRPH_COMPRESS_PITCH,
541 GRPH_COMPRESS_PITCH);
542 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
543
544}
545
546void dce112_compressor_disable_lpt(struct compressor *compressor)
547{
548 struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
549 uint32_t value;
550 uint32_t addr;
551 uint32_t inx;
552
553 /* Disable all pipes LPT Stutter */
554 for (inx = 0; inx < 3; inx++) {
555 value =
556 dm_read_reg(
557 compressor->ctx,
558 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
559 set_reg_field_value(
560 value,
561 0,
562 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
563 STUTTER_ENABLE_NONLPTCH);
564 dm_write_reg(
565 compressor->ctx,
566 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
567 value);
568 }
569 /* Disable Underlay pipe LPT Stutter */
570 addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
571 value = dm_read_reg(compressor->ctx, addr);
572 set_reg_field_value(
573 value,
574 0,
575 DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
576 STUTTER_ENABLE_NONLPTCH);
577 dm_write_reg(compressor->ctx, addr, value);
578
579 /* Disable LPT */
580 addr = mmLOW_POWER_TILING_CONTROL;
581 value = dm_read_reg(compressor->ctx, addr);
582 set_reg_field_value(
583 value,
584 0,
585 LOW_POWER_TILING_CONTROL,
586 LOW_POWER_TILING_ENABLE);
587 dm_write_reg(compressor->ctx, addr, value);
588
589 /* Clear selection of Channel(s) containing Compressed Surface */
590 addr = mmGMCON_LPT_TARGET;
591 value = dm_read_reg(compressor->ctx, addr);
592 set_reg_field_value(
593 value,
594 0xFFFFFFFF,
595 GMCON_LPT_TARGET,
596 STCTRL_LPT_TARGET);
597 dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
598}
599
600void dce112_compressor_enable_lpt(struct compressor *compressor)
601{
602 struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
603 uint32_t value;
604 uint32_t addr;
605 uint32_t value_control;
606 uint32_t channels;
607
608 /* Enable LPT Stutter from Display pipe */
609 value = dm_read_reg(compressor->ctx,
610 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
611 set_reg_field_value(
612 value,
613 1,
614 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
615 STUTTER_ENABLE_NONLPTCH);
616 dm_write_reg(compressor->ctx,
617 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
618
619 /* Enable Underlay pipe LPT Stutter */
620 addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
621 value = dm_read_reg(compressor->ctx, addr);
622 set_reg_field_value(
623 value,
624 1,
625 DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
626 STUTTER_ENABLE_NONLPTCH);
627 dm_write_reg(compressor->ctx, addr, value);
628
629	/* Selection of Channel(s) containing Compressed Surface: 0xffffffff
630 * will disable LPT.
631 * STCTRL_LPT_TARGETn corresponds to channel n. */
632 addr = mmLOW_POWER_TILING_CONTROL;
633 value_control = dm_read_reg(compressor->ctx, addr);
634 channels = get_reg_field_value(value_control,
635 LOW_POWER_TILING_CONTROL,
636 LOW_POWER_TILING_MODE);
637
638 addr = mmGMCON_LPT_TARGET;
639 value = dm_read_reg(compressor->ctx, addr);
640 set_reg_field_value(
641 value,
642 channels + 1, /* not mentioned in programming guide,
643 but follow DCE8.1 */
644 GMCON_LPT_TARGET,
645 STCTRL_LPT_TARGET);
646 dm_write_reg(compressor->ctx, addr, value);
647
648 /* Enable LPT */
649 addr = mmLOW_POWER_TILING_CONTROL;
650 value = dm_read_reg(compressor->ctx, addr);
651 set_reg_field_value(
652 value,
653 1,
654 LOW_POWER_TILING_CONTROL,
655 LOW_POWER_TILING_ENABLE);
656 dm_write_reg(compressor->ctx, addr, value);
657}
658
659void dce112_compressor_program_lpt_control(
660 struct compressor *compressor,
661 struct compr_addr_and_pitch_params *params)
662{
663 struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
664 uint32_t rows_per_channel;
665 uint32_t lpt_alignment;
666 uint32_t source_view_width;
667 uint32_t source_view_height;
668 uint32_t lpt_control = 0;
669
670 if (!compressor->options.bits.LPT_SUPPORT)
671 return;
672
673 lpt_control = dm_read_reg(compressor->ctx,
674 mmLOW_POWER_TILING_CONTROL);
675
676 /* POSSIBLE VALUES for Low Power Tiling Mode:
677 * 00 - Use channel 0
678 * 01 - Use Channel 0 and 1
679 * 02 - Use Channel 0,1,2,3
680 * 03 - reserved */
681 switch (compressor->lpt_channels_num) {
682 /* case 2:
683 * Use Channel 0 & 1 / Not used for DCE 11 */
684 case 1:
685 /*Use Channel 0 for LPT for DCE 11 */
686 set_reg_field_value(
687 lpt_control,
688 0,
689 LOW_POWER_TILING_CONTROL,
690 LOW_POWER_TILING_MODE);
691 break;
692 default:
693 dm_logger_write(
694 compressor->ctx->logger, LOG_WARNING,
695 "%s: Invalid selected DRAM channels for LPT!!!",
696 __func__);
697 break;
698 }
699
700 lpt_control = lpt_memory_control_config(cp110, lpt_control);
701
702 /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
703 * FBC compressed surface pitch.
704 * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
705 * Surface Pitch) / (Row Size * Number of Channels *
706 * Number of Banks)). */
707 rows_per_channel = 0;
708 lpt_alignment = lpt_size_alignment(cp110);
709 source_view_width =
710 align_to_chunks_number_per_line(
711 cp110,
712 params->source_view_width);
713 source_view_height = (params->source_view_height + 1) & (~0x1);
714
715 if (lpt_alignment != 0) {
716 rows_per_channel = source_view_width * source_view_height * 4;
717 rows_per_channel =
718 (rows_per_channel % lpt_alignment) ?
719 (rows_per_channel / lpt_alignment + 1) :
720 rows_per_channel / lpt_alignment;
721 }
722
723 set_reg_field_value(
724 lpt_control,
725 rows_per_channel,
726 LOW_POWER_TILING_CONTROL,
727 LOW_POWER_TILING_ROWS_PER_CHAN);
728
729 dm_write_reg(compressor->ctx,
730 mmLOW_POWER_TILING_CONTROL, lpt_control);
731}
732
733/*
734 * DCE 11 Frame Buffer Compression Implementation
735 */
736
737void dce112_compressor_set_fbc_invalidation_triggers(
738 struct compressor *compressor,
739 uint32_t fbc_trigger)
740{
741 /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
742 * for DCE 11 regions cannot be used - does not work with S/G
743 */
744 uint32_t addr = mmFBC_CLIENT_REGION_MASK;
745 uint32_t value = dm_read_reg(compressor->ctx, addr);
746
747 set_reg_field_value(
748 value,
749 0,
750 FBC_CLIENT_REGION_MASK,
751 FBC_MEMORY_REGION_MASK);
752 dm_write_reg(compressor->ctx, addr, value);
753
754 /* Setup events when to clear all CSM entries (effectively marking
755 * current compressed data invalid)
756 * For DCE 11 CSM metadata 11111 means - "Not Compressed"
757 * Used as the initial value of the metadata sent to the compressor
758 * after invalidation, to indicate that the compressor should attempt
759 * to compress all chunks on the current pass. Also used when the chunk
760 * is not successfully written to memory.
761 * When this CSM value is detected, FBC reads from the uncompressed
762 * buffer. Set events according to passed in value, these events are
763 * valid for DCE11:
764 * - bit 0 - display register updated
765 * - bit 28 - memory write from any client except from MCIF
766 * - bit 29 - CG static screen signal is inactive
767 * In addition, DCE11.1 also needs to set new DCE11.1 specific events
768 * that are used to trigger invalidation on certain register changes,
769 * for example enabling of Alpha Compression may trigger invalidation of
770 * FBC once bit is set. These events are as follows:
771 * - Bit 2 - FBC_GRPH_COMP_EN register updated
772 * - Bit 3 - FBC_SRC_SEL register updated
773 * - Bit 4 - FBC_MIN_COMPRESSION register updated
774 * - Bit 5 - FBC_ALPHA_COMP_EN register updated
775 * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
776 * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
777 */
778 addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
779 value = dm_read_reg(compressor->ctx, addr);
780 set_reg_field_value(
781 value,
782 fbc_trigger |
783 FBC_IDLE_FORCE_GRPH_COMP_EN |
784 FBC_IDLE_FORCE_SRC_SEL_CHANGE |
785 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
786 FBC_IDLE_FORCE_ALPHA_COMP_EN |
787 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
788 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
789 FBC_IDLE_FORCE_CLEAR_MASK,
790 FBC_IDLE_FORCE_CLEAR_MASK);
791 dm_write_reg(compressor->ctx, addr, value);
792}
793
794bool dce112_compressor_construct(struct dce112_compressor *compressor,
795 struct dc_context *ctx)
796{
797 struct dc_bios *bp = ctx->dc_bios;
798 struct embedded_panel_info panel_info;
799
800	compressor->base.ctx = ctx;
801	compressor->base.embedded_panel_h_size = 0;
802	compressor->base.embedded_panel_v_size = 0;
803	compressor->base.memory_bus_width = ctx->asic_id.vram_width;
804	compressor->base.allocated_size = 0;
805	compressor->base.preferred_requested_size = 0;
806	compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
807	compressor->base.banks_num = 0;
808	compressor->base.raw_size = 0;
809	compressor->base.channel_interleave_size = 0;
810	compressor->base.dram_channels_num = 0;
811	compressor->base.attached_inst = 0;
812	compressor->base.is_enabled = false;
813
814	/* Clear all options first, then enable what this ASIC supports. */
815	compressor->base.options.raw = 0;
816	compressor->base.options.bits.FBC_SUPPORT = true;
817	compressor->base.options.bits.LPT_SUPPORT = true;
818	compressor->base.options.bits.DUMMY_BACKEND = false;
819	compressor->base.options.bits.CLK_GATING_DISABLED = false;
820
821	/* For DCE 11 always use one DRAM channel for LPT */
822	compressor->base.lpt_channels_num = 1;
823
824	/* Check if this system has more than 1 DRAM channel; if only 1 then LPT
825	 * should not be supported */
826	if (compressor->base.memory_bus_width == 64)
827		compressor->base.options.bits.LPT_SUPPORT = false;
828
829 if (BP_RESULT_OK ==
830 bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
831 compressor->base.embedded_panel_h_size =
832 panel_info.lcd_timing.horizontal_addressable;
833 compressor->base.embedded_panel_v_size =
834 panel_info.lcd_timing.vertical_addressable;
835 }
836 return true;
837}
838
839struct compressor *dce112_compressor_create(struct dc_context *ctx)
840{
841 struct dce112_compressor *cp110 =
842 dm_alloc(sizeof(struct dce112_compressor));
843
844 if (!cp110)
845 return NULL;
846
847 if (dce112_compressor_construct(cp110, ctx))
848 return &cp110->base;
849
850 BREAK_TO_DEBUGGER();
851 dm_free(cp110);
852 return NULL;
853}
854
855void dce112_compressor_destroy(struct compressor **compressor)
856{
857 dm_free(TO_DCE112_COMPRESSOR(*compressor));
858 *compressor = NULL;
859}
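
The LPT maths above follow two documented formulas: LPT_ALIGNMENT = ROW_SIZE * number of banks * number of DRAM channels, and LOW_POWER_TILING_ROWS_PER_CHAN = roundup(surface bytes / LPT_ALIGNMENT), with the surface taken as 4 bytes per pixel. A simplified standalone sketch follows (hypothetical inputs, not driver code; the real code additionally chunk-aligns the width to 256 pixels and rounds the height up to an even value):

	/* Illustrative sketch of the LPT alignment and rows-per-channel formulas.
	 * Inputs are hypothetical; this is not driver code.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t lpt_alignment(uint32_t row_size, uint32_t banks,
				      uint32_t dram_channels)
	{
		/* LPT_ALIGNMENT (bytes) = ROW_SIZE * #BANKS * #DRAM CHANNELS */
		return row_size * banks * dram_channels;
	}

	static uint32_t rows_per_channel(uint32_t width, uint32_t height,
					 uint32_t alignment)
	{
		uint64_t surface_bytes = (uint64_t)width * height * 4; /* 4 Bpp */

		/* Round up, guarding against a zero alignment */
		return alignment ?
			(uint32_t)((surface_bytes + alignment - 1) / alignment) : 0;
	}

	int main(void)
	{
		uint32_t align = lpt_alignment(2048, 8, 2); /* hypothetical config */

		printf("alignment=%u rows_per_chan=%u\n",
		       align, rows_per_channel(2560, 1600, align));
		return 0;
	}
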
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
new file mode 100644
index 000000000000..106506387270
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
@@ -0,0 +1,78 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_COMPRESSOR_DCE112_H__
26#define __DC_COMPRESSOR_DCE112_H__
27
28#include "../inc/compressor.h"
29
30#define TO_DCE112_COMPRESSOR(compressor)\
31 container_of(compressor, struct dce112_compressor, base)
32
33struct dce112_compressor_reg_offsets {
34 uint32_t dcp_offset;
35 uint32_t dmif_offset;
36};
37
38struct dce112_compressor {
39 struct compressor base;
40 struct dce112_compressor_reg_offsets offsets;
41};
42
43struct compressor *dce112_compressor_create(struct dc_context *ctx);
44
45bool dce112_compressor_construct(struct dce112_compressor *cp110,
46 struct dc_context *ctx);
47
48void dce112_compressor_destroy(struct compressor **cp);
49
50/* FBC RELATED */
51void dce112_compressor_power_up_fbc(struct compressor *cp);
52
53void dce112_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
54 struct compr_addr_and_pitch_params *params);
55
56void dce112_compressor_disable_fbc(struct compressor *cp);
57
58void dce112_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
59 uint32_t fbc_trigger);
60
61void dce112_compressor_program_compressed_surface_address_and_pitch(
62 struct compressor *cp,
63 struct compr_addr_and_pitch_params *params);
64
65bool dce112_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
66 uint32_t *fbc_mapped_crtc_id);
67
68/* LPT RELATED */
69void dce112_compressor_enable_lpt(struct compressor *cp);
70
71void dce112_compressor_disable_lpt(struct compressor *cp);
72
73void dce112_compressor_program_lpt_control(struct compressor *cp,
74 struct compr_addr_and_pitch_params *params);
75
76bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
77
78#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
new file mode 100644
index 000000000000..204f613467b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -0,0 +1,166 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dc.h"
28#include "core_dc.h"
29#include "core_types.h"
30#include "dce112_hw_sequencer.h"
31
32#include "dce110/dce110_hw_sequencer.h"
33
34/* include DCE11.2 register header files */
35#include "dce/dce_11_2_d.h"
36#include "dce/dce_11_2_sh_mask.h"
37
38struct dce112_hw_seq_reg_offsets {
39 uint32_t crtc;
40};
41
42
43static const struct dce112_hw_seq_reg_offsets reg_offsets[] = {
44{
45 .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
46},
47{
48 .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
49},
50{
51 .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
52},
53{
54 .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
55},
56{
57 .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
58},
59{
60 .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
61}
62};
63#define HW_REG_CRTC(reg, id)\
64 (reg + reg_offsets[id].crtc)
65
66/*******************************************************************************
67 * Private definitions
68 ******************************************************************************/
69
70static void dce112_init_pte(struct dc_context *ctx)
71{
72 uint32_t addr;
73 uint32_t value = 0;
74 uint32_t chunk_int = 0;
75 uint32_t chunk_mul = 0;
76
77 addr = mmDVMM_PTE_REQ;
78 value = dm_read_reg(ctx, addr);
79
80 chunk_int = get_reg_field_value(
81 value,
82 DVMM_PTE_REQ,
83 HFLIP_PTEREQ_PER_CHUNK_INT);
84
85 chunk_mul = get_reg_field_value(
86 value,
87 DVMM_PTE_REQ,
88 HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
89
90 if (chunk_int != 0x4 || chunk_mul != 0x4) {
91
92 set_reg_field_value(
93 value,
94 255,
95 DVMM_PTE_REQ,
96 MAX_PTEREQ_TO_ISSUE);
97
98 set_reg_field_value(
99 value,
100 4,
101 DVMM_PTE_REQ,
102 HFLIP_PTEREQ_PER_CHUNK_INT);
103
104 set_reg_field_value(
105 value,
106 4,
107 DVMM_PTE_REQ,
108 HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
109
110 dm_write_reg(ctx, addr, value);
111 }
112}
113
114static bool dce112_enable_display_power_gating(
115 struct core_dc *dc,
116 uint8_t controller_id,
117 struct dc_bios *dcb,
118 enum pipe_gating_control power_gating)
119{
120 enum bp_result bp_result = BP_RESULT_OK;
121 enum bp_pipe_control_action cntl;
122 struct dc_context *ctx = dc->ctx;
123
124 if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
125 return true;
126
127 if (power_gating == PIPE_GATING_CONTROL_INIT)
128 cntl = ASIC_PIPE_INIT;
129 else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
130 cntl = ASIC_PIPE_ENABLE;
131 else
132 cntl = ASIC_PIPE_DISABLE;
133
134	if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
135
136 bp_result = dcb->funcs->enable_disp_power_gating(
137 dcb, controller_id + 1, cntl);
138
139		/* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it
140		 * to 2 by default when the command table is called
141		 */
142 dm_write_reg(ctx,
143 HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
144 0);
145 }
146
147 if (power_gating != PIPE_GATING_CONTROL_ENABLE)
148 dce112_init_pte(ctx);
149
150 if (bp_result == BP_RESULT_OK)
151 return true;
152 else
153 return false;
154}
155
156bool dce112_hw_sequencer_construct(struct core_dc *dc)
157{
158 /* All registers used by dce11.2 match those in dce11 in offset and
159 * structure
160 */
161 dce110_hw_sequencer_construct(dc);
162 dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating;
163
164 return true;
165}
166
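
dce112_enable_display_power_gating() above is essentially a translation step: the requested pipe_gating_control is mapped to a BIOS pipe-control action, and the command table is invoked for every non-INIT request and, for INIT, only on controller 0. A reduced sketch of just that decision (local, hypothetical enums; not the driver's types):

	/* Illustrative sketch: the gating-request -> pipe-action mapping used above.
	 * Enums are local stand-ins, not the driver's types.
	 */
	#include <stdio.h>

	enum gating_request { GATING_INIT, GATING_ENABLE, GATING_DISABLE };
	enum pipe_action { PIPE_ACTION_INIT, PIPE_ACTION_ENABLE, PIPE_ACTION_DISABLE };

	static enum pipe_action pipe_action_for(enum gating_request req)
	{
		if (req == GATING_INIT)
			return PIPE_ACTION_INIT;
		if (req == GATING_ENABLE)
			return PIPE_ACTION_ENABLE;
		return PIPE_ACTION_DISABLE;
	}

	static int should_call_bios(enum gating_request req, int controller_id)
	{
		/* Non-INIT requests always go to the BIOS; INIT only on controller 0 */
		return req != GATING_INIT || controller_id == 0;
	}

	int main(void)
	{
		printf("action=%d call_bios=%d\n",
		       pipe_action_for(GATING_INIT), should_call_bios(GATING_INIT, 1));
		return 0;
	}
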
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
new file mode 100644
index 000000000000..d96c582da45c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
@@ -0,0 +1,36 @@
1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_HWSS_DCE112_H__
27#define __DC_HWSS_DCE112_H__
28
29#include "core_types.h"
30
31struct core_dc;
32
33bool dce112_hw_sequencer_construct(struct core_dc *dc);
34
35#endif /* __DC_HWSS_DCE112_H__ */
36
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.c
new file mode 100644
index 000000000000..c29007dafe21
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.c
@@ -0,0 +1,54 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26#include "dce112_mem_input.h"
27
28
29#include "dce/dce_11_2_d.h"
30#include "dce/dce_11_2_sh_mask.h"
31
32
33#define DCP_REG(reg) (reg + mem_input110->offsets.dcp)
34#define DMIF_REG(reg) (reg + mem_input110->offsets.dmif)
35#define PIPE_REG(reg) (reg + mem_input110->offsets.pipe)
36
37/*****************************************/
38/* Constructor, Destructor */
39/*****************************************/
40
41bool dce112_mem_input_construct(
42 struct dce110_mem_input *mem_input110,
43 struct dc_context *ctx,
44 uint32_t inst,
45 const struct dce110_mem_input_reg_offsets *offsets)
46{
47 if (!dce110_mem_input_construct(mem_input110, ctx, inst, offsets))
48 return false;
49
50 mem_input110->base.funcs->mem_input_program_display_marks =
51 dce_mem_input_program_display_marks;
52
53 return true;
54}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.h
new file mode 100644
index 000000000000..de2aaf0f9a8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_mem_input.h
@@ -0,0 +1,38 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_MEM_INPUT_DCE112_H__
26#define __DC_MEM_INPUT_DCE112_H__
27
28#include "mem_input.h"
29#include "dce110/dce110_mem_input.h"
30
31bool dce112_mem_input_construct(
32 struct dce110_mem_input *mem_input110,
33 struct dc_context *ctx,
34 uint32_t inst,
35 const struct dce110_mem_input_reg_offsets *offsets);
36
37
38#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.c
new file mode 100644
index 000000000000..23c2d1086b3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.c
@@ -0,0 +1,72 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE11.2 register header files */
29#include "dce/dce_11_2_d.h"
30#include "dce/dce_11_2_sh_mask.h"
31
32#include "dce112_opp.h"
33
34#include "gamma_types.h"
35
36enum {
37 MAX_LUT_ENTRY = 256,
38 MAX_NUMBER_OF_ENTRIES = 256
39};
40
41/*****************************************/
42/* Constructor, Destructor */
43/*****************************************/
44
45static struct opp_funcs funcs = {
46 .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut,
47 .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment,
48 .opp_set_csc_default = dce110_opp_set_csc_default,
49 .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
50 .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl,
51 .opp_set_regamma_mode = dce110_opp_set_regamma_mode,
52 .opp_destroy = dce110_opp_destroy,
53 .opp_program_fmt = dce112_opp_program_fmt,
54 .opp_program_bit_depth_reduction =
55 dce110_opp_program_bit_depth_reduction
56};
57
58bool dce112_opp_construct(struct dce110_opp *opp110,
59 struct dc_context *ctx,
60 uint32_t inst,
61 const struct dce110_opp_reg_offsets *offsets)
62{
63 opp110->base.funcs = &funcs;
64
65 opp110->base.ctx = ctx;
66
67 opp110->base.inst = inst;
68
69 opp110->offsets = *offsets;
70
71 return true;
72}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.h
new file mode 100644
index 000000000000..9443b87776c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp.h
@@ -0,0 +1,48 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_OPP_DCE112_H__
26#define __DC_OPP_DCE112_H__
27
28#include "dc_types.h"
29#include "opp.h"
30#include "../dce110/dce110_opp.h"
31#include "core_types.h"
32
33void dce112_opp_program_clamping_and_pixel_encoding(
34 struct output_pixel_processor *opp,
35 const struct clamping_and_pixel_encoding_params *params);
36
37void dce112_opp_program_fmt(
38 struct output_pixel_processor *opp,
39 struct bit_depth_reduction_params *fmt_bit_depth,
40 struct clamping_and_pixel_encoding_params *clamping);
41
42bool dce112_opp_construct(struct dce110_opp *opp110,
43 struct dc_context *ctx,
44 uint32_t inst,
45 const struct dce110_opp_reg_offsets *offsets);
46
47
48#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp_formatter.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp_formatter.c
new file mode 100644
index 000000000000..2d9072138834
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_opp_formatter.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_11_2_d.h"
29#include "dce/dce_11_2_sh_mask.h"
30
31#include "dce112_opp.h"
32
33#define FMT_REG(reg)\
34 (reg + opp110->offsets.fmt_offset)
35#define FMT_MEM_REG(reg)\
36 (reg + opp110->offsets.fmt_mem_offset)
37
38/**
39 * Set Clamping
40 * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
41 * 1 for 8 bpc
42 * 2 for 10 bpc
43 * 3 for 12 bpc
44 *				7 for programmable
45 * 2) Enable clamp if Limited range requested
46 */
47
48/**
49 * set_pixel_encoding
50 *
51 * Set Pixel Encoding
52 * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
53 * 1: YCbCr 4:2:2
54 * 2: YCbCr 4:2:0
55 */
56static void set_pixel_encoding(
57 struct dce110_opp *opp110,
58 const struct clamping_and_pixel_encoding_params *params)
59{
60 uint32_t fmt_cntl_value;
61 uint32_t addr = FMT_REG(mmFMT_CONTROL);
62
63 /*RGB 4:4:4 or YCbCr 4:4:4 - 0; YCbCr 4:2:2 -1.*/
64 fmt_cntl_value = dm_read_reg(opp110->base.ctx, addr);
65
66 set_reg_field_value(fmt_cntl_value,
67 0,
68 FMT_CONTROL,
69 FMT_PIXEL_ENCODING);
70
71 /*00 - Pixels drop mode HW default*/
72 set_reg_field_value(fmt_cntl_value,
73 0,
74 FMT_CONTROL,
75 FMT_SUBSAMPLING_MODE);
76
77 /* By default no bypass*/
78 set_reg_field_value(fmt_cntl_value,
79 0,
80 FMT_CONTROL,
81 FMT_CBCR_BIT_REDUCTION_BYPASS);
82
83 if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
84 set_reg_field_value(fmt_cntl_value,
85 1,
86 FMT_CONTROL,
87 FMT_PIXEL_ENCODING);
88
89 /*00 - Cb before Cr ,01 - Cr before Cb*/
90 set_reg_field_value(fmt_cntl_value,
91 0,
92 FMT_CONTROL,
93 FMT_SUBSAMPLING_ORDER);
94 }
95
96 if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
97 set_reg_field_value(fmt_cntl_value,
98 2,
99 FMT_CONTROL,
100 FMT_PIXEL_ENCODING);
101
102 /* 02 - Subsampling mode, 3 taps*/
103 set_reg_field_value(fmt_cntl_value,
104 2,
105 FMT_CONTROL,
106 FMT_SUBSAMPLING_MODE);
107
108		/* 01 - Enable CbCr bit reduction bypass to preserve precision */
109 set_reg_field_value(fmt_cntl_value,
110 1,
111 FMT_CONTROL,
112 FMT_CBCR_BIT_REDUCTION_BYPASS);
113 }
114 dm_write_reg(opp110->base.ctx, addr, fmt_cntl_value);
115
116}
117
118void dce112_opp_program_clamping_and_pixel_encoding(
119 struct output_pixel_processor *opp,
120 const struct clamping_and_pixel_encoding_params *params)
121{
122 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
123
124 dce110_opp_set_clamping(opp110, params);
125 set_pixel_encoding(opp110, params);
126}
127
128static void program_formatter_420_memory(struct output_pixel_processor *opp)
129{
130 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
131 uint32_t fmt_cntl_value;
132 uint32_t fmt_mem_cntl_value;
133 uint32_t fmt_cntl_addr = FMT_REG(mmFMT_CONTROL);
134 uint32_t fmt_mem_cntl_addr = FMT_MEM_REG(mmFMT_MEMORY0_CONTROL);
135
136 fmt_mem_cntl_value = dm_read_reg(opp110->base.ctx, fmt_mem_cntl_addr);
137 fmt_cntl_value = dm_read_reg(opp110->base.ctx, fmt_cntl_addr);
138 /* Program source select*/
139 /* Use HW default source select for FMT_MEMORYx_CONTROL */
140 /* Use that value for FMT_SRC_SELECT as well*/
141 set_reg_field_value(fmt_cntl_value,
142 get_reg_field_value(fmt_mem_cntl_value, FMT_MEMORY0_CONTROL, FMT420_MEM0_SOURCE_SEL),
143 FMT_CONTROL,
144 FMT_SRC_SELECT);
145 dm_write_reg(opp110->base.ctx, fmt_cntl_addr, fmt_cntl_value);
146
147 /* Turn on the memory */
148 set_reg_field_value(fmt_mem_cntl_value,
149 0,
150 FMT_MEMORY0_CONTROL,
151 FMT420_MEM0_PWR_FORCE);
152 dm_write_reg(opp110->base.ctx, fmt_mem_cntl_addr, fmt_mem_cntl_value);
153}
154
155static void program_formatter_reset_dig_resync_fifo(struct output_pixel_processor *opp)
156{
157 struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
158 uint32_t value;
159 uint32_t addr = FMT_REG(mmFMT_CONTROL);
160 uint8_t counter = 10;
161
162
163 value = dm_read_reg(opp110->base.ctx, addr);
164
165 /* clear previous phase lock status*/
166 set_reg_field_value(value,
167 1,
168 FMT_CONTROL,
169 FMT_420_PIXEL_PHASE_LOCKED_CLEAR);
170 dm_write_reg(opp110->base.ctx, addr, value);
171
172 /* poll until FMT_420_PIXEL_PHASE_LOCKED become 1*/
173 while (counter > 0) {
174 value = dm_read_reg(opp110->base.ctx, addr);
175
176 if (get_reg_field_value(
177 value,
178 FMT_CONTROL,
179 FMT_420_PIXEL_PHASE_LOCKED) == 1)
180 break;
181
182 msleep(10);
183 counter--;
184 }
185
186 if (counter == 0)
187 dm_logger_write(opp->ctx->logger, LOG_ERROR,
188			"%s: OPP program formatter reset DIG resync FIFO timed out.\n",
189 __func__);
190}
191
192void dce112_opp_program_fmt(
193 struct output_pixel_processor *opp,
194 struct bit_depth_reduction_params *fmt_bit_depth,
195 struct clamping_and_pixel_encoding_params *clamping)
196{
197 /* dithering is affected by <CrtcSourceSelect>, hence should be
198 * programmed afterwards */
199
200 if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
201 program_formatter_420_memory(opp);
202
203 dce110_opp_program_bit_depth_reduction(
204 opp,
205 fmt_bit_depth);
206
207 dce112_opp_program_clamping_and_pixel_encoding(
208 opp,
209 clamping);
210
211 if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
212 program_formatter_reset_dig_resync_fifo(opp);
213
214 return;
215}
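
The formatter programming above boils down to three FMT_CONTROL fields per pixel encoding: FMT_PIXEL_ENCODING (0 = 4:4:4, 1 = 4:2:2, 2 = 4:2:0), FMT_SUBSAMPLING_MODE and FMT_CBCR_BIT_REDUCTION_BYPASS, with the 4:2:0 path additionally enabling the FMT 4:2:0 memory and resetting the DIG resync FIFO. A standalone sketch of the field selection (hypothetical local names; not driver code):

	/* Illustrative sketch: FMT_CONTROL field values chosen by set_pixel_encoding()
	 * above for each pixel encoding. Names are local stand-ins, not driver types.
	 */
	#include <stdio.h>

	enum pixel_encoding { ENC_RGB444, ENC_YCBCR422, ENC_YCBCR420 };

	struct fmt_fields {
		int pixel_encoding;   /* FMT_PIXEL_ENCODING */
		int subsampling_mode; /* FMT_SUBSAMPLING_MODE */
		int cbcr_bypass;      /* FMT_CBCR_BIT_REDUCTION_BYPASS */
	};

	static struct fmt_fields fmt_fields_for(enum pixel_encoding enc)
	{
		struct fmt_fields f = { 0, 0, 0 }; /* defaults: 4:4:4, drop mode */

		if (enc == ENC_YCBCR422)
			f.pixel_encoding = 1;
		if (enc == ENC_YCBCR420) {
			f.pixel_encoding = 2;
			f.subsampling_mode = 2; /* 3-tap subsampling */
			f.cbcr_bypass = 1;      /* preserve CbCr precision */
		}
		return f;
	}

	int main(void)
	{
		struct fmt_fields f = fmt_fields_for(ENC_YCBCR420);

		printf("enc=%d sub=%d bypass=%d\n",
		       f.pixel_encoding, f.subsampling_mode, f.cbcr_bypass);
		return 0;
	}
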
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
new file mode 100644
index 000000000000..bfb2c3fcd2cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -0,0 +1,1418 @@
1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "link_encoder.h"
29#include "stream_encoder.h"
30
31#include "resource.h"
32#include "include/irq_service_interface.h"
33#include "dce110/dce110_resource.h"
34#include "dce110/dce110_timing_generator.h"
35#include "dce112/dce112_mem_input.h"
36
37#include "irq/dce110/irq_service_dce110.h"
38#include "dce/dce_transform.h"
39#include "dce/dce_link_encoder.h"
40#include "dce/dce_stream_encoder.h"
41#include "dce/dce_audio.h"
42#include "dce112/dce112_opp.h"
43#include "dce110/dce110_ipp.h"
44#include "dce/dce_clock_source.h"
45
46#include "dce/dce_hwseq.h"
47#include "dce112/dce112_hw_sequencer.h"
48
49#include "reg_helper.h"
50
51#include "dce/dce_11_2_d.h"
52#include "dce/dce_11_2_sh_mask.h"
53
54#ifndef mmDP_DPHY_INTERNAL_CTRL
55 #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
56 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
57 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
58 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
59 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
60 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
61 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
62 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
63 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
64 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
65#endif
66
67#ifndef mmBIOS_SCRATCH_2
68 #define mmBIOS_SCRATCH_2 0x05CB
69 #define mmBIOS_SCRATCH_6 0x05CF
70#endif
71
72#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
73 #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
74 #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
75 #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
76 #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
77 #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
78 #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
79 #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
80 #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
81#endif
82
83#ifndef mmDP_DPHY_FAST_TRAINING
84 #define mmDP_DPHY_FAST_TRAINING 0x4ABC
85 #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
86 #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
87 #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
88 #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
89 #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
90 #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
91 #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
92#endif
93
94enum dce112_clk_src_array_id {
95 DCE112_CLK_SRC_PLL0,
96 DCE112_CLK_SRC_PLL1,
97 DCE112_CLK_SRC_PLL2,
98 DCE112_CLK_SRC_PLL3,
99 DCE112_CLK_SRC_PLL4,
100 DCE112_CLK_SRC_PLL5,
101
102 DCE112_CLK_SRC_TOTAL
103};
104
105static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
106 {
107 .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
108 .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
109 },
110 {
111 .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
112 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
113 },
114 {
115 .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
116 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
117 },
118 {
119 .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
120 .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
121 },
122 {
123 .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
124 .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
125 },
126 {
127 .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
128 .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
129 }
130};
131
132static const struct dce110_mem_input_reg_offsets dce112_mi_reg_offsets[] = {
133 {
134 .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
135 .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
136 - mmDPG_WATERMARK_MASK_CONTROL),
137 .pipe = (mmPIPE0_DMIF_BUFFER_CONTROL
138 - mmPIPE0_DMIF_BUFFER_CONTROL),
139 },
140 {
141 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
142 .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
143 - mmDPG_WATERMARK_MASK_CONTROL),
144 .pipe = (mmPIPE1_DMIF_BUFFER_CONTROL
145 - mmPIPE0_DMIF_BUFFER_CONTROL),
146 },
147 {
148 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
149 .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
150 - mmDPG_WATERMARK_MASK_CONTROL),
151 .pipe = (mmPIPE2_DMIF_BUFFER_CONTROL
152 - mmPIPE0_DMIF_BUFFER_CONTROL),
153 },
154 {
155 .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
156 .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
157 - mmDPG_WATERMARK_MASK_CONTROL),
158 .pipe = (mmPIPE3_DMIF_BUFFER_CONTROL
159 - mmPIPE0_DMIF_BUFFER_CONTROL),
160 },
161 {
162 .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
163 .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
164 - mmDPG_WATERMARK_MASK_CONTROL),
165 .pipe = (mmPIPE4_DMIF_BUFFER_CONTROL
166 - mmPIPE0_DMIF_BUFFER_CONTROL),
167 },
168 {
169 .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
170 .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
171 - mmDPG_WATERMARK_MASK_CONTROL),
172 .pipe = (mmPIPE5_DMIF_BUFFER_CONTROL
173 - mmPIPE0_DMIF_BUFFER_CONTROL),
174 }
175};
176
177static const struct dce110_ipp_reg_offsets ipp_reg_offsets[] = {
178{
179 .dcp_offset = (mmDCP0_CUR_CONTROL - mmCUR_CONTROL),
180},
181{
182 .dcp_offset = (mmDCP1_CUR_CONTROL - mmCUR_CONTROL),
183},
184{
185 .dcp_offset = (mmDCP2_CUR_CONTROL - mmCUR_CONTROL),
186},
187{
188 .dcp_offset = (mmDCP3_CUR_CONTROL - mmCUR_CONTROL),
189},
190{
191 .dcp_offset = (mmDCP4_CUR_CONTROL - mmCUR_CONTROL),
192},
193{
194 .dcp_offset = (mmDCP5_CUR_CONTROL - mmCUR_CONTROL),
195}
196};
197
198
199/* set register offset */
200#define SR(reg_name)\
201 .reg_name = mm ## reg_name
202
203/* set register offset with instance */
204#define SRI(reg_name, block, id)\
205 .reg_name = mm ## block ## id ## _ ## reg_name
206
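As a minimal sketch of how these token-pasting helpers expand (the instance index is chosen only for illustration; both register names appear elsewhere in this file):

/* SR(BIOS_SCRATCH_6)        expands to  .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
 * SRI(FMT_CONTROL, FMT, 2)  expands to  .FMT_CONTROL    = mmFMT2_FMT_CONTROL
 * so each per-instance register struct ends up holding the absolute
 * register offsets for that block instance.
 */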
207#define transform_regs(id)\
208[id] = {\
209 XFM_COMMON_REG_LIST_DCE110(id)\
210}
211
212static const struct dce_transform_registers xfm_regs[] = {
213 transform_regs(0),
214 transform_regs(1),
215 transform_regs(2),
216 transform_regs(3),
217 transform_regs(4),
218 transform_regs(5)
219};
220
221static const struct dce_transform_shift xfm_shift = {
222 XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
223};
224
225static const struct dce_transform_mask xfm_mask = {
226 XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
227};
228
229#define aux_regs(id)\
230[id] = {\
231 AUX_REG_LIST(id)\
232}
233
234static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
235 aux_regs(0),
236 aux_regs(1),
237 aux_regs(2),
238 aux_regs(3),
239 aux_regs(4),
240 aux_regs(5)
241};
242
243#define hpd_regs(id)\
244[id] = {\
245 HPD_REG_LIST(id)\
246}
247
248static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
249 hpd_regs(0),
250 hpd_regs(1),
251 hpd_regs(2),
252 hpd_regs(3),
253 hpd_regs(4),
254 hpd_regs(5)
255};
256
257#define link_regs(id)\
258[id] = {\
259 LE_DCE110_REG_LIST(id)\
260}
261
262static const struct dce110_link_enc_registers link_enc_regs[] = {
263 link_regs(0),
264 link_regs(1),
265 link_regs(2),
266 link_regs(3),
267 link_regs(4),
268 link_regs(5),
269 link_regs(6),
270};
271
272#define stream_enc_regs(id)\
273[id] = {\
274 SE_COMMON_REG_LIST(id),\
275 .TMDS_CNTL = 0,\
276}
277
278static const struct dce110_stream_enc_registers stream_enc_regs[] = {
279 stream_enc_regs(0),
280 stream_enc_regs(1),
281 stream_enc_regs(2),
282 stream_enc_regs(3),
283 stream_enc_regs(4),
284 stream_enc_regs(5)
285};
286
287static const struct dce_stream_encoder_shift se_shift = {
288 SE_COMMON_MASK_SH_LIST_DCE112(__SHIFT)
289};
290
291static const struct dce_stream_encoder_mask se_mask = {
292 SE_COMMON_MASK_SH_LIST_DCE112(_MASK)
293};
294
295#define audio_regs(id)\
296[id] = {\
297 AUD_COMMON_REG_LIST(id)\
298}
299
300static const struct dce_audio_registers audio_regs[] = {
301 audio_regs(0),
302 audio_regs(1),
303 audio_regs(2),
304 audio_regs(3),
305 audio_regs(4),
306 audio_regs(5)
307};
308
309static const struct dce_audio_shift audio_shift = {
310 AUD_COMMON_MASK_SH_LIST(__SHIFT)
311};
312
313static const struct dce_aduio_mask audio_mask = {
314 AUD_COMMON_MASK_SH_LIST(_MASK)
315};
316
317
318static const struct dce110_opp_reg_offsets dce112_opp_reg_offsets[] = {
319{
320 .fmt_offset = (mmFMT0_FMT_CONTROL - mmFMT0_FMT_CONTROL),
321 .fmt_mem_offset = (mmFMT_MEMORY0_CONTROL - mmFMT_MEMORY0_CONTROL),
322 .dcfe_offset = (mmDCFE0_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
323 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
324},
325{ .fmt_offset = (mmFMT1_FMT_CONTROL - mmFMT0_FMT_CONTROL),
326 .fmt_mem_offset = (mmFMT_MEMORY1_CONTROL - mmFMT_MEMORY0_CONTROL),
327 .dcfe_offset = (mmDCFE1_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
328 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
329},
330{ .fmt_offset = (mmFMT2_FMT_CONTROL - mmFMT0_FMT_CONTROL),
331 .fmt_mem_offset = (mmFMT_MEMORY2_CONTROL - mmFMT_MEMORY0_CONTROL),
332 .dcfe_offset = (mmDCFE2_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
333 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
334},
335{
336 .fmt_offset = (mmFMT3_FMT_CONTROL - mmFMT0_FMT_CONTROL),
337 .fmt_mem_offset = (mmFMT_MEMORY3_CONTROL - mmFMT_MEMORY0_CONTROL),
338 .dcfe_offset = (mmDCFE3_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
339 .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
340},
341{ .fmt_offset = (mmFMT4_FMT_CONTROL - mmFMT0_FMT_CONTROL),
342 .fmt_mem_offset = (mmFMT_MEMORY4_CONTROL - mmFMT_MEMORY0_CONTROL),
343 .dcfe_offset = (mmDCFE4_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
344 .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
345},
346{ .fmt_offset = (mmFMT5_FMT_CONTROL - mmFMT0_FMT_CONTROL),
347 .fmt_mem_offset = (mmFMT_MEMORY5_CONTROL - mmFMT_MEMORY0_CONTROL),
348 .dcfe_offset = (mmDCFE5_DCFE_MEM_PWR_CTRL - mmDCFE0_DCFE_MEM_PWR_CTRL),
349 .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
350}
351};
352
353#define clk_src_regs(index, id)\
354[index] = {\
355 CS_COMMON_REG_LIST_DCE_112(id),\
356}
357
358static const struct dce110_clk_src_regs clk_src_regs[] = {
359 clk_src_regs(0, A),
360 clk_src_regs(1, B),
361 clk_src_regs(2, C),
362 clk_src_regs(3, D),
363 clk_src_regs(4, E),
364 clk_src_regs(5, F)
365};
366
367static const struct dce110_clk_src_shift cs_shift = {
368 CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
369};
370
371static const struct dce110_clk_src_mask cs_mask = {
372 CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
373};
374
375static const struct bios_registers bios_regs = {
376 .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
377};
378
379static const struct resource_caps polaris_10_resource_cap = {
380 .num_timing_generator = 6,
381 .num_audio = 6,
382 .num_stream_encoder = 6,
383 .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
384};
385
386static const struct resource_caps polaris_11_resource_cap = {
387 .num_timing_generator = 5,
388 .num_audio = 5,
389 .num_stream_encoder = 5,
390 .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
391};
392
393#define CTX ctx
394#define REG(reg) mm ## reg
395
396#ifndef mmCC_DC_HDMI_STRAPS
397#define mmCC_DC_HDMI_STRAPS 0x4819
398#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
399#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
400#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
401#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
402#endif
403
404static void read_dce_straps(
405 struct dc_context *ctx,
406 struct resource_straps *straps)
407{
408 REG_GET_2(CC_DC_HDMI_STRAPS,
409 HDMI_DISABLE, &straps->hdmi_disable,
410 AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
411
412 REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
413}
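A rough open-coded equivalent of the first call above, sketched with the read/extract helpers and the CC_DC_HDMI_STRAPS mask/shift defines from this file; this is an approximation of what the REG_GET helpers do, not their exact expansion:

uint32_t value = dm_read_reg(ctx, mmCC_DC_HDMI_STRAPS);

straps->hdmi_disable = get_reg_field_value(
	value, CC_DC_HDMI_STRAPS, HDMI_DISABLE);
straps->audio_stream_number = get_reg_field_value(
	value, CC_DC_HDMI_STRAPS, AUDIO_STREAM_NUMBER);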
414
415static struct audio *create_audio(
416 struct dc_context *ctx, unsigned int inst)
417{
418 return dce_audio_create(ctx, inst,
419 &audio_regs[inst], &audio_shift, &audio_mask);
420}
421
422
423static struct timing_generator *dce112_timing_generator_create(
424 struct dc_context *ctx,
425 uint32_t instance,
426 const struct dce110_timing_generator_offsets *offsets)
427{
428 struct dce110_timing_generator *tg110 =
429 dm_alloc(sizeof(struct dce110_timing_generator));
430
431 if (!tg110)
432 return NULL;
433
434 if (dce110_timing_generator_construct(tg110, ctx, instance, offsets))
435 return &tg110->base;
436
437 BREAK_TO_DEBUGGER();
438 dm_free(tg110);
439 return NULL;
440}
441
442static struct stream_encoder *dce112_stream_encoder_create(
443 enum engine_id eng_id,
444 struct dc_context *ctx)
445{
446 struct dce110_stream_encoder *enc110 =
447 dm_alloc(sizeof(struct dce110_stream_encoder));
448
449 if (!enc110)
450 return NULL;
451
452 if (dce110_stream_encoder_construct(
453 enc110, ctx, ctx->dc_bios, eng_id,
454 &stream_enc_regs[eng_id], &se_shift, &se_mask))
455 return &enc110->base;
456
457 BREAK_TO_DEBUGGER();
458 dm_free(enc110);
459 return NULL;
460}
461
462#define SRII(reg_name, block, id)\
463 .reg_name[id] = mm ## block ## id ## _ ## reg_name
464
465static const struct dce_hwseq_registers hwseq_reg = {
466 HWSEQ_DCE112_REG_LIST()
467};
468
469static const struct dce_hwseq_shift hwseq_shift = {
470 HWSEQ_DCE112_MASK_SH_LIST(__SHIFT)
471};
472
473static const struct dce_hwseq_mask hwseq_mask = {
474 HWSEQ_DCE112_MASK_SH_LIST(_MASK)
475};
476
477static struct dce_hwseq *dce112_hwseq_create(
478 struct dc_context *ctx)
479{
480 struct dce_hwseq *hws = dm_alloc(sizeof(struct dce_hwseq));
481
482 if (hws) {
483 hws->ctx = ctx;
484 hws->regs = &hwseq_reg;
485 hws->shifts = &hwseq_shift;
486 hws->masks = &hwseq_mask;
487 }
488 return hws;
489}
490
491static const struct resource_create_funcs res_create_funcs = {
492 .read_dce_straps = read_dce_straps,
493 .create_audio = create_audio,
494 .create_stream_encoder = dce112_stream_encoder_create,
495 .create_hwseq = dce112_hwseq_create,
496};
497
498#define mi_inst_regs(id) { MI_REG_LIST(id) }
499static const struct dce_mem_input_registers mi_regs[] = {
500 mi_inst_regs(0),
501 mi_inst_regs(1),
502 mi_inst_regs(2),
503 mi_inst_regs(3),
504 mi_inst_regs(4),
505 mi_inst_regs(5),
506};
507
508static const struct dce_mem_input_shift mi_shifts = {
509 MI_DCE_MASK_SH_LIST(__SHIFT)
510};
511
512static const struct dce_mem_input_mask mi_masks = {
513 MI_DCE_MASK_SH_LIST(_MASK)
514};
515
516static struct mem_input *dce112_mem_input_create(
517 struct dc_context *ctx,
518 uint32_t inst,
519 const struct dce110_mem_input_reg_offsets *offset)
520{
521 struct dce110_mem_input *mem_input110 =
522 dm_alloc(sizeof(struct dce110_mem_input));
523
524 if (!mem_input110)
525 return NULL;
526
527 if (dce112_mem_input_construct(mem_input110, ctx, inst, offset)) {
528 struct mem_input *mi = &mem_input110->base;
529
530 mi->regs = &mi_regs[inst];
531 mi->shifts = &mi_shifts;
532 mi->masks = &mi_masks;
533 return mi;
534 }
535
536 BREAK_TO_DEBUGGER();
537 dm_free(mem_input110);
538 return NULL;
539}
540
541static void dce112_transform_destroy(struct transform **xfm)
542{
543 dm_free(TO_DCE_TRANSFORM(*xfm));
544 *xfm = NULL;
545}
546
547static struct transform *dce112_transform_create(
548 struct dc_context *ctx,
549 uint32_t inst)
550{
551 struct dce_transform *transform =
552 dm_alloc(sizeof(struct dce_transform));
553
554 if (!transform)
555 return NULL;
556
557 if (dce_transform_construct(transform, ctx, inst,
558 &xfm_regs[inst], &xfm_shift, &xfm_mask)) {
559 transform->lb_memory_size = 0x1404; /*5124*/
560 return &transform->base;
561 }
562
563 BREAK_TO_DEBUGGER();
564 dm_free(transform);
565 return NULL;
566}
567struct link_encoder *dce112_link_encoder_create(
568 const struct encoder_init_data *enc_init_data)
569{
570 struct dce110_link_encoder *enc110 =
571 dm_alloc(sizeof(struct dce110_link_encoder));
572
573 if (!enc110)
574 return NULL;
575
576 if (dce110_link_encoder_construct(
577 enc110,
578 enc_init_data,
579 &link_enc_regs[enc_init_data->transmitter],
580 &link_enc_aux_regs[enc_init_data->channel - 1],
581 &link_enc_hpd_regs[enc_init_data->hpd_source])) {
582
583 enc110->base.features.ycbcr420_supported = false;
584 enc110->base.features.max_hdmi_pixel_clock = 600000;
585 return &enc110->base;
586 }
587
588 BREAK_TO_DEBUGGER();
589 dm_free(enc110);
590 return NULL;
591}
592
593struct input_pixel_processor *dce112_ipp_create(
594 struct dc_context *ctx,
595 uint32_t inst,
596 const struct dce110_ipp_reg_offsets *offset)
597{
598 struct dce110_ipp *ipp =
599 dm_alloc(sizeof(struct dce110_ipp));
600
601 if (!ipp)
602 return NULL;
603
604 if (dce110_ipp_construct(ipp, ctx, inst, offset))
605 return &ipp->base;
606
607 BREAK_TO_DEBUGGER();
608 dm_free(ipp);
609 return NULL;
610}
611
612void dce112_ipp_destroy(struct input_pixel_processor **ipp)
613{
614 dm_free(TO_DCE110_IPP(*ipp));
615 *ipp = NULL;
616}
617
618struct output_pixel_processor *dce112_opp_create(
619 struct dc_context *ctx,
620 uint32_t inst,
621 const struct dce110_opp_reg_offsets *offset)
622{
623 struct dce110_opp *opp =
624 dm_alloc(sizeof(struct dce110_opp));
625
626 if (!opp)
627 return NULL;
628
629 if (dce112_opp_construct(opp,
630 ctx, inst, offset))
631 return &opp->base;
632
633 BREAK_TO_DEBUGGER();
634 dm_free(opp);
635 return NULL;
636}
637
638void dce112_opp_destroy(struct output_pixel_processor **opp)
639{
640 struct dce110_opp *dce110_opp;
641
642 if (!opp || !*opp)
643 return;
644
645 dce110_opp = FROM_DCE11_OPP(*opp);
646
647 dm_free(dce110_opp->regamma.coeff128_dx);
648 dm_free(dce110_opp->regamma.coeff128_oem);
649 dm_free(dce110_opp->regamma.coeff128);
650 dm_free(dce110_opp->regamma.axis_x_1025);
651 dm_free(dce110_opp->regamma.axis_x_256);
652 dm_free(dce110_opp->regamma.coordinates_x);
653 dm_free(dce110_opp->regamma.rgb_regamma);
654 dm_free(dce110_opp->regamma.rgb_resulted);
655 dm_free(dce110_opp->regamma.rgb_oem);
656 dm_free(dce110_opp->regamma.rgb_user);
657
658 dm_free(dce110_opp);
659 *opp = NULL;
660}
661
662struct clock_source *dce112_clock_source_create(
663 struct dc_context *ctx,
664 struct dc_bios *bios,
665 enum clock_source_id id,
666 const struct dce110_clk_src_regs *regs,
667 bool dp_clk_src)
668{
669 struct dce110_clk_src *clk_src =
670 dm_alloc(sizeof(struct dce110_clk_src));
671
672 if (!clk_src)
673 return NULL;
674
675 if (dce110_clk_src_construct(clk_src, ctx, bios, id,
676 regs, &cs_shift, &cs_mask)) {
677 clk_src->base.dp_clk_src = dp_clk_src;
678 return &clk_src->base;
679 }
680
681 BREAK_TO_DEBUGGER();
682 return NULL;
683}
684
685void dce112_clock_source_destroy(struct clock_source **clk_src)
686{
687 dm_free(TO_DCE110_CLK_SRC(*clk_src));
688 *clk_src = NULL;
689}
690
691static void destruct(struct dce110_resource_pool *pool)
692{
693 unsigned int i;
694
695 for (i = 0; i < pool->base.pipe_count; i++) {
696 if (pool->base.opps[i] != NULL)
697 dce112_opp_destroy(&pool->base.opps[i]);
698
699 if (pool->base.transforms[i] != NULL)
700 dce112_transform_destroy(&pool->base.transforms[i]);
701
702 if (pool->base.ipps[i] != NULL)
703 dce112_ipp_destroy(&pool->base.ipps[i]);
704
705 if (pool->base.mis[i] != NULL) {
706 dm_free(TO_DCE110_MEM_INPUT(pool->base.mis[i]));
707 pool->base.mis[i] = NULL;
708 }
709
710 if (pool->base.timing_generators[i] != NULL) {
711 dm_free(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
712 pool->base.timing_generators[i] = NULL;
713 }
714 }
715
716 for (i = 0; i < pool->base.stream_enc_count; i++) {
717 if (pool->base.stream_enc[i] != NULL)
718 dm_free(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
719 }
720
721 for (i = 0; i < pool->base.clk_src_count; i++) {
722 if (pool->base.clock_sources[i] != NULL) {
723 dce112_clock_source_destroy(&pool->base.clock_sources[i]);
724 }
725 }
726
727 if (pool->base.dp_clock_source != NULL)
728 dce112_clock_source_destroy(&pool->base.dp_clock_source);
729
730 for (i = 0; i < pool->base.audio_count; i++) {
731 if (pool->base.audios[i] != NULL) {
732 dce_aud_destroy(&pool->base.audios[i]);
733 }
734 }
735
736 if (pool->base.display_clock != NULL) {
737 dal_display_clock_destroy(&pool->base.display_clock);
738 }
739
740 if (pool->base.irqs != NULL) {
741 dal_irq_service_destroy(&pool->base.irqs);
742 }
743}
744
745static struct clock_source *find_matching_pll(struct resource_context *res_ctx,
746 const struct core_stream *const stream)
747{
748 switch (stream->sink->link->link_enc->transmitter) {
749 case TRANSMITTER_UNIPHY_A:
750 return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL0];
751 case TRANSMITTER_UNIPHY_B:
752 return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL1];
753 case TRANSMITTER_UNIPHY_C:
754 return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL2];
755 case TRANSMITTER_UNIPHY_D:
756 return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL3];
757 case TRANSMITTER_UNIPHY_E:
758 return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL4];
759 case TRANSMITTER_UNIPHY_F:
760 return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL5];
761 default:
762 return NULL;
763 }
764
765 return NULL;
766}
767
768static enum dc_status validate_mapped_resource(
769 const struct core_dc *dc,
770 struct validate_context *context)
771{
772 enum dc_status status = DC_OK;
773 uint8_t i, j, k;
774
775 for (i = 0; i < context->target_count; i++) {
776 struct core_target *target = context->targets[i];
777
778 for (j = 0; j < target->public.stream_count; j++) {
779 struct core_stream *stream =
780 DC_STREAM_TO_CORE(target->public.streams[j]);
781 struct core_link *link = stream->sink->link;
782
783 if (resource_is_stream_unchanged(dc->current_context, stream))
784 continue;
785
786 for (k = 0; k < MAX_PIPES; k++) {
787 struct pipe_ctx *pipe_ctx =
788 &context->res_ctx.pipe_ctx[k];
789
790 if (context->res_ctx.pipe_ctx[k].stream != stream)
791 continue;
792
793 if (!pipe_ctx->tg->funcs->validate_timing(
794 pipe_ctx->tg, &stream->public.timing))
795 return DC_FAIL_CONTROLLER_VALIDATE;
796
797 status = dce110_resource_build_pipe_hw_param(pipe_ctx);
798
799 if (status != DC_OK)
800 return status;
801
802 if (!link->link_enc->funcs->validate_output_with_stream(
803 link->link_enc,
804 pipe_ctx))
805 return DC_FAIL_ENC_VALIDATE;
806
807 /* TODO: validate audio ASIC caps, encoder */
808
809 status = dc_link_validate_mode_timing(stream,
810 link,
811 &stream->public.timing);
812
813 if (status != DC_OK)
814 return status;
815
816 resource_build_info_frame(pipe_ctx);
817
818 /* no need to validate non-root pipes */
819 break;
820 }
821 }
822 }
823
824 return DC_OK;
825}
826
827enum dc_status dce112_validate_bandwidth(
828 const struct core_dc *dc,
829 struct validate_context *context)
830{
831 enum dc_status result = DC_ERROR_UNEXPECTED;
832
833 dm_logger_write(
834 dc->ctx->logger, LOG_BANDWIDTH_CALCS,
835 "%s: start",
836 __func__);
837
838 if (!bw_calcs(
839 dc->ctx,
840 &dc->bw_dceip,
841 &dc->bw_vbios,
842 context->res_ctx.pipe_ctx,
843 context->res_ctx.pool->pipe_count,
844 &context->bw_results))
845 result = DC_FAIL_BANDWIDTH_VALIDATE;
846 else
847 result = DC_OK;
848
849 if (result == DC_FAIL_BANDWIDTH_VALIDATE)
850 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
851 "%s: Bandwidth validation failed!",
852 __func__);
853
854 if (memcmp(&dc->current_context->bw_results,
855 &context->bw_results, sizeof(context->bw_results))) {
856 struct log_entry log_entry;
857 dm_logger_open(
858 dc->ctx->logger,
859 &log_entry,
860 LOG_BANDWIDTH_CALCS);
861 dm_logger_append(&log_entry, "%s: finish,\n"
862 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
863 "stutMark_b: %d stutMark_a: %d\n",
864 __func__,
865 context->bw_results.nbp_state_change_wm_ns[0].b_mark,
866 context->bw_results.nbp_state_change_wm_ns[0].a_mark,
867 context->bw_results.urgent_wm_ns[0].b_mark,
868 context->bw_results.urgent_wm_ns[0].a_mark,
869 context->bw_results.stutter_exit_wm_ns[0].b_mark,
870 context->bw_results.stutter_exit_wm_ns[0].a_mark);
871 dm_logger_append(&log_entry,
872 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
873 "stutMark_b: %d stutMark_a: %d\n",
874 context->bw_results.nbp_state_change_wm_ns[1].b_mark,
875 context->bw_results.nbp_state_change_wm_ns[1].a_mark,
876 context->bw_results.urgent_wm_ns[1].b_mark,
877 context->bw_results.urgent_wm_ns[1].a_mark,
878 context->bw_results.stutter_exit_wm_ns[1].b_mark,
879 context->bw_results.stutter_exit_wm_ns[1].a_mark);
880 dm_logger_append(&log_entry,
881 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
882 "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
883 context->bw_results.nbp_state_change_wm_ns[2].b_mark,
884 context->bw_results.nbp_state_change_wm_ns[2].a_mark,
885 context->bw_results.urgent_wm_ns[2].b_mark,
886 context->bw_results.urgent_wm_ns[2].a_mark,
887 context->bw_results.stutter_exit_wm_ns[2].b_mark,
888 context->bw_results.stutter_exit_wm_ns[2].a_mark,
889 context->bw_results.stutter_mode_enable);
890 dm_logger_append(&log_entry,
891 "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
892 "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
893 context->bw_results.cpuc_state_change_enable,
894 context->bw_results.cpup_state_change_enable,
895 context->bw_results.nbp_state_change_enable,
896 context->bw_results.all_displays_in_sync,
897 context->bw_results.dispclk_khz,
898 context->bw_results.required_sclk,
899 context->bw_results.required_sclk_deep_sleep,
900 context->bw_results.required_yclk,
901 context->bw_results.blackout_recovery_time_us);
902 dm_logger_close(&log_entry);
903 }
904 return result;
905}
906
907enum dc_status resource_map_phy_clock_resources(
908 const struct core_dc *dc,
909 struct validate_context *context)
910{
911 uint8_t i, j, k;
912
913 /* acquire new resources */
914 for (i = 0; i < context->target_count; i++) {
915 struct core_target *target = context->targets[i];
916
917 for (j = 0; j < target->public.stream_count; j++) {
918 struct core_stream *stream =
919 DC_STREAM_TO_CORE(target->public.streams[j]);
920
921 if (resource_is_stream_unchanged(dc->current_context, stream))
922 continue;
923
924 for (k = 0; k < MAX_PIPES; k++) {
925 struct pipe_ctx *pipe_ctx =
926 &context->res_ctx.pipe_ctx[k];
927
928 if (context->res_ctx.pipe_ctx[k].stream != stream)
929 continue;
930
931 if (dc_is_dp_signal(pipe_ctx->stream->signal)
932 || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
933 pipe_ctx->clock_source =
934 context->res_ctx.pool->dp_clock_source;
935 else
936 pipe_ctx->clock_source =
937 find_matching_pll(&context->res_ctx,
938 stream);
939
940 if (pipe_ctx->clock_source == NULL)
941 return DC_NO_CLOCK_SOURCE_RESOURCE;
942
943 resource_reference_clock_source(
944 &context->res_ctx,
945 pipe_ctx->clock_source);
946
947 /* only one clock source per stream regardless of MPO */
948 break;
949 }
950 }
951 }
952
953 return DC_OK;
954}
955
956static bool dce112_validate_surface_sets(
957 const struct dc_validation_set set[],
958 int set_count)
959{
960 int i;
961
962 for (i = 0; i < set_count; i++) {
963 if (set[i].surface_count == 0)
964 continue;
965
966 if (set[i].surface_count > 1)
967 return false;
968
969 if (set[i].surfaces[0]->clip_rect.width
970 != set[i].target->streams[0]->src.width
971 || set[i].surfaces[0]->clip_rect.height
972 != set[i].target->streams[0]->src.height)
973 return false;
974 if (set[i].surfaces[0]->format
975 >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
976 return false;
977 }
978
979 return true;
980}
981
982enum dc_status dce112_validate_with_context(
983 const struct core_dc *dc,
984 const struct dc_validation_set set[],
985 int set_count,
986 struct validate_context *context)
987{
988 struct dc_context *dc_ctx = dc->ctx;
989 enum dc_status result = DC_ERROR_UNEXPECTED;
990 int i;
991
992 if (!dce112_validate_surface_sets(set, set_count))
993 return DC_FAIL_SURFACE_VALIDATE;
994
995 context->res_ctx.pool = dc->res_pool;
996
997 for (i = 0; i < set_count; i++) {
998 context->targets[i] = DC_TARGET_TO_CORE(set[i].target);
999 dc_target_retain(&context->targets[i]->public);
1000 context->target_count++;
1001 }
1002
1003 result = resource_map_pool_resources(dc, context);
1004
1005 if (result == DC_OK)
1006 result = resource_map_phy_clock_resources(dc, context);
1007
1008 if (!resource_validate_attach_surfaces(
1009 set, set_count, dc->current_context, context)) {
1010 DC_ERROR("Failed to attach surface to target!\n");
1011 return DC_FAIL_ATTACH_SURFACES;
1012 }
1013
1014 if (result == DC_OK)
1015 result = validate_mapped_resource(dc, context);
1016
1017 if (result == DC_OK)
1018 result = resource_build_scaling_params_for_context(dc, context);
1019
1020 if (result == DC_OK)
1021 result = dce112_validate_bandwidth(dc, context);
1022
1023 return result;
1024}
1025
1026enum dc_status dce112_validate_guaranteed(
1027 const struct core_dc *dc,
1028 const struct dc_target *dc_target,
1029 struct validate_context *context)
1030{
1031 enum dc_status result = DC_ERROR_UNEXPECTED;
1032
1033 context->res_ctx.pool = dc->res_pool;
1034
1035 context->targets[0] = DC_TARGET_TO_CORE(dc_target);
1036 dc_target_retain(&context->targets[0]->public);
1037 context->target_count++;
1038
1039 result = resource_map_pool_resources(dc, context);
1040
1041 if (result == DC_OK)
1042 result = resource_map_phy_clock_resources(dc, context);
1043
1044 if (result == DC_OK)
1045 result = validate_mapped_resource(dc, context);
1046
1047 if (result == DC_OK) {
1048 validate_guaranteed_copy_target(
1049 context, dc->public.caps.max_targets);
1050 result = resource_build_scaling_params_for_context(dc, context);
1051 }
1052
1053 if (result == DC_OK)
1054 result = dce112_validate_bandwidth(dc, context);
1055
1056 return result;
1057}
1058
1059static void dce112_destroy_resource_pool(struct resource_pool **pool)
1060{
1061 struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
1062
1063 destruct(dce110_pool);
1064 dm_free(dce110_pool);
1065 *pool = NULL;
1066}
1067
1068static const struct resource_funcs dce112_res_pool_funcs = {
1069 .destroy = dce112_destroy_resource_pool,
1070 .link_enc_create = dce112_link_encoder_create,
1071 .validate_with_context = dce112_validate_with_context,
1072 .validate_guaranteed = dce112_validate_guaranteed,
1073 .validate_bandwidth = dce112_validate_bandwidth
1074};
1075
1076static void bw_calcs_data_update_from_pplib(struct core_dc *dc)
1077{
1078 struct dm_pp_clock_levels_with_latency eng_clks = {0};
1079 struct dm_pp_clock_levels_with_latency mem_clks = {0};
1080 struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
1081 struct dm_pp_clock_levels clks = {0};
1082
1083 /* do system clock. TODO PPLIB: once PPLIB is implemented,
1084 * remove the old path
1085 */
1086 if (!dm_pp_get_clock_levels_by_type_with_latency(
1087 dc->ctx,
1088 DM_PP_CLOCK_TYPE_ENGINE_CLK,
1089 &eng_clks)) {
1090
1091 /* This is only temporary */
1092 dm_pp_get_clock_levels_by_type(
1093 dc->ctx,
1094 DM_PP_CLOCK_TYPE_ENGINE_CLK,
1095 &clks);
1096 /* convert all the clocks from kHz to fixed-point MHz */
1097 dc->bw_vbios.high_sclk = bw_frc_to_fixed(
1098 clks.clocks_in_khz[clks.num_levels-1], 1000);
1099 dc->bw_vbios.mid1_sclk = bw_frc_to_fixed(
1100 clks.clocks_in_khz[clks.num_levels/8], 1000);
1101 dc->bw_vbios.mid2_sclk = bw_frc_to_fixed(
1102 clks.clocks_in_khz[clks.num_levels*2/8], 1000);
1103 dc->bw_vbios.mid3_sclk = bw_frc_to_fixed(
1104 clks.clocks_in_khz[clks.num_levels*3/8], 1000);
1105 dc->bw_vbios.mid4_sclk = bw_frc_to_fixed(
1106 clks.clocks_in_khz[clks.num_levels*4/8], 1000);
1107 dc->bw_vbios.mid5_sclk = bw_frc_to_fixed(
1108 clks.clocks_in_khz[clks.num_levels*5/8], 1000);
1109 dc->bw_vbios.mid6_sclk = bw_frc_to_fixed(
1110 clks.clocks_in_khz[clks.num_levels*6/8], 1000);
1111 dc->bw_vbios.low_sclk = bw_frc_to_fixed(
1112 clks.clocks_in_khz[0], 1000);
1113
1114 /*do memory clock*/
1115 dm_pp_get_clock_levels_by_type(
1116 dc->ctx,
1117 DM_PP_CLOCK_TYPE_MEMORY_CLK,
1118 &clks);
1119
1120 dc->bw_vbios.low_yclk = bw_frc_to_fixed(
1121 clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
1122 dc->bw_vbios.mid_yclk = bw_frc_to_fixed(
1123 clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
1124 1000);
1125 dc->bw_vbios.high_yclk = bw_frc_to_fixed(
1126 clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
1127 1000);
1128
1129 return;
1130 }
1131
1132 /* convert all the clocks from kHz to fixed-point MHz. TODO: wloop data */
1133 dc->bw_vbios.high_sclk = bw_frc_to_fixed(
1134 eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
1135 dc->bw_vbios.mid1_sclk = bw_frc_to_fixed(
1136 eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
1137 dc->bw_vbios.mid2_sclk = bw_frc_to_fixed(
1138 eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
1139 dc->bw_vbios.mid3_sclk = bw_frc_to_fixed(
1140 eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
1141 dc->bw_vbios.mid4_sclk = bw_frc_to_fixed(
1142 eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
1143 dc->bw_vbios.mid5_sclk = bw_frc_to_fixed(
1144 eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
1145 dc->bw_vbios.mid6_sclk = bw_frc_to_fixed(
1146 eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
1147 dc->bw_vbios.low_sclk = bw_frc_to_fixed(
1148 eng_clks.data[0].clocks_in_khz, 1000);
1149
1150 /*do memory clock*/
1151 dm_pp_get_clock_levels_by_type_with_latency(
1152 dc->ctx,
1153 DM_PP_CLOCK_TYPE_MEMORY_CLK,
1154 &mem_clks);
1155
1156 /* we don't need to call PPLIB for a validation clock since it
1157 * also gives us the highest sclk and highest mclk (UMA clock).
1158 * Also, always convert the UMA clock (from PPLIB) to YCLK (HW formula):
1159 * YCLK = UMACLK * m_memoryTypeMultiplier
1160 */
1161 dc->bw_vbios.low_yclk = bw_frc_to_fixed(
1162 mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
1163 dc->bw_vbios.mid_yclk = bw_frc_to_fixed(
1164 mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
1165 1000);
1166 dc->bw_vbios.high_yclk = bw_frc_to_fixed(
1167 mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
1168 1000);
1169
1170 /* Now notify PPLib/SMU which watermark sets to select, depending on
1171 * the DPM state they are in, and update the BW MGR GFX engine and
1172 * memory clock member variables used in the watermark calculations
1173 * for each watermark set.
1174 */
1175 clk_ranges.num_wm_sets = 4;
1176 clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
1177 clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
1178 eng_clks.data[0].clocks_in_khz;
1179 clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
1180 eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
1181 clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
1182 mem_clks.data[0].clocks_in_khz;
1183 clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
1184 mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
1185
1186 clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
1187 clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz =
1188 eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
1189 /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
1190 clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
1191 clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
1192 mem_clks.data[0].clocks_in_khz;
1193 clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
1194 mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
1195
1196 clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C;
1197 clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz =
1198 eng_clks.data[0].clocks_in_khz;
1199 clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
1200 eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
1201 clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
1202 mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
1203 /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
1204 clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
1205
1206 clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D;
1207 clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz =
1208 eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
1209 /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
1210 clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
1211 clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
1212 mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
1213 /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
1214 clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
1215
1216 /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
1217 dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
1218}
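A small worked example of the unit conversion this function performs (clock values invented for illustration; MEMORY_TYPE_MULTIPLIER is left symbolic since it is defined elsewhere):

/* PPLIB reports clocks in kHz, while bw_calcs() works in fixed-point MHz:
 *   bw_frc_to_fixed(800000, 1000)  ->  800.0 MHz engine clock
 *   bw_frc_to_fixed(clk_khz * MEMORY_TYPE_MULTIPLIER, 1000)
 *                                  ->  YCLK in MHz, per the formula above
 * i.e. the denominator of 1000 is what turns kHz into MHz.
 */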
1219
1220const struct resource_caps *dce112_resource_cap(
1221 struct hw_asic_id *asic_id)
1222{
1223 if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev))
1224 return &polaris_11_resource_cap;
1225 else
1226 return &polaris_10_resource_cap;
1227}
1228
1229static bool construct(
1230 uint8_t num_virtual_links,
1231 struct core_dc *dc,
1232 struct dce110_resource_pool *pool)
1233{
1234 unsigned int i;
1235 struct dc_context *ctx = dc->ctx;
1236 struct dm_pp_static_clock_info static_clk_info = {0};
1237
1238 ctx->dc_bios->regs = &bios_regs;
1239
1240 pool->base.res_cap = dce112_resource_cap(&ctx->asic_id);
1241 pool->base.funcs = &dce112_res_pool_funcs;
1242
1243 /*************************************************
1244 * Resource + asic cap hardcoding *
1245 *************************************************/
1246 pool->base.underlay_pipe_index = -1;
1247 pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
1248 dc->public.caps.max_downscale_ratio = 200;
1249 dc->public.caps.i2c_speed_in_khz = 100;
1250
1251 /*************************************************
1252 * Create resources *
1253 *************************************************/
1254
1255 pool->base.clock_sources[DCE112_CLK_SRC_PLL0] =
1256 dce112_clock_source_create(
1257 ctx, ctx->dc_bios,
1258 CLOCK_SOURCE_COMBO_PHY_PLL0,
1259 &clk_src_regs[0], false);
1260 pool->base.clock_sources[DCE112_CLK_SRC_PLL1] =
1261 dce112_clock_source_create(
1262 ctx, ctx->dc_bios,
1263 CLOCK_SOURCE_COMBO_PHY_PLL1,
1264 &clk_src_regs[1], false);
1265 pool->base.clock_sources[DCE112_CLK_SRC_PLL2] =
1266 dce112_clock_source_create(
1267 ctx, ctx->dc_bios,
1268 CLOCK_SOURCE_COMBO_PHY_PLL2,
1269 &clk_src_regs[2], false);
1270 pool->base.clock_sources[DCE112_CLK_SRC_PLL3] =
1271 dce112_clock_source_create(
1272 ctx, ctx->dc_bios,
1273 CLOCK_SOURCE_COMBO_PHY_PLL3,
1274 &clk_src_regs[3], false);
1275 pool->base.clock_sources[DCE112_CLK_SRC_PLL4] =
1276 dce112_clock_source_create(
1277 ctx, ctx->dc_bios,
1278 CLOCK_SOURCE_COMBO_PHY_PLL4,
1279 &clk_src_regs[4], false);
1280 pool->base.clock_sources[DCE112_CLK_SRC_PLL5] =
1281 dce112_clock_source_create(
1282 ctx, ctx->dc_bios,
1283 CLOCK_SOURCE_COMBO_PHY_PLL5,
1284 &clk_src_regs[5], false);
1285 pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL;
1286
1287 pool->base.dp_clock_source = dce112_clock_source_create(
1288 ctx, ctx->dc_bios,
1289 CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true);
1290
1291
1292 for (i = 0; i < pool->base.clk_src_count; i++) {
1293 if (pool->base.clock_sources[i] == NULL) {
1294 dm_error("DC: failed to create clock sources!\n");
1295 BREAK_TO_DEBUGGER();
1296 goto res_create_fail;
1297 }
1298 }
1299
1300 pool->base.display_clock = dal_display_clock_dce112_create(
1301 ctx);
1302
1303 if (pool->base.display_clock == NULL) {
1304 dm_error("DC: failed to create display clock!\n");
1305 BREAK_TO_DEBUGGER();
1306 goto res_create_fail;
1307 }
1308
1309
1310 /* get static clock information for PPLIB or firmware and save the
1311 * max_clocks_state
1312 */
1313 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) {
1314 enum clocks_state max_clocks_state =
1315 dce110_resource_convert_clock_state_pp_to_dc(
1316 static_clk_info.max_clocks_state);
1317
1318 dal_display_clock_store_max_clocks_state(
1319 pool->base.display_clock, max_clocks_state);
1320 }
1321
1322 {
1323 struct irq_service_init_data init_data;
1324 init_data.ctx = dc->ctx;
1325 pool->base.irqs = dal_irq_service_dce110_create(&init_data);
1326 if (!pool->base.irqs)
1327 goto res_create_fail;
1328 }
1329
1330 for (i = 0; i < pool->base.pipe_count; i++) {
1331 pool->base.timing_generators[i] =
1332 dce112_timing_generator_create(
1333 ctx,
1334 i,
1335 &dce112_tg_offsets[i]);
1336 if (pool->base.timing_generators[i] == NULL) {
1337 BREAK_TO_DEBUGGER();
1338 dm_error("DC: failed to create tg!\n");
1339 goto res_create_fail;
1340 }
1341
1342 pool->base.mis[i] = dce112_mem_input_create(
1343 ctx,
1344 i,
1345 &dce112_mi_reg_offsets[i]);
1346 if (pool->base.mis[i] == NULL) {
1347 BREAK_TO_DEBUGGER();
1348 dm_error(
1349 "DC: failed to create memory input!\n");
1350 goto res_create_fail;
1351 }
1352
1353 pool->base.ipps[i] = dce112_ipp_create(
1354 ctx,
1355 i,
1356 &ipp_reg_offsets[i]);
1357 if (pool->base.ipps[i] == NULL) {
1358 BREAK_TO_DEBUGGER();
1359 dm_error(
1360 "DC: failed to create input pixel processor!\n");
1361 goto res_create_fail;
1362 }
1363
1364 pool->base.transforms[i] = dce112_transform_create(ctx, i);
1365 if (pool->base.transforms[i] == NULL) {
1366 BREAK_TO_DEBUGGER();
1367 dm_error(
1368 "DC: failed to create transform!\n");
1369 goto res_create_fail;
1370 }
1371
1372 pool->base.opps[i] = dce112_opp_create(
1373 ctx,
1374 i,
1375 &dce112_opp_reg_offsets[i]);
1376 if (pool->base.opps[i] == NULL) {
1377 BREAK_TO_DEBUGGER();
1378 dm_error(
1379 "DC: failed to create output pixel processor!\n");
1380 goto res_create_fail;
1381 }
1382 }
1383
1384 if (!resource_construct(num_virtual_links, dc, &pool->base,
1385 &res_create_funcs))
1386 goto res_create_fail;
1387
1388 /* Create hardware sequencer */
1389 if (!dce112_hw_sequencer_construct(dc))
1390 goto res_create_fail;
1391
1392 bw_calcs_init(&dc->bw_dceip, &dc->bw_vbios, BW_CALCS_VERSION_POLARIS11);
1393
1394 bw_calcs_data_update_from_pplib(dc);
1395
1396 return true;
1397
1398res_create_fail:
1399 destruct(pool);
1400 return false;
1401}
1402
1403struct resource_pool *dce112_create_resource_pool(
1404 uint8_t num_virtual_links,
1405 struct core_dc *dc)
1406{
1407 struct dce110_resource_pool *pool =
1408 dm_alloc(sizeof(struct dce110_resource_pool));
1409
1410 if (!pool)
1411 return NULL;
1412
1413 if (construct(num_virtual_links, dc, pool))
1414 return &pool->base;
1415
1416 BREAK_TO_DEBUGGER();
1417 return NULL;
1418}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
new file mode 100644
index 000000000000..f21eb57857d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -0,0 +1,55 @@
1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_RESOURCE_DCE112_H__
27#define __DC_RESOURCE_DCE112_H__
28
29#include "core_types.h"
30
31struct core_dc;
32struct resource_pool;
33
34struct resource_pool *dce112_create_resource_pool(
35 uint8_t num_virtual_links,
36 struct core_dc *dc);
37
38enum dc_status dce112_validate_with_context(
39 const struct core_dc *dc,
40 const struct dc_validation_set set[],
41 int set_count,
42 struct validate_context *context);
43
44enum dc_status dce112_validate_guaranteed(
45 const struct core_dc *dc,
46 const struct dc_target *dc_target,
47 struct validate_context *context);
48
49enum dc_status dce112_validate_bandwidth(
50 const struct core_dc *dc,
51 struct validate_context *context);
52
53
54#endif /* __DC_RESOURCE_DCE112_H__ */
55
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
new file mode 100644
index 000000000000..9979b8441a8d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -0,0 +1,16 @@
1#
2# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of the HW CRTC block.
4
5DCE80 = dce80_ipp.o dce80_ipp_gamma.o dce80_opp.o \
6 dce80_opp_formatter.o dce80_opp_regamma.o \
7 dce80_timing_generator.o dce80_opp_csc.o\
8 dce80_compressor.o dce80_mem_input.o dce80_hw_sequencer.o \
9 dce80_resource.o
10
11AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
12
13AMD_DISPLAY_FILES += $(AMD_DAL_DCE80)
14
15
16
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
new file mode 100644
index 000000000000..eeedb7c4fe53
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
@@ -0,0 +1,839 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_8_0_d.h"
29#include "dce/dce_8_0_sh_mask.h"
30#include "gmc/gmc_7_1_sh_mask.h"
31#include "gmc/gmc_7_1_d.h"
32
33#include "include/logger_interface.h"
34#include "dce80_compressor.h"
35
36#define DCP_REG(reg)\
37 (reg + cp80->offsets.dcp_offset)
38#define DMIF_REG(reg)\
39 (reg + cp80->offsets.dmif_offset)
40
41static const struct dce80_compressor_reg_offsets reg_offsets[] = {
42{
43 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
44 .dmif_offset = (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
45 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
46},
47{
48 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
49 .dmif_offset = (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
50 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
51},
52{
53 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
54 .dmif_offset = (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
55 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
56},
57{
58 .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
59 .dmif_offset = (mmDMIF_PG3_DPG_PIPE_DPM_CONTROL
60 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
61},
62{
63 .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
64 .dmif_offset = (mmDMIF_PG4_DPG_PIPE_DPM_CONTROL
65 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
66},
67{
68 .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
69 .dmif_offset = (mmDMIF_PG5_DPG_PIPE_DPM_CONTROL
70 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
71}
72};
73
74static const uint32_t dce8_one_lpt_channel_max_resolution = 2048 * 1200;
75
76enum fbc_idle_force {
77 /* Bit 0 - Display registers updated */
78 FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
79
80 /* Bit 2 - FBC_GRPH_COMP_EN register updated */
81 FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
82 /* Bit 3 - FBC_SRC_SEL register updated */
83 FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
84 /* Bit 4 - FBC_MIN_COMPRESSION register updated */
85 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
86 /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
87 FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
88 /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
89 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
90 /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
91 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
92
93 /* Bit 24 - Memory write to region 0 defined by MC registers. */
94 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
95 /* Bit 25 - Memory write to region 1 defined by MC registers */
96 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
97 /* Bit 26 - Memory write to region 2 defined by MC registers */
98 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
99 /* Bit 27 - Memory write to region 3 defined by MC registers. */
100 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
101
102 /* Bit 28 - Memory write from any client other than MCIF */
103 FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
104 /* Bit 29 - CG static screen signal is inactive */
105 FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
106};
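These enumerators are plain bit flags, so a combined value is simply an OR of the individual conditions; a sketch, with the particular combination chosen only as an example:

/* example combination of idle-force conditions */
uint32_t idle_force_mask =
	FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE |
	FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF;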
107
108static uint32_t lpt_size_alignment(struct dce80_compressor *cp80)
109{
110 /* LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * #DRAM CHANNELS. */
111 return cp80->base.raw_size * cp80->base.banks_num *
112 cp80->base.dram_channels_num;
113}
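A worked instance of the alignment formula above, with parameter values picked purely for illustration:

/* e.g. raw_size = 2048 bytes, banks_num = 8, dram_channels_num = 2:
 *   LPT alignment = 2048 * 8 * 2 = 32768 bytes (32 KiB)
 * which is the boundary the compressed surface address is later
 * rounded up to when LPT is in use.
 */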
114
115static uint32_t lpt_memory_control_config(struct dce80_compressor *cp80,
116 uint32_t lpt_control)
117{
118 /*LPT MC Config */
119 if (cp80->base.options.bits.LPT_MC_CONFIG == 1) {
120 /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
121 * 00 - 1 CHANNEL
122 * 01 - 2 CHANNELS
123 * 02 - 4 OR 6 CHANNELS
124 * (Only for discrete GPU, N/A for CZ)
125 * 03 - 8 OR 12 CHANNELS
126 * (Only for discrete GPU, N/A for CZ) */
127 switch (cp80->base.dram_channels_num) {
128 case 2:
129 set_reg_field_value(
130 lpt_control,
131 1,
132 LOW_POWER_TILING_CONTROL,
133 LOW_POWER_TILING_NUM_PIPES);
134 break;
135 case 1:
136 set_reg_field_value(
137 lpt_control,
138 0,
139 LOW_POWER_TILING_CONTROL,
140 LOW_POWER_TILING_NUM_PIPES);
141 break;
142 default:
143 dm_logger_write(
144 cp80->base.ctx->logger, LOG_WARNING,
145 "%s: Invalid LPT NUM_PIPES!!!",
146 __func__);
147 break;
148 }
149
150 /* The mapping for LPT NUM_BANKS is in
151 * GRPH_CONTROL.GRPH_NUM_BANKS register field
152 * Specifies the number of memory banks for tiling
153 * purposes. Only applies to 2D and 3D tiling modes.
154 * POSSIBLE VALUES:
155 * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
156 * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
157 * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
158 * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
159 switch (cp80->base.banks_num) {
160 case 16:
161 set_reg_field_value(
162 lpt_control,
163 3,
164 LOW_POWER_TILING_CONTROL,
165 LOW_POWER_TILING_NUM_BANKS);
166 break;
167 case 8:
168 set_reg_field_value(
169 lpt_control,
170 2,
171 LOW_POWER_TILING_CONTROL,
172 LOW_POWER_TILING_NUM_BANKS);
173 break;
174 case 4:
175 set_reg_field_value(
176 lpt_control,
177 1,
178 LOW_POWER_TILING_CONTROL,
179 LOW_POWER_TILING_NUM_BANKS);
180 break;
181 case 2:
182 set_reg_field_value(
183 lpt_control,
184 0,
185 LOW_POWER_TILING_CONTROL,
186 LOW_POWER_TILING_NUM_BANKS);
187 break;
188 default:
189 dm_logger_write(
190 cp80->base.ctx->logger, LOG_WARNING,
191 "%s: Invalid LPT NUM_BANKS!!!",
192 __func__);
193 break;
194 }
195
196 /* The mapping is in DMIF_ADDR_CALC.
197 * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
198 * Carrizo specifies the memory interleave per pipe.
199 * It effectively specifies the location of pipe bits in
200 * the memory address.
201 * POSSIBLE VALUES:
202 * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
203 * interleave
204 * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
205 * interleave
206 */
207 switch (cp80->base.channel_interleave_size) {
208 case 256: /*256B */
209 set_reg_field_value(
210 lpt_control,
211 0,
212 LOW_POWER_TILING_CONTROL,
213 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
214 break;
215 case 512: /*512B */
216 set_reg_field_value(
217 lpt_control,
218 1,
219 LOW_POWER_TILING_CONTROL,
220 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
221 break;
222 default:
223 dm_logger_write(
224 cp80->base.ctx->logger, LOG_WARNING,
225 "%s: Invalid LPT INTERLEAVE_SIZE!!!",
226 __func__);
227 break;
228 }
229
230 /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
231 * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
232 * for Carrizo. Specifies the size of dram row in bytes.
233 * This should match up with NOOFCOLS field in
234 * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
235 * This register DMIF_ADDR_CALC is not used by the
236 * hardware as it is only used for addrlib assertions.
237 * POSSIBLE VALUES:
238 * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
239 * boundary
240 * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
241 * boundary
242 * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
243 * boundary */
244 switch (cp80->base.raw_size) {
245 case 4096: /*4 KB */
246 set_reg_field_value(
247 lpt_control,
248 2,
249 LOW_POWER_TILING_CONTROL,
250 LOW_POWER_TILING_ROW_SIZE);
251 break;
252 case 2048:
253 set_reg_field_value(
254 lpt_control,
255 1,
256 LOW_POWER_TILING_CONTROL,
257 LOW_POWER_TILING_ROW_SIZE);
258 break;
259 case 1024:
260 set_reg_field_value(
261 lpt_control,
262 0,
263 LOW_POWER_TILING_CONTROL,
264 LOW_POWER_TILING_ROW_SIZE);
265 break;
266 default:
267 dm_logger_write(
268 cp80->base.ctx->logger, LOG_WARNING,
269 "%s: Invalid LPT ROW_SIZE!!!",
270 __func__);
271 break;
272 }
273 } else {
274 dm_logger_write(
275 cp80->base.ctx->logger, LOG_WARNING,
276 "%s: LPT MC Configuration is not provided",
277 __func__);
278 }
279
280 return lpt_control;
281}
282
283static bool is_source_bigger_than_epanel_size(
284 struct dce80_compressor *cp80,
285 uint32_t source_view_width,
286 uint32_t source_view_height)
287{
288 if (cp80->base.embedded_panel_h_size != 0 &&
289 cp80->base.embedded_panel_v_size != 0 &&
290 ((source_view_width * source_view_height) >
291 (cp80->base.embedded_panel_h_size *
292 cp80->base.embedded_panel_v_size)))
293 return true;
294
295 return false;
296}
297
298static uint32_t align_to_chunks_number_per_line(
299 struct dce80_compressor *cp80,
300 uint32_t pixels)
301{
302 return 256 * ((pixels + 255) / 256);
303}
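The helper above rounds a pixel count up to the next multiple of 256 (the chunk granularity used here); for example:

/* 1920 pixels -> 256 * ((1920 + 255) / 256) = 256 * 8 = 2048 */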
304
305static void wait_for_fbc_state_changed(
306 struct dce80_compressor *cp80,
307 bool enabled)
308{
309 uint8_t counter = 0;
310 uint32_t addr = mmFBC_STATUS;
311 uint32_t value;
312
313 while (counter < 10) {
314 value = dm_read_reg(cp80->base.ctx, addr);
315 if (get_reg_field_value(
316 value,
317 FBC_STATUS,
318 FBC_ENABLE_STATUS) == enabled)
319 break;
320 udelay(10);
321 counter++;
322 }
323
324 if (counter == 10) {
325 dm_logger_write(
326 cp80->base.ctx->logger, LOG_WARNING,
327 "%s: wait counter exceeded, changes to HW not applied",
328 __func__);
329 }
330}
331
332void dce80_compressor_power_up_fbc(struct compressor *compressor)
333{
334 uint32_t value;
335 uint32_t addr;
336
337 addr = mmFBC_CNTL;
338 value = dm_read_reg(compressor->ctx, addr);
339 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
340 set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
341 set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
342 dm_write_reg(compressor->ctx, addr, value);
343
344 addr = mmFBC_COMP_MODE;
345 value = dm_read_reg(compressor->ctx, addr);
346 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
347 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
348 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
349 dm_write_reg(compressor->ctx, addr, value);
350
351 addr = mmFBC_COMP_CNTL;
352 value = dm_read_reg(compressor->ctx, addr);
353 set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
354 dm_write_reg(compressor->ctx, addr, value);
355 /* FBC_MIN_COMPRESSION 0 ==> 2:1 */
356 /* 1 ==> 4:1 */
357 /* 2 ==> 8:1 */
358 /* 0xF ==> 1:1 */
359 set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
360 dm_write_reg(compressor->ctx, addr, value);
361 compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
362
363 value = 0;
364 dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
365
366 value = 0xFFFFFF;
367 dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
368}
369
370void dce80_compressor_enable_fbc(
371 struct compressor *compressor,
372 uint32_t paths_num,
373 struct compr_addr_and_pitch_params *params)
374{
375 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
376
377 if (compressor->options.bits.FBC_SUPPORT &&
378 (compressor->options.bits.DUMMY_BACKEND == 0) &&
379 (!dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
380 (!is_source_bigger_than_epanel_size(
381 cp80,
382 params->source_view_width,
383 params->source_view_height))) {
384
385 uint32_t addr;
386 uint32_t value;
387
388 /* Before enabling FBC we first need to enable LPT, if applicable.
389 * LPT state should always be changed (enabled/disabled) while FBC
390 * is disabled. */
391 if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
392 (params->source_view_width *
393 params->source_view_height <=
394 dce8_one_lpt_channel_max_resolution)) {
395 dce80_compressor_enable_lpt(compressor);
396 }
397
398 addr = mmFBC_CNTL;
399 value = dm_read_reg(compressor->ctx, addr);
400 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
401 set_reg_field_value(
402 value,
403 params->inst,
404 FBC_CNTL, FBC_SRC_SEL);
405 dm_write_reg(compressor->ctx, addr, value);
406
407		/* Keep track of the controller instance FBC is attached to */
408 compressor->is_enabled = true;
409 compressor->attached_inst = params->inst;
410 cp80->offsets = reg_offsets[params->inst - 1];
411
412		/* Toggle it as there is a bug in HW */
413 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
414 dm_write_reg(compressor->ctx, addr, value);
415 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
416 dm_write_reg(compressor->ctx, addr, value);
417
418 wait_for_fbc_state_changed(cp80, true);
419 }
420}
421
422void dce80_compressor_disable_fbc(struct compressor *compressor)
423{
424 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
425
426 if (compressor->options.bits.FBC_SUPPORT &&
427 dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
428 uint32_t reg_data;
429 /* Turn off compression */
430 reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
431 set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
432 dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
433
434		/* Reset the attached instance to undefined */
435 compressor->attached_inst = 0;
436 compressor->is_enabled = false;
437
438		/* Whenever disabling FBC, make sure LPT is also disabled if
439		 * LPT is supported */
440 if (compressor->options.bits.LPT_SUPPORT)
441 dce80_compressor_disable_lpt(compressor);
442
443 wait_for_fbc_state_changed(cp80, false);
444 }
445}
446
447bool dce80_compressor_is_fbc_enabled_in_hw(
448 struct compressor *compressor,
449 uint32_t *inst)
450{
451 /* Check the hardware register */
452 uint32_t value;
453
454 value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
455 if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
456 if (inst != NULL)
457 *inst = compressor->attached_inst;
458 return true;
459 }
460
461 value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
462 if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
463 if (inst != NULL)
464 *inst = compressor->attached_inst;
465 return true;
466 }
467
468 return false;
469}
470
471bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
472{
473 /* Check the hardware register */
474 uint32_t value = dm_read_reg(compressor->ctx,
475 mmLOW_POWER_TILING_CONTROL);
476
477 return get_reg_field_value(
478 value,
479 LOW_POWER_TILING_CONTROL,
480 LOW_POWER_TILING_ENABLE);
481}
482
483void dce80_compressor_program_compressed_surface_address_and_pitch(
484 struct compressor *compressor,
485 struct compr_addr_and_pitch_params *params)
486{
487 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
488 uint32_t value = 0;
489 uint32_t fbc_pitch = 0;
490 uint32_t compressed_surf_address_low_part =
491 compressor->compr_surface_address.addr.low_part;
492
493 /* Clear content first. */
494 dm_write_reg(
495 compressor->ctx,
496 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
497 0);
498 dm_write_reg(compressor->ctx,
499 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
500
501 if (compressor->options.bits.LPT_SUPPORT) {
502 uint32_t lpt_alignment = lpt_size_alignment(cp80);
503
504 if (lpt_alignment != 0) {
505 compressed_surf_address_low_part =
506 ((compressed_surf_address_low_part
507 + (lpt_alignment - 1)) / lpt_alignment)
508 * lpt_alignment;
509 }
510 }
511
512 /* Write address, HIGH has to be first. */
513 dm_write_reg(compressor->ctx,
514 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
515 compressor->compr_surface_address.addr.high_part);
516 dm_write_reg(compressor->ctx,
517 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
518 compressed_surf_address_low_part);
519
520 fbc_pitch = align_to_chunks_number_per_line(
521 cp80,
522 params->source_view_width);
523
524 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
525 fbc_pitch = fbc_pitch / 8;
526 else
527 dm_logger_write(
528 compressor->ctx->logger, LOG_WARNING,
529 "%s: Unexpected DCE8 compression ratio",
530 __func__);
531
532 /* Clear content first. */
533 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
534
535 /* Write FBC Pitch. */
536 set_reg_field_value(
537 value,
538 fbc_pitch,
539 GRPH_COMPRESS_PITCH,
540 GRPH_COMPRESS_PITCH);
541 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
542
543}
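/*
 * Editorial worked example, illustration only (the 1920-pixel width below is
 * an assumed figure, not taken from the patch): for a source view width of
 * 1920,
 *
 *	align_to_chunks_number_per_line(cp80, 1920)
 *		= 256 * ((1920 + 255) / 256) = 256 * 8 = 2048
 *
 * and, with the 1:1 minimum compression ratio programmed by
 * dce80_compressor_power_up_fbc(), the value written to GRPH_COMPRESS_PITCH
 * above is 2048 / 8 = 256.
 */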
544
545void dce80_compressor_disable_lpt(struct compressor *compressor)
546{
547 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
548 uint32_t value;
549 uint32_t addr;
550 uint32_t inx;
551
552 /* Disable all pipes LPT Stutter */
553 for (inx = 0; inx < 3; inx++) {
554 value =
555 dm_read_reg(
556 compressor->ctx,
557 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
558 set_reg_field_value(
559 value,
560 0,
561 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
562 STUTTER_ENABLE_NONLPTCH);
563 dm_write_reg(
564 compressor->ctx,
565 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
566 value);
567 }
568
569 /* Disable LPT */
570 addr = mmLOW_POWER_TILING_CONTROL;
571 value = dm_read_reg(compressor->ctx, addr);
572 set_reg_field_value(
573 value,
574 0,
575 LOW_POWER_TILING_CONTROL,
576 LOW_POWER_TILING_ENABLE);
577 dm_write_reg(compressor->ctx, addr, value);
578
579 /* Clear selection of Channel(s) containing Compressed Surface */
580 addr = mmGMCON_LPT_TARGET;
581 value = dm_read_reg(compressor->ctx, addr);
582 set_reg_field_value(
583 value,
584 0xFFFFFFFF,
585 GMCON_LPT_TARGET,
586 STCTRL_LPT_TARGET);
587 dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
588}
589
590void dce80_compressor_enable_lpt(struct compressor *compressor)
591{
592 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
593 uint32_t value;
594 uint32_t addr;
595 uint32_t value_control;
596 uint32_t channels;
597
598 /* Enable LPT Stutter from Display pipe */
599 value = dm_read_reg(compressor->ctx,
600 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
601 set_reg_field_value(
602 value,
603 1,
604 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
605 STUTTER_ENABLE_NONLPTCH);
606 dm_write_reg(compressor->ctx,
607 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
608
609	/* Selection of Channel(s) containing Compressed Surface: 0xFFFFFFFF
610	 * will disable LPT.
611	 * STCTRL_LPT_TARGETn corresponds to channel n. */
612 addr = mmLOW_POWER_TILING_CONTROL;
613 value_control = dm_read_reg(compressor->ctx, addr);
614 channels = get_reg_field_value(value_control,
615 LOW_POWER_TILING_CONTROL,
616 LOW_POWER_TILING_MODE);
617
618 addr = mmGMCON_LPT_TARGET;
619 value = dm_read_reg(compressor->ctx, addr);
620 set_reg_field_value(
621 value,
622 channels + 1, /* not mentioned in programming guide,
623 but follow DCE8.1 */
624 GMCON_LPT_TARGET,
625 STCTRL_LPT_TARGET);
626 dm_write_reg(compressor->ctx, addr, value);
627
628 /* Enable LPT */
629 addr = mmLOW_POWER_TILING_CONTROL;
630 value = dm_read_reg(compressor->ctx, addr);
631 set_reg_field_value(
632 value,
633 1,
634 LOW_POWER_TILING_CONTROL,
635 LOW_POWER_TILING_ENABLE);
636 dm_write_reg(compressor->ctx, addr, value);
637}
638
639void dce80_compressor_program_lpt_control(
640 struct compressor *compressor,
641 struct compr_addr_and_pitch_params *params)
642{
643 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
644 uint32_t rows_per_channel;
645 uint32_t lpt_alignment;
646 uint32_t source_view_width;
647 uint32_t source_view_height;
648 uint32_t lpt_control = 0;
649
650 if (!compressor->options.bits.LPT_SUPPORT)
651 return;
652
653 lpt_control = dm_read_reg(compressor->ctx,
654 mmLOW_POWER_TILING_CONTROL);
655
656 /* POSSIBLE VALUES for Low Power Tiling Mode:
657 * 00 - Use channel 0
658 * 01 - Use Channel 0 and 1
659 * 02 - Use Channel 0,1,2,3
660 * 03 - reserved */
661 switch (compressor->lpt_channels_num) {
662	/* case 2:
663	 * Use Channel 0 & 1 - not used for DCE 8 */
664	case 1:
665		/* Use Channel 0 for LPT for DCE 8 */
666 set_reg_field_value(
667 lpt_control,
668 0,
669 LOW_POWER_TILING_CONTROL,
670 LOW_POWER_TILING_MODE);
671 break;
672 default:
673 dm_logger_write(
674 compressor->ctx->logger, LOG_WARNING,
675			"%s: Invalid number of DRAM channels selected for LPT",
676 __func__);
677 break;
678 }
679
680 lpt_control = lpt_memory_control_config(cp80, lpt_control);
681
682 /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
683 * FBC compressed surface pitch.
684 * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
685 * Surface Pitch) / (Row Size * Number of Channels *
686 * Number of Banks)). */
687 rows_per_channel = 0;
688 lpt_alignment = lpt_size_alignment(cp80);
689 source_view_width =
690 align_to_chunks_number_per_line(
691 cp80,
692 params->source_view_width);
693 source_view_height = (params->source_view_height + 1) & (~0x1);
694
695 if (lpt_alignment != 0) {
696 rows_per_channel = source_view_width * source_view_height * 4;
697 rows_per_channel =
698 (rows_per_channel % lpt_alignment) ?
699 (rows_per_channel / lpt_alignment + 1) :
700 rows_per_channel / lpt_alignment;
701 }
702
703 set_reg_field_value(
704 lpt_control,
705 rows_per_channel,
706 LOW_POWER_TILING_CONTROL,
707 LOW_POWER_TILING_ROWS_PER_CHAN);
708
709 dm_write_reg(compressor->ctx,
710 mmLOW_POWER_TILING_CONTROL, lpt_control);
711}
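/*
 * Editorial worked example for the ROWS_PER_CHAN formula above, illustration
 * only. Assume a 1920x1080 source view and assume lpt_size_alignment()
 * (defined earlier in this file) returns 2 MiB, i.e. row size * number of
 * channels * number of banks; both figures are purely illustrative:
 *
 *	width  -> align_to_chunks_number_per_line() -> 2048
 *	height -> rounded up to even                -> 1080
 *	bytes  =  2048 * 1080 * 4                   =  8847360
 *
 * 8847360 is not a multiple of 2097152, so the round-up division yields
 * 8847360 / 2097152 + 1 = 4 + 1 = 5, and LOW_POWER_TILING_ROWS_PER_CHAN is
 * programmed to 5.
 */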
712
713/*
714 * DCE 8 Frame Buffer Compression Implementation
715 */
716
717void dce80_compressor_set_fbc_invalidation_triggers(
718 struct compressor *compressor,
719 uint32_t fbc_trigger)
720{
721	/* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19);
722	 * on DCE 8 regions cannot be used - they do not work with S/G
723	 */
724 uint32_t addr = mmFBC_CLIENT_REGION_MASK;
725 uint32_t value = dm_read_reg(compressor->ctx, addr);
726
727 set_reg_field_value(
728 value,
729 0,
730 FBC_CLIENT_REGION_MASK,
731 FBC_MEMORY_REGION_MASK);
732 dm_write_reg(compressor->ctx, addr, value);
733
734	/* Set up the events on which to clear all CSM entries (effectively
735	 * marking the current compressed data invalid).
736	 * For DCE 8, CSM metadata 11111 means "Not Compressed".
737 * Used as the initial value of the metadata sent to the compressor
738 * after invalidation, to indicate that the compressor should attempt
739 * to compress all chunks on the current pass. Also used when the chunk
740 * is not successfully written to memory.
741 * When this CSM value is detected, FBC reads from the uncompressed
742 * buffer. Set events according to passed in value, these events are
743 * valid for DCE8:
744 * - bit 0 - display register updated
745 * - bit 28 - memory write from any client except from MCIF
746 * - bit 29 - CG static screen signal is inactive
747	 * In addition, DCE8.1 needs to set new DCE8.1-specific events that
748	 * are used to trigger invalidation on certain register changes; for
749	 * example, enabling Alpha Compression may trigger invalidation of
750	 * FBC once the bit is set. These events are as follows:
751 * - Bit 2 - FBC_GRPH_COMP_EN register updated
752 * - Bit 3 - FBC_SRC_SEL register updated
753 * - Bit 4 - FBC_MIN_COMPRESSION register updated
754 * - Bit 5 - FBC_ALPHA_COMP_EN register updated
755 * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
756 * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
757 */
758 addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
759 value = dm_read_reg(compressor->ctx, addr);
760 set_reg_field_value(
761 value,
762 fbc_trigger |
763 FBC_IDLE_FORCE_GRPH_COMP_EN |
764 FBC_IDLE_FORCE_SRC_SEL_CHANGE |
765 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
766 FBC_IDLE_FORCE_ALPHA_COMP_EN |
767 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
768 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
769 FBC_IDLE_FORCE_CLEAR_MASK,
770 FBC_IDLE_FORCE_CLEAR_MASK);
771 dm_write_reg(compressor->ctx, addr, value);
772}
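/*
 * Editorial usage sketch, not taken from the patch: per the comment above, a
 * DCE8 caller composes fbc_trigger from bit 0 (display register updated),
 * bit 28 (memory write from any client except MCIF) and bit 29 (CG static
 * screen signal inactive). The literal shifts below are illustrative; the
 * real caller may use named masks that are not visible in this file.
 *
 *	uint32_t fbc_trigger = (1 << 0) | (1 << 28) | (1 << 29);
 *
 *	dce80_compressor_set_fbc_invalidation_triggers(compressor, fbc_trigger);
 */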
773
774bool dce80_compressor_construct(struct dce80_compressor *compressor,
775 struct dc_context *ctx)
776{
777 struct dc_bios *bp = ctx->dc_bios;
778 struct embedded_panel_info panel_info;
779
780	compressor->base.ctx = ctx;
781
782	compressor->base.options.raw = 0;
783	compressor->base.options.bits.FBC_SUPPORT = true;
784	compressor->base.options.bits.LPT_SUPPORT = true;
785	compressor->base.options.bits.DUMMY_BACKEND = false;
786	compressor->base.options.bits.CLK_GATING_DISABLED = false;
787
788	compressor->base.embedded_panel_h_size = 0;
789	compressor->base.embedded_panel_v_size = 0;
790	compressor->base.memory_bus_width = ctx->asic_id.vram_width;
791	compressor->base.allocated_size = 0;
792	compressor->base.preferred_requested_size = 0;
793	compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
794	compressor->base.banks_num = 0;
795	compressor->base.raw_size = 0;
796	compressor->base.channel_interleave_size = 0;
797	compressor->base.dram_channels_num = 0;
798	compressor->base.attached_inst = 0;
799	compressor->base.is_enabled = false;
800
801	/* For DCE 8 always use one DRAM channel for LPT */
802	compressor->base.lpt_channels_num = 1;
803
804	/* Check if this system has more than 1 DRAM channel; if it has only
805	 * one, LPT should not be supported */
806	if (compressor->base.memory_bus_width == 64)
807		compressor->base.options.bits.LPT_SUPPORT = false;
808
809 if (BP_RESULT_OK ==
810 bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
811 compressor->base.embedded_panel_h_size =
812 panel_info.lcd_timing.horizontal_addressable;
813 compressor->base.embedded_panel_v_size =
814 panel_info.lcd_timing.vertical_addressable;
815 }
816 return true;
817}
818
819struct compressor *dce80_compressor_create(struct dc_context *ctx)
820{
821 struct dce80_compressor *cp80 =
822 dm_alloc(sizeof(struct dce80_compressor));
823
824 if (!cp80)
825 return NULL;
826
827 if (dce80_compressor_construct(cp80, ctx))
828 return &cp80->base;
829
830 BREAK_TO_DEBUGGER();
831 dm_free(cp80);
832 return NULL;
833}
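/*
 * Editorial usage sketch, not part of the patch: the compressor follows the
 * create/construct/destroy pattern used throughout this driver. The code
 * that owns the returned pointer (the DCE8 resource construction) is assumed
 * here and not shown:
 *
 *	struct compressor *compr = dce80_compressor_create(ctx);
 *
 *	if (compr == NULL)
 *		return false;	(allocation or construction failed)
 *	...
 *	dce80_compressor_destroy(&compr);	(frees and NULLs the pointer)
 */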
834
835void dce80_compressor_destroy(struct compressor **compressor)
836{
837 dm_free(TO_DCE80_COMPRESSOR(*compressor));
838 *compressor = NULL;
839}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
new file mode 100644
index 000000000000..01290969ff92
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
@@ -0,0 +1,78 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_COMPRESSOR_DCE80_H__
26#define __DC_COMPRESSOR_DCE80_H__
27
28#include "../inc/compressor.h"
29
30#define TO_DCE80_COMPRESSOR(compressor)\
31 container_of(compressor, struct dce80_compressor, base)
32
33struct dce80_compressor_reg_offsets {
34 uint32_t dcp_offset;
35 uint32_t dmif_offset;
36};
37
38struct dce80_compressor {
39 struct compressor base;
40 struct dce80_compressor_reg_offsets offsets;
41};
42
43struct compressor *dce80_compressor_create(struct dc_context *ctx);
44
45bool dce80_compressor_construct(struct dce80_compressor *cp80,
46 struct dc_context *ctx);
47
48void dce80_compressor_destroy(struct compressor **cp);
49
50/* FBC RELATED */
51void dce80_compressor_power_up_fbc(struct compressor *cp);
52
53void dce80_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
54 struct compr_addr_and_pitch_params *params);
55
56void dce80_compressor_disable_fbc(struct compressor *cp);
57
58void dce80_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
59 uint32_t fbc_trigger);
60
61void dce80_compressor_program_compressed_surface_address_and_pitch(
62 struct compressor *cp,
63 struct compr_addr_and_pitch_params *params);
64
65bool dce80_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
66 uint32_t *fbc_mapped_crtc_id);
67
68/* LPT RELATED */
69void dce80_compressor_enable_lpt(struct compressor *cp);
70
71void dce80_compressor_disable_lpt(struct compressor *cp);
72
73void dce80_compressor_program_lpt_control(struct compressor *cp,
74 struct compr_addr_and_pitch_params *params);
75
76bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
77
78#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
new file mode 100644
index 000000000000..c7a2b768bcd1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dc.h"
28#include "core_dc.h"
29#include "core_types.h"
30#include "dce80_hw_sequencer.h"
31
32#include "dce/dce_hwseq.h"
33#include "dce110/dce110_hw_sequencer.h"
34
35/* include DCE8 register header files */
36#include "dce/dce_8_0_d.h"
37#include "dce/dce_8_0_sh_mask.h"
38
39struct dce80_hw_seq_reg_offsets {
40 uint32_t crtc;
41};
42
43static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
44{
45 .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
46},
47{
48 .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
49},
50{
51 .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
52},
53{
54 .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
55},
56{
57 .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
58},
59{
60 .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
61}
62};
63
64#define HW_REG_CRTC(reg, id)\
65 (reg + reg_offsets[id].crtc)
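/*
 * Editorial note, illustration only: each reg_offsets[] entry stores the
 * distance of that CRTC's register block from the instance-less base, so
 * HW_REG_CRTC() turns a generic register name into a per-controller address.
 * For example, for controller_id 2:
 *
 *	HW_REG_CRTC(mmMASTER_UPDATE_MODE, 2)
 *		== mmMASTER_UPDATE_MODE
 *		   + (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL)
 *
 * which is the CRTC2 copy of MASTER_UPDATE_MODE, as used in
 * dce80_enable_display_power_gating() below.
 */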
66
67/*******************************************************************************
68 * Private definitions
69 ******************************************************************************/
70
71/***************************PIPE_CONTROL***********************************/
72
73static bool dce80_enable_display_power_gating(
74 struct core_dc *dc,
75 uint8_t controller_id,
76 struct dc_bios *dcb,
77 enum pipe_gating_control power_gating)
78{
79 enum bp_result bp_result = BP_RESULT_OK;
80 enum bp_pipe_control_action cntl;
81 struct dc_context *ctx = dc->ctx;
82
83 if (power_gating == PIPE_GATING_CONTROL_INIT)
84 cntl = ASIC_PIPE_INIT;
85 else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
86 cntl = ASIC_PIPE_ENABLE;
87 else
88 cntl = ASIC_PIPE_DISABLE;
89
90	if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)) {
91
92 bp_result = dcb->funcs->enable_disp_power_gating(
93 dcb, controller_id + 1, cntl);
94
95 /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
96 * by default when command table is called
97 */
98 dm_write_reg(ctx,
99 HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
100 0);
101 }
102
103	return bp_result == BP_RESULT_OK;
107}
108
109
110static void set_display_mark_for_pipe_if_needed(struct core_dc *dc,
111 struct pipe_ctx *pipe_ctx,
112 struct validate_context *context)
113{
114	/* Do nothing until we have proper bandwidth calcs */
115}
116
117static void set_displaymarks(
118 const struct core_dc *dc, struct validate_context *context)
119{
120	/* Do nothing until we have proper bandwidth calcs */
121}
122
123static void set_bandwidth(struct core_dc *dc)
124{
125	/* Do nothing until we have proper bandwidth calcs */
126}
127
128
129bool dce80_hw_sequencer_construct(struct core_dc *dc)
130{
131 dce110_hw_sequencer_construct(dc);
132
133 dc->hwss.enable_display_power_gating = dce80_enable_display_power_gating;
134 dc->hwss.pipe_control_lock = dce_pipe_control_lock;
135 dc->hwss.set_displaymarks = set_displaymarks;
136 dc->hwss.increase_watermarks_for_pipe = set_display_mark_for_pipe_if_needed;
137 dc->hwss.set_bandwidth = set_bandwidth;
138
139 return true;
140}
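/*
 * Editorial usage sketch, not part of the patch: after
 * dce80_hw_sequencer_construct() runs, the hwss table dispatches to the DCE8
 * power-gating implementation above. The call site below is hypothetical and
 * only illustrates the hook's signature:
 *
 *	dce80_hw_sequencer_construct(dc);
 *	dc->hwss.enable_display_power_gating(dc, 0, dc->ctx->dc_bios,
 *					PIPE_GATING_CONTROL_ENABLE);
 */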
141
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
new file mode 100644
index 000000000000..7cc203f433d3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
@@ -0,0 +1,36 @@
1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_HWSS_DCE80_H__
27#define __DC_HWSS_DCE80_H__
28
29#include "core_types.h"
30
31struct core_dc;
32
33bool dce80_hw_sequencer_construct(struct core_dc *dc);
34
35#endif /* __DC_HWSS_DCE80_H__ */
36
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.c
new file mode 100644
index 000000000000..86826c229d39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/logger_interface.h"
28
29#include "dce/dce_8_0_d.h"
30#include "dce/dce_8_0_sh_mask.h"
31
32#include "dce80_ipp.h"
33
34#include "dce110/dce110_ipp.h"
35
36static const struct ipp_funcs funcs = {
37 .ipp_cursor_set_attributes = dce110_ipp_cursor_set_attributes,
38 .ipp_cursor_set_position = dce110_ipp_cursor_set_position,
39 .ipp_program_prescale = dce110_ipp_program_prescale,
40 .ipp_set_degamma = dce110_ipp_set_degamma,
41};
42
43bool dce80_ipp_construct(
44 struct dce110_ipp *ipp,
45 struct dc_context *ctx,
46 uint32_t inst,
47 const struct dce110_ipp_reg_offsets *offset)
48{
49 ipp->base.ctx = ctx;
50
51 ipp->base.inst = inst;
52
53 ipp->offsets = *offset;
54
55 ipp->base.funcs = &funcs;
56
57 return true;
58}
59
60void dce80_ipp_destroy(struct input_pixel_processor **ipp)
61{
62 dm_free(TO_DCE80_IPP(*ipp));
63 *ipp = NULL;
64}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.h
new file mode 100644
index 000000000000..d350138e5feb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_IPP_DCE80_H__
27#define __DC_IPP_DCE80_H__
28
29#include "ipp.h"
30
31#define TO_DCE80_IPP(input_pixel_processor)\
32 container_of(input_pixel_processor, struct dce110_ipp, base)
33
34struct dce110_ipp;
35struct dce110_ipp_reg_offsets;
36struct gamma_parameters;
37struct dev_c_lut;
38
39bool dce80_ipp_construct(
40 struct dce110_ipp *ipp,
41 struct dc_context *ctx,
42 uint32_t inst,
43 const struct dce110_ipp_reg_offsets *offset);
44
45void dce80_ipp_destroy(struct input_pixel_processor **ipp);
46
47#endif /*__DC_IPP_DCE80_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp_gamma.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp_gamma.c
new file mode 100644
index 000000000000..eacb14e40d52
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_ipp_gamma.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/logger_interface.h"
29#include "include/fixed31_32.h"
30#include "basics/conversion.h"
31
32#include "dce/dce_8_0_d.h"
33#include "dce/dce_8_0_sh_mask.h"
34
35#include "dce80_ipp.h"
36#include "dce110/dce110_ipp.h"
37#include "gamma_types.h"
38
39#define DCP_REG(reg)\
40 (reg + ipp80->offsets.dcp_offset)
41
42enum {
43 MAX_INPUT_LUT_ENTRY = 256
44};
45
46/*PROTOTYPE DECLARATIONS*/
47
48static void set_legacy_input_gamma_mode(
49 struct dce110_ipp *ipp80,
50 bool is_legacy);
51
52void dce80_ipp_set_legacy_input_gamma_mode(
53 struct input_pixel_processor *ipp,
54 bool is_legacy)
55{
56 struct dce110_ipp *ipp80 = TO_DCE80_IPP(ipp);
57
58 set_legacy_input_gamma_mode(ipp80, is_legacy);
59}
60
61static void set_legacy_input_gamma_mode(
62 struct dce110_ipp *ipp80,
63 bool is_legacy)
64{
65 const uint32_t addr = DCP_REG(mmINPUT_GAMMA_CONTROL);
66 uint32_t value = dm_read_reg(ipp80->base.ctx, addr);
67
68 set_reg_field_value(
69 value,
70 !is_legacy,
71 INPUT_GAMMA_CONTROL,
72 GRPH_INPUT_GAMMA_MODE);
73
74 dm_write_reg(ipp80->base.ctx, addr, value);
75}
76
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.c
new file mode 100644
index 000000000000..ebb8df3cdf4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26
27#include "dce/dce_8_0_d.h"
28#include "dce/dce_8_0_sh_mask.h"
29/* TODO: this needs to be looked at, used by Stella's workaround*/
30#include "gmc/gmc_7_1_d.h"
31#include "gmc/gmc_7_1_sh_mask.h"
32
33#include "include/logger_interface.h"
34#include "inc/bandwidth_calcs.h"
35
36#include "../dce110/dce110_mem_input.h"
37#include "dce80_mem_input.h"
38
39#define MAX_WATERMARK 0xFFFF
40#define SAFE_NBP_MARK 0x7FFF
41
42#define DCP_REG(reg) (reg + mem_input80->offsets.dcp)
43#define DMIF_REG(reg) (reg + mem_input80->offsets.dmif)
44#define PIPE_REG(reg) (reg + mem_input80->offsets.pipe)
45
46static struct mem_input_funcs dce80_mem_input_funcs = {
47 .mem_input_program_display_marks =
48 dce110_mem_input_program_display_marks,
49 .allocate_mem_input = dce_mem_input_allocate_dmif,
50 .free_mem_input = dce_mem_input_free_dmif,
51 .mem_input_program_surface_flip_and_addr =
52 dce110_mem_input_program_surface_flip_and_addr,
53 .mem_input_program_surface_config =
54 dce_mem_input_program_surface_config,
55 .mem_input_is_flip_pending =
56 dce110_mem_input_is_flip_pending,
57 .mem_input_update_dchub = NULL
58};
59
60/*****************************************/
61/* Constructor, Destructor */
62/*****************************************/
63
64bool dce80_mem_input_construct(
65 struct dce110_mem_input *mem_input80,
66 struct dc_context *ctx,
67 uint32_t inst,
68 const struct dce110_mem_input_reg_offsets *offsets)
69{
70	/* Supported stutter methods:
71	 * STUTTER_MODE_ENHANCED
72	 * STUTTER_MODE_QUAD_DMIF_BUFFER
73	 */
74 mem_input80->base.funcs = &dce80_mem_input_funcs;
75 mem_input80->base.ctx = ctx;
76
77 mem_input80->base.inst = inst;
78
79 mem_input80->offsets = *offsets;
80
81 return true;
82}
83
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.h
new file mode 100644
index 000000000000..357b9e2e9f1e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_mem_input.h
@@ -0,0 +1,36 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_MEM_INPUT_DCE80_H__
26#define __DC_MEM_INPUT_DCE80_H__
27
28#include "mem_input.h"
29
30bool dce80_mem_input_construct(
31 struct dce110_mem_input *mem_input80,
32 struct dc_context *ctx,
33 uint32_t inst,
34 const struct dce110_mem_input_reg_offsets *offsets);
35
36#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.c
new file mode 100644
index 000000000000..b69e8a5d844d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE8 register header files */
29#include "dce/dce_8_0_d.h"
30#include "dce/dce_8_0_sh_mask.h"
31
32#include "dce80_opp.h"
33
34#define FROM_OPP(opp)\
35 container_of(opp, struct dce80_opp, base)
36
37enum {
38 MAX_LUT_ENTRY = 256,
39 MAX_NUMBER_OF_ENTRIES = 256
40};
41
42static const struct dce80_opp_reg_offsets reg_offsets[] = {
43{
44 .fmt_offset = (mmFMT0_FMT_CONTROL - mmFMT0_FMT_CONTROL),
45 .crtc_offset = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL -
46 mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
47 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
48},
49{ .fmt_offset = (mmFMT1_FMT_CONTROL - mmFMT0_FMT_CONTROL),
50 .crtc_offset = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL -
51 mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
52 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
53},
54{ .fmt_offset = (mmFMT2_FMT_CONTROL - mmFMT0_FMT_CONTROL),
55 .crtc_offset = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL -
56 mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
57 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
58},
59{
60 .fmt_offset = (mmFMT3_FMT_CONTROL - mmFMT0_FMT_CONTROL),
61 .crtc_offset = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL -
62 mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
63 .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
64},
65{
66 .fmt_offset = (mmFMT4_FMT_CONTROL - mmFMT0_FMT_CONTROL),
67 .crtc_offset = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL -
68 mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
69 .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
70},
71{
72 .fmt_offset = (mmFMT5_FMT_CONTROL - mmFMT0_FMT_CONTROL),
73 .crtc_offset = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL -
74 mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
75 .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
76}
77};
78
79static const struct opp_funcs funcs = {
80 .opp_power_on_regamma_lut = dce80_opp_power_on_regamma_lut,
81 .opp_set_csc_adjustment = dce80_opp_set_csc_adjustment,
82 .opp_set_csc_default = dce80_opp_set_csc_default,
83 .opp_set_dyn_expansion = dce80_opp_set_dyn_expansion,
84 .opp_program_regamma_pwl = dce80_opp_program_regamma_pwl,
85 .opp_set_regamma_mode = dce80_opp_set_regamma_mode,
86 .opp_destroy = dce80_opp_destroy,
87 .opp_program_fmt = dce110_opp_program_fmt,
88};
89
90/*****************************************/
91/* Constructor, Destructor */
92/*****************************************/
93
94bool dce80_opp_construct(struct dce80_opp *opp80,
95 struct dc_context *ctx,
96 uint32_t inst)
97{
98 if (inst >= ARRAY_SIZE(reg_offsets))
99 return false;
100
101 opp80->base.funcs = &funcs;
102
103 opp80->base.ctx = ctx;
104
105 opp80->base.inst = inst;
106
107 opp80->offsets = reg_offsets[inst];
108
109 return true;
110}
111
112void dce80_opp_destroy(struct output_pixel_processor **opp)
113{
114 dm_free(FROM_OPP(*opp));
115 *opp = NULL;
116}
117
118struct output_pixel_processor *dce80_opp_create(
119 struct dc_context *ctx,
120 uint32_t inst)
121{
122 struct dce80_opp *opp =
123 dm_alloc(sizeof(struct dce80_opp));
124
125 if (!opp)
126 return NULL;
127
128 if (dce80_opp_construct(opp,
129 ctx, inst))
130 return &opp->base;
131
132 BREAK_TO_DEBUGGER();
133 dm_free(opp);
134 return NULL;
135}
136
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.h
new file mode 100644
index 000000000000..965cce37f7e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp.h
@@ -0,0 +1,130 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_OPP_DCE80_H__
26#define __DC_OPP_DCE80_H__
27
28#include "dc_types.h"
29#include "opp.h"
30#include "gamma_types.h"
31#include "../dce110/dce110_opp.h"
32
33struct gamma_parameters;
34
35struct dce80_regamma {
36 struct gamma_curve arr_curve_points[16];
37 struct curve_points arr_points[3];
38 uint32_t hw_points_num;
39 struct hw_x_point *coordinates_x;
40 struct pwl_result_data *rgb_resulted;
41
42 /* re-gamma curve */
43 struct pwl_float_data_ex *rgb_regamma;
44 /* coeff used to map user evenly distributed points
45 * to our hardware points (predefined) for gamma 256 */
46 struct pixel_gamma_point *coeff128;
47 struct pixel_gamma_point *coeff128_oem;
48 /* coeff used to map user evenly distributed points
49 * to our hardware points (predefined) for gamma 1025 */
50 struct pixel_gamma_point *coeff128_dx;
51 /* evenly distributed points, gamma 256 software points 0-255 */
52 struct gamma_pixel *axis_x_256;
53 /* evenly distributed points, gamma 1025 software points 0-1025 */
54 struct gamma_pixel *axis_x_1025;
55 /* OEM supplied gamma for regamma LUT */
56 struct pwl_float_data *rgb_oem;
57 /* user supplied gamma */
58 struct pwl_float_data *rgb_user;
59 uint32_t extra_points;
60 bool use_half_points;
61 struct fixed31_32 x_max1;
62 struct fixed31_32 x_max2;
63 struct fixed31_32 x_min;
64 struct fixed31_32 divider1;
65 struct fixed31_32 divider2;
66 struct fixed31_32 divider3;
67};
68
69/* OPP RELATED */
70#define TO_DCE80_OPP(opp)\
71 container_of(opp, struct dce80_opp, base)
72
73struct dce80_opp_reg_offsets {
74 uint32_t fmt_offset;
75 uint32_t dcp_offset;
76 uint32_t crtc_offset;
77};
78
79struct dce80_opp {
80 struct output_pixel_processor base;
81 struct dce80_opp_reg_offsets offsets;
82 struct dce80_regamma regamma;
83};
84
85bool dce80_opp_construct(struct dce80_opp *opp80,
86 struct dc_context *ctx,
87 uint32_t inst);
88
89void dce80_opp_destroy(struct output_pixel_processor **opp);
90
91struct output_pixel_processor *dce80_opp_create(
92 struct dc_context *ctx,
93 uint32_t inst);
94
95/* REGAMMA RELATED */
96void dce80_opp_power_on_regamma_lut(
97 struct output_pixel_processor *opp,
98 bool power_on);
99
100bool dce80_opp_program_regamma_pwl(
101 struct output_pixel_processor *opp,
102	const struct pwl_params *params);
103
104void dce80_opp_set_regamma_mode(struct output_pixel_processor *opp,
105 enum opp_regamma mode);
106
107void dce80_opp_set_csc_adjustment(
108 struct output_pixel_processor *opp,
109 const struct out_csc_color_matrix *tbl_entry);
110
111void dce80_opp_set_csc_default(
112 struct output_pixel_processor *opp,
113 const struct default_adjustment *default_adjust);
114
115/* FORMATTER RELATED */
116void dce80_opp_program_bit_depth_reduction(
117 struct output_pixel_processor *opp,
118 const struct bit_depth_reduction_params *params);
119
120void dce80_opp_program_clamping_and_pixel_encoding(
121 struct output_pixel_processor *opp,
122 const struct clamping_and_pixel_encoding_params *params);
123
124void dce80_opp_set_dyn_expansion(
125 struct output_pixel_processor *opp,
126 enum dc_color_space color_sp,
127 enum dc_color_depth color_dpth,
128 enum signal_type signal);
129
130#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_csc.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_csc.c
new file mode 100644
index 000000000000..bdb9e0a77982
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_csc.c
@@ -0,0 +1,363 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce80_opp.h"
29#include "basics/conversion.h"
30
31/* include DCE8 register header files */
32#include "dce/dce_8_0_d.h"
33#include "dce/dce_8_0_sh_mask.h"
34
35#define DCP_REG(reg)\
36 (reg + opp80->offsets.dcp_offset)
37
38enum {
39 OUTPUT_CSC_MATRIX_SIZE = 12
40};
41
42static const struct out_csc_color_matrix global_color_matrix[] = {
43{ COLOR_SPACE_SRGB,
44 { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
45{ COLOR_SPACE_SRGB_LIMITED,
46 { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
47{ COLOR_SPACE_YCBCR601,
48 { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
49 0xF6B9, 0xE00, 0x1000} },
50{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
51 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
52/* TODO: correct values below */
53{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
54 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
55{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
56 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
57};
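/*
 * Editorial note, illustration only: program_color_matrix() below documents
 * the coefficients as fixed S2.13 values, i.e. 13 fractional bits, so
 * 1.0 == (1 << 13) == 0x2000 -- which is why the sRGB entry above uses
 * 0x2000 on the diagonal. Negative coefficients such as 0xF447 appear to be
 * stored as two's complement within the 16-bit field
 * (0xF447 == -0x0BB9 == -3001/8192, roughly -0.366); treat that encoding
 * detail as an assumption, it is not spelled out in this file.
 */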
58
59enum csc_color_mode {
60 /* 00 - BITS2:0 Bypass */
61 CSC_COLOR_MODE_GRAPHICS_BYPASS,
62 /* 01 - hard coded coefficient TV RGB */
63 CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
64 /* 04 - programmable OUTPUT CSC coefficient */
65 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
66};
67
68static void program_color_matrix(
69 struct dce80_opp *opp80,
70 const struct out_csc_color_matrix *tbl_entry,
71 enum grph_color_adjust_option options)
72{
73 struct dc_context *ctx = opp80->base.ctx;
74 {
75 uint32_t value = 0;
76 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C11_C12);
77 /* fixed S2.13 format */
78 set_reg_field_value(
79 value,
80 tbl_entry->regval[0],
81 OUTPUT_CSC_C11_C12,
82 OUTPUT_CSC_C11);
83
84 set_reg_field_value(
85 value,
86 tbl_entry->regval[1],
87 OUTPUT_CSC_C11_C12,
88 OUTPUT_CSC_C12);
89
90 dm_write_reg(ctx, addr, value);
91 }
92 {
93 uint32_t value = 0;
94 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C13_C14);
95 /* fixed S2.13 format */
96 set_reg_field_value(
97 value,
98 tbl_entry->regval[2],
99 OUTPUT_CSC_C13_C14,
100 OUTPUT_CSC_C13);
101 /* fixed S0.13 format */
102 set_reg_field_value(
103 value,
104 tbl_entry->regval[3],
105 OUTPUT_CSC_C13_C14,
106 OUTPUT_CSC_C14);
107
108 dm_write_reg(ctx, addr, value);
109 }
110 {
111 uint32_t value = 0;
112 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C21_C22);
113 /* fixed S2.13 format */
114 set_reg_field_value(
115 value,
116 tbl_entry->regval[4],
117 OUTPUT_CSC_C21_C22,
118 OUTPUT_CSC_C21);
119 /* fixed S2.13 format */
120 set_reg_field_value(
121 value,
122 tbl_entry->regval[5],
123 OUTPUT_CSC_C21_C22,
124 OUTPUT_CSC_C22);
125
126 dm_write_reg(ctx, addr, value);
127 }
128 {
129 uint32_t value = 0;
130 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C23_C24);
131 /* fixed S2.13 format */
132 set_reg_field_value(
133 value,
134 tbl_entry->regval[6],
135 OUTPUT_CSC_C23_C24,
136 OUTPUT_CSC_C23);
137 /* fixed S0.13 format */
138 set_reg_field_value(
139 value,
140 tbl_entry->regval[7],
141 OUTPUT_CSC_C23_C24,
142 OUTPUT_CSC_C24);
143
144 dm_write_reg(ctx, addr, value);
145 }
146 {
147 uint32_t value = 0;
148 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C31_C32);
149 /* fixed S2.13 format */
150 set_reg_field_value(
151 value,
152 tbl_entry->regval[8],
153 OUTPUT_CSC_C31_C32,
154 OUTPUT_CSC_C31);
155 /* fixed S0.13 format */
156 set_reg_field_value(
157 value,
158 tbl_entry->regval[9],
159 OUTPUT_CSC_C31_C32,
160 OUTPUT_CSC_C32);
161
162 dm_write_reg(ctx, addr, value);
163 }
164 {
165 uint32_t value = 0;
166 uint32_t addr = DCP_REG(mmOUTPUT_CSC_C33_C34);
167 /* fixed S2.13 format */
168 set_reg_field_value(
169 value,
170 tbl_entry->regval[10],
171 OUTPUT_CSC_C33_C34,
172 OUTPUT_CSC_C33);
173 /* fixed S0.13 format */
174 set_reg_field_value(
175 value,
176 tbl_entry->regval[11],
177 OUTPUT_CSC_C33_C34,
178 OUTPUT_CSC_C34);
179
180 dm_write_reg(ctx, addr, value);
181 }
182}
183
184static bool configure_graphics_mode(
185 struct dce80_opp *opp80,
186 enum csc_color_mode config,
187 enum graphics_csc_adjust_type csc_adjust_type,
188 enum dc_color_space color_space)
189{
190 struct dc_context *ctx = opp80->base.ctx;
191 uint32_t addr = DCP_REG(mmOUTPUT_CSC_CONTROL);
192 uint32_t value = dm_read_reg(ctx, addr);
193
194 set_reg_field_value(
195 value,
196 0,
197 OUTPUT_CSC_CONTROL,
198 OUTPUT_CSC_GRPH_MODE);
199
200 if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
201 if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC) {
202 set_reg_field_value(
203 value,
204 4,
205 OUTPUT_CSC_CONTROL,
206 OUTPUT_CSC_GRPH_MODE);
207 } else {
208
209 switch (color_space) {
210 case COLOR_SPACE_SRGB:
211				/* bypass */
212 set_reg_field_value(
213 value,
214 0,
215 OUTPUT_CSC_CONTROL,
216 OUTPUT_CSC_GRPH_MODE);
217 break;
218 case COLOR_SPACE_SRGB_LIMITED:
219 /* TV RGB */
220 set_reg_field_value(
221 value,
222 1,
223 OUTPUT_CSC_CONTROL,
224 OUTPUT_CSC_GRPH_MODE);
225 break;
226 case COLOR_SPACE_YCBCR601:
227 case COLOR_SPACE_YPBPR601:
228 case COLOR_SPACE_YCBCR601_LIMITED:
229 /* YCbCr601 */
230 set_reg_field_value(
231 value,
232 2,
233 OUTPUT_CSC_CONTROL,
234 OUTPUT_CSC_GRPH_MODE);
235 break;
236 case COLOR_SPACE_YCBCR709:
237 case COLOR_SPACE_YPBPR709:
238 case COLOR_SPACE_YCBCR709_LIMITED:
239 /* YCbCr709 */
240 set_reg_field_value(
241 value,
242 3,
243 OUTPUT_CSC_CONTROL,
244 OUTPUT_CSC_GRPH_MODE);
245 break;
246 default:
247 return false;
248 }
249 }
250 } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
251 switch (color_space) {
252 case COLOR_SPACE_SRGB:
253			/* bypass */
254 set_reg_field_value(
255 value,
256 0,
257 OUTPUT_CSC_CONTROL,
258 OUTPUT_CSC_GRPH_MODE);
259 break;
260 case COLOR_SPACE_SRGB_LIMITED:
261 /* TV RGB */
262 set_reg_field_value(
263 value,
264 1,
265 OUTPUT_CSC_CONTROL,
266 OUTPUT_CSC_GRPH_MODE);
267 break;
268 case COLOR_SPACE_YCBCR601:
269 case COLOR_SPACE_YPBPR601:
270 case COLOR_SPACE_YCBCR601_LIMITED:
271 /* YCbCr601 */
272 set_reg_field_value(
273 value,
274 2,
275 OUTPUT_CSC_CONTROL,
276 OUTPUT_CSC_GRPH_MODE);
277 break;
278 case COLOR_SPACE_YCBCR709:
279 case COLOR_SPACE_YPBPR709:
280 case COLOR_SPACE_YCBCR709_LIMITED:
281 /* YCbCr709 */
282 set_reg_field_value(
283 value,
284 3,
285 OUTPUT_CSC_CONTROL,
286 OUTPUT_CSC_GRPH_MODE);
287 break;
288 default:
289 return false;
290 }
291
292 } else
293		/* bypass */
294 set_reg_field_value(
295 value,
296 0,
297 OUTPUT_CSC_CONTROL,
298 OUTPUT_CSC_GRPH_MODE);
299
300 addr = DCP_REG(mmOUTPUT_CSC_CONTROL);
301 dm_write_reg(ctx, addr, value);
302
303 return true;
304}
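/*
 * Editorial summary derived from the switches above, illustration only:
 * OUTPUT_CSC_GRPH_MODE is programmed as
 *	0 - bypass (sRGB)
 *	1 - hard-coded TV RGB (sRGB limited)
 *	2 - hard-coded YCbCr601
 *	3 - hard-coded YCbCr709
 *	4 - programmable coefficients (CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC)
 * matching the csc_color_mode comments near the top of this file.
 */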
305
306void dce80_opp_set_csc_adjustment(
307 struct output_pixel_processor *opp,
308 const struct out_csc_color_matrix *tbl_entry)
309{
310 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
311 enum csc_color_mode config =
312 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
313
314	program_color_matrix(opp80, tbl_entry, GRPH_COLOR_MATRIX_SW);
315
316	/* We did everything, now program DxOUTPUT_CSC_CONTROL */
317 configure_graphics_mode(opp80, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
318 tbl_entry->color_space);
319}
320
321void dce80_opp_set_csc_default(
322 struct output_pixel_processor *opp,
323 const struct default_adjustment *default_adjust)
324{
325 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
326 enum csc_color_mode config =
327 CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
328
329 if (default_adjust->force_hw_default == false) {
330 const struct out_csc_color_matrix *elm;
331 /* currently parameter not in use */
332 enum grph_color_adjust_option option =
333 GRPH_COLOR_MATRIX_HW_DEFAULT;
334 uint32_t i;
335		/*
336		 * If HW default is false, we program the locally defined
337		 * matrix; if HW default is true, we use the predefined HW
338		 * matrix and do not need to program one.
339		 * The OEM requests the HW default via a runtime parameter.
340		 */
341 option = GRPH_COLOR_MATRIX_SW;
342
343 for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
344 elm = &global_color_matrix[i];
345 if (elm->color_space != default_adjust->out_color_space)
346 continue;
347 /* program the matrix with default values from this
348 * file */
349 program_color_matrix(opp80, elm, option);
350 config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
351 break;
352 }
353 }
354
355	/* Configure what we programmed:
356	 * 1. Default values from this file
357	 * 2. Use the hardware default from ROM_A, in which case we do not
358	 *    need to program the matrix */
359
360 configure_graphics_mode(opp80, config,
361 default_adjust->csc_adjust_type,
362 default_adjust->out_color_space);
363}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_formatter.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_formatter.c
new file mode 100644
index 000000000000..433296a4ed12
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_formatter.c
@@ -0,0 +1,577 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_8_0_d.h"
29#include "dce/dce_8_0_sh_mask.h"
30
31#include "dce80_opp.h"
32
33#define FMT_REG(reg)\
34 (reg + opp80->offsets.fmt_offset)
35
36/**
37 * set_truncation
38 * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
39 * 2) enable truncation
40 * 3) HW removed 12-bit FMT support on DCE8 for power-saving reasons.
41 */
42static void set_truncation(
43 struct dce80_opp *opp80,
44 const struct bit_depth_reduction_params *params)
45{
46 uint32_t value = 0;
47 uint32_t addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
48
49 /*Disable truncation*/
50 value = dm_read_reg(opp80->base.ctx, addr);
51 set_reg_field_value(value, 0,
52 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN);
53 set_reg_field_value(value, 0,
54 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH);
55 set_reg_field_value(value, 0,
56 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE);
57
58 dm_write_reg(opp80->base.ctx, addr, value);
59
60 /* no 10bpc trunc on DCE8*/
61 if (params->flags.TRUNCATE_ENABLED == 0 ||
62 params->flags.TRUNCATE_DEPTH == 2)
63 return;
64
65 /*Set truncation depth and Enable truncation*/
66 set_reg_field_value(value, 1,
67 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN);
68 set_reg_field_value(value, params->flags.TRUNCATE_MODE,
69 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE);
70 set_reg_field_value(value, params->flags.TRUNCATE_DEPTH,
71 FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH);
72
73 dm_write_reg(opp80->base.ctx, addr, value);
74
75}
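/* Illustrative sketch only (values are assumptions, not taken from this
 * patch): how a caller could request 24 bpp truncation using the flags
 * consumed by set_truncation() above. */
#if 0
static void example_request_24bpp_truncation(struct dce80_opp *opp80)
{
	struct bit_depth_reduction_params params = { 0 };

	params.flags.TRUNCATE_ENABLED = 1;
	params.flags.TRUNCATE_DEPTH = 1;	/* 1 selects 24 bpp, per the comment above */
	params.flags.TRUNCATE_MODE = 0;

	set_truncation(opp80, &params);
}
#endif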
76
77/**
78 * set_spatial_dither
79 * 1) set spatial dithering mode: seed pattern
80 * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
81 * 3) set random seed
82 * 4) set random mode
83 * whether the LFSR is reset every frame or not
84 * RGB dithering method
85 * 0: RGB data are all dithered with x^28+x^3+1
86 * 1: R data is dithered with x^28+x^3+1
87 * G data is dithered with x^28+x^9+1
88 * B data is dithered with x^28+x^13+1
89 * enable high pass filter or not
90 * 5) enable spatial dithering
91 */
92static void set_spatial_dither(
93 struct dce80_opp *opp80,
94 const struct bit_depth_reduction_params *params)
95{
96 uint32_t addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
97 uint32_t depth_cntl_value = 0;
98 uint32_t dither_r_value = 0;
99 uint32_t dither_g_value = 0;
100 uint32_t dither_b_value = 0;
101
102 /*Disable spatial (random) dithering*/
103 depth_cntl_value = dm_read_reg(opp80->base.ctx, addr);
104 set_reg_field_value(depth_cntl_value, 0,
105 FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN);
106 set_reg_field_value(depth_cntl_value, 0,
107 FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE);
108 set_reg_field_value(depth_cntl_value, 0,
109 FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH);
110 set_reg_field_value(depth_cntl_value, 0,
111 FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN);
112 set_reg_field_value(depth_cntl_value, 0,
113 FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE);
114 set_reg_field_value(depth_cntl_value, 0,
115 FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE);
116 set_reg_field_value(depth_cntl_value, 0,
117 FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE);
118
119 dm_write_reg(opp80->base.ctx, addr, depth_cntl_value);
120
121 /* no 10bpc on DCE8*/
122 if (params->flags.SPATIAL_DITHER_ENABLED == 0 ||
123 params->flags.SPATIAL_DITHER_DEPTH == 2)
124 return;
125
126 /* Set the random-value seeds used for
127 * spatial dithering of the R, G and B channels */
128 addr = FMT_REG(mmFMT_DITHER_RAND_R_SEED);
129 set_reg_field_value(dither_r_value, params->r_seed_value,
130 FMT_DITHER_RAND_R_SEED,
131 FMT_RAND_R_SEED);
132 dm_write_reg(opp80->base.ctx, addr, dither_r_value);
133
134 addr = FMT_REG(mmFMT_DITHER_RAND_G_SEED);
135 set_reg_field_value(dither_g_value,
136 params->g_seed_value,
137 FMT_DITHER_RAND_G_SEED,
138 FMT_RAND_G_SEED);
139 dm_write_reg(opp80->base.ctx, addr, dither_g_value);
140
141 addr = FMT_REG(mmFMT_DITHER_RAND_B_SEED);
142 set_reg_field_value(dither_b_value, params->b_seed_value,
143 FMT_DITHER_RAND_B_SEED,
144 FMT_RAND_B_SEED);
145 dm_write_reg(opp80->base.ctx, addr, dither_b_value);
146
147 /* FMT_OFFSET_R_Cr 31:16 0x0 Sets the zero
148 * offset for the R/Cr channel; the lower 4 LSBs
149 * are forced to zero. Typically set to 0 for
150 * RGB and 0x80000 for YCbCr.
151 */
152 /* FMT_OFFSET_G_Y 31:16 0x0 Sets the zero
153 * offset for the G/Y channel; the lower 4 LSBs are
154 * forced to zero. Typically set to 0 for RGB
155 * and 0x80000 for YCbCr.
156 */
157 /* FMT_OFFSET_B_Cb 31:16 0x0 Sets the zero
158 * offset for the B/Cb channel; the lower 4 LSBs are
159 * forced to zero. Typically set to 0 for RGB and
160 * 0x80000 for YCbCr.
161 */
162
163 /*Set spatial dithering bit depth*/
164 set_reg_field_value(depth_cntl_value,
165 params->flags.SPATIAL_DITHER_DEPTH,
166 FMT_BIT_DEPTH_CONTROL,
167 FMT_SPATIAL_DITHER_DEPTH);
168
169 /* Set spatial dithering mode
170 * (default is seed pattern AAAA...)
171 */
172 set_reg_field_value(depth_cntl_value,
173 params->flags.SPATIAL_DITHER_MODE,
174 FMT_BIT_DEPTH_CONTROL,
175 FMT_SPATIAL_DITHER_MODE);
176
177 /*Reset only at startup*/
178 set_reg_field_value(depth_cntl_value,
179 params->flags.FRAME_RANDOM,
180 FMT_BIT_DEPTH_CONTROL,
181 FMT_FRAME_RANDOM_ENABLE);
182
183 /*Set RGB data dithered with x^28+x^3+1*/
184 set_reg_field_value(depth_cntl_value,
185 params->flags.RGB_RANDOM,
186 FMT_BIT_DEPTH_CONTROL,
187 FMT_RGB_RANDOM_ENABLE);
188
189 /*Disable High pass filter*/
190 set_reg_field_value(depth_cntl_value,
191 params->flags.HIGHPASS_RANDOM,
192 FMT_BIT_DEPTH_CONTROL,
193 FMT_HIGHPASS_RANDOM_ENABLE);
194
195 /*Enable spatial dithering*/
196 set_reg_field_value(depth_cntl_value,
197 1,
198 FMT_BIT_DEPTH_CONTROL,
199 FMT_SPATIAL_DITHER_EN);
200
201 addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
202 dm_write_reg(opp80->base.ctx, addr, depth_cntl_value);
203
204}
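/* Illustrative sketch only (seed values are assumptions): enabling 24 bpp
 * spatial dithering with per-channel LFSR seeds, matching the fields read
 * by set_spatial_dither() above. */
#if 0
static void example_enable_spatial_dither(struct dce80_opp *opp80)
{
	struct bit_depth_reduction_params params = { 0 };

	params.flags.SPATIAL_DITHER_ENABLED = 1;
	params.flags.SPATIAL_DITHER_DEPTH = 1;	/* 1 selects 24 bpp */
	params.flags.SPATIAL_DITHER_MODE = 0;	/* default seed pattern */
	params.flags.FRAME_RANDOM = 0;		/* FMT_FRAME_RANDOM_ENABLE, see note above */
	params.flags.RGB_RANDOM = 0;		/* 0: all channels use x^28+x^3+1 */
	params.flags.HIGHPASS_RANDOM = 0;
	params.r_seed_value = 0xAAAA;		/* assumed seeds */
	params.g_seed_value = 0xAAAA;
	params.b_seed_value = 0xAAAA;

	set_spatial_dither(opp80, &params);
}
#endif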
205
206/**
207 * set_temporal_dither (frame modulation)
208 * 1) set temporal dither depth
209 * 2) select pattern: from hard-coded pattern or programmable pattern
210 * 3) select optimized stripes for BGR or RGB LCD sub-pixel layout
211 * 4) set s matrix
212 * 5) set t matrix
213 * 6) set grey level for 0.25, 0.5, 0.75
214 * 7) enable temporal dithering
215 */
216static void set_temporal_dither(
217 struct dce80_opp *opp80,
218 const struct bit_depth_reduction_params *params)
219{
220 uint32_t addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
221 uint32_t value;
222
223 /*Disable temporal (frame modulation) dithering first*/
224 value = dm_read_reg(opp80->base.ctx, addr);
225
226 set_reg_field_value(value,
227 0,
228 FMT_BIT_DEPTH_CONTROL,
229 FMT_TEMPORAL_DITHER_EN);
230
231 set_reg_field_value(value,
232 0,
233 FMT_BIT_DEPTH_CONTROL,
234 FMT_TEMPORAL_DITHER_RESET);
235 set_reg_field_value(value,
236 0,
237 FMT_BIT_DEPTH_CONTROL,
238 FMT_TEMPORAL_DITHER_OFFSET);
239 set_reg_field_value(value,
240 0,
241 FMT_BIT_DEPTH_CONTROL,
242 FMT_TEMPORAL_DITHER_DEPTH);
243 set_reg_field_value(value,
244 0,
245 FMT_BIT_DEPTH_CONTROL,
246 FMT_TEMPORAL_LEVEL);
247 set_reg_field_value(value,
248 0,
249 FMT_BIT_DEPTH_CONTROL,
250 FMT_25FRC_SEL);
251
252 set_reg_field_value(value,
253 0,
254 FMT_BIT_DEPTH_CONTROL,
255 FMT_50FRC_SEL);
256
257 set_reg_field_value(value,
258 0,
259 FMT_BIT_DEPTH_CONTROL,
260 FMT_75FRC_SEL);
261
262 dm_write_reg(opp80->base.ctx, addr, value);
263
264 /* no 10bpc dither on DCE8*/
265 if (params->flags.FRAME_MODULATION_ENABLED == 0 ||
266 params->flags.FRAME_MODULATION_DEPTH == 2)
267 return;
268
269 /* Set temporal dithering depth*/
270 set_reg_field_value(value,
271 params->flags.FRAME_MODULATION_DEPTH,
272 FMT_BIT_DEPTH_CONTROL,
273 FMT_TEMPORAL_DITHER_DEPTH);
274
275 set_reg_field_value(value,
276 0,
277 FMT_BIT_DEPTH_CONTROL,
278 FMT_TEMPORAL_DITHER_RESET);
279
280 set_reg_field_value(value,
281 0,
282 FMT_BIT_DEPTH_CONTROL,
283 FMT_TEMPORAL_DITHER_OFFSET);
284
285 /*Select legacy pattern based on FRC and Temporal level*/
286 addr = FMT_REG(mmFMT_TEMPORAL_DITHER_PATTERN_CONTROL);
287 dm_write_reg(opp80->base.ctx, addr, 0);
288 /*Set s matrix*/
289 addr = FMT_REG(
290 mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX);
291 dm_write_reg(opp80->base.ctx, addr, 0);
292 /*Set t matrix*/
293 addr = FMT_REG(
294 mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX);
295 dm_write_reg(opp80->base.ctx, addr, 0);
296
297 /*Select patterns for 0.25, 0.5 and 0.75 grey level*/
298 set_reg_field_value(value,
299 params->flags.TEMPORAL_LEVEL,
300 FMT_BIT_DEPTH_CONTROL,
301 FMT_TEMPORAL_LEVEL);
302
303 set_reg_field_value(value,
304 params->flags.FRC25,
305 FMT_BIT_DEPTH_CONTROL,
306 FMT_25FRC_SEL);
307
308 set_reg_field_value(value,
309 params->flags.FRC50,
310 FMT_BIT_DEPTH_CONTROL,
311 FMT_50FRC_SEL);
312
313 set_reg_field_value(value,
314 params->flags.FRC75,
315 FMT_BIT_DEPTH_CONTROL,
316 FMT_75FRC_SEL);
317
318 /*Enable bit reduction by temporal (frame modulation) dithering*/
319 set_reg_field_value(value,
320 1,
321 FMT_BIT_DEPTH_CONTROL,
322 FMT_TEMPORAL_DITHER_EN);
323
324 addr = FMT_REG(mmFMT_BIT_DEPTH_CONTROL);
325 dm_write_reg(opp80->base.ctx, addr, value);
326
327}
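/* Illustrative sketch only (values are assumptions): enabling temporal
 * dithering (frame modulation) with the legacy FRC patterns selected by
 * set_temporal_dither() above. */
#if 0
static void example_enable_temporal_dither(struct dce80_opp *opp80)
{
	struct bit_depth_reduction_params params = { 0 };

	params.flags.FRAME_MODULATION_ENABLED = 1;
	params.flags.FRAME_MODULATION_DEPTH = 1;	/* assumed: 1 selects 24 bpp, as for truncation */
	params.flags.TEMPORAL_LEVEL = 0;		/* grey-level pattern select */
	params.flags.FRC25 = 0;
	params.flags.FRC50 = 0;
	params.flags.FRC75 = 0;

	set_temporal_dither(opp80, &params);
}
#endif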
328
329/**
330 * set_clamping
331 * 1) Set clamping format based on bpc - 0 for 6 bpc (no clamping)
332 * 1 for 8 bpc
333 * 2 for 10 bpc
334 * 3 for 12 bpc
335 * 7 for programmable
336 * 2) Enable clamping if limited range is requested
337 */
338static void set_clamping(
339 struct dce80_opp *opp80,
340 const struct clamping_and_pixel_encoding_params *params)
341{
342 uint32_t clamp_cntl_value = 0;
343 uint32_t red_clamp_value = 0;
344 uint32_t green_clamp_value = 0;
345 uint32_t blue_clamp_value = 0;
346 uint32_t addr = FMT_REG(mmFMT_CLAMP_CNTL);
347
348 clamp_cntl_value = dm_read_reg(opp80->base.ctx, addr);
349
350 set_reg_field_value(clamp_cntl_value,
351 0,
352 FMT_CLAMP_CNTL,
353 FMT_CLAMP_DATA_EN);
354
355 set_reg_field_value(clamp_cntl_value,
356 0,
357 FMT_CLAMP_CNTL,
358 FMT_CLAMP_COLOR_FORMAT);
359
360 switch (params->clamping_level) {
361 case CLAMPING_FULL_RANGE:
362 break;
363
364 case CLAMPING_LIMITED_RANGE_8BPC:
365 set_reg_field_value(clamp_cntl_value,
366 1,
367 FMT_CLAMP_CNTL,
368 FMT_CLAMP_DATA_EN);
369
370 set_reg_field_value(clamp_cntl_value,
371 1,
372 FMT_CLAMP_CNTL,
373 FMT_CLAMP_COLOR_FORMAT);
374
375 break;
376
377 case CLAMPING_LIMITED_RANGE_10BPC:
378 set_reg_field_value(clamp_cntl_value,
379 1,
380 FMT_CLAMP_CNTL,
381 FMT_CLAMP_DATA_EN);
382
383 set_reg_field_value(clamp_cntl_value,
384 2,
385 FMT_CLAMP_CNTL,
386 FMT_CLAMP_COLOR_FORMAT);
387
388 break;
389 case CLAMPING_LIMITED_RANGE_12BPC:
390 set_reg_field_value(clamp_cntl_value,
391 1,
392 FMT_CLAMP_CNTL,
393 FMT_CLAMP_DATA_EN);
394
395 set_reg_field_value(clamp_cntl_value,
396 3,
397 FMT_CLAMP_CNTL,
398 FMT_CLAMP_COLOR_FORMAT);
399
400 break;
401 case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
402 set_reg_field_value(clamp_cntl_value,
403 1,
404 FMT_CLAMP_CNTL,
405 FMT_CLAMP_DATA_EN);
406
407 set_reg_field_value(clamp_cntl_value,
408 7,
409 FMT_CLAMP_CNTL,
410 FMT_CLAMP_COLOR_FORMAT);
411
412 /*set the defaults*/
413 set_reg_field_value(red_clamp_value,
414 0x10,
415 FMT_CLAMP_COMPONENT_R,
416 FMT_CLAMP_LOWER_R);
417
418 set_reg_field_value(red_clamp_value,
419 0xFEF,
420 FMT_CLAMP_COMPONENT_R,
421 FMT_CLAMP_UPPER_R);
422
423 addr = FMT_REG(mmFMT_CLAMP_COMPONENT_R);
424 dm_write_reg(opp80->base.ctx, addr, red_clamp_value);
425
426 set_reg_field_value(green_clamp_value,
427 0x10,
428 FMT_CLAMP_COMPONENT_G,
429 FMT_CLAMP_LOWER_G);
430
431 set_reg_field_value(green_clamp_value,
432 0xFEF,
433 FMT_CLAMP_COMPONENT_G,
434 FMT_CLAMP_UPPER_G);
435
436 addr = FMT_REG(mmFMT_CLAMP_COMPONENT_G);
437 dm_write_reg(opp80->base.ctx, addr, green_clamp_value);
438
439 set_reg_field_value(blue_clamp_value,
440 0x10,
441 FMT_CLAMP_COMPONENT_B,
442 FMT_CLAMP_LOWER_B);
443
444 set_reg_field_value(blue_clamp_value,
445 0xFEF,
446 FMT_CLAMP_COMPONENT_B,
447 FMT_CLAMP_UPPER_B);
448
449 addr = FMT_REG(mmFMT_CLAMP_COMPONENT_B);
450 dm_write_reg(opp80->base.ctx, addr, blue_clamp_value);
451
452 break;
453
454 default:
455 break;
456 }
457
458 addr = FMT_REG(mmFMT_CLAMP_CNTL);
459 /*Set clamp control*/
460 dm_write_reg(opp80->base.ctx, addr, clamp_cntl_value);
461
462}
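/* Illustrative sketch only: requesting programmable limited-range clamping,
 * which makes set_clamping() above program the 0x10..0xFEF per-channel
 * defaults. */
#if 0
static void example_programmable_clamping(struct dce80_opp *opp80)
{
	struct clamping_and_pixel_encoding_params params = { 0 };

	params.clamping_level = CLAMPING_LIMITED_RANGE_PROGRAMMABLE;

	set_clamping(opp80, &params);
}
#endif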
463
464/**
465 * set_pixel_encoding
466 *
467 * Set Pixel Encoding
468 * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
469 * 1: YCbCr 4:2:2
470 */
471static void set_pixel_encoding(
472 struct dce80_opp *opp80,
473 const struct clamping_and_pixel_encoding_params *params)
474{
475 uint32_t fmt_cntl_value;
476 uint32_t addr = FMT_REG(mmFMT_CONTROL);
477
478 /*RGB 4:4:4 or YCbCr 4:4:4 - 0; YCbCr 4:2:2 -1.*/
479 fmt_cntl_value = dm_read_reg(opp80->base.ctx, addr);
480
481 set_reg_field_value(fmt_cntl_value,
482 0,
483 FMT_CONTROL,
484 FMT_PIXEL_ENCODING);
485
486 if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
487 set_reg_field_value(fmt_cntl_value,
488 1,
489 FMT_CONTROL,
490 FMT_PIXEL_ENCODING);
491
492 /* 00 - pixel drop mode, 01 - pixel average mode */
493 set_reg_field_value(fmt_cntl_value,
494 0,
495 FMT_CONTROL,
496 FMT_SUBSAMPLING_MODE);
497
498 /* 00 - Cb before Cr, 01 - Cr before Cb */
499 set_reg_field_value(fmt_cntl_value,
500 0,
501 FMT_CONTROL,
502 FMT_SUBSAMPLING_ORDER);
503 }
504 dm_write_reg(opp80->base.ctx, addr, fmt_cntl_value);
505
506}
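/* Illustrative sketch only: selecting YCbCr 4:2:2 output, which leaves the
 * subsampling at the drop-mode / Cb-before-Cr defaults written above. */
#if 0
static void example_select_ycbcr422(struct dce80_opp *opp80)
{
	struct clamping_and_pixel_encoding_params params = { 0 };

	params.pixel_encoding = PIXEL_ENCODING_YCBCR422;

	set_pixel_encoding(opp80, &params);
}
#endif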
507
508void dce80_opp_program_bit_depth_reduction(
509 struct output_pixel_processor *opp,
510 const struct bit_depth_reduction_params *params)
511{
512 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
513
514 set_truncation(opp80, params);
515 set_spatial_dither(opp80, params);
516 set_temporal_dither(opp80, params);
517}
518
519void dce80_opp_program_clamping_and_pixel_encoding(
520 struct output_pixel_processor *opp,
521 const struct clamping_and_pixel_encoding_params *params)
522{
523 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
524
525 set_clamping(opp80, params);
526 set_pixel_encoding(opp80, params);
527}
528
529void dce80_opp_set_dyn_expansion(
530 struct output_pixel_processor *opp,
531 enum dc_color_space color_sp,
532 enum dc_color_depth color_dpth,
533 enum signal_type signal)
534{
535 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
536 uint32_t value;
537 bool enable_dyn_exp = false;
538 uint32_t addr = FMT_REG(mmFMT_DYNAMIC_EXP_CNTL);
539
540 value = dm_read_reg(opp->ctx, addr);
541
542 set_reg_field_value(value, 0,
543 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN);
544 set_reg_field_value(value, 0,
545 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE);
546
547 /* From HW programming guide:
548 * FMT_DYNAMIC_EXP_EN = 0 for limited-range RGB or YCbCr output
549 * FMT_DYNAMIC_EXP_EN = 1 for full-range RGB only */
550 if (color_sp == COLOR_SPACE_SRGB)
551 enable_dyn_exp = true;
552
553 /*00 - 10-bit -> 12-bit dynamic expansion*/
554 /*01 - 8-bit -> 12-bit dynamic expansion*/
555 if (signal == SIGNAL_TYPE_HDMI_TYPE_A) {
556 switch (color_dpth) {
557 case COLOR_DEPTH_888:
558 set_reg_field_value(value, enable_dyn_exp ? 1:0,
559 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN);
560 set_reg_field_value(value, 1,
561 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE);
562 break;
563 case COLOR_DEPTH_101010:
564 set_reg_field_value(value, enable_dyn_exp ? 1:0,
565 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN);
566 set_reg_field_value(value, 0,
567 FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE);
568 break;
569 case COLOR_DEPTH_121212:
570 break;
571 default:
572 break;
573 }
574 }
575
576 dm_write_reg(opp->ctx, addr, value);
577}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_regamma.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_regamma.c
new file mode 100644
index 000000000000..648e3ef35d91
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_opp_regamma.c
@@ -0,0 +1,543 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE8 register header files */
29#include "dce/dce_8_0_d.h"
30#include "dce/dce_8_0_sh_mask.h"
31
32#include "dce80_opp.h"
33#include "gamma_types.h"
34
35#define DCP_REG(reg)\
36 (reg + opp80->offsets.dcp_offset)
37
38#define DCFE_REG(reg)\
39 (reg + opp80->offsets.crtc_offset)
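/* Like FMT_REG() in dce80_opp_formatter.c, these add the per-instance block
 * offset: DCP_REG() uses offsets.dcp_offset and DCFE_REG() reuses the
 * offsets.crtc_offset field for the DCFE block. */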
40
41enum {
42 MAX_PWL_ENTRY = 128,
43 MAX_REGIONS_NUMBER = 16
44
45};
46
47struct curve_config {
48 uint32_t offset;
49 int8_t segments[MAX_REGIONS_NUMBER];
50 int8_t begin;
51};
52
53/*
54 *****************************************************************************
55 * Function: regamma_config_regions_and_segments
56 *
57 * build the regamma curve by using predefined hw points;
58 * uses interface parameters, like EDID coefficients.
59 *
60 * @param : parameters interface parameters
61 * @return void
62 *
63 * @note
64 *
65 * @see
66 *
67 *****************************************************************************
68 */
69static void regamma_config_regions_and_segments(
70 struct dce80_opp *opp80, const struct pwl_params *params)
71{
72 const struct gamma_curve *curve;
73 uint32_t value = 0;
74
75 {
76 set_reg_field_value(
77 value,
78 params->arr_points[0].custom_float_x,
79 REGAMMA_CNTLA_START_CNTL,
80 REGAMMA_CNTLA_EXP_REGION_START);
81
82 set_reg_field_value(
83 value,
84 0,
85 REGAMMA_CNTLA_START_CNTL,
86 REGAMMA_CNTLA_EXP_REGION_START_SEGMENT);
87
88 dm_write_reg(opp80->base.ctx,
89 DCP_REG(mmREGAMMA_CNTLA_START_CNTL),
90 value);
91 }
92 {
93 value = 0;
94 set_reg_field_value(
95 value,
96 params->arr_points[0].custom_float_slope,
97 REGAMMA_CNTLA_SLOPE_CNTL,
98 REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE);
99
100 dm_write_reg(opp80->base.ctx,
101 DCP_REG(mmREGAMMA_CNTLA_SLOPE_CNTL), value);
102 }
103 {
104 value = 0;
105 set_reg_field_value(
106 value,
107 params->arr_points[1].custom_float_x,
108 REGAMMA_CNTLA_END_CNTL1,
109 REGAMMA_CNTLA_EXP_REGION_END);
110
111 dm_write_reg(opp80->base.ctx,
112 DCP_REG(mmREGAMMA_CNTLA_END_CNTL1), value);
113 }
114 {
115 value = 0;
116 set_reg_field_value(
117 value,
118 params->arr_points[2].custom_float_slope,
119 REGAMMA_CNTLA_END_CNTL2,
120 REGAMMA_CNTLA_EXP_REGION_END_BASE);
121
122 set_reg_field_value(
123 value,
124 params->arr_points[1].custom_float_y,
125 REGAMMA_CNTLA_END_CNTL2,
126 REGAMMA_CNTLA_EXP_REGION_END_SLOPE);
127
128 dm_write_reg(opp80->base.ctx,
129 DCP_REG(mmREGAMMA_CNTLA_END_CNTL2), value);
130 }
131
132 curve = params->arr_curve_points;
133
134 {
135 value = 0;
136 set_reg_field_value(
137 value,
138 curve[0].offset,
139 REGAMMA_CNTLA_REGION_0_1,
140 REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET);
141
142 set_reg_field_value(
143 value,
144 curve[0].segments_num,
145 REGAMMA_CNTLA_REGION_0_1,
146 REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS);
147
148 set_reg_field_value(
149 value,
150 curve[1].offset,
151 REGAMMA_CNTLA_REGION_0_1,
152 REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET);
153
154 set_reg_field_value(
155 value,
156 curve[1].segments_num,
157 REGAMMA_CNTLA_REGION_0_1,
158 REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS);
159
160 dm_write_reg(
161 opp80->base.ctx,
162 DCP_REG(mmREGAMMA_CNTLA_REGION_0_1),
163 value);
164 }
165
166 curve += 2;
167 {
168 value = 0;
169 set_reg_field_value(
170 value,
171 curve[0].offset,
172 REGAMMA_CNTLA_REGION_2_3,
173 REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET);
174
175 set_reg_field_value(
176 value,
177 curve[0].segments_num,
178 REGAMMA_CNTLA_REGION_2_3,
179 REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS);
180
181 set_reg_field_value(
182 value,
183 curve[1].offset,
184 REGAMMA_CNTLA_REGION_2_3,
185 REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET);
186
187 set_reg_field_value(
188 value,
189 curve[1].segments_num,
190 REGAMMA_CNTLA_REGION_2_3,
191 REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS);
192
193 dm_write_reg(opp80->base.ctx,
194 DCP_REG(mmREGAMMA_CNTLA_REGION_2_3),
195 value);
196 }
197
198 curve += 2;
199 {
200 value = 0;
201 set_reg_field_value(
202 value,
203 curve[0].offset,
204 REGAMMA_CNTLA_REGION_4_5,
205 REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET);
206
207 set_reg_field_value(
208 value,
209 curve[0].segments_num,
210 REGAMMA_CNTLA_REGION_4_5,
211 REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS);
212
213 set_reg_field_value(
214 value,
215 curve[1].offset,
216 REGAMMA_CNTLA_REGION_4_5,
217 REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET);
218
219 set_reg_field_value(
220 value,
221 curve[1].segments_num,
222 REGAMMA_CNTLA_REGION_4_5,
223 REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS);
224
225 dm_write_reg(opp80->base.ctx,
226 DCP_REG(mmREGAMMA_CNTLA_REGION_4_5),
227 value);
228 }
229
230 curve += 2;
231 {
232 value = 0;
233 set_reg_field_value(
234 value,
235 curve[0].offset,
236 REGAMMA_CNTLA_REGION_6_7,
237 REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET);
238
239 set_reg_field_value(
240 value,
241 curve[0].segments_num,
242 REGAMMA_CNTLA_REGION_6_7,
243 REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS);
244
245 set_reg_field_value(
246 value,
247 curve[1].offset,
248 REGAMMA_CNTLA_REGION_6_7,
249 REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET);
250
251 set_reg_field_value(
252 value,
253 curve[1].segments_num,
254 REGAMMA_CNTLA_REGION_6_7,
255 REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS);
256
257 dm_write_reg(opp80->base.ctx,
258 DCP_REG(mmREGAMMA_CNTLA_REGION_6_7),
259 value);
260 }
261
262 curve += 2;
263 {
264 value = 0;
265 set_reg_field_value(
266 value,
267 curve[0].offset,
268 REGAMMA_CNTLA_REGION_8_9,
269 REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET);
270
271 set_reg_field_value(
272 value,
273 curve[0].segments_num,
274 REGAMMA_CNTLA_REGION_8_9,
275 REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS);
276
277 set_reg_field_value(
278 value,
279 curve[1].offset,
280 REGAMMA_CNTLA_REGION_8_9,
281 REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET);
282
283 set_reg_field_value(
284 value,
285 curve[1].segments_num,
286 REGAMMA_CNTLA_REGION_8_9,
287 REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS);
288
289 dm_write_reg(opp80->base.ctx,
290 DCP_REG(mmREGAMMA_CNTLA_REGION_8_9),
291 value);
292 }
293
294 curve += 2;
295 {
296 value = 0;
297 set_reg_field_value(
298 value,
299 curve[0].offset,
300 REGAMMA_CNTLA_REGION_10_11,
301 REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET);
302
303 set_reg_field_value(
304 value,
305 curve[0].segments_num,
306 REGAMMA_CNTLA_REGION_10_11,
307 REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS);
308
309 set_reg_field_value(
310 value,
311 curve[1].offset,
312 REGAMMA_CNTLA_REGION_10_11,
313 REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET);
314
315 set_reg_field_value(
316 value,
317 curve[1].segments_num,
318 REGAMMA_CNTLA_REGION_10_11,
319 REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS);
320
321 dm_write_reg(opp80->base.ctx,
322 DCP_REG(mmREGAMMA_CNTLA_REGION_10_11),
323 value);
324 }
325
326 curve += 2;
327 {
328 value = 0;
329 set_reg_field_value(
330 value,
331 curve[0].offset,
332 REGAMMA_CNTLA_REGION_12_13,
333 REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET);
334
335 set_reg_field_value(
336 value,
337 curve[0].segments_num,
338 REGAMMA_CNTLA_REGION_12_13,
339 REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS);
340
341 set_reg_field_value(
342 value,
343 curve[1].offset,
344 REGAMMA_CNTLA_REGION_12_13,
345 REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET);
346
347 set_reg_field_value(
348 value,
349 curve[1].segments_num,
350 REGAMMA_CNTLA_REGION_12_13,
351 REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS);
352
353 dm_write_reg(opp80->base.ctx,
354 DCP_REG(mmREGAMMA_CNTLA_REGION_12_13),
355 value);
356 }
357
358 curve += 2;
359 {
360 value = 0;
361 set_reg_field_value(
362 value,
363 curve[0].offset,
364 REGAMMA_CNTLA_REGION_14_15,
365 REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET);
366
367 set_reg_field_value(
368 value,
369 curve[0].segments_num,
370 REGAMMA_CNTLA_REGION_14_15,
371 REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS);
372
373 set_reg_field_value(
374 value,
375 curve[1].offset,
376 REGAMMA_CNTLA_REGION_14_15,
377 REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET);
378
379 set_reg_field_value(
380 value,
381 curve[1].segments_num,
382 REGAMMA_CNTLA_REGION_14_15,
383 REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS);
384
385 dm_write_reg(opp80->base.ctx,
386 DCP_REG(mmREGAMMA_CNTLA_REGION_14_15),
387 value);
388 }
389}
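/* Note: the 16 hardware regions are programmed two per register
 * (REGION_0_1 .. REGION_14_15), which is why the curve pointer above is
 * advanced by two entries between writes. */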
390
391static void program_pwl(
392 struct dce80_opp *opp80,
393 const struct pwl_params *params)
394{
395 uint32_t value;
396
397 {
398 uint8_t max_tries = 10;
399 uint8_t counter = 0;
400
401 /* Power on LUT memory */
402 value = dm_read_reg(opp80->base.ctx,
403 DCFE_REG(mmDCFE_MEM_LIGHT_SLEEP_CNTL));
404
405 set_reg_field_value(
406 value,
407 1,
408 DCFE_MEM_LIGHT_SLEEP_CNTL,
409 REGAMMA_LUT_LIGHT_SLEEP_DIS);
410
411 dm_write_reg(opp80->base.ctx,
412 DCFE_REG(mmDCFE_MEM_LIGHT_SLEEP_CNTL), value);
413
414 while (counter < max_tries) {
415 value =
416 dm_read_reg(
417 opp80->base.ctx,
418 DCFE_REG(mmDCFE_MEM_LIGHT_SLEEP_CNTL));
419
420 if (get_reg_field_value(
421 value,
422 DCFE_MEM_LIGHT_SLEEP_CNTL,
423 REGAMMA_LUT_MEM_PWR_STATE) == 0)
424 break;
425
426 ++counter;
427 }
428
429 if (counter == max_tries) {
430 dm_logger_write(opp80->base.ctx->logger, LOG_WARNING,
431 "%s: regamma lut was not powered on "
432 "in a timely manner,"
433 " programming still proceeds\n",
434 __func__);
435 }
436 }
437
438 value = 0;
439
440 set_reg_field_value(
441 value,
442 7,
443 REGAMMA_LUT_WRITE_EN_MASK,
444 REGAMMA_LUT_WRITE_EN_MASK);
445
446 dm_write_reg(opp80->base.ctx,
447 DCP_REG(mmREGAMMA_LUT_WRITE_EN_MASK), value);
448 dm_write_reg(opp80->base.ctx,
449 DCP_REG(mmREGAMMA_LUT_INDEX), 0);
450
451 /* Program REGAMMA_LUT_DATA */
452 {
453 const uint32_t addr = DCP_REG(mmREGAMMA_LUT_DATA);
454
455 uint32_t i = 0;
456
457 const struct pwl_result_data *rgb =
458 params->rgb_resulted;
459
460 while (i != params->hw_points_num) {
461 dm_write_reg(opp80->base.ctx, addr, rgb->red_reg);
462 dm_write_reg(opp80->base.ctx, addr, rgb->green_reg);
463 dm_write_reg(opp80->base.ctx, addr, rgb->blue_reg);
464
465 dm_write_reg(opp80->base.ctx, addr,
466 rgb->delta_red_reg);
467 dm_write_reg(opp80->base.ctx, addr,
468 rgb->delta_green_reg);
469 dm_write_reg(opp80->base.ctx, addr,
470 rgb->delta_blue_reg);
471
472 ++rgb;
473 ++i;
474 }
475 }
476
477 /* we are done with DCP LUT memory; re-enable low power mode */
478 value = dm_read_reg(opp80->base.ctx,
479 DCFE_REG(mmDCFE_MEM_LIGHT_SLEEP_CNTL));
480
481 set_reg_field_value(
482 value,
483 0,
484 DCFE_MEM_LIGHT_SLEEP_CNTL,
485 REGAMMA_LUT_LIGHT_SLEEP_DIS);
486
487 dm_write_reg(opp80->base.ctx, DCFE_REG(mmDCFE_MEM_LIGHT_SLEEP_CNTL),
488 value);
489}
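/* Note: each PWL point is written as six consecutive stores to the same
 * REGAMMA_LUT_DATA address (R/G/B values followed by their deltas);
 * presumably the hardware advances REGAMMA_LUT_INDEX automatically after
 * each data write, starting from the index 0 programmed above. */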
490
491void dce80_opp_power_on_regamma_lut(
492 struct output_pixel_processor *opp,
493 bool power_on)
494{
495 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
496
497 uint32_t value =
498 dm_read_reg(opp->ctx, DCFE_REG(mmDCFE_MEM_LIGHT_SLEEP_CNTL));
499
500 set_reg_field_value(
501 value,
502 power_on,
503 DCFE_MEM_LIGHT_SLEEP_CNTL,
504 REGAMMA_LUT_LIGHT_SLEEP_DIS);
505
506 set_reg_field_value(
507 value,
508 power_on,
509 DCFE_MEM_LIGHT_SLEEP_CNTL,
510 DCP_LUT_LIGHT_SLEEP_DIS);
511
512 dm_write_reg(opp->ctx, DCFE_REG(mmDCFE_MEM_LIGHT_SLEEP_CNTL), value);
513}
514
515bool dce80_opp_program_regamma_pwl(
516 struct output_pixel_processor *opp,
517 const struct pwl_params *params)
518{
519
520 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
521
522 regamma_config_regions_and_segments(opp80, params);
523
524 program_pwl(opp80, params);
525
526 return true;
527}
528
529void dce80_opp_set_regamma_mode(struct output_pixel_processor *opp,
530 enum opp_regamma mode)
531{
532 struct dce80_opp *opp80 = TO_DCE80_OPP(opp);
533 uint32_t value = dm_read_reg(opp80->base.ctx,
534 DCP_REG(mmREGAMMA_CONTROL));
535
536 set_reg_field_value(
537 value,
538 mode,
539 REGAMMA_CONTROL,
540 GRPH_REGAMMA_MODE);
541
542 dm_write_reg(opp80->base.ctx, DCP_REG(mmREGAMMA_CONTROL), value);
543}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
new file mode 100644
index 000000000000..06720407b6fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -0,0 +1,1063 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dce/dce_8_0_d.h"
27#include "dce/dce_8_0_sh_mask.h"
28
29#include "dm_services.h"
30
31#include "link_encoder.h"
32#include "stream_encoder.h"
33
34#include "resource.h"
35#include "include/irq_service_interface.h"
36#include "irq/dce80/irq_service_dce80.h"
37#include "dce110/dce110_timing_generator.h"
38#include "dce110/dce110_mem_input.h"
39#include "dce110/dce110_resource.h"
40#include "dce80/dce80_timing_generator.h"
41#include "dce/dce_link_encoder.h"
42#include "dce/dce_stream_encoder.h"
43#include "dce80/dce80_mem_input.h"
44#include "dce80/dce80_ipp.h"
45#include "dce/dce_transform.h"
46#include "dce80/dce80_opp.h"
47#include "dce110/dce110_ipp.h"
48#include "dce/dce_clock_source.h"
49#include "dce/dce_audio.h"
50#include "dce/dce_hwseq.h"
51#include "dce80/dce80_hw_sequencer.h"
52
53#include "reg_helper.h"
54
55/* TODO remove this include */
56
57#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
58#include "gmc/gmc_7_1_d.h"
59#include "gmc/gmc_7_1_sh_mask.h"
60#endif
61
62#ifndef mmDP_DPHY_INTERNAL_CTRL
63#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
64#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
65#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE
66#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE
67#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE
68#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE
69#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE
70#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE
71#endif
72
73
74#ifndef mmBIOS_SCRATCH_2
75 #define mmBIOS_SCRATCH_2 0x05CB
76 #define mmBIOS_SCRATCH_6 0x05CF
77#endif
78
79#ifndef mmDP_DPHY_FAST_TRAINING
80 #define mmDP_DPHY_FAST_TRAINING 0x1CCE
81 #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE
82 #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE
83 #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE
84 #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE
85 #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE
86 #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE
87 #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE
88#endif
89
90
91#ifndef mmHPD_DC_HPD_CONTROL
92 #define mmHPD_DC_HPD_CONTROL 0x189A
93 #define mmHPD0_DC_HPD_CONTROL 0x189A
94 #define mmHPD1_DC_HPD_CONTROL 0x18A2
95 #define mmHPD2_DC_HPD_CONTROL 0x18AA
96 #define mmHPD3_DC_HPD_CONTROL 0x18B2
97 #define mmHPD4_DC_HPD_CONTROL 0x18BA
98 #define mmHPD5_DC_HPD_CONTROL 0x18C2
99#endif
100
101#define DCE11_DIG_FE_CNTL 0x4a00
102#define DCE11_DIG_BE_CNTL 0x4a47
103#define DCE11_DP_SEC 0x4ac3
104
105static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
106 {
107 .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
108 .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL),
109 .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
110 - mmDPG_WATERMARK_MASK_CONTROL),
111 },
112 {
113 .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
114 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
115 .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
116 - mmDPG_WATERMARK_MASK_CONTROL),
117 },
118 {
119 .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
120 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
121 .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
122 - mmDPG_WATERMARK_MASK_CONTROL),
123 },
124 {
125 .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
126 .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
127 .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
128 - mmDPG_WATERMARK_MASK_CONTROL),
129 },
130 {
131 .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
132 .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
133 .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
134 - mmDPG_WATERMARK_MASK_CONTROL),
135 },
136 {
137 .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
138 .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
139 .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
140 - mmDPG_WATERMARK_MASK_CONTROL),
141 }
142};
143
144static const struct dce110_mem_input_reg_offsets dce80_mi_reg_offsets[] = {
145 {
146 .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL),
147 .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
148 - mmDPG_WATERMARK_MASK_CONTROL),
149 .pipe = (mmPIPE0_DMIF_BUFFER_CONTROL
150 - mmPIPE0_DMIF_BUFFER_CONTROL),
151 },
152 {
153 .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
154 .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
155 - mmDPG_WATERMARK_MASK_CONTROL),
156 .pipe = (mmPIPE1_DMIF_BUFFER_CONTROL
157 - mmPIPE0_DMIF_BUFFER_CONTROL),
158 },
159 {
160 .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
161 .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
162 - mmDPG_WATERMARK_MASK_CONTROL),
163 .pipe = (mmPIPE2_DMIF_BUFFER_CONTROL
164 - mmPIPE0_DMIF_BUFFER_CONTROL),
165 },
166 {
167 .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
168 .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
169 - mmDPG_WATERMARK_MASK_CONTROL),
170 .pipe = (mmPIPE3_DMIF_BUFFER_CONTROL
171 - mmPIPE0_DMIF_BUFFER_CONTROL),
172 },
173 {
174 .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
175 .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
176 - mmDPG_WATERMARK_MASK_CONTROL),
177 .pipe = (mmPIPE4_DMIF_BUFFER_CONTROL
178 - mmPIPE0_DMIF_BUFFER_CONTROL),
179 },
180 {
181 .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
182 .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
183 - mmDPG_WATERMARK_MASK_CONTROL),
184 .pipe = (mmPIPE5_DMIF_BUFFER_CONTROL
185 - mmPIPE0_DMIF_BUFFER_CONTROL),
186 }
187};
188
189static const struct dce110_ipp_reg_offsets ipp_reg_offsets[] = {
190{
191 .dcp_offset = (mmDCP0_CUR_CONTROL - mmDCP0_CUR_CONTROL),
192},
193{
194 .dcp_offset = (mmDCP1_CUR_CONTROL - mmDCP0_CUR_CONTROL),
195},
196{
197 .dcp_offset = (mmDCP2_CUR_CONTROL - mmDCP0_CUR_CONTROL),
198},
199{
200 .dcp_offset = (mmDCP3_CUR_CONTROL - mmDCP0_CUR_CONTROL),
201},
202{
203 .dcp_offset = (mmDCP4_CUR_CONTROL - mmDCP0_CUR_CONTROL),
204},
205{
206 .dcp_offset = (mmDCP5_CUR_CONTROL - mmDCP0_CUR_CONTROL),
207}
208};
209
210/* set register offset */
211#define SR(reg_name)\
212 .reg_name = mm ## reg_name
213
214/* set register offset with instance */
215#define SRI(reg_name, block, id)\
216 .reg_name = mm ## block ## id ## _ ## reg_name
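/* Example expansion: SRI(CRTC_CONTROL, CRTC, 1) yields
 * .CRTC_CONTROL = mmCRTC1_CRTC_CONTROL, i.e. the instance-1 register. */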
217
218#define transform_regs(id)\
219[id] = {\
220 XFM_COMMON_REG_LIST_DCE_BASE(id)\
221}
222
223static const struct dce_transform_registers xfm_regs[] = {
224 transform_regs(0),
225 transform_regs(1),
226 transform_regs(2),
227 transform_regs(3),
228 transform_regs(4),
229 transform_regs(5)
230};
231
232static const struct dce_transform_shift xfm_shift = {
233 XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
234};
235
236static const struct dce_transform_mask xfm_mask = {
237 XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
238};
239
240#define aux_regs(id)\
241[id] = {\
242 AUX_REG_LIST(id)\
243}
244
245static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
246 aux_regs(0),
247 aux_regs(1),
248 aux_regs(2),
249 aux_regs(3),
250 aux_regs(4),
251 aux_regs(5)
252};
253
254#define hpd_regs(id)\
255[id] = {\
256 HPD_REG_LIST(id)\
257}
258
259static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
260 hpd_regs(0),
261 hpd_regs(1),
262 hpd_regs(2),
263 hpd_regs(3),
264 hpd_regs(4),
265 hpd_regs(5)
266};
267
268#define link_regs(id)\
269[id] = {\
270 LE_DCE80_REG_LIST(id)\
271}
272
273static const struct dce110_link_enc_registers link_enc_regs[] = {
274 link_regs(0),
275 link_regs(1),
276 link_regs(2),
277 link_regs(3),
278 link_regs(4),
279 link_regs(5),
280 link_regs(6),
281};
282
283#define stream_enc_regs(id)\
284[id] = {\
285 SE_COMMON_REG_LIST_DCE_BASE(id),\
286 .AFMT_CNTL = 0,\
287}
288
289static const struct dce110_stream_enc_registers stream_enc_regs[] = {
290 stream_enc_regs(0),
291 stream_enc_regs(1),
292 stream_enc_regs(2),
293 stream_enc_regs(3),
294 stream_enc_regs(4),
295 stream_enc_regs(5)
296};
297
298static const struct dce_stream_encoder_shift se_shift = {
299 SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
300};
301
302static const struct dce_stream_encoder_mask se_mask = {
303 SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
304};
305
306#define audio_regs(id)\
307[id] = {\
308 AUD_COMMON_REG_LIST(id)\
309}
310
311static const struct dce_audio_registers audio_regs[] = {
312 audio_regs(0),
313 audio_regs(1),
314 audio_regs(2),
315 audio_regs(3),
316 audio_regs(4),
317 audio_regs(5),
318 audio_regs(6),
319};
320
321static const struct dce_audio_shift audio_shift = {
322 AUD_COMMON_MASK_SH_LIST(__SHIFT)
323};
324
325static const struct dce_aduio_mask audio_mask = {
326 AUD_COMMON_MASK_SH_LIST(_MASK)
327};
328
329#define clk_src_regs(id)\
330[id] = {\
331 CS_COMMON_REG_LIST_DCE_80(id),\
332}
333
334
335static const struct dce110_clk_src_regs clk_src_regs[] = {
336 clk_src_regs(0),
337 clk_src_regs(1),
338 clk_src_regs(2)
339};
340
341static const struct dce110_clk_src_shift cs_shift = {
342 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
343};
344
345static const struct dce110_clk_src_mask cs_mask = {
346 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
347};
348
349static const struct bios_registers bios_regs = {
350 .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
351};
352
353static const struct resource_caps res_cap = {
354 .num_timing_generator = 6,
355 .num_audio = 6,
356 .num_stream_encoder = 6,
357 .num_pll = 3,
358};
359
360#define CTX ctx
361#define REG(reg) mm ## reg
362
363#ifndef mmCC_DC_HDMI_STRAPS
364#define mmCC_DC_HDMI_STRAPS 0x1918
365#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
366#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
367#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
368#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
369#endif
370
371static void read_dce_straps(
372 struct dc_context *ctx,
373 struct resource_straps *straps)
374{
375 REG_GET_2(CC_DC_HDMI_STRAPS,
376 HDMI_DISABLE, &straps->hdmi_disable,
377 AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
378
379 REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
380}
381
382static struct audio *create_audio(
383 struct dc_context *ctx, unsigned int inst)
384{
385 return dce_audio_create(ctx, inst,
386 &audio_regs[inst], &audio_shift, &audio_mask);
387}
388
389static struct timing_generator *dce80_timing_generator_create(
390 struct dc_context *ctx,
391 uint32_t instance,
392 const struct dce110_timing_generator_offsets *offsets)
393{
394 struct dce110_timing_generator *tg110 =
395 dm_alloc(sizeof(struct dce110_timing_generator));
396
397 if (!tg110)
398 return NULL;
399
400 if (dce80_timing_generator_construct(tg110, ctx, instance, offsets))
401 return &tg110->base;
402
403 BREAK_TO_DEBUGGER();
404 dm_free(tg110);
405 return NULL;
406}
407
408static struct stream_encoder *dce80_stream_encoder_create(
409 enum engine_id eng_id,
410 struct dc_context *ctx)
411{
412 struct dce110_stream_encoder *enc110 =
413 dm_alloc(sizeof(struct dce110_stream_encoder));
414
415 if (!enc110)
416 return NULL;
417
418 if (dce110_stream_encoder_construct(
419 enc110, ctx, ctx->dc_bios, eng_id,
420 &stream_enc_regs[eng_id], &se_shift, &se_mask))
421 return &enc110->base;
422
423 BREAK_TO_DEBUGGER();
424 dm_free(enc110);
425 return NULL;
426}
427
428#define SRII(reg_name, block, id)\
429 .reg_name[id] = mm ## block ## id ## _ ## reg_name
430
431static const struct dce_hwseq_registers hwseq_reg = {
432 HWSEQ_DCE8_REG_LIST()
433};
434
435static const struct dce_hwseq_shift hwseq_shift = {
436 HWSEQ_DCE8_MASK_SH_LIST(__SHIFT)
437};
438
439static const struct dce_hwseq_mask hwseq_mask = {
440 HWSEQ_DCE8_MASK_SH_LIST(_MASK)
441};
442
443static struct dce_hwseq *dce80_hwseq_create(
444 struct dc_context *ctx)
445{
446 struct dce_hwseq *hws = dm_alloc(sizeof(struct dce_hwseq));
447
448 if (hws) {
449 hws->ctx = ctx;
450 hws->regs = &hwseq_reg;
451 hws->shifts = &hwseq_shift;
452 hws->masks = &hwseq_mask;
453 }
454 return hws;
455}
456
457static const struct resource_create_funcs res_create_funcs = {
458 .read_dce_straps = read_dce_straps,
459 .create_audio = create_audio,
460 .create_stream_encoder = dce80_stream_encoder_create,
461 .create_hwseq = dce80_hwseq_create,
462};
463
464#define mi_inst_regs(id) { \
465 MI_REG_LIST(id), \
466 .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
467}
468static const struct dce_mem_input_registers mi_regs[] = {
469 mi_inst_regs(0),
470 mi_inst_regs(1),
471 mi_inst_regs(2),
472 mi_inst_regs(3),
473 mi_inst_regs(4),
474 mi_inst_regs(5),
475};
476
477static const struct dce_mem_input_shift mi_shifts = {
478 MI_DCE_MASK_SH_LIST(__SHIFT),
479 .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
480};
481
482static const struct dce_mem_input_mask mi_masks = {
483 MI_DCE_MASK_SH_LIST(_MASK),
484 .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
485};
486
487static struct mem_input *dce80_mem_input_create(
488 struct dc_context *ctx,
489 uint32_t inst,
490 const struct dce110_mem_input_reg_offsets *offsets)
491{
492 struct dce110_mem_input *mem_input80 =
493 dm_alloc(sizeof(struct dce110_mem_input));
494
495 if (!mem_input80)
496 return NULL;
497
498 if (dce80_mem_input_construct(mem_input80, ctx, inst, offsets)) {
499 struct mem_input *mi = &mem_input80->base;
500
501 mi->regs = &mi_regs[inst];
502 mi->shifts = &mi_shifts;
503 mi->masks = &mi_masks;
504 mi->wa.single_head_rdreq_dmif_limit = 2;
505 return mi;
506 }
507
508 BREAK_TO_DEBUGGER();
509 dm_free(mem_input80);
510 return NULL;
511}
512
513static void dce80_transform_destroy(struct transform **xfm)
514{
515 dm_free(TO_DCE_TRANSFORM(*xfm));
516 *xfm = NULL;
517}
518
519static struct transform *dce80_transform_create(
520 struct dc_context *ctx,
521 uint32_t inst)
522{
523 struct dce_transform *transform =
524 dm_alloc(sizeof(struct dce_transform));
525
526 if (!transform)
527 return NULL;
528
529 if (dce_transform_construct(transform, ctx, inst,
530 &xfm_regs[inst], &xfm_shift, &xfm_mask)) {
531 transform->prescaler_on = false;
532 return &transform->base;
533 }
534
535 BREAK_TO_DEBUGGER();
536 dm_free(transform);
537 return NULL;
538}
539
540static struct input_pixel_processor *dce80_ipp_create(
541 struct dc_context *ctx,
542 uint32_t inst,
543 const struct dce110_ipp_reg_offsets *offset)
544{
545 struct dce110_ipp *ipp =
546 dm_alloc(sizeof(struct dce110_ipp));
547
548 if (!ipp)
549 return NULL;
550
551 if (dce80_ipp_construct(ipp, ctx, inst, offset))
552 return &ipp->base;
553
554 BREAK_TO_DEBUGGER();
555 dm_free(ipp);
556 return NULL;
557}
558
559struct link_encoder *dce80_link_encoder_create(
560 const struct encoder_init_data *enc_init_data)
561{
562 struct dce110_link_encoder *enc110 =
563 dm_alloc(sizeof(struct dce110_link_encoder));
564
565 if (!enc110)
566 return NULL;
567
568 if (dce110_link_encoder_construct(
569 enc110,
570 enc_init_data,
571 &link_enc_regs[enc_init_data->transmitter],
572 &link_enc_aux_regs[enc_init_data->channel - 1],
573 &link_enc_hpd_regs[enc_init_data->hpd_source])) {
574
575 enc110->base.features.ycbcr420_supported = false;
576 enc110->base.features.max_hdmi_pixel_clock = 297000;
577 return &enc110->base;
578 }
579
580 BREAK_TO_DEBUGGER();
581 dm_free(enc110);
582 return NULL;
583}
584
585struct clock_source *dce80_clock_source_create(
586 struct dc_context *ctx,
587 struct dc_bios *bios,
588 enum clock_source_id id,
589 const struct dce110_clk_src_regs *regs,
590 bool dp_clk_src)
591{
592 struct dce110_clk_src *clk_src =
593 dm_alloc(sizeof(struct dce110_clk_src));
594
595 if (!clk_src)
596 return NULL;
597
598 if (dce110_clk_src_construct(clk_src, ctx, bios, id,
599 regs, &cs_shift, &cs_mask)) {
600 clk_src->base.dp_clk_src = dp_clk_src;
601 return &clk_src->base;
602 }
603
604 BREAK_TO_DEBUGGER();
605 return NULL;
606}
607
608void dce80_clock_source_destroy(struct clock_source **clk_src)
609{
610 dm_free(TO_DCE110_CLK_SRC(*clk_src));
611 *clk_src = NULL;
612}
613
614static void destruct(struct dce110_resource_pool *pool)
615{
616 unsigned int i;
617
618 for (i = 0; i < pool->base.pipe_count; i++) {
619 if (pool->base.opps[i] != NULL)
620 dce80_opp_destroy(&pool->base.opps[i]);
621
622 if (pool->base.transforms[i] != NULL)
623 dce80_transform_destroy(&pool->base.transforms[i]);
624
625 if (pool->base.ipps[i] != NULL)
626 dce80_ipp_destroy(&pool->base.ipps[i]);
627
628 if (pool->base.mis[i] != NULL) {
629 dm_free(TO_DCE110_MEM_INPUT(pool->base.mis[i]));
630 pool->base.mis[i] = NULL;
631 }
632
633 if (pool->base.timing_generators[i] != NULL) {
634 dm_free(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
635 pool->base.timing_generators[i] = NULL;
636 }
637 }
638
639 for (i = 0; i < pool->base.stream_enc_count; i++) {
640 if (pool->base.stream_enc[i] != NULL)
641 dm_free(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
642 }
643
644 for (i = 0; i < pool->base.clk_src_count; i++) {
645 if (pool->base.clock_sources[i] != NULL) {
646 dce80_clock_source_destroy(&pool->base.clock_sources[i]);
647 }
648 }
649
650 if (pool->base.dp_clock_source != NULL)
651 dce80_clock_source_destroy(&pool->base.dp_clock_source);
652
653 for (i = 0; i < pool->base.audio_count; i++) {
654 if (pool->base.audios[i] != NULL) {
655 dce_aud_destroy(&pool->base.audios[i]);
656 }
657 }
658
659 if (pool->base.display_clock != NULL) {
660 dal_display_clock_destroy(&pool->base.display_clock);
661 }
662
663 if (pool->base.irqs != NULL) {
664 dal_irq_service_destroy(&pool->base.irqs);
665 }
666}
667
668static enum dc_status validate_mapped_resource(
669 const struct core_dc *dc,
670 struct validate_context *context)
671{
672 enum dc_status status = DC_OK;
673 uint8_t i, j, k;
674
675 for (i = 0; i < context->target_count; i++) {
676 struct core_target *target = context->targets[i];
677
678 for (j = 0; j < target->public.stream_count; j++) {
679 struct core_stream *stream =
680 DC_STREAM_TO_CORE(target->public.streams[j]);
681 struct core_link *link = stream->sink->link;
682
683 if (resource_is_stream_unchanged(dc->current_context, stream))
684 continue;
685
686 for (k = 0; k < MAX_PIPES; k++) {
687 struct pipe_ctx *pipe_ctx =
688 &context->res_ctx.pipe_ctx[k];
689
690 if (context->res_ctx.pipe_ctx[k].stream != stream)
691 continue;
692
693 if (!pipe_ctx->tg->funcs->validate_timing(
694 pipe_ctx->tg, &stream->public.timing))
695 return DC_FAIL_CONTROLLER_VALIDATE;
696
697 status = dce110_resource_build_pipe_hw_param(pipe_ctx);
698
699 if (status != DC_OK)
700 return status;
701
702 if (!link->link_enc->funcs->validate_output_with_stream(
703 link->link_enc,
704 pipe_ctx))
705 return DC_FAIL_ENC_VALIDATE;
706
707 /* TODO: validate audio ASIC caps, encoder */
708
709 status = dc_link_validate_mode_timing(stream,
710 link,
711 &stream->public.timing);
712
713 if (status != DC_OK)
714 return status;
715
716 resource_build_info_frame(pipe_ctx);
717
718 /* no need to validate non-root pipes */
719 break;
720 }
721 }
722 }
723
724 return DC_OK;
725}
726
727enum dc_status dce80_validate_bandwidth(
728 const struct core_dc *dc,
729 struct validate_context *context)
730{
731 /* TODO: implement when needed; for now hardcode the max value */
732 context->bw_results.dispclk_khz = 681000;
733 context->bw_results.required_yclk = 250000 * MEMORY_TYPE_MULTIPLIER;
734
735 return DC_OK;
736}
737
738static bool dce80_validate_surface_sets(
739 const struct dc_validation_set set[],
740 int set_count)
741{
742 int i;
743
744 for (i = 0; i < set_count; i++) {
745 if (set[i].surface_count == 0)
746 continue;
747
748 if (set[i].surface_count > 1)
749 return false;
750
751 if (set[i].surfaces[0]->clip_rect.width
752 != set[i].target->streams[0]->src.width
753 || set[i].surfaces[0]->clip_rect.height
754 != set[i].target->streams[0]->src.height)
755 return false;
756 if (set[i].surfaces[0]->format
757 >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
758 return false;
759 }
760
761 return true;
762}
763
764enum dc_status dce80_validate_with_context(
765 const struct core_dc *dc,
766 const struct dc_validation_set set[],
767 int set_count,
768 struct validate_context *context)
769{
770 struct dc_context *dc_ctx = dc->ctx;
771 enum dc_status result = DC_ERROR_UNEXPECTED;
772 int i;
773
774 if (!dce80_validate_surface_sets(set, set_count))
775 return DC_FAIL_SURFACE_VALIDATE;
776
777 context->res_ctx.pool = dc->res_pool;
778
779 for (i = 0; i < set_count; i++) {
780 context->targets[i] = DC_TARGET_TO_CORE(set[i].target);
781 dc_target_retain(&context->targets[i]->public);
782 context->target_count++;
783 }
784
785 result = resource_map_pool_resources(dc, context);
786
787 if (result == DC_OK)
788 result = resource_map_clock_resources(dc, context);
789
790 if (!resource_validate_attach_surfaces(
791 set, set_count, dc->current_context, context)) {
792 DC_ERROR("Failed to attach surface to target!\n");
793 return DC_FAIL_ATTACH_SURFACES;
794 }
795
796 if (result == DC_OK)
797 result = validate_mapped_resource(dc, context);
798
799 if (result == DC_OK)
800 result = resource_build_scaling_params_for_context(dc, context);
801
802 if (result == DC_OK)
803 result = dce80_validate_bandwidth(dc, context);
804
805 return result;
806}
807
808enum dc_status dce80_validate_guaranteed(
809 const struct core_dc *dc,
810 const struct dc_target *dc_target,
811 struct validate_context *context)
812{
813 enum dc_status result = DC_ERROR_UNEXPECTED;
814
815 context->res_ctx.pool = dc->res_pool;
816
817 context->targets[0] = DC_TARGET_TO_CORE(dc_target);
818 dc_target_retain(&context->targets[0]->public);
819 context->target_count++;
820
821 result = resource_map_pool_resources(dc, context);
822
823 if (result == DC_OK)
824 result = resource_map_clock_resources(dc, context);
825
826 if (result == DC_OK)
827 result = validate_mapped_resource(dc, context);
828
829 if (result == DC_OK) {
830 validate_guaranteed_copy_target(
831 context, dc->public.caps.max_targets);
832 result = resource_build_scaling_params_for_context(dc, context);
833 }
834
835 if (result == DC_OK)
836 result = dce80_validate_bandwidth(dc, context);
837
838 return result;
839}
840
841static void dce80_destroy_resource_pool(struct resource_pool **pool)
842{
843 struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
844
845 destruct(dce110_pool);
846 dm_free(dce110_pool);
847 *pool = NULL;
848}
849
850static const struct resource_funcs dce80_res_pool_funcs = {
851 .destroy = dce80_destroy_resource_pool,
852 .link_enc_create = dce80_link_encoder_create,
853 .validate_with_context = dce80_validate_with_context,
854 .validate_guaranteed = dce80_validate_guaranteed,
855 .validate_bandwidth = dce80_validate_bandwidth
856};
857
858static enum clocks_state dce80_resource_convert_clock_state_pp_to_dc(
859 enum dm_pp_clocks_state pp_clock_state)
860{
861 enum clocks_state dc_clocks_state = CLOCKS_STATE_INVALID;
862
863 switch (pp_clock_state) {
864 case DM_PP_CLOCKS_STATE_INVALID:
865 dc_clocks_state = CLOCKS_STATE_INVALID;
866 break;
867 case DM_PP_CLOCKS_STATE_ULTRA_LOW:
868 dc_clocks_state = CLOCKS_STATE_ULTRA_LOW;
869 break;
870 case DM_PP_CLOCKS_STATE_LOW:
871 dc_clocks_state = CLOCKS_STATE_LOW;
872 break;
873 case DM_PP_CLOCKS_STATE_NOMINAL:
874 dc_clocks_state = CLOCKS_STATE_NOMINAL;
875 break;
876 case DM_PP_CLOCKS_STATE_PERFORMANCE:
877 dc_clocks_state = CLOCKS_STATE_PERFORMANCE;
878 break;
879 case DM_PP_CLOCKS_DPM_STATE_LEVEL_4:
880 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_4;
881 break;
882 case DM_PP_CLOCKS_DPM_STATE_LEVEL_5:
883 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_5;
884 break;
885 case DM_PP_CLOCKS_DPM_STATE_LEVEL_6:
886 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_6;
887 break;
888 case DM_PP_CLOCKS_DPM_STATE_LEVEL_7:
889 dc_clocks_state = CLOCKS_DPM_STATE_LEVEL_7;
890 break;
891 default:
892 dc_clocks_state = CLOCKS_STATE_INVALID;
893 break;
894 }
895
896 return dc_clocks_state;
897}
898
899static bool construct(
900 uint8_t num_virtual_links,
901 struct core_dc *dc,
902 struct dce110_resource_pool *pool)
903{
904 unsigned int i;
905 struct dc_context *ctx = dc->ctx;
906 struct firmware_info info;
907 struct dc_bios *bp;
908 struct dm_pp_static_clock_info static_clk_info = {0};
909
910 ctx->dc_bios->regs = &bios_regs;
911
912 pool->base.res_cap = &res_cap;
913 pool->base.funcs = &dce80_res_pool_funcs;
914
915
916 /*************************************************
917 * Resource + asic cap hardcoding *
918 *************************************************/
919 pool->base.underlay_pipe_index = -1;
920 pool->base.pipe_count = res_cap.num_timing_generator;
921 dc->public.caps.max_downscale_ratio = 200;
922 dc->public.caps.i2c_speed_in_khz = 40;
923
924 /*************************************************
925 * Create resources *
926 *************************************************/
927
928 bp = ctx->dc_bios;
929
930 if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
931 info.external_clock_source_frequency_for_dp != 0) {
932 pool->base.dp_clock_source =
933 dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
934
935 pool->base.clock_sources[0] =
936 dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
937 pool->base.clock_sources[1] =
938 dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
939 pool->base.clock_sources[2] =
940 dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
941 pool->base.clk_src_count = 3;
942
943 } else {
944 pool->base.dp_clock_source =
945 dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
946
947 pool->base.clock_sources[0] =
948 dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
949 pool->base.clock_sources[1] =
950 dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
951 pool->base.clk_src_count = 2;
952 }
953
954 if (pool->base.dp_clock_source == NULL) {
955 dm_error("DC: failed to create dp clock source!\n");
956 BREAK_TO_DEBUGGER();
957 goto res_create_fail;
958 }
959
960 for (i = 0; i < pool->base.clk_src_count; i++) {
961 if (pool->base.clock_sources[i] == NULL) {
962 dm_error("DC: failed to create clock sources!\n");
963 BREAK_TO_DEBUGGER();
964 goto res_create_fail;
965 }
966 }
967
968 pool->base.display_clock = dal_display_clock_dce80_create(ctx);
969 if (pool->base.display_clock == NULL) {
970 dm_error("DC: failed to create display clock!\n");
971 BREAK_TO_DEBUGGER();
972 goto res_create_fail;
973 }
974
975
976 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) {
977 enum clocks_state max_clocks_state =
978 dce80_resource_convert_clock_state_pp_to_dc(
979 static_clk_info.max_clocks_state);
980
981 dal_display_clock_store_max_clocks_state(
982 pool->base.display_clock, max_clocks_state);
983 }
984
985 {
986 struct irq_service_init_data init_data;
987 init_data.ctx = dc->ctx;
988 pool->base.irqs = dal_irq_service_dce80_create(&init_data);
989 if (!pool->base.irqs)
990 goto res_create_fail;
991 }
992
993 for (i = 0; i < pool->base.pipe_count; i++) {
994 pool->base.timing_generators[i] = dce80_timing_generator_create(
995 ctx, i, &dce80_tg_offsets[i]);
996 if (pool->base.timing_generators[i] == NULL) {
997 BREAK_TO_DEBUGGER();
998 dm_error("DC: failed to create tg!\n");
999 goto res_create_fail;
1000 }
1001
1002 pool->base.mis[i] = dce80_mem_input_create(ctx, i,
1003 &dce80_mi_reg_offsets[i]);
1004 if (pool->base.mis[i] == NULL) {
1005 BREAK_TO_DEBUGGER();
1006 dm_error("DC: failed to create memory input!\n");
1007 goto res_create_fail;
1008 }
1009
1010 pool->base.ipps[i] = dce80_ipp_create(ctx, i, &ipp_reg_offsets[i]);
1011 if (pool->base.ipps[i] == NULL) {
1012 BREAK_TO_DEBUGGER();
1013 dm_error("DC: failed to create input pixel processor!\n");
1014 goto res_create_fail;
1015 }
1016
1017 pool->base.transforms[i] = dce80_transform_create(ctx, i);
1018 if (pool->base.transforms[i] == NULL) {
1019 BREAK_TO_DEBUGGER();
1020 dm_error("DC: failed to create transform!\n");
1021 goto res_create_fail;
1022 }
1023
1024 pool->base.opps[i] = dce80_opp_create(ctx, i);
1025 if (pool->base.opps[i] == NULL) {
1026 BREAK_TO_DEBUGGER();
1027 dm_error("DC: failed to create output pixel processor!\n");
1028 goto res_create_fail;
1029 }
1030 }
1031
1032 if (!resource_construct(num_virtual_links, dc, &pool->base,
1033 &res_create_funcs))
1034 goto res_create_fail;
1035
1036 /* Create hardware sequencer */
1037 if (!dce80_hw_sequencer_construct(dc))
1038 goto res_create_fail;
1039
1040 return true;
1041
1042res_create_fail:
1043 destruct(pool);
1044 return false;
1045}
1046
1047struct resource_pool *dce80_create_resource_pool(
1048 uint8_t num_virtual_links,
1049 struct core_dc *dc)
1050{
1051 struct dce110_resource_pool *pool =
1052 dm_alloc(sizeof(struct dce110_resource_pool));
1053
1054 if (!pool)
1055 return NULL;
1056
1057 if (construct(num_virtual_links, dc, pool))
1058 return &pool->base;
1059
1060 BREAK_TO_DEBUGGER();
1061 return NULL;
1062}
1063
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
new file mode 100644
index 000000000000..2a0cdccddeaf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_RESOURCE_DCE80_H__
27#define __DC_RESOURCE_DCE80_H__
28
29#include "core_types.h"
30
31struct core_dc;
32struct resource_pool;
33
34struct resource_pool *dce80_create_resource_pool(
35 uint8_t num_virtual_links,
36 struct core_dc *dc);
37
38#endif /* __DC_RESOURCE_DCE80_H__ */
39
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
new file mode 100644
index 000000000000..e8fae0a7eeb4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -0,0 +1,241 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/* include DCE8 register header files */
29#include "dce/dce_8_0_d.h"
30#include "dce/dce_8_0_sh_mask.h"
31
32#include "dc_types.h"
33
34#include "include/grph_object_id.h"
35#include "include/logger_interface.h"
36#include "../dce110/dce110_timing_generator.h"
37#include "dce80_timing_generator.h"
38
39#include "timing_generator.h"
40
41enum black_color_format {
42 BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */
43 BLACK_COLOR_FORMAT_RGB_LIMITED,
44 BLACK_COLOR_FORMAT_YUV_TV,
45 BLACK_COLOR_FORMAT_YUV_CV,
46 BLACK_COLOR_FORMAT_YUV_SUPER_AA,
47
48 BLACK_COLOR_FORMAT_COUNT
49};
50
51static const struct dce110_timing_generator_offsets reg_offsets[] = {
52{
53 .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
54 .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
55},
56{
57 .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
58 .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
59},
60{
61 .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
62 .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
63},
64{
65 .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
66 .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
67},
68{
69 .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
70 .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
71},
72{
73 .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
74 .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
75}
76};
77
78#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
79
80#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
81#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)
82
83#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
84#define DCP_REG(reg) (reg + tg110->offsets.dcp)
85#define DMIF_REG(reg) (reg + tg110->offsets.dmif)
86
87void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
88{
89 uint64_t pix_dur;
90 uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
91 + DCE110TG_FROM_TG(tg)->offsets.dmif;
92 uint32_t value = dm_read_reg(tg->ctx, addr);
93
94 if (pix_clk_khz == 0)
95 return;
96
97 pix_dur = 1000000000 / pix_clk_khz;
98
99 set_reg_field_value(
100 value,
101 pix_dur,
102 DPG_PIPE_ARBITRATION_CONTROL1,
103 PIXEL_DURATION);
104
105 dm_write_reg(tg->ctx, addr, value);
106}
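
/*
 * Worked example (illustrative): for pix_clk_khz = 100000 (a 100 MHz pixel
 * clock) the division above gives pix_dur = 1000000000 / 100000 = 10000,
 * which is then written into the PIXEL_DURATION field of
 * DPG_PIPE_ARBITRATION_CONTROL1 for this pipe.
 */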
107
108static void program_timing(struct timing_generator *tg,
109 const struct dc_crtc_timing *timing,
110 bool use_vbios)
111{
112 if (!use_vbios)
113 program_pix_dur(tg, timing->pix_clk_khz);
114
115 dce110_tg_program_timing(tg, timing, use_vbios);
116}
117
118static const struct timing_generator_funcs dce80_tg_funcs = {
119 .validate_timing = dce110_tg_validate_timing,
120 .program_timing = program_timing,
121 .enable_crtc = dce110_timing_generator_enable_crtc,
122 .disable_crtc = dce110_timing_generator_disable_crtc,
123 .is_counter_moving = dce110_timing_generator_is_counter_moving,
124 .get_position = dce110_timing_generator_get_crtc_positions,
125 .get_frame_count = dce110_timing_generator_get_vblank_counter,
126 .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
127 .set_early_control = dce110_timing_generator_set_early_control,
128 .wait_for_state = dce110_tg_wait_for_state,
129 .set_blank = dce110_tg_set_blank,
130 .is_blanked = dce110_tg_is_blanked,
131 .set_colors = dce110_tg_set_colors,
132 .set_overscan_blank_color =
133 dce110_timing_generator_set_overscan_color_black,
134 .set_blank_color = dce110_timing_generator_program_blank_color,
135 .disable_vga = dce110_timing_generator_disable_vga,
136 .did_triggered_reset_occur =
137 dce110_timing_generator_did_triggered_reset_occur,
138 .setup_global_swap_lock =
139 dce110_timing_generator_setup_global_swap_lock,
140 .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
141 .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
142 .tear_down_global_swap_lock =
143 dce110_timing_generator_tear_down_global_swap_lock,
144
145 /* DCE8.0 overrides */
146 .enable_advanced_request =
147 dce80_timing_generator_enable_advanced_request,
148 .set_drr =
149 dce110_timing_generator_set_drr,
150};
151
152bool dce80_timing_generator_construct(
153 struct dce110_timing_generator *tg110,
154 struct dc_context *ctx,
155 uint32_t instance,
156 const struct dce110_timing_generator_offsets *offsets)
157{
158 if (!tg110)
159 return false;
160
161 tg110->controller_id = CONTROLLER_ID_D0 + instance;
162 tg110->base.inst = instance;
163 tg110->offsets = *offsets;
164 tg110->derived_offsets = reg_offsets[instance];
165
166 tg110->base.funcs = &dce80_tg_funcs;
167
168 tg110->base.ctx = ctx;
169 tg110->base.bp = ctx->dc_bios;
170
171 tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
172 tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
173
174 tg110->min_h_blank = 56;
175 tg110->min_h_front_porch = 4;
176 tg110->min_h_back_porch = 4;
177
178 return true;
179}
180
181void dce80_timing_generator_enable_advanced_request(
182 struct timing_generator *tg,
183 bool enable,
184 const struct dc_crtc_timing *timing)
185{
186 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
187 uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
188 uint32_t value = dm_read_reg(tg->ctx, addr);
189
190 if (enable) {
191 set_reg_field_value(
192 value,
193 0,
194 CRTC_START_LINE_CONTROL,
195 CRTC_LEGACY_REQUESTOR_EN);
196 } else {
197 set_reg_field_value(
198 value,
199 1,
200 CRTC_START_LINE_CONTROL,
201 CRTC_LEGACY_REQUESTOR_EN);
202 }
203
204 if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
205 set_reg_field_value(
206 value,
207 3,
208 CRTC_START_LINE_CONTROL,
209 CRTC_ADVANCED_START_LINE_POSITION);
210 set_reg_field_value(
211 value,
212 0,
213 CRTC_START_LINE_CONTROL,
214 CRTC_PREFETCH_EN);
215 } else {
216 set_reg_field_value(
217 value,
218 4,
219 CRTC_START_LINE_CONTROL,
220 CRTC_ADVANCED_START_LINE_POSITION);
221 set_reg_field_value(
222 value,
223 1,
224 CRTC_START_LINE_CONTROL,
225 CRTC_PREFETCH_EN);
226 }
227
228 set_reg_field_value(
229 value,
230 1,
231 CRTC_START_LINE_CONTROL,
232 CRTC_PROGRESSIVE_START_LINE_EARLY);
233
234 set_reg_field_value(
235 value,
236 1,
237 CRTC_START_LINE_CONTROL,
238 CRTC_INTERLACE_START_LINE_EARLY);
239
240 dm_write_reg(tg->ctx, addr, value);
241}
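
/*
 * Example (illustrative timings): v_sync_width = 2 with v_front_porch = 1
 * sums to 3, so the first branch above programs
 * CRTC_ADVANCED_START_LINE_POSITION to 3 and clears CRTC_PREFETCH_EN; a
 * timing such as v_sync_width = 5 with v_front_porch = 3 takes the else
 * branch (start line position 4, prefetch enabled).
 */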
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
new file mode 100644
index 000000000000..6e4722a970d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_TIMING_GENERATOR_DCE80_H__
27#define __DC_TIMING_GENERATOR_DCE80_H__
28
29#include "timing_generator.h"
30#include "../include/grph_object_id.h"
31
32/* DCE8.0 implementation inherits from DCE11.0 */
33bool dce80_timing_generator_construct(
34 struct dce110_timing_generator *tg,
35 struct dc_context *ctx,
36 uint32_t instance,
37 const struct dce110_timing_generator_offsets *offsets);
38
39/******** HW programming ************/
40void dce80_timing_generator_enable_advanced_request(
41 struct timing_generator *tg,
42 bool enable,
43 const struct dc_crtc_timing *timing);
44
45#endif /* __DC_TIMING_GENERATOR_DCE80_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
new file mode 100644
index 000000000000..d6c52d31f0f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/**
27 * This file defines helper functions provided by the Display Manager to
28 * Display Core.
29 */
30#ifndef __DM_HELPERS__
31#define __DM_HELPERS__
32
33#include "dc_types.h"
34#include "dc.h"
35
36struct dp_mst_stream_allocation_table;
37
38enum dc_edid_status dm_helpers_parse_edid_caps(
39 struct dc_context *ctx,
40 const struct dc_edid *edid,
41 struct dc_edid_caps *edid_caps);
42
43/*
44 * Writes payload allocation table in immediate downstream device.
45 */
46bool dm_helpers_dp_mst_write_payload_allocation_table(
47 struct dc_context *ctx,
48 const struct dc_stream *stream,
49 struct dp_mst_stream_allocation_table *proposed_table,
50 bool enable);
51
52/*
53 * Polls until ACT (allocation change trigger) has been handled.
54 */
55bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
56 struct dc_context *ctx,
57 const struct dc_stream *stream);
58/*
59 * Sends ALLOCATE_PAYLOAD message.
60 */
61bool dm_helpers_dp_mst_send_payload_allocation(
62 struct dc_context *ctx,
63 const struct dc_stream *stream,
64 bool enable);
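
/*
 * MST payload sequence sketch (illustrative): when enabling a stream, the DM
 * helpers above are expected to be called roughly in this order:
 *
 *	dm_helpers_dp_mst_write_payload_allocation_table(ctx, stream, &table, true);
 *	dm_helpers_dp_mst_poll_for_allocation_change_trigger(ctx, stream);
 *	dm_helpers_dp_mst_send_payload_allocation(ctx, stream, true);
 */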
65
66bool dm_helpers_dp_mst_start_top_mgr(
67 struct dc_context *ctx,
68 const struct dc_link *link,
69 bool boot);
70
71void dm_helpers_dp_mst_stop_top_mgr(
72 struct dc_context *ctx,
73 const struct dc_link *link);
74
75/**
76 * OS specific aux read callback.
77 */
78bool dm_helpers_dp_read_dpcd(
79 struct dc_context *ctx,
80 const struct dc_link *link,
81 uint32_t address,
82 uint8_t *data,
83 uint32_t size);
84
85/**
86 * OS specific aux write callback.
87 */
88bool dm_helpers_dp_write_dpcd(
89 struct dc_context *ctx,
90 const struct dc_link *link,
91 uint32_t address,
92 const uint8_t *data,
93 uint32_t size);
94
95bool dm_helpers_submit_i2c(
96 struct dc_context *ctx,
97 const struct dc_link *link,
98 struct i2c_command *cmd);
99
100
101#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
new file mode 100644
index 000000000000..7a3f10354830
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -0,0 +1,424 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/**
27 * This file defines external dependencies of Display Core.
28 */
29
30#ifndef __DM_SERVICES_H__
31
32#define __DM_SERVICES_H__
33
34/* TODO: remove when DC is complete. */
35#include "dm_services_types.h"
36#include "logger_interface.h"
37#include "link_service_types.h"
38
39#undef DEPRECATED
40
41/*
42 *
43 * general debug capabilities
44 *
45 */
46#if defined(CONFIG_DEBUG_KERNEL) || defined(CONFIG_DEBUG_DRIVER)
47
48#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
49#define ASSERT_CRITICAL(expr) do { \
50 if (WARN_ON(!(expr))) { \
51 kgdb_breakpoint(); \
52 } \
53} while (0)
54#else
55#define ASSERT_CRITICAL(expr) do { \
56 if (WARN_ON(!(expr))) { \
57 ; \
58 } \
59} while (0)
60#endif
61
62#if defined(CONFIG_DEBUG_KERNEL_DC)
63#define ASSERT(expr) ASSERT_CRITICAL(expr)
64
65#else
66#define ASSERT(expr) WARN_ON(!(expr))
67#endif
68
69#define BREAK_TO_DEBUGGER() ASSERT(0)
70
71#endif /* CONFIG_DEBUG_KERNEL || CONFIG_DEBUG_DRIVER */
72
73#define DC_ERR(...) do { \
74 dm_error(__VA_ARGS__); \
75 BREAK_TO_DEBUGGER(); \
76} while (0)
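
/*
 * Usage sketch (hypothetical check): ASSERT() only warns, ASSERT_CRITICAL()
 * additionally breaks into kgdb when it is available, and DC_ERR() logs the
 * message and then calls BREAK_TO_DEBUGGER():
 *
 *	if (stream == NULL) {
 *		DC_ERR("DC: stream is NULL!\n");
 *		return false;
 *	}
 */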
77
78#define dm_alloc(size) kzalloc(size, GFP_KERNEL)
79#define dm_realloc(ptr, size) krealloc(ptr, size, GFP_KERNEL)
80#define dm_free(ptr) kfree(ptr)
81
82irq_handler_idx dm_register_interrupt(
83 struct dc_context *ctx,
84 struct dc_interrupt_params *int_params,
85 interrupt_handler ih,
86 void *handler_args);
87
88
89/*
90 *
91 * GPU registers access
92 *
93 */
94
95#define dm_read_reg(ctx, address) \
96 dm_read_reg_func(ctx, address, __func__)
97
98static inline uint32_t dm_read_reg_func(
99 const struct dc_context *ctx,
100 uint32_t address,
101 const char *func_name)
102{
103 uint32_t value;
104
105 if (address == 0) {
106 DC_ERR("invalid register read. address = 0");
107 return 0;
108 }
109
110 value = cgs_read_register(ctx->cgs_device, address);
111
112#if defined(__DAL_REGISTER_LOGGER__)
113 if (true == dal_reg_logger_should_dump_register()) {
114 dal_reg_logger_rw_count_increment();
115 DRM_INFO("%s DC_READ_REG: 0x%x 0x%x\n", func_name, address, value);
116 }
117#endif
118 return value;
119}
120
121#define dm_write_reg(ctx, address, value) \
122 dm_write_reg_func(ctx, address, value, __func__)
123
124static inline void dm_write_reg_func(
125 const struct dc_context *ctx,
126 uint32_t address,
127 uint32_t value,
128 const char *func_name)
129{
130#if defined(__DAL_REGISTER_LOGGER__)
131 if (true == dal_reg_logger_should_dump_register()) {
132 dal_reg_logger_rw_count_increment();
133 DRM_INFO("%s DC_WRITE_REG: 0x%x 0x%x\n", func_name, address, value);
134 }
135#endif
136
137 if (address == 0) {
138 DC_ERR("invalid register write. address = 0");
139 return;
140 }
141 cgs_write_register(ctx->cgs_device, address, value);
142}
143
144static inline uint32_t dm_read_index_reg(
145 const struct dc_context *ctx,
146 enum cgs_ind_reg addr_space,
147 uint32_t index)
148{
149 return cgs_read_ind_register(ctx->cgs_device, addr_space, index);
150}
151
152static inline void dm_write_index_reg(
153 const struct dc_context *ctx,
154 enum cgs_ind_reg addr_space,
155 uint32_t index,
156 uint32_t value)
157{
158 cgs_write_ind_register(ctx->cgs_device, addr_space, index, value);
159}
160
161static inline uint32_t get_reg_field_value_ex(
162 uint32_t reg_value,
163 uint32_t mask,
164 uint8_t shift)
165{
166 return (mask & reg_value) >> shift;
167}
168
169#define get_reg_field_value(reg_value, reg_name, reg_field)\
170 get_reg_field_value_ex(\
171 (reg_value),\
172 reg_name ## __ ## reg_field ## _MASK,\
173 reg_name ## __ ## reg_field ## __SHIFT)
174
175static inline uint32_t set_reg_field_value_ex(
176 uint32_t reg_value,
177 uint32_t value,
178 uint32_t mask,
179 uint8_t shift)
180{
181 return (reg_value & ~mask) | (mask & (value << shift));
182}
183
184#define set_reg_field_value(reg_value, value, reg_name, reg_field)\
185 (reg_value) = set_reg_field_value_ex(\
186 (reg_value),\
187 (value),\
188 reg_name ## __ ## reg_field ## _MASK,\
189 reg_name ## __ ## reg_field ## __SHIFT)
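
/*
 * Read-modify-write sketch using the helpers above (the same pattern used by
 * the DCE8 timing generator earlier in this patch; 'addr' stands for whatever
 * register the caller is programming):
 *
 *	uint32_t value = dm_read_reg(ctx, addr);
 *
 *	set_reg_field_value(value, 1, CRTC_START_LINE_CONTROL, CRTC_PREFETCH_EN);
 *	dm_write_reg(ctx, addr, value);
 */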
190
191uint32_t generic_reg_update_ex(const struct dc_context *ctx,
192 uint32_t addr, uint32_t reg_val, int n,
193 uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
194
195#define FD(reg_field) reg_field ## __SHIFT, \
196 reg_field ## _MASK
197
198/*
199 * return the number of polls before the condition is met;
200 * return 0 if the condition is not met after the specified number of tries (timeout)
201 */
202unsigned int generic_reg_wait(const struct dc_context *ctx,
203 uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
204 unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
205 const char *func_name);
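
/*
 * Call sketch (illustrative values): poll the field selected by mask/shift
 * every 10 us, up to 1000 tries, until it reads back 1; the return value is
 * the number of polls taken, or 0 on timeout:
 *
 *	generic_reg_wait(ctx, addr, mask, shift, 1, 10, 1000, __func__);
 */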
206
207/**************************************
208 * Power Play (PP) interfaces
209 **************************************/
210
211/* DAL calls this function to notify PP about clocks it needs for the Mode Set.
212 * This is done *before* it changes DCE clock.
213 *
214 * If required clock is higher than current, then PP will increase the voltage.
215 *
216 * If required clock is lower than current, then PP will defer reduction of
217 * voltage until the call to dc_service_pp_post_dce_clock_change().
218 *
219 * \input - Contains clocks needed for Mode Set.
220 *
221 * \output - Contains clocks adjusted by PP which DAL should use for Mode Set.
222 * Valid only if the function returns true.
223 *
224 * \returns true - call is successful
225 * false - call failed
226 */
227bool dm_pp_pre_dce_clock_change(
228 struct dc_context *ctx,
229 struct dm_pp_gpu_clock_range *requested_state,
230 struct dm_pp_gpu_clock_range *actual_state);
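
/*
 * Call-order sketch (illustrative): a mode set is expected to be bracketed
 * roughly as
 *
 *	dm_pp_pre_dce_clock_change(ctx, &requested, &actual);
 *	...program DCE clocks / apply the mode using 'actual'...
 *	dm_pp_apply_display_requirements(ctx, &pp_display_cfg);
 *
 * so PP can raise the voltage before the clock increase and defer lowering
 * it until the new, lower clocks are actually in use.
 */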
231
232/* The returned clock ranges are 'static' system clocks which will be used for
233 * mode validation purposes.
234 *
235 * \returns true - call is successful
236 * false - call failed
237 */
238bool dc_service_get_system_clocks_range(
239 const struct dc_context *ctx,
240 struct dm_pp_gpu_clock_range *sys_clks);
241
242/* Gets valid clock levels from pplib
243 *
244 * input: clk_type - display clk / sclk / mem clk
245 *
246 * output: array of valid clock levels for given type in ascending order,
247 * with invalid levels filtered out
248 *
249 */
250bool dm_pp_get_clock_levels_by_type(
251 const struct dc_context *ctx,
252 enum dm_pp_clock_type clk_type,
253 struct dm_pp_clock_levels *clk_level_info);
254
255bool dm_pp_get_clock_levels_by_type_with_latency(
256 const struct dc_context *ctx,
257 enum dm_pp_clock_type clk_type,
258 struct dm_pp_clock_levels_with_latency *clk_level_info);
259
260bool dm_pp_get_clock_levels_by_type_with_voltage(
261 const struct dc_context *ctx,
262 enum dm_pp_clock_type clk_type,
263 struct dm_pp_clock_levels_with_voltage *clk_level_info);
264
265bool dm_pp_notify_wm_clock_changes(
266 const struct dc_context *ctx,
267 struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
268
269/* DAL calls this function to notify PP about completion of Mode Set.
270 * For PP it means that current DCE clocks are those which were returned
271 * by dm_pp_pre_dce_clock_change(), in the 'output' parameter.
272 *
273 * If the clocks are higher than before, then PP does nothing.
274 *
275 * If the clocks are lower than before, then PP reduces the voltage.
276 *
277 * \returns true - call is successful
278 * false - call failed
279 */
280bool dm_pp_apply_display_requirements(
281 const struct dc_context *ctx,
282 const struct dm_pp_display_configuration *pp_display_cfg);
283
284bool dm_pp_apply_power_level_change_request(
285 const struct dc_context *ctx,
286 struct dm_pp_power_level_change_request *level_change_req);
287
288bool dm_pp_apply_clock_for_voltage_request(
289 const struct dc_context *ctx,
290 struct dm_pp_clock_for_voltage_req *clock_for_voltage_req);
291
292bool dm_pp_get_static_clocks(
293 const struct dc_context *ctx,
294 struct dm_pp_static_clock_info *static_clk_info);
295
296/****** end of PP interfaces ******/
297
298enum platform_method {
299 PM_GET_AVAILABLE_METHODS = 1 << 0,
300 PM_GET_LID_STATE = 1 << 1,
301 PM_GET_EXTENDED_BRIGHNESS_CAPS = 1 << 2
302};
303
304struct platform_info_params {
305 enum platform_method method;
306 void *data;
307};
308
309struct platform_info_brightness_caps {
310 uint8_t ac_level_percentage;
311 uint8_t dc_level_percentage;
312};
313
314struct platform_info_ext_brightness_caps {
315 struct platform_info_brightness_caps basic_caps;
316 struct data_point {
317 uint8_t luminance;
318 uint8_t signal_level;
319 } data_points[99];
320
321 uint8_t data_points_num;
322 uint8_t min_input_signal;
323 uint8_t max_input_signal;
324};
325
326bool dm_get_platform_info(
327 struct dc_context *ctx,
328 struct platform_info_params *params);
329
330struct persistent_data_flag {
331 bool save_per_link;
332 bool save_per_edid;
333};
334
335/* Call to write data in registry editor for persistent data storage.
336 *
337 * \inputs sink - identify edid/link for registry folder creation
338 * module name - identify folders for registry
339 * key name - identify keys within folders for registry
340 * params - value to write in defined folder/key
341 * size - size of the input params
342 * flag - determine whether to save by link or edid
343 *
344 * \returns true - call is successful
345 * false - call failed
346 *
347 * sink module key
348 * -----------------------------------------------------------------------------
349 * NULL NULL NULL - failure
350 * NULL NULL - - create key with param value
351 * under base folder
352 * NULL - NULL - create module folder under base folder
353 * - NULL NULL - failure
354 * NULL - - - create key under module folder
355 * with no edid/link identification
356 * - NULL - - create key with param value
357 * under base folder
358 * - - NULL - create module folder under base folder
359 * - - - - create key under module folder
360 * with edid/link identification
361 */
362bool dm_write_persistent_data(struct dc_context *ctx,
363 const struct dc_sink *sink,
364 const char *module_name,
365 const char *key_name,
366 void *params,
367 unsigned int size,
368 struct persistent_data_flag *flag);
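
/*
 * Usage sketch (module and key names below are hypothetical): store a
 * per-EDID value that can later be fetched back with the matching
 * dm_read_persistent_data() call:
 *
 *	struct persistent_data_flag flag = { .save_per_edid = true };
 *	uint32_t val = 1;
 *
 *	dm_write_persistent_data(ctx, sink, "edid_caps", "audio_enable",
 *			&val, sizeof(val), &flag);
 */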
369
370
371/* Call to read data in registry editor for persistent data storage.
372 *
373 * \inputs sink - identify edid/link for registry folder creation
374 * module name - identify folders for registry
375 * key name - identify keys within folders for registry
376 * size - size of the output params
377 * flag - determine whether it was saved by link or edid
378 *
379 * \returns params - value read from defined folder/key
380 * true - call is successful
381 * false - call failed
382 *
383 * sink module key
384 * -----------------------------------------------------------------------------
385 * NULL NULL NULL - failure
386 * NULL NULL - - read key under base folder
387 * NULL - NULL - failure
388 * - NULL NULL - failure
389 * NULL - - - read key under module folder
390 * with no edid/link identification
391 * - NULL - - read key under base folder
392 * - - NULL - failure
393 * - - - - read key under module folder
394 * with edid/link identification
395 */
396bool dm_read_persistent_data(struct dc_context *ctx,
397 const struct dc_sink *sink,
398 const char *module_name,
399 const char *key_name,
400 void *params,
401 unsigned int size,
402 struct persistent_data_flag *flag);
403
404void dm_delay_in_microseconds
405 (struct dc_context *ctx, unsigned int microSeconds);
406
407bool dm_query_extended_brightness_caps
408 (struct dc_context *ctx, enum dm_acpi_display_type display,
409 struct dm_acpi_atif_backlight_caps *pCaps);
410
411bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
412
413/*
414 *
415 * print-out services
416 *
417 */
418#define dm_log_to_buffer(buffer, size, fmt, args)\
419 vsnprintf(buffer, size, fmt, args)
420
421long dm_get_pid(void);
422long dm_get_tgid(void);
423
424#endif /* __DM_SERVICES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
new file mode 100644
index 000000000000..44bad17fa318
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -0,0 +1,242 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DM_SERVICES_TYPES_H__
27#define __DM_SERVICES_TYPES_H__
28
29#include "os_types.h"
30#include "dc_types.h"
31
32struct dm_pp_clock_range {
33 int min_khz;
34 int max_khz;
35};
36
37enum dm_pp_clocks_state {
38 DM_PP_CLOCKS_STATE_INVALID,
39 DM_PP_CLOCKS_STATE_ULTRA_LOW,
40 DM_PP_CLOCKS_STATE_LOW,
41 DM_PP_CLOCKS_STATE_NOMINAL,
42 DM_PP_CLOCKS_STATE_PERFORMANCE,
43
44 /* Starting from DCE11, Max 8 levels of DPM state supported. */
45 DM_PP_CLOCKS_DPM_STATE_LEVEL_INVALID = DM_PP_CLOCKS_STATE_INVALID,
46 DM_PP_CLOCKS_DPM_STATE_LEVEL_0 = DM_PP_CLOCKS_STATE_ULTRA_LOW,
47 DM_PP_CLOCKS_DPM_STATE_LEVEL_1 = DM_PP_CLOCKS_STATE_LOW,
48 DM_PP_CLOCKS_DPM_STATE_LEVEL_2 = DM_PP_CLOCKS_STATE_NOMINAL,
49 /* to be backward compatible */
50 DM_PP_CLOCKS_DPM_STATE_LEVEL_3 = DM_PP_CLOCKS_STATE_PERFORMANCE,
51 DM_PP_CLOCKS_DPM_STATE_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
52 DM_PP_CLOCKS_DPM_STATE_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
53 DM_PP_CLOCKS_DPM_STATE_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
54 DM_PP_CLOCKS_DPM_STATE_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
55};
56
57struct dm_pp_gpu_clock_range {
58 enum dm_pp_clocks_state clock_state;
59 struct dm_pp_clock_range sclk;
60 struct dm_pp_clock_range mclk;
61 struct dm_pp_clock_range eclk;
62 struct dm_pp_clock_range dclk;
63};
64
65enum dm_pp_clock_type {
66 DM_PP_CLOCK_TYPE_DISPLAY_CLK = 1,
67 DM_PP_CLOCK_TYPE_ENGINE_CLK, /* System clock */
68 DM_PP_CLOCK_TYPE_MEMORY_CLK,
69 DM_PP_CLOCK_TYPE_DCFCLK,
70 DM_PP_CLOCK_TYPE_SOCCLK,
71 DM_PP_CLOCK_TYPE_PIXELCLK,
72 DM_PP_CLOCK_TYPE_DISPLAYPHYCLK
73};
74
75#define DC_DECODE_PP_CLOCK_TYPE(clk_type) \
76 (clk_type) == DM_PP_CLOCK_TYPE_DISPLAY_CLK ? "Display" : \
77 (clk_type) == DM_PP_CLOCK_TYPE_ENGINE_CLK ? "Engine" : \
78 (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : "Invalid"
79
80#define DM_PP_MAX_CLOCK_LEVELS 8
81
82struct dm_pp_clock_levels {
83 uint32_t num_levels;
84 uint32_t clocks_in_khz[DM_PP_MAX_CLOCK_LEVELS];
85};
86
87struct dm_pp_clock_with_latency {
88 uint32_t clocks_in_khz;
89 uint32_t latency_in_us;
90};
91
92struct dm_pp_clock_levels_with_latency {
93 uint32_t num_levels;
94 struct dm_pp_clock_with_latency data[DM_PP_MAX_CLOCK_LEVELS];
95};
96
97struct dm_pp_clock_with_voltage {
98 uint32_t clocks_in_khz;
99 uint32_t voltage_in_mv;
100};
101
102struct dm_pp_clock_levels_with_voltage {
103 uint32_t num_levels;
104 struct dm_pp_clock_with_voltage data[DM_PP_MAX_CLOCK_LEVELS];
105};
106
107struct dm_pp_single_disp_config {
108 enum signal_type signal;
109 uint8_t transmitter;
110 uint8_t ddi_channel_mapping;
111 uint8_t pipe_idx;
112 uint32_t src_height;
113 uint32_t src_width;
114 uint32_t v_refresh;
115 uint32_t sym_clock; /* HDMI only */
116 struct dc_link_settings link_settings; /* DP only */
117};
118
119#define MAX_WM_SETS 4
120
121enum dm_pp_wm_set_id {
122 WM_SET_A = 0,
123 WM_SET_B,
124 WM_SET_C,
125 WM_SET_D,
126 WM_SET_INVALID = 0xffff,
127};
128
129struct dm_pp_clock_range_for_wm_set {
130 enum dm_pp_wm_set_id wm_set_id;
131 uint32_t wm_min_eng_clk_in_khz;
132 uint32_t wm_max_eng_clk_in_khz;
133 uint32_t wm_min_memg_clk_in_khz;
134 uint32_t wm_max_mem_clk_in_khz;
135};
136
137struct dm_pp_wm_sets_with_clock_ranges {
138 uint32_t num_wm_sets;
139 struct dm_pp_clock_range_for_wm_set wm_clk_ranges[MAX_WM_SETS];
140};
141
142#define MAX_DISPLAY_CONFIGS 6
143
144struct dm_pp_display_configuration {
145 bool nb_pstate_switch_disable;/* controls NB PState switch */
146 bool cpu_cc6_disable; /* controls CPU CState switch ( on or off) */
147 bool cpu_pstate_disable;
148 uint32_t cpu_pstate_separation_time;
149
150 uint32_t min_memory_clock_khz;
151 uint32_t min_engine_clock_khz;
152 uint32_t min_engine_clock_deep_sleep_khz;
153
154 uint32_t avail_mclk_switch_time_us;
155 uint32_t avail_mclk_switch_time_in_disp_active_us;
156
157 uint32_t disp_clk_khz;
158
159 bool all_displays_in_sync;
160
161 uint8_t display_count;
162 struct dm_pp_single_disp_config disp_configs[MAX_DISPLAY_CONFIGS];
163
164 /*Controller Index of primary display - used in MCLK SMC switching hang
165 * SW Workaround*/
166 uint8_t crtc_index;
167 /*htotal*1000/pixelclk - used in MCLK SMC switching hang SW Workaround*/
168 uint32_t line_time_in_us;
169};
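
/*
 * Worked example for line_time_in_us above (illustrative): htotal = 1100 at a
 * 110000 kHz pixel clock gives 1100 * 1000 / 110000 = 10 us per scan line.
 */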
170
171struct dm_bl_data_point {
172 /* Brightness level in percentage */
173 uint8_t luminance;
174 /* Brightness level as effective value in range 0-255,
175 * corresponding to above percentage
176 */
177 uint8_t signalLevel;
178};
179
180/* Total size of the structure should not exceed 256 bytes */
181struct dm_acpi_atif_backlight_caps {
182
183
184 uint16_t size; /* Bytes 0-1 (2 bytes) */
185	uint16_t flags; /* Bytes 2-3 (2 bytes) */
186 uint8_t errorCode; /* Byte 4 */
187 uint8_t acLevelPercentage; /* Byte 5 */
188 uint8_t dcLevelPercentage; /* Byte 6 */
189 uint8_t minInputSignal; /* Byte 7 */
190 uint8_t maxInputSignal; /* Byte 8 */
191 uint8_t numOfDataPoints; /* Byte 9 */
192 struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
193};
194
195enum dm_acpi_display_type {
196 AcpiDisplayType_LCD1 = 0,
197 AcpiDisplayType_CRT1 = 1,
198 AcpiDisplayType_DFP1 = 3,
199 AcpiDisplayType_CRT2 = 4,
200 AcpiDisplayType_LCD2 = 5,
201 AcpiDisplayType_DFP2 = 7,
202 AcpiDisplayType_DFP3 = 9,
203 AcpiDisplayType_DFP4 = 10,
204 AcpiDisplayType_DFP5 = 11,
205 AcpiDisplayType_DFP6 = 12
206};
207
208enum dm_pp_power_level {
209 DM_PP_POWER_LEVEL_INVALID,
210 DM_PP_POWER_LEVEL_ULTRA_LOW,
211 DM_PP_POWER_LEVEL_LOW,
212 DM_PP_POWER_LEVEL_NOMINAL,
213 DM_PP_POWER_LEVEL_PERFORMANCE,
214
215 DM_PP_POWER_LEVEL_0 = DM_PP_POWER_LEVEL_ULTRA_LOW,
216 DM_PP_POWER_LEVEL_1 = DM_PP_POWER_LEVEL_LOW,
217 DM_PP_POWER_LEVEL_2 = DM_PP_POWER_LEVEL_NOMINAL,
218 DM_PP_POWER_LEVEL_3 = DM_PP_POWER_LEVEL_PERFORMANCE,
219 DM_PP_POWER_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
220 DM_PP_POWER_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
221 DM_PP_POWER_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
222 DM_PP_POWER_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
223};
224
225struct dm_pp_power_level_change_request {
226 enum dm_pp_power_level power_level;
227};
228
229struct dm_pp_clock_for_voltage_req {
230 enum dm_pp_clock_type clk_type;
231 uint32_t clocks_in_khz;
232};
233
234struct dm_pp_static_clock_info {
235 uint32_t max_sclk_khz;
236 uint32_t max_mclk_khz;
237
238 /* max possible display block clocks state */
239 enum dm_pp_clocks_state max_clocks_state;
240};
241
242#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
new file mode 100644
index 000000000000..a15c257fd2fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -0,0 +1,38 @@
1#
2# Makefile for the 'gpio' sub-component of DAL.
3# It provides the control and status of HW GPIO pins.
4
5GPIO = gpio_base.o gpio_service.o hw_factory.o \
6 hw_gpio.o hw_hpd.o hw_ddc.o hw_translate.o
7
8AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO))
9
10AMD_DISPLAY_FILES += $(AMD_DAL_GPIO)
11
12###############################################################################
13# DCE 8x
14###############################################################################
15# all DCE8.x are derived from DCE8.0
16GPIO_DCE80 = hw_translate_dce80.o hw_factory_dce80.o
17
18AMD_DAL_GPIO_DCE80 = $(addprefix $(AMDDALPATH)/dc/gpio/dce80/,$(GPIO_DCE80))
19
20AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE80)
21
22###############################################################################
23# DCE 11x
24###############################################################################
25GPIO_DCE110 = hw_translate_dce110.o hw_factory_dce110.o
26
27AMD_DAL_GPIO_DCE110 = $(addprefix $(AMDDALPATH)/dc/gpio/dce110/,$(GPIO_DCE110))
28
29AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE110)
30
31###############################################################################
32# Diagnostics on FPGA
33###############################################################################
34GPIO_DIAG_FPGA = hw_translate_diag.o hw_factory_diag.o
35
36AMD_DAL_GPIO_DIAG_FPGA = $(addprefix $(AMDDALPATH)/dc/gpio/diagnostics/,$(GPIO_DIAG_FPGA))
37
38AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DIAG_FPGA)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
new file mode 100644
index 000000000000..20d81bca119c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright 2013-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/gpio_types.h"
28#include "../hw_factory.h"
29
30#include "hw_factory_dce110.h"
31
32#include "dce/dce_11_0_d.h"
33#include "dce/dce_11_0_sh_mask.h"
34
35/* set field name */
36#define SF_HPD(reg_name, field_name, post_fix)\
37 .field_name = reg_name ## __ ## field_name ## post_fix
38
39#define REG(reg_name)\
40 mm ## reg_name
41
42#define REGI(reg_name, block, id)\
43 mm ## block ## id ## _ ## reg_name
44
45#include "../hw_gpio.h"
46#include "../hw_ddc.h"
47#include "../hw_hpd.h"
48
49#include "reg_helper.h"
50#include "../hpd_regs.h"
51
52#define hpd_regs(id) \
53{\
54 HPD_REG_LIST(id)\
55}
56
57static const struct hpd_registers hpd_regs[] = {
58 hpd_regs(0),
59 hpd_regs(1),
60 hpd_regs(2),
61 hpd_regs(3),
62 hpd_regs(4),
63 hpd_regs(5)
64};
65
66static const struct hpd_sh_mask hpd_shift = {
67 HPD_MASK_SH_LIST(__SHIFT)
68};
69
70static const struct hpd_sh_mask hpd_mask = {
71 HPD_MASK_SH_LIST(_MASK)
72};
73
74#include "../ddc_regs.h"
75
76 /* set field name */
77#define SF_DDC(reg_name, field_name, post_fix)\
78 .field_name = reg_name ## __ ## field_name ## post_fix
79
80static const struct ddc_registers ddc_data_regs[] = {
81 ddc_data_regs(1),
82 ddc_data_regs(2),
83 ddc_data_regs(3),
84 ddc_data_regs(4),
85 ddc_data_regs(5),
86 ddc_data_regs(6),
87 ddc_vga_data_regs,
88 ddc_i2c_data_regs
89};
90
91static const struct ddc_registers ddc_clk_regs[] = {
92 ddc_clk_regs(1),
93 ddc_clk_regs(2),
94 ddc_clk_regs(3),
95 ddc_clk_regs(4),
96 ddc_clk_regs(5),
97 ddc_clk_regs(6),
98 ddc_vga_clk_regs,
99 ddc_i2c_clk_regs
100};
101
102static const struct ddc_sh_mask ddc_shift = {
103 DDC_MASK_SH_LIST(__SHIFT)
104};
105
106static const struct ddc_sh_mask ddc_mask = {
107 DDC_MASK_SH_LIST(_MASK)
108};
109
110static void define_ddc_registers(
111 struct hw_gpio_pin *pin,
112 uint32_t en)
113{
114 struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
115
116 switch (pin->id) {
117 case GPIO_ID_DDC_DATA:
118 ddc->regs = &ddc_data_regs[en];
119 ddc->base.regs = &ddc_data_regs[en].gpio;
120 break;
121 case GPIO_ID_DDC_CLOCK:
122 ddc->regs = &ddc_clk_regs[en];
123 ddc->base.regs = &ddc_clk_regs[en].gpio;
124 break;
125 default:
126 ASSERT_CRITICAL(false);
127 return;
128 }
129
130 ddc->shifts = &ddc_shift;
131 ddc->masks = &ddc_mask;
132
133}
134
135static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
136{
137 struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
138
139 hpd->regs = &hpd_regs[en];
140 hpd->shifts = &hpd_shift;
141 hpd->masks = &hpd_mask;
142 hpd->base.regs = &hpd_regs[en].gpio;
143}
144
145static const struct hw_factory_funcs funcs = {
146 .create_ddc_data = dal_hw_ddc_create,
147 .create_ddc_clock = dal_hw_ddc_create,
148 .create_generic = NULL,
149 .create_hpd = dal_hw_hpd_create,
150 .create_sync = NULL,
151 .create_gsl = NULL,
152 .define_hpd_registers = define_hpd_registers,
153 .define_ddc_registers = define_ddc_registers
154};
155
156/*
157 * dal_hw_factory_dce110_init
158 *
159 * @brief
160 * Initialize HW factory function pointers and pin info
161 *
162 * @param
163 * struct hw_factory *factory - [out] struct of function pointers
164 */
165void dal_hw_factory_dce110_init(struct hw_factory *factory)
166{
167 /*TODO check ASIC CAPs*/
168 factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
169 factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
170 factory->number_of_pins[GPIO_ID_GENERIC] = 7;
171 factory->number_of_pins[GPIO_ID_HPD] = 6;
172 factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
173 factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
174 factory->number_of_pins[GPIO_ID_SYNC] = 2;
175 factory->number_of_pins[GPIO_ID_GSL] = 4;
176
177 factory->funcs = &funcs;
178}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
new file mode 100644
index 000000000000..ecf06ed0d587
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2013-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_FACTORY_DCE110_H__
27#define __DAL_HW_FACTORY_DCE110_H__
28
29/* Initialize HW factory function pointers and pin info */
30void dal_hw_factory_dce110_init(struct hw_factory *factory);
31
32#endif /* __DAL_HW_FACTORY_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
new file mode 100644
index 000000000000..ac4cddbba815
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
@@ -0,0 +1,387 @@
1/*
2 * Copyright 2013-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/*
27 * Pre-requisites: headers required by the header of this unit
28 */
29
30#include "dm_services.h"
31#include "include/gpio_types.h"
32#include "../hw_translate.h"
33
34#include "hw_translate_dce110.h"
35
36#include "dce/dce_11_0_d.h"
37#include "dce/dce_11_0_sh_mask.h"
38
39static bool offset_to_id(
40 uint32_t offset,
41 uint32_t mask,
42 enum gpio_id *id,
43 uint32_t *en)
44{
45 switch (offset) {
46 /* GENERIC */
47 case mmDC_GPIO_GENERIC_A:
48 *id = GPIO_ID_GENERIC;
49 switch (mask) {
50 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
51 *en = GPIO_GENERIC_A;
52 return true;
53 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
54 *en = GPIO_GENERIC_B;
55 return true;
56 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
57 *en = GPIO_GENERIC_C;
58 return true;
59 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
60 *en = GPIO_GENERIC_D;
61 return true;
62 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
63 *en = GPIO_GENERIC_E;
64 return true;
65 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
66 *en = GPIO_GENERIC_F;
67 return true;
68 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
69 *en = GPIO_GENERIC_G;
70 return true;
71 default:
72 ASSERT_CRITICAL(false);
73 return false;
74 }
75 break;
76 /* HPD */
77 case mmDC_GPIO_HPD_A:
78 *id = GPIO_ID_HPD;
79 switch (mask) {
80 case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
81 *en = GPIO_HPD_1;
82 return true;
83 case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
84 *en = GPIO_HPD_2;
85 return true;
86 case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
87 *en = GPIO_HPD_3;
88 return true;
89 case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
90 *en = GPIO_HPD_4;
91 return true;
92 case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
93 *en = GPIO_HPD_5;
94 return true;
95 case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
96 *en = GPIO_HPD_6;
97 return true;
98 default:
99 ASSERT_CRITICAL(false);
100 return false;
101 }
102 break;
103 /* SYNCA */
104 case mmDC_GPIO_SYNCA_A:
105 *id = GPIO_ID_SYNC;
106 switch (mask) {
107 case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
108 *en = GPIO_SYNC_HSYNC_A;
109 return true;
110 case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
111 *en = GPIO_SYNC_VSYNC_A;
112 return true;
113 default:
114 ASSERT_CRITICAL(false);
115 return false;
116 }
117 break;
118 /* mmDC_GPIO_GENLK_MASK */
119 case mmDC_GPIO_GENLK_A:
120 *id = GPIO_ID_GSL;
121 switch (mask) {
122 case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
123 *en = GPIO_GSL_GENLOCK_CLOCK;
124 return true;
125 case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
126 *en = GPIO_GSL_GENLOCK_VSYNC;
127 return true;
128 case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
129 *en = GPIO_GSL_SWAPLOCK_A;
130 return true;
131 case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
132 *en = GPIO_GSL_SWAPLOCK_B;
133 return true;
134 default:
135 ASSERT_CRITICAL(false);
136 return false;
137 }
138 break;
139 /* DDC */
140 /* we don't care about the GPIO_ID for DDC
141 * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
142 * directly in the create method */
143 case mmDC_GPIO_DDC1_A:
144 *en = GPIO_DDC_LINE_DDC1;
145 return true;
146 case mmDC_GPIO_DDC2_A:
147 *en = GPIO_DDC_LINE_DDC2;
148 return true;
149 case mmDC_GPIO_DDC3_A:
150 *en = GPIO_DDC_LINE_DDC3;
151 return true;
152 case mmDC_GPIO_DDC4_A:
153 *en = GPIO_DDC_LINE_DDC4;
154 return true;
155 case mmDC_GPIO_DDC5_A:
156 *en = GPIO_DDC_LINE_DDC5;
157 return true;
158 case mmDC_GPIO_DDC6_A:
159 *en = GPIO_DDC_LINE_DDC6;
160 return true;
161 case mmDC_GPIO_DDCVGA_A:
162 *en = GPIO_DDC_LINE_DDC_VGA;
163 return true;
164 /* GPIO_I2CPAD */
165 case mmDC_GPIO_I2CPAD_A:
166 *en = GPIO_DDC_LINE_I2C_PAD;
167 return true;
168 /* Not implemented */
169 case mmDC_GPIO_PWRSEQ_A:
170 case mmDC_GPIO_PAD_STRENGTH_1:
171 case mmDC_GPIO_PAD_STRENGTH_2:
172 case mmDC_GPIO_DEBUG:
173 return false;
174 /* UNEXPECTED */
175 default:
176 ASSERT_CRITICAL(false);
177 return false;
178 }
179}
180
181static bool id_to_offset(
182 enum gpio_id id,
183 uint32_t en,
184 struct gpio_pin_info *info)
185{
186 bool result = true;
187
188 switch (id) {
189 case GPIO_ID_DDC_DATA:
190 info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
191 switch (en) {
192 case GPIO_DDC_LINE_DDC1:
193 info->offset = mmDC_GPIO_DDC1_A;
194 break;
195 case GPIO_DDC_LINE_DDC2:
196 info->offset = mmDC_GPIO_DDC2_A;
197 break;
198 case GPIO_DDC_LINE_DDC3:
199 info->offset = mmDC_GPIO_DDC3_A;
200 break;
201 case GPIO_DDC_LINE_DDC4:
202 info->offset = mmDC_GPIO_DDC4_A;
203 break;
204 case GPIO_DDC_LINE_DDC5:
205 info->offset = mmDC_GPIO_DDC5_A;
206 break;
207 case GPIO_DDC_LINE_DDC6:
208 info->offset = mmDC_GPIO_DDC6_A;
209 break;
210 case GPIO_DDC_LINE_DDC_VGA:
211 info->offset = mmDC_GPIO_DDCVGA_A;
212 break;
213 case GPIO_DDC_LINE_I2C_PAD:
214 info->offset = mmDC_GPIO_I2CPAD_A;
215 break;
216 default:
217 ASSERT_CRITICAL(false);
218 result = false;
219 }
220 break;
221 case GPIO_ID_DDC_CLOCK:
222 info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
223 switch (en) {
224 case GPIO_DDC_LINE_DDC1:
225 info->offset = mmDC_GPIO_DDC1_A;
226 break;
227 case GPIO_DDC_LINE_DDC2:
228 info->offset = mmDC_GPIO_DDC2_A;
229 break;
230 case GPIO_DDC_LINE_DDC3:
231 info->offset = mmDC_GPIO_DDC3_A;
232 break;
233 case GPIO_DDC_LINE_DDC4:
234 info->offset = mmDC_GPIO_DDC4_A;
235 break;
236 case GPIO_DDC_LINE_DDC5:
237 info->offset = mmDC_GPIO_DDC5_A;
238 break;
239 case GPIO_DDC_LINE_DDC6:
240 info->offset = mmDC_GPIO_DDC6_A;
241 break;
242 case GPIO_DDC_LINE_DDC_VGA:
243 info->offset = mmDC_GPIO_DDCVGA_A;
244 break;
245 case GPIO_DDC_LINE_I2C_PAD:
246 info->offset = mmDC_GPIO_I2CPAD_A;
247 break;
248 default:
249 ASSERT_CRITICAL(false);
250 result = false;
251 }
252 break;
253 case GPIO_ID_GENERIC:
254 info->offset = mmDC_GPIO_GENERIC_A;
255 switch (en) {
256 case GPIO_GENERIC_A:
257 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
258 break;
259 case GPIO_GENERIC_B:
260 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
261 break;
262 case GPIO_GENERIC_C:
263 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
264 break;
265 case GPIO_GENERIC_D:
266 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
267 break;
268 case GPIO_GENERIC_E:
269 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
270 break;
271 case GPIO_GENERIC_F:
272 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
273 break;
274 case GPIO_GENERIC_G:
275 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
276 break;
277 default:
278 ASSERT_CRITICAL(false);
279 result = false;
280 }
281 break;
282 case GPIO_ID_HPD:
283 info->offset = mmDC_GPIO_HPD_A;
284 switch (en) {
285 case GPIO_HPD_1:
286 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
287 break;
288 case GPIO_HPD_2:
289 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
290 break;
291 case GPIO_HPD_3:
292 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
293 break;
294 case GPIO_HPD_4:
295 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
296 break;
297 case GPIO_HPD_5:
298 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
299 break;
300 case GPIO_HPD_6:
301 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
302 break;
303 default:
304 ASSERT_CRITICAL(false);
305 result = false;
306 }
307 break;
308 case GPIO_ID_SYNC:
309 switch (en) {
310 case GPIO_SYNC_HSYNC_A:
311 info->offset = mmDC_GPIO_SYNCA_A;
312 info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
313 break;
314 case GPIO_SYNC_VSYNC_A:
315 info->offset = mmDC_GPIO_SYNCA_A;
316 info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
317 break;
318 case GPIO_SYNC_HSYNC_B:
319 case GPIO_SYNC_VSYNC_B:
320 default:
321 ASSERT_CRITICAL(false);
322 result = false;
323 }
324 break;
325 case GPIO_ID_GSL:
326 switch (en) {
327 case GPIO_GSL_GENLOCK_CLOCK:
328 info->offset = mmDC_GPIO_GENLK_A;
329 info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
330 break;
331 case GPIO_GSL_GENLOCK_VSYNC:
332 info->offset = mmDC_GPIO_GENLK_A;
333 info->mask =
334 DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
335 break;
336 case GPIO_GSL_SWAPLOCK_A:
337 info->offset = mmDC_GPIO_GENLK_A;
338 info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
339 break;
340 case GPIO_GSL_SWAPLOCK_B:
341 info->offset = mmDC_GPIO_GENLK_A;
342 info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
343 break;
344 default:
345 ASSERT_CRITICAL(false);
346 result = false;
347 }
348 break;
349 case GPIO_ID_VIP_PAD:
350 default:
351 ASSERT_CRITICAL(false);
352 result = false;
353 }
354
355 if (result) {
356 info->offset_y = info->offset + 2;
357 info->offset_en = info->offset + 1;
358 info->offset_mask = info->offset - 1;
359
360 info->mask_y = info->mask;
361 info->mask_en = info->mask;
362 info->mask_mask = info->mask;
363 }
364
365 return result;
366}
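
/*
 * The fix-ups at the end of id_to_offset() assume each GPIO bank lays its
 * registers out consecutively as MASK, A, EN, Y (e.g. mmDC_GPIO_HPD_MASK,
 * mmDC_GPIO_HPD_A, mmDC_GPIO_HPD_EN, mmDC_GPIO_HPD_Y), which is why the EN,
 * Y and MASK offsets are derived as A + 1, A + 2 and A - 1 from the "A"
 * register that was looked up.
 */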
367
368/* function table */
369static const struct hw_translate_funcs funcs = {
370 .offset_to_id = offset_to_id,
371 .id_to_offset = id_to_offset,
372};
373
374/*
375 * dal_hw_translate_dce110_init
376 *
377 * @brief
378 * Initialize Hw translate function pointers.
379 *
380 * @param
381 * struct hw_translate *tr - [out] struct of function pointers
382 *
383 */
384void dal_hw_translate_dce110_init(struct hw_translate *tr)
385{
386 tr->funcs = &funcs;
387}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
new file mode 100644
index 000000000000..4d16e09853c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2013-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_TRANSLATE_DCE110_H__
27#define __DAL_HW_TRANSLATE_DCE110_H__
28
29struct hw_translate;
30
31/* Initialize Hw translate function pointers */
32void dal_hw_translate_dce110_init(struct hw_translate *tr);
33
34#endif /* __DAL_HW_TRANSLATE_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
new file mode 100644
index 000000000000..48b67866377e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/gpio_types.h"
28#include "../hw_factory.h"
29
30#include "hw_factory_dce80.h"
31
32#include "../hw_gpio.h"
33#include "../hw_ddc.h"
34#include "../hw_hpd.h"
35
36#include "dce/dce_8_0_d.h"
37#include "dce/dce_8_0_sh_mask.h"
38
39#define REG(reg_name)\
40 mm ## reg_name
41
42#include "reg_helper.h"
43#include "../hpd_regs.h"
44
45#define HPD_REG_LIST_DCE8(id) \
46 HPD_GPIO_REG_LIST(id), \
47 .int_status = mmDC_HPD ## id ## _INT_STATUS,\
48 .toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL
49
50#define HPD_MASK_SH_LIST_DCE8(mask_sh) \
51 .DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\
52 .DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\
53 .DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\
54 .DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh
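/* example expansion: HPD_MASK_SH_LIST_DCE8(_MASK) yields
 * .DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK and so on;
 * the DC_HPD1 field layout is reused for every HPD instance */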
55
56#define hpd_regs(id) \
57{\
58 HPD_REG_LIST_DCE8(id)\
59}
60
61static const struct hpd_registers hpd_regs[] = {
62 hpd_regs(1),
63 hpd_regs(2),
64 hpd_regs(3),
65 hpd_regs(4),
66 hpd_regs(5),
67 hpd_regs(6)
68};
69
70static const struct hpd_sh_mask hpd_shift = {
71 HPD_MASK_SH_LIST_DCE8(__SHIFT)
72};
73
74static const struct hpd_sh_mask hpd_mask = {
75 HPD_MASK_SH_LIST_DCE8(_MASK)
76};
77
78#include "../ddc_regs.h"
79
80 /* set field name */
81#define SF_DDC(reg_name, field_name, post_fix)\
82 .field_name = reg_name ## __ ## field_name ## post_fix
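/* example expansion: SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, _MASK)
 * becomes .DC_I2C_DDC1_ENABLE = DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK */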
83
84static const struct ddc_registers ddc_data_regs[] = {
85 ddc_data_regs(1),
86 ddc_data_regs(2),
87 ddc_data_regs(3),
88 ddc_data_regs(4),
89 ddc_data_regs(5),
90 ddc_data_regs(6),
91 ddc_vga_data_regs,
92 ddc_i2c_data_regs
93};
94
95static const struct ddc_registers ddc_clk_regs[] = {
96 ddc_clk_regs(1),
97 ddc_clk_regs(2),
98 ddc_clk_regs(3),
99 ddc_clk_regs(4),
100 ddc_clk_regs(5),
101 ddc_clk_regs(6),
102 ddc_vga_clk_regs,
103 ddc_i2c_clk_regs
104};
105
106static const struct ddc_sh_mask ddc_shift = {
107 DDC_MASK_SH_LIST(__SHIFT)
108};
109
110static const struct ddc_sh_mask ddc_mask = {
111 DDC_MASK_SH_LIST(_MASK)
112};
113
114static void define_ddc_registers(
115 struct hw_gpio_pin *pin,
116 uint32_t en)
117{
118 struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
119
120 switch (pin->id) {
121 case GPIO_ID_DDC_DATA:
122 ddc->regs = &ddc_data_regs[en];
123 ddc->base.regs = &ddc_data_regs[en].gpio;
124 break;
125 case GPIO_ID_DDC_CLOCK:
126 ddc->regs = &ddc_clk_regs[en];
127 ddc->base.regs = &ddc_clk_regs[en].gpio;
128 break;
129 default:
130 ASSERT_CRITICAL(false);
131 return;
132 }
133
134 ddc->shifts = &ddc_shift;
135 ddc->masks = &ddc_mask;
136
137}
138
139static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
140{
141 struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
142
143 hpd->regs = &hpd_regs[en];
144 hpd->shifts = &hpd_shift;
145 hpd->masks = &hpd_mask;
146 hpd->base.regs = &hpd_regs[en].gpio;
147}
148
149static const struct hw_factory_funcs funcs = {
150 .create_ddc_data = dal_hw_ddc_create,
151 .create_ddc_clock = dal_hw_ddc_create,
152 .create_generic = NULL,
153 .create_hpd = dal_hw_hpd_create,
154 .create_sync = NULL,
155 .create_gsl = NULL,
156 .define_hpd_registers = define_hpd_registers,
157 .define_ddc_registers = define_ddc_registers
158};
159
160void dal_hw_factory_dce80_init(
161 struct hw_factory *factory)
162{
163 factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
164 factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
165 factory->number_of_pins[GPIO_ID_GENERIC] = 7;
166 factory->number_of_pins[GPIO_ID_HPD] = 6;
167 factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
168 factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
169 factory->number_of_pins[GPIO_ID_SYNC] = 2;
170 factory->number_of_pins[GPIO_ID_GSL] = 4;
171
172 factory->funcs = &funcs;
173}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
new file mode 100644
index 000000000000..e78a8b36f35a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_FACTORY_DCE80_H__
27#define __DAL_HW_FACTORY_DCE80_H__
28
29void dal_hw_factory_dce80_init(
30 struct hw_factory *factory);
31
32#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
new file mode 100644
index 000000000000..fabb9da504be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
@@ -0,0 +1,411 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/gpio_types.h"
32#include "../hw_translate.h"
33
34#include "hw_translate_dce80.h"
35
36#include "dce/dce_8_0_d.h"
37#include "dce/dce_8_0_sh_mask.h"
38#include "smu/smu_7_0_1_d.h"
39
40/*
41 * @brief
42 * Returns index of first bit (starting with LSB) which is set
43 */
44static uint32_t index_from_vector(
45 uint32_t vector)
46{
47 uint32_t result = 0;
48 uint32_t mask = 1;
49
50 do {
51 if (vector == mask)
52 return result;
53
54 ++result;
55 mask <<= 1;
56 } while (mask);
57
58 BREAK_TO_DEBUGGER();
59
60 return GPIO_ENUM_UNKNOWN;
61}
62
63static bool offset_to_id(
64 uint32_t offset,
65 uint32_t mask,
66 enum gpio_id *id,
67 uint32_t *en)
68{
69 switch (offset) {
70 /* GENERIC */
71 case mmDC_GPIO_GENERIC_A:
72 *id = GPIO_ID_GENERIC;
73 switch (mask) {
74 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
75 *en = GPIO_GENERIC_A;
76 return true;
77 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
78 *en = GPIO_GENERIC_B;
79 return true;
80 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
81 *en = GPIO_GENERIC_C;
82 return true;
83 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
84 *en = GPIO_GENERIC_D;
85 return true;
86 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
87 *en = GPIO_GENERIC_E;
88 return true;
89 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
90 *en = GPIO_GENERIC_F;
91 return true;
92 case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
93 *en = GPIO_GENERIC_G;
94 return true;
95 default:
96 BREAK_TO_DEBUGGER();
97 return false;
98 }
99 break;
100 /* HPD */
101 case mmDC_GPIO_HPD_A:
102 *id = GPIO_ID_HPD;
103 switch (mask) {
104 case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
105 *en = GPIO_HPD_1;
106 return true;
107 case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
108 *en = GPIO_HPD_2;
109 return true;
110 case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
111 *en = GPIO_HPD_3;
112 return true;
113 case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
114 *en = GPIO_HPD_4;
115 return true;
116 case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
117 *en = GPIO_HPD_5;
118 return true;
119 case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
120 *en = GPIO_HPD_6;
121 return true;
122 default:
123 BREAK_TO_DEBUGGER();
124 return false;
125 }
126 break;
127 /* SYNCA */
128 case mmDC_GPIO_SYNCA_A:
129 *id = GPIO_ID_SYNC;
130 switch (mask) {
131 case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
132 *en = GPIO_SYNC_HSYNC_A;
133 return true;
134 case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
135 *en = GPIO_SYNC_VSYNC_A;
136 return true;
137 default:
138 BREAK_TO_DEBUGGER();
139 return false;
140 }
141 break;
142 /* mmDC_GPIO_GENLK_MASK */
143 case mmDC_GPIO_GENLK_A:
144 *id = GPIO_ID_GSL;
145 switch (mask) {
146 case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
147 *en = GPIO_GSL_GENLOCK_CLOCK;
148 return true;
149 case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
150 *en = GPIO_GSL_GENLOCK_VSYNC;
151 return true;
152 case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
153 *en = GPIO_GSL_SWAPLOCK_A;
154 return true;
155 case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
156 *en = GPIO_GSL_SWAPLOCK_B;
157 return true;
158 default:
159 BREAK_TO_DEBUGGER();
160 return false;
161 }
162 break;
163 /* GPIOPAD */
164 case mmGPIOPAD_A:
165 *id = GPIO_ID_GPIO_PAD;
166 *en = index_from_vector(mask);
167 return (*en <= GPIO_GPIO_PAD_MAX);
168 /* DDC */
169 /* we don't care about the GPIO_ID for the DDC lines here:
170 * the DDC handle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
171 * directly in its create method */
172 case mmDC_GPIO_DDC1_A:
173 *en = GPIO_DDC_LINE_DDC1;
174 return true;
175 case mmDC_GPIO_DDC2_A:
176 *en = GPIO_DDC_LINE_DDC2;
177 return true;
178 case mmDC_GPIO_DDC3_A:
179 *en = GPIO_DDC_LINE_DDC3;
180 return true;
181 case mmDC_GPIO_DDC4_A:
182 *en = GPIO_DDC_LINE_DDC4;
183 return true;
184 case mmDC_GPIO_DDC5_A:
185 *en = GPIO_DDC_LINE_DDC5;
186 return true;
187 case mmDC_GPIO_DDC6_A:
188 *en = GPIO_DDC_LINE_DDC6;
189 return true;
190 case mmDC_GPIO_DDCVGA_A:
191 *en = GPIO_DDC_LINE_DDC_VGA;
192 return true;
193 /* GPIO_I2CPAD */
194 case mmDC_GPIO_I2CPAD_A:
195 *en = GPIO_DDC_LINE_I2C_PAD;
196 return true;
197 /* Not implemented */
198 case mmDC_GPIO_PWRSEQ_A:
199 case mmDC_GPIO_PAD_STRENGTH_1:
200 case mmDC_GPIO_PAD_STRENGTH_2:
201 case mmDC_GPIO_DEBUG:
202 return false;
203 /* UNEXPECTED */
204 default:
205 BREAK_TO_DEBUGGER();
206 return false;
207 }
208}
209
210static bool id_to_offset(
211 enum gpio_id id,
212 uint32_t en,
213 struct gpio_pin_info *info)
214{
215 bool result = true;
216
217 switch (id) {
218 case GPIO_ID_DDC_DATA:
219 info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
220 switch (en) {
221 case GPIO_DDC_LINE_DDC1:
222 info->offset = mmDC_GPIO_DDC1_A;
223 break;
224 case GPIO_DDC_LINE_DDC2:
225 info->offset = mmDC_GPIO_DDC2_A;
226 break;
227 case GPIO_DDC_LINE_DDC3:
228 info->offset = mmDC_GPIO_DDC3_A;
229 break;
230 case GPIO_DDC_LINE_DDC4:
231 info->offset = mmDC_GPIO_DDC4_A;
232 break;
233 case GPIO_DDC_LINE_DDC5:
234 info->offset = mmDC_GPIO_DDC5_A;
235 break;
236 case GPIO_DDC_LINE_DDC6:
237 info->offset = mmDC_GPIO_DDC6_A;
238 break;
239 case GPIO_DDC_LINE_DDC_VGA:
240 info->offset = mmDC_GPIO_DDCVGA_A;
241 break;
242 case GPIO_DDC_LINE_I2C_PAD:
243 info->offset = mmDC_GPIO_I2CPAD_A;
244 break;
245 default:
246 BREAK_TO_DEBUGGER();
247 result = false;
248 }
249 break;
250 case GPIO_ID_DDC_CLOCK:
251 info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
252 switch (en) {
253 case GPIO_DDC_LINE_DDC1:
254 info->offset = mmDC_GPIO_DDC1_A;
255 break;
256 case GPIO_DDC_LINE_DDC2:
257 info->offset = mmDC_GPIO_DDC2_A;
258 break;
259 case GPIO_DDC_LINE_DDC3:
260 info->offset = mmDC_GPIO_DDC3_A;
261 break;
262 case GPIO_DDC_LINE_DDC4:
263 info->offset = mmDC_GPIO_DDC4_A;
264 break;
265 case GPIO_DDC_LINE_DDC5:
266 info->offset = mmDC_GPIO_DDC5_A;
267 break;
268 case GPIO_DDC_LINE_DDC6:
269 info->offset = mmDC_GPIO_DDC6_A;
270 break;
271 case GPIO_DDC_LINE_DDC_VGA:
272 info->offset = mmDC_GPIO_DDCVGA_A;
273 break;
274 case GPIO_DDC_LINE_I2C_PAD:
275 info->offset = mmDC_GPIO_I2CPAD_A;
276 break;
277 default:
278 BREAK_TO_DEBUGGER();
279 result = false;
280 }
281 break;
282 case GPIO_ID_GENERIC:
283 info->offset = mmDC_GPIO_GENERIC_A;
284 switch (en) {
285 case GPIO_GENERIC_A:
286 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
287 break;
288 case GPIO_GENERIC_B:
289 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
290 break;
291 case GPIO_GENERIC_C:
292 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
293 break;
294 case GPIO_GENERIC_D:
295 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
296 break;
297 case GPIO_GENERIC_E:
298 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
299 break;
300 case GPIO_GENERIC_F:
301 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
302 break;
303 case GPIO_GENERIC_G:
304 info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
305 break;
306 default:
307 BREAK_TO_DEBUGGER();
308 result = false;
309 }
310 break;
311 case GPIO_ID_HPD:
312 info->offset = mmDC_GPIO_HPD_A;
313 switch (en) {
314 case GPIO_HPD_1:
315 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
316 break;
317 case GPIO_HPD_2:
318 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
319 break;
320 case GPIO_HPD_3:
321 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
322 break;
323 case GPIO_HPD_4:
324 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
325 break;
326 case GPIO_HPD_5:
327 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
328 break;
329 case GPIO_HPD_6:
330 info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
331 break;
332 default:
333 BREAK_TO_DEBUGGER();
334 result = false;
335 }
336 break;
337 case GPIO_ID_SYNC:
338 switch (en) {
339 case GPIO_SYNC_HSYNC_A:
340 info->offset = mmDC_GPIO_SYNCA_A;
341 info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
342 break;
343 case GPIO_SYNC_VSYNC_A:
344 info->offset = mmDC_GPIO_SYNCA_A;
345 info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
346 break;
347 case GPIO_SYNC_HSYNC_B:
348 case GPIO_SYNC_VSYNC_B:
349 default:
350 BREAK_TO_DEBUGGER();
351 result = false;
352 }
353 break;
354 case GPIO_ID_GSL:
355 switch (en) {
356 case GPIO_GSL_GENLOCK_CLOCK:
357 info->offset = mmDC_GPIO_GENLK_A;
358 info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
359 break;
360 case GPIO_GSL_GENLOCK_VSYNC:
361 info->offset = mmDC_GPIO_GENLK_A;
362 info->mask =
363 DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
364 break;
365 case GPIO_GSL_SWAPLOCK_A:
366 info->offset = mmDC_GPIO_GENLK_A;
367 info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
368 break;
369 case GPIO_GSL_SWAPLOCK_B:
370 info->offset = mmDC_GPIO_GENLK_A;
371 info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
372 break;
373 default:
374 BREAK_TO_DEBUGGER();
375 result = false;
376 }
377 break;
378 case GPIO_ID_GPIO_PAD:
379 info->offset = mmGPIOPAD_A;
380 info->mask = (1 << en);
381 result = (info->mask <= GPIO_GPIO_PAD_MAX);
382 break;
383 case GPIO_ID_VIP_PAD:
384 default:
385 BREAK_TO_DEBUGGER();
386 result = false;
387 }
388
389 if (result) {
390 info->offset_y = info->offset + 2;
391 info->offset_en = info->offset + 1;
392 info->offset_mask = info->offset - 1;
393
394 info->mask_y = info->mask;
395 info->mask_en = info->mask;
396 info->mask_mask = info->mask;
397 }
398
399 return result;
400}
401
402static const struct hw_translate_funcs funcs = {
403 .offset_to_id = offset_to_id,
404 .id_to_offset = id_to_offset,
405};
406
407void dal_hw_translate_dce80_init(
408 struct hw_translate *translate)
409{
410 translate->funcs = &funcs;
411}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
new file mode 100644
index 000000000000..374f2f3282a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_TRANSLATE_DCE80_H__
27#define __DAL_HW_TRANSLATE_DCE80_H__
28
29void dal_hw_translate_dce80_init(
30 struct hw_translate *tr);
31
32#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
new file mode 100644
index 000000000000..9c4a56c738c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -0,0 +1,150 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
27#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
28
29#include "gpio_regs.h"
30
31/****************************** new register headers */
32/*** following in header */
33
34#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \
35 .type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\
36 .type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\
37 .type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT
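/* example expansion: DDC_GPIO_REG_LIST_ENTRY(A, DATA, 1) becomes
 * .A_reg = REG(DC_GPIO_DDC1_A),
 * .A_mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK,
 * .A_shift = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT */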
38
39#define DDC_GPIO_REG_LIST(cd,id) \
40 {\
41 DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
42 DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\
43 DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\
44 DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\
45 }
46
47#define DDC_REG_LIST(cd,id) \
48 DDC_GPIO_REG_LIST(cd,id),\
49 .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
50
51#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
52 .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
53 .type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\
54 .type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT
55
56#define DDC_GPIO_VGA_REG_LIST(cd) \
57 {\
58 DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\
59 DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\
60 DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\
61 DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\
62 }
63
64#define DDC_VGA_REG_LIST(cd) \
65 DDC_GPIO_VGA_REG_LIST(cd),\
66 .ddc_setup = mmDC_I2C_DDCVGA_SETUP
67
68#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \
69 .type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\
70 .type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\
71 .type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT
72
73#define DDC_GPIO_I2C_REG_LIST(cd) \
74 {\
75 DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\
76 DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\
77 DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\
78 DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\
79 }
80
81#define DDC_I2C_REG_LIST(cd) \
82 DDC_GPIO_I2C_REG_LIST(cd),\
83 .ddc_setup = 0
84
85#define DDC_MASK_SH_LIST(mask_sh) \
86 SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
87 SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
88 SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
89 SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
90 SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
91 SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
92 SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
93 SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
94
95
96struct ddc_registers {
97 struct gpio_registers gpio;
98 uint32_t ddc_setup;
99};
100
101struct ddc_sh_mask {
102 /* i2c_dd_setup */
103 uint32_t DC_I2C_DDC1_ENABLE;
104 uint32_t DC_I2C_DDC1_EDID_DETECT_ENABLE;
105 uint32_t DC_I2C_DDC1_EDID_DETECT_MODE;
106 /* ddc1_mask */
107 uint32_t DC_GPIO_DDC1DATA_PD_EN;
108 uint32_t DC_GPIO_DDC1CLK_PD_EN;
109 uint32_t AUX_PAD1_MODE;
110 /* i2cpad_mask */
111 uint32_t DC_GPIO_SDA_PD_DIS;
112 uint32_t DC_GPIO_SCL_PD_DIS;
113};
114
115
116
117/*** following in dc_resource */
118
119#define ddc_data_regs(id) \
120{\
121 DDC_REG_LIST(DATA,id)\
122}
123
124#define ddc_clk_regs(id) \
125{\
126 DDC_REG_LIST(CLK,id)\
127}
128
129#define ddc_vga_data_regs \
130{\
131 DDC_VGA_REG_LIST(DATA)\
132}
133
134#define ddc_vga_clk_regs \
135{\
136 DDC_VGA_REG_LIST(CLK)\
137}
138
139#define ddc_i2c_data_regs \
140{\
141 DDC_I2C_REG_LIST(SDA)\
142}
143
144#define ddc_i2c_clk_regs \
145{\
146 DDC_I2C_REG_LIST(SCL)\
147}
148
149
150#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
new file mode 100644
index 000000000000..26695b963c58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/*
27 * Pre-requisites: headers required by header of this unit
28 */
29
30#include "dm_services.h"
31#include "include/gpio_types.h"
32#include "../hw_factory.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "../hw_gpio.h"
39#include "../hw_ddc.h"
40#include "../hw_hpd.h"
41
42/* function table */
43static const struct hw_factory_funcs funcs = {
44 .create_ddc_data = NULL,
45 .create_ddc_clock = NULL,
46 .create_generic = NULL,
47 .create_hpd = NULL,
48 .create_sync = NULL,
49 .create_gsl = NULL,
50};
51
52void dal_hw_factory_diag_fpga_init(struct hw_factory *factory)
53{
54 factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
55 factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
56 factory->number_of_pins[GPIO_ID_GENERIC] = 7;
57 factory->number_of_pins[GPIO_ID_HPD] = 6;
58 factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
59 factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
60 factory->number_of_pins[GPIO_ID_SYNC] = 2;
61 factory->number_of_pins[GPIO_ID_GSL] = 4;
62 factory->funcs = &funcs;
63}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
new file mode 100644
index 000000000000..8a74f6adb8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2013-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
27#define __DAL_HW_FACTORY_DIAG_FPGA_H__
28
29/* Initialize HW factory function pointers and pin info */
30void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
31
32#endif /* __DAL_HW_FACTORY_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
new file mode 100644
index 000000000000..bf9068846927
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2013-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/gpio_types.h"
28
29#include "../hw_translate.h"
30
31/* function table */
32static const struct hw_translate_funcs funcs = {
33 .offset_to_id = NULL,
34 .id_to_offset = NULL,
35};
36
37void dal_hw_translate_diag_fpga_init(struct hw_translate *tr)
38{
39 tr->funcs = &funcs;
40}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
new file mode 100644
index 000000000000..4f053241fe96
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2013-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_TRANSLATE_DIAG_FPGA_H__
27#define __DAL_HW_TRANSLATE_DIAG_FPGA_H__
28
29struct hw_translate;
30
31/* Initialize Hw translate function pointers */
32void dal_hw_translate_diag_fpga_init(struct hw_translate *tr);
33
34#endif /* __DAL_HW_TRANSLATE_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
new file mode 100644
index 000000000000..d42eb3de2ea4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/*
27 * Pre-requisites: headers required by header of this unit
28 */
29
30#include "dm_services.h"
31
32#include "include/gpio_interface.h"
33#include "include/gpio_service_interface.h"
34#include "hw_gpio.h"
35#include "hw_translate.h"
36#include "hw_factory.h"
37#include "gpio_service.h"
38
39/*
40 * Post-requisites: headers required by this unit
41 */
42
43/*
44 * This unit
45 */
46
47/*
48 * @brief
49 * Public API
50 */
51
52enum gpio_result dal_gpio_open(
53 struct gpio *gpio,
54 enum gpio_mode mode)
55{
56 return dal_gpio_open_ex(gpio, mode);
57}
58
59enum gpio_result dal_gpio_open_ex(
60 struct gpio *gpio,
61 enum gpio_mode mode)
62{
63 if (gpio->pin) {
64 ASSERT_CRITICAL(false);
65 return GPIO_RESULT_ALREADY_OPENED;
66 }
67
68 gpio->mode = mode;
69
70 return dal_gpio_service_open(
71 gpio->service, gpio->id, gpio->en, mode, &gpio->pin);
72}
73
74enum gpio_result dal_gpio_get_value(
75 const struct gpio *gpio,
76 uint32_t *value)
77{
78 if (!gpio->pin) {
79 BREAK_TO_DEBUGGER();
80 return GPIO_RESULT_NULL_HANDLE;
81 }
82
83 return gpio->pin->funcs->get_value(gpio->pin, value);
84}
85
86enum gpio_result dal_gpio_set_value(
87 const struct gpio *gpio,
88 uint32_t value)
89{
90 if (!gpio->pin) {
91 BREAK_TO_DEBUGGER();
92 return GPIO_RESULT_NULL_HANDLE;
93 }
94
95 return gpio->pin->funcs->set_value(gpio->pin, value);
96}
97
98enum gpio_mode dal_gpio_get_mode(
99 const struct gpio *gpio)
100{
101 return gpio->mode;
102}
103
104enum gpio_result dal_gpio_change_mode(
105 struct gpio *gpio,
106 enum gpio_mode mode)
107{
108 if (!gpio->pin) {
109 BREAK_TO_DEBUGGER();
110 return GPIO_RESULT_NULL_HANDLE;
111 }
112
113 return gpio->pin->funcs->change_mode(gpio->pin, mode);
114}
115
116enum gpio_id dal_gpio_get_id(
117 const struct gpio *gpio)
118{
119 return gpio->id;
120}
121
122uint32_t dal_gpio_get_enum(
123 const struct gpio *gpio)
124{
125 return gpio->en;
126}
127
128enum gpio_result dal_gpio_set_config(
129 struct gpio *gpio,
130 const struct gpio_config_data *config_data)
131{
132 if (!gpio->pin) {
133 BREAK_TO_DEBUGGER();
134 return GPIO_RESULT_NULL_HANDLE;
135 }
136
137 return gpio->pin->funcs->set_config(gpio->pin, config_data);
138}
139
140enum gpio_result dal_gpio_get_pin_info(
141 const struct gpio *gpio,
142 struct gpio_pin_info *pin_info)
143{
144 return gpio->service->translate.funcs->id_to_offset(
145 gpio->id, gpio->en, pin_info) ?
146 GPIO_RESULT_OK : GPIO_RESULT_INVALID_DATA;
147}
148
149enum sync_source dal_gpio_get_sync_source(
150 const struct gpio *gpio)
151{
152 switch (gpio->id) {
153 case GPIO_ID_GENERIC:
154 switch (gpio->en) {
155 case GPIO_GENERIC_A:
156 return SYNC_SOURCE_IO_GENERIC_A;
157 case GPIO_GENERIC_B:
158 return SYNC_SOURCE_IO_GENERIC_B;
159 case GPIO_GENERIC_C:
160 return SYNC_SOURCE_IO_GENERIC_C;
161 case GPIO_GENERIC_D:
162 return SYNC_SOURCE_IO_GENERIC_D;
163 case GPIO_GENERIC_E:
164 return SYNC_SOURCE_IO_GENERIC_E;
165 case GPIO_GENERIC_F:
166 return SYNC_SOURCE_IO_GENERIC_F;
167 default:
168 return SYNC_SOURCE_NONE;
169 }
170 break;
171 case GPIO_ID_SYNC:
172 switch (gpio->en) {
173 case GPIO_SYNC_HSYNC_A:
174 return SYNC_SOURCE_IO_HSYNC_A;
175 case GPIO_SYNC_VSYNC_A:
176 return SYNC_SOURCE_IO_VSYNC_A;
177 case GPIO_SYNC_HSYNC_B:
178 return SYNC_SOURCE_IO_HSYNC_B;
179 case GPIO_SYNC_VSYNC_B:
180 return SYNC_SOURCE_IO_VSYNC_B;
181 default:
182 return SYNC_SOURCE_NONE;
183 }
184 break;
185 case GPIO_ID_HPD:
186 switch (gpio->en) {
187 case GPIO_HPD_1:
188 return SYNC_SOURCE_IO_HPD1;
189 case GPIO_HPD_2:
190 return SYNC_SOURCE_IO_HPD2;
191 default:
192 return SYNC_SOURCE_NONE;
193 }
194 break;
195 case GPIO_ID_GSL:
196 switch (gpio->en) {
197 case GPIO_GSL_GENLOCK_CLOCK:
198 return SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK;
199 case GPIO_GSL_GENLOCK_VSYNC:
200 return SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC;
201 case GPIO_GSL_SWAPLOCK_A:
202 return SYNC_SOURCE_GSL_IO_SWAPLOCK_A;
203 case GPIO_GSL_SWAPLOCK_B:
204 return SYNC_SOURCE_GSL_IO_SWAPLOCK_B;
205 default:
206 return SYNC_SOURCE_NONE;
207 }
208 break;
209 default:
210 return SYNC_SOURCE_NONE;
211 }
212}
213
214enum gpio_pin_output_state dal_gpio_get_output_state(
215 const struct gpio *gpio)
216{
217 return gpio->output_state;
218}
219
220void dal_gpio_close(
221 struct gpio *gpio)
222{
223 if (!gpio)
224 return;
225
226 dal_gpio_service_close(gpio->service, &gpio->pin);
227
228 gpio->mode = GPIO_MODE_UNKNOWN;
229}
230
231/*
232 * @brief
233 * Creation and destruction
234 */
235
236struct gpio *dal_gpio_create(
237 struct gpio_service *service,
238 enum gpio_id id,
239 uint32_t en,
240 enum gpio_pin_output_state output_state)
241{
242 struct gpio *gpio = dm_alloc(sizeof(struct gpio));
243
244 if (!gpio) {
245 ASSERT_CRITICAL(false);
246 return NULL;
247 }
248
249 gpio->service = service;
250 gpio->pin = NULL;
251 gpio->id = id;
252 gpio->en = en;
253 gpio->mode = GPIO_MODE_UNKNOWN;
254 gpio->output_state = output_state;
255
256 return gpio;
257}
258
259void dal_gpio_destroy(
260 struct gpio **gpio)
261{
262 if (!gpio || !*gpio) {
263 ASSERT_CRITICAL(false);
264 return;
265 }
266
267 dal_gpio_close(*gpio);
268
269 dm_free(*gpio);
270
271 *gpio = NULL;
272}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
new file mode 100644
index 000000000000..5c5925299f8d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
27#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
28
29struct gpio_registers {
30 uint32_t MASK_reg;
31 uint32_t MASK_mask;
32 uint32_t MASK_shift;
33 uint32_t A_reg;
34 uint32_t A_mask;
35 uint32_t A_shift;
36 uint32_t EN_reg;
37 uint32_t EN_mask;
38 uint32_t EN_shift;
39 uint32_t Y_reg;
40 uint32_t Y_mask;
41 uint32_t Y_shift;
42};
43
44
45#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
new file mode 100644
index 000000000000..eeb1cd0f75a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -0,0 +1,592 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/*
27 * Pre-requisites: headers required by header of this unit
28 */
29
30#include "dm_services.h"
31#include "include/gpio_interface.h"
32#include "include/gpio_service_interface.h"
33#include "hw_translate.h"
34#include "hw_factory.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "gpio_service.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46#include "hw_gpio.h"
47
48/*
49 * @brief
50 * Public API.
51 */
52
53struct gpio_service *dal_gpio_service_create(
54 enum dce_version dce_version_major,
55 enum dce_version dce_version_minor,
56 struct dc_context *ctx)
57{
58 struct gpio_service *service;
59
60 uint32_t index_of_id;
61
62 service = dm_alloc(sizeof(struct gpio_service));
63
64 if (!service) {
65 BREAK_TO_DEBUGGER();
66 return NULL;
67 }
68
69 if (!dal_hw_translate_init(&service->translate, dce_version_major,
70 dce_version_minor)) {
71 BREAK_TO_DEBUGGER();
72 goto failure_1;
73 }
74
75 if (!dal_hw_factory_init(&service->factory, dce_version_major,
76 dce_version_minor)) {
77 BREAK_TO_DEBUGGER();
78 goto failure_1;
79 }
80
81 /* allocate and initialize busyness (pin in use) storage */
82 {
83 const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
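		/* one busy bit per pin; each gpio_id gets a bitmap rounded
		 * up to whole uint32_t slots (bits_per_uint == 32) */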
84
85 index_of_id = 0;
86 service->ctx = ctx;
87
88 do {
89 uint32_t number_of_bits =
90 service->factory.number_of_pins[index_of_id];
91
92 uint32_t number_of_uints =
93 (number_of_bits + bits_per_uint - 1) /
94 bits_per_uint;
95
96 uint32_t *slot;
97
98 if (number_of_bits) {
99 uint32_t index_of_uint = 0;
100
101 slot = dm_alloc(number_of_uints * sizeof(uint32_t));
102
103 if (!slot) {
104 BREAK_TO_DEBUGGER();
105 goto failure_2;
106 }
107
108 do {
109 slot[index_of_uint] = 0;
110
111 ++index_of_uint;
112 } while (index_of_uint < number_of_uints);
113 } else
114 slot = NULL;
115
116 service->busyness[index_of_id] = slot;
117
118 ++index_of_id;
119 } while (index_of_id < GPIO_ID_COUNT);
120 }
121
122 return service;
123
124failure_2:
125 while (index_of_id) {
126 uint32_t *slot;
127
128 --index_of_id;
129
130 slot = service->busyness[index_of_id];
131
132 if (slot)
133 dm_free(slot);
134 };
135
136failure_1:
137 dm_free(service);
138
139 return NULL;
140}
141
142struct gpio *dal_gpio_service_create_irq(
143 struct gpio_service *service,
144 uint32_t offset,
145 uint32_t mask)
146{
147 enum gpio_id id;
148 uint32_t en;
149
150 if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
151 ASSERT_CRITICAL(false);
152 return NULL;
153 }
154
155 return dal_gpio_create_irq(service, id, en);
156}
157
158void dal_gpio_service_destroy(
159 struct gpio_service **ptr)
160{
161 if (!ptr || !*ptr) {
162 BREAK_TO_DEBUGGER();
163 return;
164 }
165
166 /* free busyness storage */
167 {
168 uint32_t index_of_id = 0;
169
170 do {
171 uint32_t *slot = (*ptr)->busyness[index_of_id];
172
173 if (slot)
174 dm_free(slot);
175
176 ++index_of_id;
177 } while (index_of_id < GPIO_ID_COUNT);
178 }
179
180 dm_free(*ptr);
181
182 *ptr = NULL;
183}
184
185/*
186 * @brief
187 * Private API.
188 */
189
190static bool is_pin_busy(
191 const struct gpio_service *service,
192 enum gpio_id id,
193 uint32_t en)
194{
195 const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
196
197 const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
198
199 return 0 != (*slot & (1 << (en % bits_per_uint)));
200}
201
202static void set_pin_busy(
203 struct gpio_service *service,
204 enum gpio_id id,
205 uint32_t en)
206{
207 const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
208
209 service->busyness[id][en / bits_per_uint] |=
210 (1 << (en % bits_per_uint));
211}
212
213static void set_pin_free(
214 struct gpio_service *service,
215 enum gpio_id id,
216 uint32_t en)
217{
218 const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
219
220 service->busyness[id][en / bits_per_uint] &=
221 ~(1 << (en % bits_per_uint));
222}
223
224enum gpio_result dal_gpio_service_open(
225 struct gpio_service *service,
226 enum gpio_id id,
227 uint32_t en,
228 enum gpio_mode mode,
229 struct hw_gpio_pin **ptr)
230{
231 struct hw_gpio_pin *pin;
232
233 if (!service->busyness[id]) {
234 ASSERT_CRITICAL(false);
235 return GPIO_RESULT_OPEN_FAILED;
236 }
237
238 if (is_pin_busy(service, id, en)) {
239 ASSERT_CRITICAL(false);
240 return GPIO_RESULT_DEVICE_BUSY;
241 }
242
243 switch (id) {
244 case GPIO_ID_DDC_DATA:
245 pin = service->factory.funcs->create_ddc_data(
246 service->ctx, id, en);
247 service->factory.funcs->define_ddc_registers(pin, en);
248 break;
249 case GPIO_ID_DDC_CLOCK:
250 pin = service->factory.funcs->create_ddc_clock(
251 service->ctx, id, en);
252 service->factory.funcs->define_ddc_registers(pin, en);
253 break;
254 case GPIO_ID_GENERIC:
255 pin = service->factory.funcs->create_generic(
256 service->ctx, id, en);
257 break;
258 case GPIO_ID_HPD:
259 pin = service->factory.funcs->create_hpd(
260 service->ctx, id, en);
261 service->factory.funcs->define_hpd_registers(pin, en);
262 break;
263 case GPIO_ID_SYNC:
264 pin = service->factory.funcs->create_sync(
265 service->ctx, id, en);
266 break;
267 case GPIO_ID_GSL:
268 pin = service->factory.funcs->create_gsl(
269 service->ctx, id, en);
270 break;
271 default:
272 ASSERT_CRITICAL(false);
273 return GPIO_RESULT_NON_SPECIFIC_ERROR;
274 }
275
276 if (!pin) {
277 ASSERT_CRITICAL(false);
278 return GPIO_RESULT_NON_SPECIFIC_ERROR;
279 }
280
281 if (!pin->funcs->open(pin, mode)) {
282 ASSERT_CRITICAL(false);
283 dal_gpio_service_close(service, &pin);
284 return GPIO_RESULT_OPEN_FAILED;
285 }
286
287 set_pin_busy(service, id, en);
288 *ptr = pin;
289 return GPIO_RESULT_OK;
290}
291
292void dal_gpio_service_close(
293 struct gpio_service *service,
294 struct hw_gpio_pin **ptr)
295{
296 struct hw_gpio_pin *pin;
297
298 if (!ptr) {
299 ASSERT_CRITICAL(false);
300 return;
301 }
302
303 pin = *ptr;
304
305 if (pin) {
306 set_pin_free(service, pin->id, pin->en);
307
308 pin->funcs->close(pin);
309
310 pin->funcs->destroy(ptr);
311 }
312}
313
314
315enum dc_irq_source dal_irq_get_source(
316 const struct gpio *irq)
317{
318 enum gpio_id id = dal_gpio_get_id(irq);
319
320 switch (id) {
321 case GPIO_ID_HPD:
322 return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1 +
323 dal_gpio_get_enum(irq));
324 case GPIO_ID_GPIO_PAD:
325 return (enum dc_irq_source)(DC_IRQ_SOURCE_GPIOPAD0 +
326 dal_gpio_get_enum(irq));
327 default:
328 return DC_IRQ_SOURCE_INVALID;
329 }
330}
331
332enum dc_irq_source dal_irq_get_rx_source(
333 const struct gpio *irq)
334{
335 enum gpio_id id = dal_gpio_get_id(irq);
336
337 switch (id) {
338 case GPIO_ID_HPD:
339 return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1RX +
340 dal_gpio_get_enum(irq));
341 default:
342 return DC_IRQ_SOURCE_INVALID;
343 }
344}
345
346enum gpio_result dal_irq_setup_hpd_filter(
347 struct gpio *irq,
348 struct gpio_hpd_config *config)
349{
350 struct gpio_config_data config_data;
351
352 if (!config)
353 return GPIO_RESULT_INVALID_DATA;
354
355 config_data.type = GPIO_CONFIG_TYPE_HPD;
356 config_data.config.hpd = *config;
357
358 return dal_gpio_set_config(irq, &config_data);
359}
360
361/*
362 * @brief
363 * Creation and destruction
364 */
365
366struct gpio *dal_gpio_create_irq(
367 struct gpio_service *service,
368 enum gpio_id id,
369 uint32_t en)
370{
371 struct gpio *irq;
372
373 switch (id) {
374 case GPIO_ID_HPD:
375 case GPIO_ID_GPIO_PAD:
376 break;
377 default:
378 ASSERT_CRITICAL(false);
379 return NULL;
380 }
381
382 irq = dal_gpio_create(
383 service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
384
385 if (irq)
386 return irq;
387
388 ASSERT_CRITICAL(false);
389 return NULL;
390}
391
392void dal_gpio_destroy_irq(
393 struct gpio **irq)
394{
395 if (!irq || !*irq) {
396 ASSERT_CRITICAL(false);
397 return;
398 }
399
400 dal_gpio_close(*irq);
401 dal_gpio_destroy(irq);
402 dm_free(*irq);
403
404 *irq = NULL;
405}
406
407struct ddc *dal_gpio_create_ddc(
408 struct gpio_service *service,
409 uint32_t offset,
410 uint32_t mask,
411 struct gpio_ddc_hw_info *info)
412{
413 enum gpio_id id;
414 uint32_t en;
415 struct ddc *ddc;
416
417 if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en))
418 return NULL;
419
420 ddc = dm_alloc(sizeof(struct ddc));
421
422 if (!ddc) {
423 BREAK_TO_DEBUGGER();
424 return NULL;
425 }
426
427 ddc->pin_data = dal_gpio_create(
428 service, GPIO_ID_DDC_DATA, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
429
430 if (!ddc->pin_data) {
431 BREAK_TO_DEBUGGER();
432 goto failure_1;
433 }
434
435 ddc->pin_clock = dal_gpio_create(
436 service, GPIO_ID_DDC_CLOCK, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
437
438 if (!ddc->pin_clock) {
439 BREAK_TO_DEBUGGER();
440 goto failure_2;
441 }
442
443 ddc->hw_info = *info;
444
445 ddc->ctx = service->ctx;
446
447 return ddc;
448
449failure_2:
450 dal_gpio_destroy(&ddc->pin_data);
451
452failure_1:
453 dm_free(ddc);
454
455 return NULL;
456}
457
458void dal_gpio_destroy_ddc(
459 struct ddc **ddc)
460{
461 if (!ddc || !*ddc) {
462 BREAK_TO_DEBUGGER();
463 return;
464 }
465
466 dal_ddc_close(*ddc);
467 dal_gpio_destroy(&(*ddc)->pin_data);
468 dal_gpio_destroy(&(*ddc)->pin_clock);
469 dm_free(*ddc);
470
471 *ddc = NULL;
472}
473
474enum gpio_result dal_ddc_open(
475 struct ddc *ddc,
476 enum gpio_mode mode,
477 enum gpio_ddc_config_type config_type)
478{
479 enum gpio_result result;
480
481 struct gpio_config_data config_data;
482 struct hw_gpio *hw_data;
483 struct hw_gpio *hw_clock;
484
485 result = dal_gpio_open_ex(ddc->pin_data, mode);
486
487 if (result != GPIO_RESULT_OK) {
488 BREAK_TO_DEBUGGER();
489 return result;
490 }
491
492 result = dal_gpio_open_ex(ddc->pin_clock, mode);
493
494 if (result != GPIO_RESULT_OK) {
495 BREAK_TO_DEBUGGER();
496 goto failure;
497 }
498
499 /* DDC clock and data pins should belong
500 * to the same DDC block id;
501 * we use the data pin to set the pad mode. */
502
503 if (mode == GPIO_MODE_INPUT)
504 /* this path is used from detect_sink_type,
505 * where we need an extra delay */
506 config_data.type = GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE;
507 else
508 config_data.type = GPIO_CONFIG_TYPE_DDC;
509
510 config_data.config.ddc.type = config_type;
511
512 hw_data = FROM_HW_GPIO_PIN(ddc->pin_data->pin);
513 hw_clock = FROM_HW_GPIO_PIN(ddc->pin_clock->pin);
514
515 config_data.config.ddc.data_en_bit_present = hw_data->store.en != 0;
516 config_data.config.ddc.clock_en_bit_present = hw_clock->store.en != 0;
517
518 result = dal_gpio_set_config(ddc->pin_data, &config_data);
519
520 if (result == GPIO_RESULT_OK)
521 return result;
522
523 BREAK_TO_DEBUGGER();
524
525 dal_gpio_close(ddc->pin_clock);
526
527failure:
528 dal_gpio_close(ddc->pin_data);
529
530 return result;
531}
532
533enum gpio_result dal_ddc_change_mode(
534 struct ddc *ddc,
535 enum gpio_mode mode)
536{
537 enum gpio_result result;
538
539 enum gpio_mode original_mode =
540 dal_gpio_get_mode(ddc->pin_data);
541
542 result = dal_gpio_change_mode(ddc->pin_data, mode);
543
544 /* [anaumov] DAL2 code returns GPIO_RESULT_NON_SPECIFIC_ERROR
545 * in case of failure;
546 * a failed mode change does not restore the previous mode,
547 * so we must explicitly restore the original mode here */
548
549 if (result != GPIO_RESULT_OK)
550 goto failure;
551
552 result = dal_gpio_change_mode(ddc->pin_clock, mode);
553
554 if (result == GPIO_RESULT_OK)
555 return result;
556
557 dal_gpio_change_mode(ddc->pin_clock, original_mode);
558
559failure:
560 dal_gpio_change_mode(ddc->pin_data, original_mode);
561
562 return result;
563}
564
565enum gpio_ddc_line dal_ddc_get_line(
566 const struct ddc *ddc)
567{
568 return (enum gpio_ddc_line)dal_gpio_get_enum(ddc->pin_data);
569}
570
571enum gpio_result dal_ddc_set_config(
572 struct ddc *ddc,
573 enum gpio_ddc_config_type config_type)
574{
575 struct gpio_config_data config_data;
576
577 config_data.type = GPIO_CONFIG_TYPE_DDC;
578
579 config_data.config.ddc.type = config_type;
580 config_data.config.ddc.data_en_bit_present = false;
581 config_data.config.ddc.clock_en_bit_present = false;
582
583 return dal_gpio_set_config(ddc->pin_data, &config_data);
584}
585
586void dal_ddc_close(
587 struct ddc *ddc)
588{
589 dal_gpio_close(ddc->pin_clock);
590 dal_gpio_close(ddc->pin_data);
591}
592
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
new file mode 100644
index 000000000000..c7f3081f59cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GPIO_SERVICE_H__
27#define __DAL_GPIO_SERVICE_H__
28
29struct hw_translate;
30struct hw_factory;
31
32struct gpio_service {
33 struct dc_context *ctx;
34 struct hw_translate translate;
35 struct hw_factory factory;
36 /*
37 * @brief
38 * Busyness storage.
39 * For each member of 'enum gpio_id',
40 * store an array of bits (packed into uint32_t slots);
41 * each bit is indexed by the pin's 'en' value */
42 uint32_t *busyness[GPIO_ID_COUNT];
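	/* e.g. pin (id, en) is marked busy iff
	 * busyness[id][en / 32] & (1 << (en % 32)) is set,
	 * as done by is_pin_busy() in gpio_service.c */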
43};
44
45enum gpio_result dal_gpio_service_open(
46 struct gpio_service *service,
47 enum gpio_id id,
48 uint32_t en,
49 enum gpio_mode mode,
50 struct hw_gpio_pin **ptr);
51
52void dal_gpio_service_close(
53 struct gpio_service *service,
54 struct hw_gpio_pin **ptr);
55
56#endif
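The busyness comment above describes one bit per pin instance, packed into uint32_t slots and indexed by the 'en' value. A standalone sketch of that packing follows; the helper names are hypothetical, and the real allocation and locking in the gpio_service implementation are not shown.

/* Minimal sketch of a packed per-pin busyness bitmap; helper names are
 * hypothetical, not part of the DAL API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_SLOT 32u

static bool bitmap_is_busy(const uint32_t *slots, uint32_t en)
{
        return (slots[en / BITS_PER_SLOT] >> (en % BITS_PER_SLOT)) & 1u;
}

static void bitmap_set_busy(uint32_t *slots, uint32_t en, bool busy)
{
        uint32_t mask = 1u << (en % BITS_PER_SLOT);

        if (busy)
                slots[en / BITS_PER_SLOT] |= mask;
        else
                slots[en / BITS_PER_SLOT] &= ~mask;
}

int main(void)
{
        uint32_t slots[2] = { 0, 0 };   /* enough for 64 'en' values */

        bitmap_set_busy(slots, 37, true);
        printf("en 37 busy: %d, en 5 busy: %d\n",
               bitmap_is_busy(slots, 37), bitmap_is_busy(slots, 5));
        bitmap_set_busy(slots, 37, false);
        return 0;
}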
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
new file mode 100644
index 000000000000..dcfdd71b2304
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
27#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
28
29#include "gpio_regs.h"
30
31#define ONE_MORE_0 1
32#define ONE_MORE_1 2
33#define ONE_MORE_2 3
34#define ONE_MORE_3 4
35#define ONE_MORE_4 5
36#define ONE_MORE_5 6
37
38
39#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \
40 .type ## _reg = REG(DC_GPIO_HPD_## type),\
41 .type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\
42 .type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT
43
44#define HPD_GPIO_REG_LIST(id) \
45 {\
46 HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
47 HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\
48 HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\
49 HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\
50 }
51
52#define HPD_REG_LIST(id) \
53 HPD_GPIO_REG_LIST(ONE_MORE_ ## id), \
54 .int_status = REGI(DC_HPD_INT_STATUS, HPD, id),\
55 .toggle_filt_cntl = REGI(DC_HPD_TOGGLE_FILT_CNTL, HPD, id)
56
57#define HPD_MASK_SH_LIST(mask_sh) \
58 SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED, mask_sh),\
59 SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE, mask_sh),\
60 SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_CONNECT_INT_DELAY, mask_sh),\
61 SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_DISCONNECT_INT_DELAY, mask_sh)
62
63struct hpd_registers {
64 struct gpio_registers gpio;
65 uint32_t int_status;
66 uint32_t toggle_filt_cntl;
67};
68
69struct hpd_sh_mask {
70 /* int_status */
71 uint32_t DC_HPD_SENSE_DELAYED;
72 uint32_t DC_HPD_SENSE;
73 /* toggle_filt_cntl */
74 uint32_t DC_HPD_CONNECT_INT_DELAY;
75 uint32_t DC_HPD_DISCONNECT_INT_DELAY;
76};
77
78
79#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_ */
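HPD_REG_LIST(id) first maps the instance through the ONE_MORE_x table (so HPD instance 0 uses the DC_GPIO_HPD1 fields) and only then pastes the result into register field names. The standalone sketch below uses stringizing instead of the real register symbols so it compiles on its own; it shows why the extra level of macro indirection is needed so that the ONE_MORE_x alias expands before being pasted.

/* Standalone demonstration of the token-pasting pattern used above; the
 * field name and the stringizing are simplified stand-ins. */
#include <stdio.h>

#define ONE_MORE_0 1

/* Stringizes its argument as-is, like a direct ## paste would. */
#define FIELD_NAME(id) "DC_GPIO_HPD" #id "_MASK"
/* Extra indirection: the argument is macro-expanded first. */
#define HPD_FIELD(id) FIELD_NAME(id)

int main(void)
{
        /* Without the indirection the alias is pasted literally. */
        printf("direct:   %s\n", FIELD_NAME(ONE_MORE_0)); /* DC_GPIO_HPDONE_MORE_0_MASK */
        /* With it, ONE_MORE_0 expands to 1 before stringizing. */
        printf("indirect: %s\n", HPD_FIELD(ONE_MORE_0));  /* DC_GPIO_HPD1_MASK */
        return 0;
}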
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
new file mode 100644
index 000000000000..47e0f8f24a86
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/gpio_types.h"
29#include "hw_gpio.h"
30#include "hw_ddc.h"
31
32#include "reg_helper.h"
33#include "gpio_regs.h"
34
35
36#undef FN
37#define FN(reg_name, field_name) \
38 ddc->shifts->field_name, ddc->masks->field_name
39
40#define CTX \
41 ddc->base.base.ctx
42#define REG(reg)\
43 (ddc->regs->reg)
44
45static void destruct(
46 struct hw_ddc *pin)
47{
48 dal_hw_gpio_destruct(&pin->base);
49}
50
51static void destroy(
52 struct hw_gpio_pin **ptr)
53{
54 struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
55
56 destruct(pin);
57
58 dm_free(pin);
59
60 *ptr = NULL;
61}
62
63static enum gpio_result set_config(
64 struct hw_gpio_pin *ptr,
65 const struct gpio_config_data *config_data)
66{
67 struct hw_ddc *ddc = HW_DDC_FROM_BASE(ptr);
68 struct hw_gpio *hw_gpio = NULL;
69 uint32_t regval;
70 uint32_t ddc_data_pd_en = 0;
71 uint32_t ddc_clk_pd_en = 0;
72 uint32_t aux_pad_mode = 0;
73
74 hw_gpio = &ddc->base;
75
76 if (hw_gpio == NULL) {
77 ASSERT_CRITICAL(false);
78 return GPIO_RESULT_NULL_HANDLE;
79 }
80
81 regval = REG_GET_3(gpio.MASK_reg,
82 DC_GPIO_DDC1DATA_PD_EN, &ddc_data_pd_en,
83 DC_GPIO_DDC1CLK_PD_EN, &ddc_clk_pd_en,
84 AUX_PAD1_MODE, &aux_pad_mode);
85
86 switch (config_data->config.ddc.type) {
87 case GPIO_DDC_CONFIG_TYPE_MODE_I2C:
88 /* On plug-in, there is a transient level on the pad
89 * which must be discharged through the internal pull-down.
90 * Enable the internal pull-down; a 2.5 ms discharge time
91 * is required before AUX mode can be detected */
92 if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) {
93 if (!ddc_data_pd_en || !ddc_clk_pd_en) {
94
95 REG_SET_2(gpio.MASK_reg, regval,
96 DC_GPIO_DDC1DATA_PD_EN, 1,
97 DC_GPIO_DDC1CLK_PD_EN, 1);
98
99 if (config_data->type ==
100 GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
101 msleep(3);
102 }
103 } else {
104 uint32_t reg2;
105 uint32_t sda_pd_dis = 0;
106 uint32_t scl_pd_dis = 0;
107
108 reg2 = REG_GET_2(gpio.MASK_reg,
109 DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
110 DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
111
112 if (sda_pd_dis) {
113 REG_SET(gpio.MASK_reg, regval,
114 DC_GPIO_SDA_PD_DIS, 0);
115
116 if (config_data->type ==
117 GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
118 msleep(3);
119 }
120
121 if (!scl_pd_dis) {
122 REG_SET(gpio.MASK_reg, regval,
123 DC_GPIO_SCL_PD_DIS, 1);
124
125 if (config_data->type ==
126 GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
127 msleep(3);
128 }
129 }
130
131 if (aux_pad_mode) {
132 /* let the pins get de-asserted
133 * before setting the pad to I2C mode */
134 if (config_data->config.ddc.data_en_bit_present ||
135 config_data->config.ddc.clock_en_bit_present)
136 /* [anaumov] in DAL2, there was
137 * dc_service_delay_in_microseconds(2000); */
138 msleep(2);
139
140 /* set the I2C pad mode */
141 /* read the register again,
142 * some bits may have been changed */
143 REG_UPDATE(gpio.MASK_reg,
144 AUX_PAD1_MODE, 0);
145 }
146
147 return GPIO_RESULT_OK;
148 case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
149 /* set the AUX pad mode */
150 if (!aux_pad_mode) {
151 REG_SET(gpio.MASK_reg, regval,
152 AUX_PAD1_MODE, 1);
153 }
154
155 return GPIO_RESULT_OK;
156 case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
157 if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
158 (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
159 REG_UPDATE_3(ddc_setup,
160 DC_I2C_DDC1_ENABLE, 1,
161 DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
162 DC_I2C_DDC1_EDID_DETECT_MODE, 0);
163 return GPIO_RESULT_OK;
164 }
165 break;
166 case GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT:
167 if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
168 (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
169 REG_UPDATE_3(ddc_setup,
170 DC_I2C_DDC1_ENABLE, 1,
171 DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
172 DC_I2C_DDC1_EDID_DETECT_MODE, 1);
173 return GPIO_RESULT_OK;
174 }
175 break;
176 case GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING:
177 if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
178 (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
179 REG_UPDATE_2(ddc_setup,
180 DC_I2C_DDC1_ENABLE, 0,
181 DC_I2C_DDC1_EDID_DETECT_ENABLE, 0);
182 return GPIO_RESULT_OK;
183 }
184 break;
185 }
186
187 BREAK_TO_DEBUGGER();
188
189 return GPIO_RESULT_NON_SPECIFIC_ERROR;
190}
191
192static const struct hw_gpio_pin_funcs funcs = {
193 .destroy = destroy,
194 .open = dal_hw_gpio_open,
195 .get_value = dal_hw_gpio_get_value,
196 .set_value = dal_hw_gpio_set_value,
197 .set_config = set_config,
198 .change_mode = dal_hw_gpio_change_mode,
199 .close = dal_hw_gpio_close,
200};
201
202static bool construct(
203 struct hw_ddc *ddc,
204 enum gpio_id id,
205 uint32_t en,
206 struct dc_context *ctx)
207{
208 if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
209 ASSERT_CRITICAL(false);
210 return false;
211 }
212
213 if (!dal_hw_gpio_construct(&ddc->base, id, en, ctx)) {
214 ASSERT_CRITICAL(false);
215 return false;
216 }
217
218 ddc->base.base.funcs = &funcs;
219
220 return true;
221}
222
223struct hw_gpio_pin *dal_hw_ddc_create(
224 struct dc_context *ctx,
225 enum gpio_id id,
226 uint32_t en)
227{
228 struct hw_ddc *pin = dm_alloc(sizeof(struct hw_ddc));
229
230 if (!pin) {
231 ASSERT_CRITICAL(false);
232 return NULL;
233 }
234
235 if (construct(pin, id, en, ctx))
236 return &pin->base.base;
237
238 ASSERT_CRITICAL(false);
239
240 dm_free(pin);
241
242 return NULL;
243}
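dal_hw_ddc_create() above hands back the embedded base pointer, and the object is later released through funcs->destroy(), which also clears the caller's handle. A standalone sketch of that create/destroy-through-vtable lifetime, using simplified stand-in types rather than the DC structures:

/* Standalone sketch of the create/destroy pattern; names are stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct pin;

struct pin_funcs {
        void (*destroy)(struct pin **ptr);
};

struct pin {
        const struct pin_funcs *funcs;
        int en;
};

static void pin_destroy(struct pin **ptr)
{
        free(*ptr);
        *ptr = NULL;    /* callers cannot reuse a stale handle */
}

static const struct pin_funcs funcs = { .destroy = pin_destroy };

static struct pin *pin_create(int en)
{
        struct pin *pin = calloc(1, sizeof(*pin));

        if (!pin)
                return NULL;
        pin->funcs = &funcs;
        pin->en = en;
        return pin;
}

int main(void)
{
        struct pin *pin = pin_create(3);

        if (pin)
                pin->funcs->destroy(&pin);
        printf("handle after destroy: %p\n", (void *)pin);      /* NULL */
        return 0;
}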
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
new file mode 100644
index 000000000000..9690e2a885d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_DDC_H__
27#define __DAL_HW_DDC_H__
28
29#include "ddc_regs.h"
30
31struct hw_ddc {
32 struct hw_gpio base;
33 const struct ddc_registers *regs;
34 const struct ddc_sh_mask *shifts;
35 const struct ddc_sh_mask *masks;
36};
37
38#define HW_DDC_FROM_BASE(hw_gpio) \
39 container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_ddc, base)
40
41struct hw_gpio_pin *dal_hw_ddc_create(
42 struct dc_context *ctx,
43 enum gpio_id id,
44 uint32_t en);
45
46#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
new file mode 100644
index 000000000000..f1a6fa7391c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/gpio_types.h"
32
33/*
34 * Header of this unit
35 */
36
37#include "hw_factory.h"
38
39/*
40 * Post-requisites: headers required by this unit
41 */
42
43#include "dce80/hw_factory_dce80.h"
44
45#include "dce110/hw_factory_dce110.h"
46
47#include "diagnostics/hw_factory_diag.h"
48
49/*
50 * This unit
51 */
52
53bool dal_hw_factory_init(
54 struct hw_factory *factory,
55 enum dce_version dce_version,
56 enum dce_environment dce_environment)
57{
58 if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
59 dal_hw_factory_diag_fpga_init(factory);
60 return true;
61 }
62
63 switch (dce_version) {
64 case DCE_VERSION_8_0:
65 dal_hw_factory_dce80_init(factory);
66 return true;
67
68 case DCE_VERSION_10_0:
69 dal_hw_factory_dce110_init(factory);
70 return true;
71 case DCE_VERSION_11_0:
72 case DCE_VERSION_11_2:
73 dal_hw_factory_dce110_init(factory);
74 return true;
75 default:
76 ASSERT_CRITICAL(false);
77 return false;
78 }
79}
80
81void dal_hw_factory_destroy(
82 struct dc_context *ctx,
83 struct hw_factory **factory)
84{
85 if (!factory || !*factory) {
86 BREAK_TO_DEBUGGER();
87 return;
88 }
89
90 dm_free(*factory);
91
92 *factory = NULL;
93}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
new file mode 100644
index 000000000000..6e4dd3521935
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_FACTORY_H__
27#define __DAL_HW_FACTORY_H__
28
29struct hw_gpio_pin;
30struct hw_hpd;
31
32struct hw_factory {
33 uint32_t number_of_pins[GPIO_ID_COUNT];
34
35 const struct hw_factory_funcs {
36 struct hw_gpio_pin *(*create_ddc_data)(
37 struct dc_context *ctx,
38 enum gpio_id id,
39 uint32_t en);
40 struct hw_gpio_pin *(*create_ddc_clock)(
41 struct dc_context *ctx,
42 enum gpio_id id,
43 uint32_t en);
44 struct hw_gpio_pin *(*create_generic)(
45 struct dc_context *ctx,
46 enum gpio_id id,
47 uint32_t en);
48 struct hw_gpio_pin *(*create_hpd)(
49 struct dc_context *ctx,
50 enum gpio_id id,
51 uint32_t en);
52 struct hw_gpio_pin *(*create_sync)(
53 struct dc_context *ctx,
54 enum gpio_id id,
55 uint32_t en);
56 struct hw_gpio_pin *(*create_gsl)(
57 struct dc_context *ctx,
58 enum gpio_id id,
59 uint32_t en);
60 void (*define_hpd_registers)(
61 struct hw_gpio_pin *pin,
62 uint32_t en);
63 void (*define_ddc_registers)(
64 struct hw_gpio_pin *pin,
65 uint32_t en);
66 } *funcs;
67};
68
69bool dal_hw_factory_init(
70 struct hw_factory *factory,
71 enum dce_version dce_version,
72 enum dce_environment dce_environment);
73
74#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
new file mode 100644
index 000000000000..4cdcdfb73e5c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
@@ -0,0 +1,205 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/gpio_types.h"
28#include "hw_gpio.h"
29
30#include "reg_helper.h"
31#include "gpio_regs.h"
32
33#undef FN
34#define FN(reg_name, field_name) \
35 gpio->regs->field_name ## _shift, gpio->regs->field_name ## _mask
36
37#define CTX \
38 gpio->base.ctx
39#define REG(reg)\
40 (gpio->regs->reg)
41
42static void store_registers(
43 struct hw_gpio *gpio)
44{
45 REG_GET(MASK_reg, MASK, &gpio->store.mask);
46 REG_GET(A_reg, A, &gpio->store.a);
47 REG_GET(EN_reg, EN, &gpio->store.en);
48 /* TODO store GPIO_MUX_CONTROL if we ever use it */
49}
50
51static void restore_registers(
52 struct hw_gpio *gpio)
53{
54 REG_UPDATE(MASK_reg, MASK, gpio->store.mask);
55 REG_UPDATE(A_reg, A, gpio->store.a);
56 REG_UPDATE(EN_reg, EN, gpio->store.en);
57 /* TODO restore GPIO_MUX_CONTROL if we ever use it */
58}
59
60bool dal_hw_gpio_open(
61 struct hw_gpio_pin *ptr,
62 enum gpio_mode mode)
63{
64 struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
65
66 store_registers(pin);
67
68 ptr->opened = (dal_hw_gpio_config_mode(pin, mode) == GPIO_RESULT_OK);
69
70 return ptr->opened;
71}
72
73enum gpio_result dal_hw_gpio_get_value(
74 const struct hw_gpio_pin *ptr,
75 uint32_t *value)
76{
77 const struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
78
79 enum gpio_result result = GPIO_RESULT_OK;
80
81 switch (ptr->mode) {
82 case GPIO_MODE_INPUT:
83 case GPIO_MODE_OUTPUT:
84 case GPIO_MODE_HARDWARE:
85 case GPIO_MODE_FAST_OUTPUT:
86 REG_GET(Y_reg, Y, value);
87 break;
88 default:
89 result = GPIO_RESULT_NON_SPECIFIC_ERROR;
90 }
91
92 return result;
93}
94
95enum gpio_result dal_hw_gpio_set_value(
96 const struct hw_gpio_pin *ptr,
97 uint32_t value)
98{
99 struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
100
101 /* This is the public interface,
102 * where the input comes from the client and is not shifted yet
103 * (the client does not know the register shifts). */
104
105 switch (ptr->mode) {
106 case GPIO_MODE_OUTPUT:
107 REG_UPDATE(A_reg, A, value);
108 return GPIO_RESULT_OK;
109 case GPIO_MODE_FAST_OUTPUT:
110 /* We use (EN) for faster switching (used for DDC GPIO):
111 * (A) stays grounded and the line is toggled through (EN);
112 * enabling the output pulls the line down (output == 0),
113 * disabling it leaves the output tri-stated */
114 REG_UPDATE(EN_reg, EN, ~value);
115 return GPIO_RESULT_OK;
116 default:
117 return GPIO_RESULT_NON_SPECIFIC_ERROR;
118 }
119}
120
121enum gpio_result dal_hw_gpio_change_mode(
122 struct hw_gpio_pin *ptr,
123 enum gpio_mode mode)
124{
125 struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
126
127 return dal_hw_gpio_config_mode(pin, mode);
128}
129
130void dal_hw_gpio_close(
131 struct hw_gpio_pin *ptr)
132{
133 struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
134
135 restore_registers(pin);
136
137 ptr->mode = GPIO_MODE_UNKNOWN;
138 ptr->opened = false;
139}
140
141enum gpio_result dal_hw_gpio_config_mode(
142 struct hw_gpio *gpio,
143 enum gpio_mode mode)
144{
145 gpio->base.mode = mode;
146
147 switch (mode) {
148 case GPIO_MODE_INPUT:
149 /* turn off output enable, act as input pin;
150 * program the pin as GPIO, mask out signal driven by HW */
151 REG_UPDATE(EN_reg, EN, 0);
152 REG_UPDATE(MASK_reg, MASK, 1);
153 return GPIO_RESULT_OK;
154 case GPIO_MODE_OUTPUT:
155 /* turn on output enable, act as output pin;
156 * program the pin as GPIO, mask out signal driven by HW */
157 REG_UPDATE(A_reg, A, 0);
158 REG_UPDATE(MASK_reg, MASK, 1);
159 return GPIO_RESULT_OK;
160 case GPIO_MODE_FAST_OUTPUT:
161 /* grounding the A register and toggling the EN register bit
162 * gives a faster rise time */
163 REG_UPDATE(A_reg, A, 0);
164 REG_UPDATE(MASK_reg, MASK, 1);
165 return GPIO_RESULT_OK;
166 case GPIO_MODE_HARDWARE:
167 /* program the pin as tri-state, pin is driven by HW */
168 REG_UPDATE(MASK_reg, MASK, 0);
169 return GPIO_RESULT_OK;
170 case GPIO_MODE_INTERRUPT:
171 /* Interrupt mode supported only by HPD (IrqGpio) pins. */
172 REG_UPDATE(MASK_reg, MASK, 0);
173 return GPIO_RESULT_OK;
174 default:
175 return GPIO_RESULT_NON_SPECIFIC_ERROR;
176 }
177}
178
179bool dal_hw_gpio_construct(
180 struct hw_gpio *pin,
181 enum gpio_id id,
182 uint32_t en,
183 struct dc_context *ctx)
184{
185 pin->base.ctx = ctx;
186 pin->base.id = id;
187 pin->base.en = en;
188 pin->base.mode = GPIO_MODE_UNKNOWN;
189 pin->base.opened = false;
190
191 pin->store.mask = 0;
192 pin->store.a = 0;
193 pin->store.en = 0;
194 pin->store.mux = 0;
195
196 pin->mux_supported = false;
197
198 return true;
199}
200
201void dal_hw_gpio_destruct(
202 struct hw_gpio *pin)
203{
204 ASSERT(!pin->base.opened);
205}
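The FAST_OUTPUT comments above describe an open-drain style of driving: (A) is kept at 0 and only the output enable toggles, so the line is either actively pulled low or released to an external pull-up. Below is a standalone model of that behaviour; the register layout and the pull-up assumption are simplifications, not the DC register map.

/* Standalone model of the FAST_OUTPUT drive: A stays 0 and the line level
 * follows the output-enable bit. */
#include <stdint.h>
#include <stdio.h>

struct mock_gpio_regs {
        uint32_t a;     /* driven value, kept at 0 */
        uint32_t en;    /* 1 = drive A onto the pad, 0 = tri-state */
};

/* Writing "1" releases the line (external pull-up takes it high),
 * writing "0" enables the driver and pulls the line low. */
static void fast_output_set(struct mock_gpio_regs *regs, uint32_t value)
{
        regs->a = 0;
        regs->en = value ? 0 : 1;
}

static int line_level(const struct mock_gpio_regs *regs)
{
        return regs->en ? (int)regs->a : 1;     /* tri-state reads high via pull-up */
}

int main(void)
{
        struct mock_gpio_regs regs = { 0, 0 };

        fast_output_set(&regs, 0);
        printf("value 0 -> line %d\n", line_level(&regs));
        fast_output_set(&regs, 1);
        printf("value 1 -> line %d\n", line_level(&regs));
        return 0;
}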
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
new file mode 100644
index 000000000000..fb41ee2be958
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_GPIO_H__
27#define __DAL_HW_GPIO_H__
28
29#include "gpio_regs.h"
30
31#define FROM_HW_GPIO_PIN(ptr) \
32 container_of((ptr), struct hw_gpio, base)
33
34struct addr_mask {
35 uint32_t addr;
36 uint32_t mask;
37};
38
39struct hw_gpio_pin {
40 const struct hw_gpio_pin_funcs *funcs;
41 enum gpio_id id;
42 uint32_t en;
43 enum gpio_mode mode;
44 bool opened;
45 struct dc_context *ctx;
46};
47
48struct hw_gpio_pin_funcs {
49 void (*destroy)(
50 struct hw_gpio_pin **ptr);
51 bool (*open)(
52 struct hw_gpio_pin *pin,
53 enum gpio_mode mode);
54 enum gpio_result (*get_value)(
55 const struct hw_gpio_pin *pin,
56 uint32_t *value);
57 enum gpio_result (*set_value)(
58 const struct hw_gpio_pin *pin,
59 uint32_t value);
60 enum gpio_result (*set_config)(
61 struct hw_gpio_pin *pin,
62 const struct gpio_config_data *config_data);
63 enum gpio_result (*change_mode)(
64 struct hw_gpio_pin *pin,
65 enum gpio_mode mode);
66 void (*close)(
67 struct hw_gpio_pin *pin);
68};
69
70
71struct hw_gpio;
72
73/* Register indices are represented by member variables
74 * and are to be filled in by constructors of derived classes.
75 * These members permit the use of common code
76 * for programming registers, where the sequence is the same
77 * but register sets are different.
78 * Some GPIOs have a HW mux which allows choosing
79 * the source of the signal in HW mode */
80
81struct hw_gpio_pin_reg {
82 struct addr_mask DC_GPIO_DATA_MASK;
83 struct addr_mask DC_GPIO_DATA_A;
84 struct addr_mask DC_GPIO_DATA_EN;
85 struct addr_mask DC_GPIO_DATA_Y;
86};
87
88struct hw_gpio_mux_reg {
89 struct addr_mask GPIO_MUX_CONTROL;
90 struct addr_mask GPIO_MUX_STEREO_SEL;
91};
92
93struct hw_gpio {
94 struct hw_gpio_pin base;
95
96 /* variables to save register value */
97 struct {
98 uint32_t mask;
99 uint32_t a;
100 uint32_t en;
101 uint32_t mux;
102 } store;
103
104 /* GPIO MUX support */
105 bool mux_supported;
106 const struct gpio_registers *regs;
107};
108
109#define HW_GPIO_FROM_BASE(hw_gpio_pin) \
110 container_of((hw_gpio_pin), struct hw_gpio, base)
111
112bool dal_hw_gpio_construct(
113 struct hw_gpio *pin,
114 enum gpio_id id,
115 uint32_t en,
116 struct dc_context *ctx);
117
118bool dal_hw_gpio_open(
119 struct hw_gpio_pin *pin,
120 enum gpio_mode mode);
121
122enum gpio_result dal_hw_gpio_get_value(
123 const struct hw_gpio_pin *pin,
124 uint32_t *value);
125
126enum gpio_result dal_hw_gpio_config_mode(
127 struct hw_gpio *pin,
128 enum gpio_mode mode);
129
130void dal_hw_gpio_destruct(
131 struct hw_gpio *pin);
132
133enum gpio_result dal_hw_gpio_set_value(
134 const struct hw_gpio_pin *ptr,
135 uint32_t value);
136
137enum gpio_result dal_hw_gpio_change_mode(
138 struct hw_gpio_pin *ptr,
139 enum gpio_mode mode);
140
141void dal_hw_gpio_close(
142 struct hw_gpio_pin *ptr);
143
144#endif
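FROM_HW_GPIO_PIN and HW_GPIO_FROM_BASE rely on the base object being embedded in the derived one, so container_of() can recover the derived pointer from a base pointer. A minimal standalone illustration with simplified struct names:

/* Standalone illustration of the container_of downcast; structs are
 * simplified stand-ins for hw_gpio_pin / hw_gpio. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base_pin {
        int id;
};

struct derived_pin {
        struct base_pin base;   /* must be embedded, not pointed to */
        int extra_state;
};

int main(void)
{
        struct derived_pin derived = { { 7 }, 42 };
        struct base_pin *base = &derived.base;  /* what callers pass around */

        /* Recover the derived object from the embedded base pointer. */
        struct derived_pin *back = container_of(base, struct derived_pin, base);

        printf("id=%d extra=%d\n", back->base.id, back->extra_state);
        return 0;
}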
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
new file mode 100644
index 000000000000..9634e8841d90
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/gpio_types.h"
29#include "hw_gpio.h"
30#include "hw_hpd.h"
31
32#include "reg_helper.h"
33#include "hpd_regs.h"
34
35#undef FN
36#define FN(reg_name, field_name) \
37 hpd->shifts->field_name, hpd->masks->field_name
38
39#define CTX \
40 hpd->base.base.ctx
41#define REG(reg)\
42 (hpd->regs->reg)
43
44static bool dal_hw_hpd_construct(
45 struct hw_hpd *pin,
46 enum gpio_id id,
47 uint32_t en,
48 struct dc_context *ctx)
49{
50 if (!dal_hw_gpio_construct(&pin->base, id, en, ctx))
51 return false;
52 return true;
53}
54
55static void dal_hw_hpd_destruct(
56 struct hw_hpd *pin)
57{
58 dal_hw_gpio_destruct(&pin->base);
59}
60
61
62static void destruct(
63 struct hw_hpd *hpd)
64{
65 dal_hw_hpd_destruct(hpd);
66}
67
68static void destroy(
69 struct hw_gpio_pin **ptr)
70{
71 struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
72
73 destruct(hpd);
74
75 dm_free(hpd);
76
77 *ptr = NULL;
78}
79
80static enum gpio_result get_value(
81 const struct hw_gpio_pin *ptr,
82 uint32_t *value)
83{
84 struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
85 uint32_t hpd_delayed = 0;
86
87 /* in Interrupt mode we ask for SENSE bit */
88
89 if (ptr->mode == GPIO_MODE_INTERRUPT) {
90
91 REG_GET(int_status,
92 DC_HPD_SENSE_DELAYED, &hpd_delayed);
93
94 *value = hpd_delayed;
95 return GPIO_RESULT_OK;
96 }
97
98 /* in any other modes, operate as normal GPIO */
99
100 return dal_hw_gpio_get_value(ptr, value);
101}
102
103static enum gpio_result set_config(
104 struct hw_gpio_pin *ptr,
105 const struct gpio_config_data *config_data)
106{
107 struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
108
109 if (!config_data)
110 return GPIO_RESULT_INVALID_DATA;
111
112 REG_UPDATE_2(toggle_filt_cntl,
113 DC_HPD_CONNECT_INT_DELAY, config_data->config.hpd.delay_on_connect / 10,
114 DC_HPD_DISCONNECT_INT_DELAY, config_data->config.hpd.delay_on_disconnect / 10);
115
116 return GPIO_RESULT_OK;
117}
118
119static const struct hw_gpio_pin_funcs funcs = {
120 .destroy = destroy,
121 .open = dal_hw_gpio_open,
122 .get_value = get_value,
123 .set_value = dal_hw_gpio_set_value,
124 .set_config = set_config,
125 .change_mode = dal_hw_gpio_change_mode,
126 .close = dal_hw_gpio_close,
127};
128
129static bool construct(
130 struct hw_hpd *hpd,
131 enum gpio_id id,
132 uint32_t en,
133 struct dc_context *ctx)
134{
135 if (id != GPIO_ID_HPD) {
136 ASSERT_CRITICAL(false);
137 return false;
138 }
139
140 if ((en < GPIO_HPD_MIN) || (en > GPIO_HPD_MAX)) {
141 ASSERT_CRITICAL(false);
142 return false;
143 }
144
145 if (!dal_hw_hpd_construct(hpd, id, en, ctx)) {
146 ASSERT_CRITICAL(false);
147 return false;
148 }
149
150 hpd->base.base.funcs = &funcs;
151
152 return true;
153}
154
155struct hw_gpio_pin *dal_hw_hpd_create(
156 struct dc_context *ctx,
157 enum gpio_id id,
158 uint32_t en)
159{
160 struct hw_hpd *hpd = dm_alloc(sizeof(struct hw_hpd));
161
162 if (!hpd) {
163 ASSERT_CRITICAL(false);
164 return NULL;
165 }
166
167 if (construct(hpd, id, en, ctx))
168 return &hpd->base.base;
169
170 ASSERT_CRITICAL(false);
171
172 dm_free(hpd);
173
174 return NULL;
175}
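get_value() above branches on the pin mode: in interrupt mode it returns the delayed HPD sense bit, otherwise it falls back to the generic GPIO readback. A standalone model of that branch, with simplified stand-in types and fields:

/* Standalone model of the mode-dependent HPD read. */
#include <stdint.h>
#include <stdio.h>

enum mock_mode { MODE_INPUT, MODE_INTERRUPT };

struct mock_hpd {
        enum mock_mode mode;
        uint32_t sense_delayed; /* models DC_HPD_SENSE_DELAYED */
        uint32_t gpio_y;        /* models the generic GPIO Y readback */
};

static uint32_t hpd_get_value(const struct mock_hpd *hpd)
{
        if (hpd->mode == MODE_INTERRUPT)
                return hpd->sense_delayed;
        return hpd->gpio_y;     /* any other mode behaves as a normal GPIO */
}

int main(void)
{
        struct mock_hpd hpd = { MODE_INTERRUPT, 1, 0 };

        printf("interrupt mode: %u\n", hpd_get_value(&hpd));
        hpd.mode = MODE_INPUT;
        printf("input mode:     %u\n", hpd_get_value(&hpd));
        return 0;
}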
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
new file mode 100644
index 000000000000..4ab7a208f781
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_HPD_H__
27#define __DAL_HW_HPD_H__
28
29#include "hpd_regs.h"
30
31struct hw_hpd {
32 struct hw_gpio base;
33 const struct hpd_registers *regs;
34 const struct hpd_sh_mask *shifts;
35 const struct hpd_sh_mask *masks;
36};
37
38#define HW_HPD_FROM_BASE(hw_gpio) \
39 container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_hpd, base)
40
41struct hw_gpio_pin *dal_hw_hpd_create(
42 struct dc_context *ctx,
43 enum gpio_id id,
44 uint32_t en);
45
46#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
new file mode 100644
index 000000000000..23e097fa5ace
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/gpio_types.h"
32
33/*
34 * Header of this unit
35 */
36
37#include "hw_translate.h"
38
39/*
40 * Post-requisites: headers required by this unit
41 */
42
43#include "dce80/hw_translate_dce80.h"
44#include "dce110/hw_translate_dce110.h"
45
46#include "diagnostics/hw_translate_diag.h"
47
48/*
49 * This unit
50 */
51
52bool dal_hw_translate_init(
53 struct hw_translate *translate,
54 enum dce_version dce_version,
55 enum dce_environment dce_environment)
56{
57 if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
58 dal_hw_translate_diag_fpga_init(translate);
59 return true;
60 }
61
62 switch (dce_version) {
63 case DCE_VERSION_8_0:
64 dal_hw_translate_dce80_init(translate);
65 return true;
66 case DCE_VERSION_10_0:
67 case DCE_VERSION_11_0:
68 case DCE_VERSION_11_2:
69 dal_hw_translate_dce110_init(translate);
70 return true;
71 default:
72 BREAK_TO_DEBUGGER();
73 return false;
74 }
75}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
new file mode 100644
index 000000000000..3a7d89ca1605
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_TRANSLATE_H__
27#define __DAL_HW_TRANSLATE_H__
28
29struct hw_translate_funcs {
30 bool (*offset_to_id)(
31 uint32_t offset,
32 uint32_t mask,
33 enum gpio_id *id,
34 uint32_t *en);
35 bool (*id_to_offset)(
36 enum gpio_id id,
37 uint32_t en,
38 struct gpio_pin_info *info);
39};
40
41struct hw_translate {
42 const struct hw_translate_funcs *funcs;
43};
44
45bool dal_hw_translate_init(
46 struct hw_translate *translate,
47 enum dce_version dce_version,
48 enum dce_environment dce_environment);
49
50#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/Makefile b/drivers/gpu/drm/amd/display/dc/gpu/Makefile
new file mode 100644
index 000000000000..fd17af1ce88e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/Makefile
@@ -0,0 +1,36 @@
1#
2# Makefile for the 'gpu' sub-component of DAL.
3# It provides the control and status of HW adapter resources
4# that are global for the ASIC and shareable between pipes.
5
6GPU = display_clock.o divider_range.o
7
8AMD_DAL_GPU = $(addprefix $(AMDDALPATH)/dc/gpu/,$(GPU))
9
10AMD_DISPLAY_FILES += $(AMD_DAL_GPU)
11
12###############################################################################
13# DCE 80 family
14###############################################################################
15GPU_DCE80 = display_clock_dce80.o
16
17AMD_DAL_GPU_DCE80 = $(addprefix $(AMDDALPATH)/dc/gpu/dce80/,$(GPU_DCE80))
18
19AMD_DISPLAY_FILES += $(AMD_DAL_GPU_DCE80)
20
21
22###############################################################################
23# DCE 110 family
24###############################################################################
25GPU_DCE110 = display_clock_dce110.o
26
27AMD_DAL_GPU_DCE110 = $(addprefix $(AMDDALPATH)/dc/gpu/dce110/,$(GPU_DCE110))
28
29AMD_DISPLAY_FILES += $(AMD_DAL_GPU_DCE110)
30
31GPU_DCE112 = display_clock_dce112.o
32
33AMD_DAL_GPU_DCE112 = $(addprefix $(AMDDALPATH)/dc/gpu/dce112/,$(GPU_DCE112))
34
35AMD_DISPLAY_FILES += $(AMD_DAL_GPU_DCE110) $(AMD_DAL_GPU_DCE112)
36
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.c b/drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.c
new file mode 100644
index 000000000000..1bc39f12b6a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.c
@@ -0,0 +1,1035 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_11_0_d.h"
29#include "dce/dce_11_0_sh_mask.h"
30
31#include "include/bios_parser_interface.h"
32#include "include/fixed32_32.h"
33#include "include/logger_interface.h"
34
35#include "../divider_range.h"
36
37#include "display_clock_dce110.h"
38#include "dc.h"
39
40#define FROM_DISPLAY_CLOCK(base) \
41 container_of(base, struct display_clock_dce110, disp_clk_base)
42
43#define PSR_SET_WAITLOOP 0x31
44
45static struct state_dependent_clocks max_clks_by_state[] = {
46/*ClocksStateInvalid - should not be used*/
47{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
48/*ClocksStateUltraLow - currently not supposed to be used, per the HW design team*/
49{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
50/*ClocksStateLow*/
51{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
52/*ClocksStateNominal*/
53{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
54/*ClocksStatePerformance*/
55{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
56
57/* Starting point for each divider range.*/
58enum divider_range_start {
59 DIVIDER_RANGE_01_START = 200, /* 2.00*/
60 DIVIDER_RANGE_02_START = 1600, /* 16.00*/
61 DIVIDER_RANGE_03_START = 3200, /* 32.00*/
62 DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
63};
64
65/* Array identifiers and count for the divider ranges.*/
66enum divider_range_count {
67 DIVIDER_RANGE_01 = 0,
68 DIVIDER_RANGE_02,
69 DIVIDER_RANGE_03,
70 DIVIDER_RANGE_MAX /* == 3*/
71};
72
73/* Ranges for divider identifiers (Divider ID or DID),
74 * mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER */
75enum divider_id_register_setting {
76 DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
77 DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
78 DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
79 DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
80};
81
82/* Step size between each divider within a range.
83 * Incrementing the DENTIST_DISPCLK_WDIVIDER by one
84 * will increment the divider by this much. */
85enum divider_range_step_size {
86 DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
87 DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
88 DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
89};
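The start, base-DID and step constants above imply a piecewise-linear mapping from a divider ID to a divider scaled by 100. Below is a minimal sketch of that mapping under that assumption; the real conversion is dal_divider_range_get_divider(), which lives elsewhere and also validates the DID against each range's end.

/* Minimal sketch of the DID -> divider mapping implied by the constants
 * above (results scaled by 100). */
#include <stdint.h>
#include <stdio.h>

struct did_range {
        uint32_t did_base;      /* first DID of the range */
        uint32_t div_start;     /* divider at did_base, scaled by 100 */
        uint32_t step;          /* divider increment per DID, scaled by 100 */
};

static const struct did_range ranges[] = {
        { 0x08, 200, 25 },      /* 2.00 .., step 0.25 */
        { 0x40, 1600, 50 },     /* 16.00 .., step 0.50 */
        { 0x60, 3200, 100 },    /* 32.00 .., step 1.00 */
};

static uint32_t did_to_divider(uint32_t did)
{
        int i;

        for (i = 2; i >= 0; i--)
                if (did >= ranges[i].did_base)
                        return ranges[i].div_start +
                               (did - ranges[i].did_base) * ranges[i].step;
        return 0;       /* DID below the first range: invalid */
}

int main(void)
{
        /* DID 0x14 sits in range 1: 2.00 + (0x14 - 0x08) * 0.25 = 5.00 */
        printf("DID 0x14 -> divider (x100) = %u\n", did_to_divider(0x14));
        return 0;
}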
90
91union dce110_dmcu_psr_config_data_wait_loop_reg1 {
92 struct {
93 unsigned int waitLoop:16; /* [15:0] */
94 unsigned int reserved:16; /* [31:16] */
95 } bits;
96 unsigned int u32All;
97};
98
99static struct divider_range divider_ranges[DIVIDER_RANGE_MAX];
100
101#define DCE110_DFS_BYPASS_THRESHOLD_KHZ 400000
102/*****************************************************************************
103 * static functions
104 *****************************************************************************/
105
106/*
107 * store_max_clocks_state
108 *
109 * @brief
110 * Cache the clock state
111 *
112 * @param
113 * struct display_clock *base - [out] cache the state in this structure
114 * enum clocks_state max_clocks_state - [in] state to be stored
115 */
116static void store_max_clocks_state(
117 struct display_clock *base,
118 enum clocks_state max_clocks_state)
119{
120 struct display_clock_dce110 *dc = DCLCK110_FROM_BASE(base);
121
122 switch (max_clocks_state) {
123 case CLOCKS_STATE_LOW:
124 case CLOCKS_STATE_NOMINAL:
125 case CLOCKS_STATE_PERFORMANCE:
126 case CLOCKS_STATE_ULTRA_LOW:
127 dc->max_clks_state = max_clocks_state;
128 break;
129
130 case CLOCKS_STATE_INVALID:
131 default:
132 /*Invalid Clocks State!*/
133 ASSERT_CRITICAL(false);
134 break;
135 }
136}
137
138static enum clocks_state get_min_clocks_state(struct display_clock *base)
139{
140 return base->cur_min_clks_state;
141}
142
143static bool set_min_clocks_state(
144 struct display_clock *base,
145 enum clocks_state clocks_state)
146{
147 struct display_clock_dce110 *dc = DCLCK110_FROM_BASE(base);
148 struct dm_pp_power_level_change_request level_change_req = {
149 DM_PP_POWER_LEVEL_INVALID};
150
151 if (clocks_state > dc->max_clks_state) {
152 /*Requested state exceeds max supported state.*/
153 dm_logger_write(base->ctx->logger, LOG_WARNING,
154 "Requested state exceeds max supported state");
155 return false;
156 } else if (clocks_state == base->cur_min_clks_state) {
157 /*if we're trying to set the same state, we can just return
158 * since nothing needs to be done*/
159 return true;
160 }
161
162 switch (clocks_state) {
163 case CLOCKS_STATE_ULTRA_LOW:
164 level_change_req.power_level = DM_PP_POWER_LEVEL_ULTRA_LOW;
165 break;
166 case CLOCKS_STATE_LOW:
167 level_change_req.power_level = DM_PP_POWER_LEVEL_LOW;
168 break;
169 case CLOCKS_STATE_NOMINAL:
170 level_change_req.power_level = DM_PP_POWER_LEVEL_NOMINAL;
171 break;
172 case CLOCKS_STATE_PERFORMANCE:
173 level_change_req.power_level = DM_PP_POWER_LEVEL_PERFORMANCE;
174 break;
175 case CLOCKS_STATE_INVALID:
176 default:
177 dm_logger_write(base->ctx->logger, LOG_WARNING,
178 "Requested state invalid state");
179 return false;
180 }
181
182 /* request the power level change from PPLIB */
183 if (dm_pp_apply_power_level_change_request(
184 base->ctx, &level_change_req))
185 base->cur_min_clks_state = clocks_state;
186
187 return true;
188}
189
190static uint32_t get_dp_ref_clk_frequency(struct display_clock *dc)
191{
192 uint32_t dispclk_cntl_value;
193 uint32_t dp_ref_clk_cntl_value;
194 uint32_t dp_ref_clk_cntl_src_sel_value;
195 uint32_t dp_ref_clk_khz = 600000;
196 uint32_t target_div = INVALID_DIVIDER;
197 struct display_clock_dce110 *disp_clk = FROM_DISPLAY_CLOCK(dc);
198
199 /* ASSERT DP Reference Clock source is from DFS*/
200 dp_ref_clk_cntl_value = dm_read_reg(dc->ctx,
201 mmDPREFCLK_CNTL);
202
203 dp_ref_clk_cntl_src_sel_value =
204 get_reg_field_value(
205 dp_ref_clk_cntl_value,
206 DPREFCLK_CNTL, DPREFCLK_SRC_SEL);
207
208 ASSERT(dp_ref_clk_cntl_src_sel_value == 0);
209
210 /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
211 * programmed DID DENTIST_DPREFCLK_WDIVIDER */
212 dispclk_cntl_value = dm_read_reg(dc->ctx,
213 mmDENTIST_DISPCLK_CNTL);
214
215 /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
216 target_div = dal_divider_range_get_divider(
217 divider_ranges,
218 DIVIDER_RANGE_MAX,
219 get_reg_field_value(dispclk_cntl_value,
220 DENTIST_DISPCLK_CNTL,
221 DENTIST_DPREFCLK_WDIVIDER));
222
223 if (target_div != INVALID_DIVIDER) {
224 /* Calculate the current DFS clock, in kHz.*/
225 dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
226 * disp_clk->dentist_vco_freq_khz) / target_div;
227 }
228
229 /* SW will adjust the DP REF Clock average value for all purposes
230 * (DP DTO / DP Audio DTO and DP GTC)
231 * if the clock is spread, for all cases:
232 * - SS enabled on the DP Ref clock and HW de-spreading enabled with SW
233 * calculations for DS_INCR/DS_MODULO (planned to be the default case)
234 * - SS enabled on the DP Ref clock and HW de-spreading enabled with HW
235 * calculations (not planned to be used, but the average clock should
236 * still be valid)
237 * - SS enabled on the DP Ref clock and HW de-spreading disabled
238 * (should not be the case with CIK); SW should then program all rates
239 * according to the average value (as with previous ASICs)
240 */
241 if ((disp_clk->ss_on_gpu_pll) && (disp_clk->gpu_pll_ss_divider != 0)) {
242 struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
243 dal_fixed32_32_from_fraction(
244 disp_clk->gpu_pll_ss_percentage,
245 disp_clk->gpu_pll_ss_divider), 200);
246 struct fixed32_32 adj_dp_ref_clk_khz;
247
248 ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
249 ss_percentage);
250 adj_dp_ref_clk_khz =
251 dal_fixed32_32_mul_int(
252 ss_percentage,
253 dp_ref_clk_khz);
254 dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
255 }
256
257 return dp_ref_clk_khz;
258}
259
260static void destroy(struct display_clock **base)
261{
262 struct display_clock_dce110 *dc110;
263
264 dc110 = DCLCK110_FROM_BASE(*base);
265
266 dm_free(dc110);
267
268 *base = NULL;
269}
270
271static uint32_t get_validation_clock(struct display_clock *dc)
272{
273 uint32_t clk = 0;
274 struct display_clock_dce110 *disp_clk = DCLCK110_FROM_BASE(dc);
275
276 switch (disp_clk->max_clks_state) {
277 case CLOCKS_STATE_ULTRA_LOW:
278 /*Currently not supported, it has 0 in table entry*/
279 case CLOCKS_STATE_LOW:
280 clk = max_clks_by_state[CLOCKS_STATE_LOW].
281 display_clk_khz;
282 break;
283
284 case CLOCKS_STATE_NOMINAL:
285 clk = max_clks_by_state[CLOCKS_STATE_NOMINAL].
286 display_clk_khz;
287 break;
288
289 case CLOCKS_STATE_PERFORMANCE:
290 clk = max_clks_by_state[CLOCKS_STATE_PERFORMANCE].
291 display_clk_khz;
292 break;
293
294 case CLOCKS_STATE_INVALID:
295 default:
296 /*Invalid Clocks State*/
297 dm_logger_write(dc->ctx->logger, LOG_WARNING,
298 "Invalid clock state");
299 /* just return the display engine clock for
300 * lowest supported state*/
301 clk = max_clks_by_state[CLOCKS_STATE_LOW].
302 display_clk_khz;
303 break;
304 }
305 return clk;
306}
307
308static struct fixed32_32 get_deep_color_factor(struct min_clock_params *params)
309{
310 /* DeepColorFactor = IF (HDMI = True, bpp / 24, 1)*/
311 struct fixed32_32 deep_color_factor = dal_fixed32_32_from_int(1);
312
313 if (params->signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
314 return deep_color_factor;
315
316 switch (params->deep_color_depth) {
317 case COLOR_DEPTH_101010:
318 /*deep color ratio for 30bpp is 30/24 = 1.25*/
319 deep_color_factor = dal_fixed32_32_from_fraction(30, 24);
320 break;
321
322 case COLOR_DEPTH_121212:
323 /* deep color ratio for 36bpp is 36/24 = 1.5*/
324 deep_color_factor = dal_fixed32_32_from_fraction(36, 24);
325 break;
326
327 case COLOR_DEPTH_161616:
328 /* deep color ratio for 48bpp is 48/24 = 2.0 */
329 deep_color_factor = dal_fixed32_32_from_fraction(48, 24);
330 break;
331 default:
332 break;
333 }
334 return deep_color_factor;
335}
336
337static struct fixed32_32 get_scaler_efficiency(
338 struct dc_context *ctx,
339 struct min_clock_params *params)
340{
341 struct fixed32_32 scaler_efficiency = dal_fixed32_32_from_int(3);
342
343 if (params->scaler_efficiency == V_SCALER_EFFICIENCY_LB18BPP) {
344 scaler_efficiency =
345 dal_fixed32_32_add(
346 dal_fixed32_32_from_fraction(35555, 10000),
347 dal_fixed32_32_from_fraction(
348 55556,
349 100000 * 10000));
350 } else if (params->scaler_efficiency == V_SCALER_EFFICIENCY_LB24BPP) {
351 scaler_efficiency =
352 dal_fixed32_32_add(
353 dal_fixed32_32_from_fraction(34285, 10000),
354 dal_fixed32_32_from_fraction(
355 71429,
356 100000 * 10000));
357 } else if (params->scaler_efficiency == V_SCALER_EFFICIENCY_LB30BPP)
358 scaler_efficiency = dal_fixed32_32_from_fraction(32, 10);
359
360 return scaler_efficiency;
361}
362
363static struct fixed32_32 get_lb_lines_in_per_line_out(
364 struct min_clock_params *params,
365 struct fixed32_32 v_scale_ratio)
366{
367 struct fixed32_32 two = dal_fixed32_32_from_int(2);
368 struct fixed32_32 four = dal_fixed32_32_from_int(4);
369 struct fixed32_32 f4_to_3 = dal_fixed32_32_from_fraction(4, 3);
370 struct fixed32_32 f6_to_4 = dal_fixed32_32_from_fraction(6, 4);
371
372 if (params->line_buffer_prefetch_enabled)
373 return dal_fixed32_32_max(v_scale_ratio, dal_fixed32_32_one);
374 else if (dal_fixed32_32_le(v_scale_ratio, dal_fixed32_32_one))
375 return dal_fixed32_32_one;
376 else if (dal_fixed32_32_le(v_scale_ratio, f4_to_3))
377 return f4_to_3;
378 else if (dal_fixed32_32_le(v_scale_ratio, f6_to_4))
379 return f6_to_4;
380 else if (dal_fixed32_32_le(v_scale_ratio, two))
381 return two;
382 else if (dal_fixed32_32_le(v_scale_ratio, dal_fixed32_32_from_int(3)))
383 return four;
384 else
385 return dal_fixed32_32_zero;
386}
387
388static uint32_t get_actual_required_display_clk(
389 struct display_clock_dce110 *disp_clk,
390 uint32_t target_clk_khz)
391{
392 uint32_t disp_clk_khz = target_clk_khz;
393 uint32_t div = INVALID_DIVIDER;
394 uint32_t did = INVALID_DID;
395 uint32_t scaled_vco =
396 disp_clk->dentist_vco_freq_khz * DIVIDER_RANGE_SCALE_FACTOR;
397
398 ASSERT_CRITICAL(!!disp_clk_khz);
399
400 if (disp_clk_khz)
401 div = scaled_vco / disp_clk_khz;
402
403 did = dal_divider_range_get_did(divider_ranges, DIVIDER_RANGE_MAX, div);
404
405 if (did != INVALID_DID) {
406 div = dal_divider_range_get_divider(
407 divider_ranges, DIVIDER_RANGE_MAX, did);
408
409 if ((div != INVALID_DIVIDER) &&
410 (did > DIVIDER_RANGE_01_BASE_DIVIDER_ID))
411 if (disp_clk_khz > (scaled_vco / div))
412 div = dal_divider_range_get_divider(
413 divider_ranges, DIVIDER_RANGE_MAX,
414 did - 1);
415
416 if (div != INVALID_DIVIDER)
417 disp_clk_khz = scaled_vco / div;
418
419 }
420 /* We need to add 10 kHz to this value because the accuracy in VBIOS is
421 * in 10 kHz units. So we always round the last digit up in order
422 * to reach the next divider level. */
423 return disp_clk_khz + 10;
424}
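A numeric sketch of the snapping done in get_actual_required_display_clk(): derive a scaled divider from the target clock, recompute the clock that divider actually yields, and add 10 kHz for the VBIOS granularity. The VCO frequency here is hypothetical, and the DID validation and step-down logic of the real function are omitted.

/* Numeric sketch of the divider snapping; numbers are hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t dentist_vco_freq_khz = 1800000;        /* hypothetical VCO */
        uint32_t scale = 100;                           /* DIVIDER_RANGE_SCALE_FACTOR */
        uint32_t target_khz = 467000;

        uint32_t scaled_vco = dentist_vco_freq_khz * scale;
        uint32_t div = scaled_vco / target_khz;         /* scaled divider */
        uint32_t actual_khz = scaled_vco / div + 10;    /* round up into 10 kHz units */

        printf("divider (x100) = %u, programmed clock = %u kHz\n",
               div, actual_khz);
        return 0;
}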
425
426static uint32_t calc_single_display_min_clks(
427 struct display_clock *base,
428 struct min_clock_params *params,
429 bool set_clk)
430{
431 struct fixed32_32 h_scale_ratio = dal_fixed32_32_one;
432 struct fixed32_32 v_scale_ratio = dal_fixed32_32_one;
433 uint32_t pix_clk_khz = 0;
434 uint32_t lb_source_width = 0;
435 struct fixed32_32 deep_color_factor;
436 struct fixed32_32 scaler_efficiency;
437 struct fixed32_32 v_filter_init;
438 uint32_t v_filter_init_trunc;
439 uint32_t num_lines_at_frame_start = 3;
440 struct fixed32_32 v_filter_init_ceil;
441 struct fixed32_32 lines_per_lines_out_at_frame_start;
442 struct fixed32_32 lb_lines_in_per_line_out; /* in middle of the frame*/
443 uint32_t src_wdth_rnd_to_chunks;
444 struct fixed32_32 scaling_coeff;
445 struct fixed32_32 h_blank_granularity_factor =
446 dal_fixed32_32_one;
447 struct fixed32_32 fx_disp_clk_mhz;
448 struct fixed32_32 line_time;
449 struct fixed32_32 disp_pipe_pix_throughput;
450 struct fixed32_32 fx_alt_disp_clk_mhz;
451 uint32_t disp_clk_khz;
452 uint32_t alt_disp_clk_khz;
453 struct display_clock_dce110 *disp_clk_110 = DCLCK110_FROM_BASE(base);
454 uint32_t max_clk_khz = get_validation_clock(base);
455 bool panning_allowed = false; /* TODO: receive this value from AS */
456
457 if (params == NULL) {
458 dm_logger_write(base->ctx->logger, LOG_WARNING,
459 "Invalid input parameter in %s",
460 __func__);
461 return 0;
462 }
463
464 deep_color_factor = get_deep_color_factor(params);
465 scaler_efficiency = get_scaler_efficiency(base->ctx, params);
466 pix_clk_khz = params->requested_pixel_clock;
467 lb_source_width = params->source_view.width;
468
469 if (0 != params->dest_view.height && 0 != params->dest_view.width) {
470
471 h_scale_ratio = dal_fixed32_32_from_fraction(
472 params->source_view.width,
473 params->dest_view.width);
474 v_scale_ratio = dal_fixed32_32_from_fraction(
475 params->source_view.height,
476 params->dest_view.height);
477 } else {
478 dm_logger_write(base->ctx->logger, LOG_WARNING,
479 "Destination height or width is 0!\n");
480 }
481
482 v_filter_init =
483 dal_fixed32_32_add(
484 v_scale_ratio,
485 dal_fixed32_32_add_int(
486 dal_fixed32_32_div_int(
487 dal_fixed32_32_mul_int(
488 v_scale_ratio,
489 params->timing_info.INTERLACED),
490 2),
491 params->scaling_info.v_taps + 1));
492 v_filter_init = dal_fixed32_32_div_int(v_filter_init, 2);
493
494 v_filter_init_trunc = dal_fixed32_32_floor(v_filter_init);
495
496 v_filter_init_ceil = dal_fixed32_32_from_fraction(
497 v_filter_init_trunc, 2);
498 v_filter_init_ceil = dal_fixed32_32_from_int(
499 dal_fixed32_32_ceil(v_filter_init_ceil));
500 v_filter_init_ceil = dal_fixed32_32_mul_int(v_filter_init_ceil, 2);
501
502 lines_per_lines_out_at_frame_start =
503 dal_fixed32_32_div_int(v_filter_init_ceil,
504 num_lines_at_frame_start);
505 lb_lines_in_per_line_out =
506 get_lb_lines_in_per_line_out(params, v_scale_ratio);
507
508 if (panning_allowed)
509 src_wdth_rnd_to_chunks =
510 ((lb_source_width - 1) / 128) * 128 + 256;
511 else
512 src_wdth_rnd_to_chunks =
513 ((lb_source_width + 127) / 128) * 128;
514
515 scaling_coeff =
516 dal_fixed32_32_div(
517 dal_fixed32_32_from_int(params->scaling_info.v_taps),
518 scaler_efficiency);
519
520 if (dal_fixed32_32_le(h_scale_ratio, dal_fixed32_32_one))
521 scaling_coeff = dal_fixed32_32_max(
522 dal_fixed32_32_from_int(
523 dal_fixed32_32_ceil(
524 dal_fixed32_32_from_fraction(
525 params->scaling_info.h_taps,
526 4))),
527 dal_fixed32_32_max(
528 dal_fixed32_32_mul(
529 scaling_coeff,
530 h_scale_ratio),
531 dal_fixed32_32_one));
532
533 if (!params->line_buffer_prefetch_enabled &&
534 dal_fixed32_32_floor(lb_lines_in_per_line_out) != 2 &&
535 dal_fixed32_32_floor(lb_lines_in_per_line_out) != 4) {
536 uint32_t line_total_pixel =
537 params->timing_info.h_total + lb_source_width - 256;
538 h_blank_granularity_factor = dal_fixed32_32_div(
539 dal_fixed32_32_from_int(params->timing_info.h_total),
540 dal_fixed32_32_div(
541 dal_fixed32_32_from_fraction(
542 line_total_pixel, 2),
543 h_scale_ratio));
544 }
545
546 /* Calculate display clock with ramping. Ramping factor is 1.1*/
547 fx_disp_clk_mhz =
548 dal_fixed32_32_div_int(
549 dal_fixed32_32_mul_int(scaling_coeff, 11),
550 10);
551 line_time = dal_fixed32_32_from_fraction(
552 params->timing_info.h_total * 1000, pix_clk_khz);
553
554 disp_pipe_pix_throughput = dal_fixed32_32_mul(
555 lb_lines_in_per_line_out, h_blank_granularity_factor);
556 disp_pipe_pix_throughput = dal_fixed32_32_max(
557 disp_pipe_pix_throughput,
558 lines_per_lines_out_at_frame_start);
559 disp_pipe_pix_throughput = dal_fixed32_32_div(dal_fixed32_32_mul_int(
560 disp_pipe_pix_throughput, src_wdth_rnd_to_chunks),
561 line_time);
562
563 if (0 != params->timing_info.h_total) {
564 fx_disp_clk_mhz =
565 dal_fixed32_32_max(
566 dal_fixed32_32_div_int(
567 dal_fixed32_32_mul_int(
568 scaling_coeff, pix_clk_khz),
569 1000),
570 disp_pipe_pix_throughput);
571 fx_disp_clk_mhz =
572 dal_fixed32_32_mul(
573 fx_disp_clk_mhz,
574 dal_fixed32_32_from_fraction(11, 10));
575 }
576
577 fx_disp_clk_mhz = dal_fixed32_32_max(fx_disp_clk_mhz,
578 dal_fixed32_32_mul(deep_color_factor,
579 dal_fixed32_32_from_fraction(11, 10)));
580
581 /* Calculate display clock without ramping */
582 fx_alt_disp_clk_mhz = scaling_coeff;
583
584 if (0 != params->timing_info.h_total) {
585 fx_alt_disp_clk_mhz = dal_fixed32_32_max(
586 dal_fixed32_32_div_int(dal_fixed32_32_mul_int(
587 scaling_coeff, pix_clk_khz),
588 1000),
589 dal_fixed32_32_div_int(dal_fixed32_32_mul_int(
590 disp_pipe_pix_throughput, 105),
591 100));
592 }
593
594 if (set_clk && disp_clk_110->ss_on_gpu_pll &&
595 disp_clk_110->gpu_pll_ss_divider)
596 fx_alt_disp_clk_mhz = dal_fixed32_32_mul(fx_alt_disp_clk_mhz,
597 dal_fixed32_32_add_int(
598 dal_fixed32_32_div_int(
599 dal_fixed32_32_div_int(
600 dal_fixed32_32_from_fraction(
601 disp_clk_110->gpu_pll_ss_percentage,
602 disp_clk_110->gpu_pll_ss_divider), 100),
603 2),
604 1));
605
606 /* convert to integer */
607 disp_clk_khz = dal_fixed32_32_round(
608 dal_fixed32_32_mul_int(fx_disp_clk_mhz, 1000));
609 alt_disp_clk_khz = dal_fixed32_32_round(
610 dal_fixed32_32_mul_int(fx_alt_disp_clk_mhz, 1000));
611
612 if ((disp_clk_khz > max_clk_khz) && (alt_disp_clk_khz <= max_clk_khz))
613 disp_clk_khz = alt_disp_clk_khz;
614
615 if (set_clk) { /* only compensate clock if we are going to set it.*/
616 disp_clk_khz = get_actual_required_display_clk(
617 disp_clk_110, disp_clk_khz);
618 }
619
620 disp_clk_khz = disp_clk_khz > max_clk_khz ? max_clk_khz : disp_clk_khz;
621
622 return disp_clk_khz;
623}
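
/*
 * Editor's illustrative sketch (not part of this commit): the final clock
 * selection above prefers the ramped (x1.1) clock, falls back to the
 * non-ramped clock when only that one fits under the validation maximum,
 * and never exceeds that maximum. Plain integer kHz math stands in for the
 * fixed32_32 helpers, and the function name is made up for illustration;
 * it relies on the types already included in this file.
 */
static uint32_t pick_disp_clk_khz(uint32_t ramped_khz,
		uint32_t non_ramped_khz, uint32_t max_khz)
{
	uint32_t clk_khz = ramped_khz;

	/* Fall back to the non-ramped clock if only that one fits. */
	if (clk_khz > max_khz && non_ramped_khz <= max_khz)
		clk_khz = non_ramped_khz;

	/* Clamp to the validation clock. */
	return clk_khz > max_khz ? max_khz : clk_khz;
}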
624
625static uint32_t calculate_min_clock(
626 struct display_clock *base,
627 uint32_t path_num,
628 struct min_clock_params *params)
629{
630 uint32_t i;
631 uint32_t validation_clk_khz =
632 get_validation_clock(base);
633 uint32_t min_clk_khz = validation_clk_khz;
634 uint32_t max_clk_khz = 0;
635 struct display_clock_dce110 *dc = DCLCK110_FROM_BASE(base);
636
637 if (dc->use_max_disp_clk)
638 return min_clk_khz;
639
640 if (params != NULL) {
641 uint32_t disp_clk_khz = 0;
642
643 for (i = 0; i < path_num; ++i) {
644
645 disp_clk_khz = calc_single_display_min_clks(
646 base, params, true);
647
648 /* update the max required clock found*/
649 if (disp_clk_khz > max_clk_khz)
650 max_clk_khz = disp_clk_khz;
651
652 params++;
653 }
654 }
655
656 min_clk_khz = max_clk_khz;
657
658 if (min_clk_khz > validation_clk_khz)
659 min_clk_khz = validation_clk_khz;
660 else if (min_clk_khz < base->min_display_clk_threshold_khz)
661 min_clk_khz = base->min_display_clk_threshold_khz;
662
663 if (dc->use_max_disp_clk)
664 min_clk_khz = get_validation_clock(base);
665
666 return min_clk_khz;
667}
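
/*
 * Editor's illustrative sketch (not part of this commit): the clamping done
 * at the end of calculate_min_clock() above, written out as a standalone
 * helper. The name is made up; it uses the types already included in this
 * file.
 */
static uint32_t clamp_min_clk_khz(uint32_t max_path_req_khz,
		uint32_t threshold_khz, uint32_t validation_khz)
{
	if (max_path_req_khz > validation_khz)
		return validation_khz;
	if (max_path_req_khz < threshold_khz)
		return threshold_khz;
	return max_path_req_khz;
}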
668
669static bool display_clock_integrated_info_construct(
670 struct display_clock_dce110 *disp_clk)
671{
672 struct dc_debug *debug = &disp_clk->disp_clk_base.ctx->dc->debug;
673 struct dc_bios *bp = disp_clk->disp_clk_base.ctx->dc_bios;
674 struct integrated_info info;
675 struct firmware_info fw_info;
676 uint32_t i;
677 struct display_clock *base = &disp_clk->disp_clk_base;
678
679 memset(&info, 0, sizeof(struct integrated_info));
680 memset(&fw_info, 0, sizeof(struct firmware_info));
681
682 if (bp->integrated_info)
683 info = *bp->integrated_info;
684
685 disp_clk->dentist_vco_freq_khz = info.dentist_vco_freq;
686 if (disp_clk->dentist_vco_freq_khz == 0) {
687 bp->funcs->get_firmware_info(bp, &fw_info);
688 disp_clk->dentist_vco_freq_khz =
689 fw_info.smu_gpu_pll_output_freq;
690 if (disp_clk->dentist_vco_freq_khz == 0)
691 disp_clk->dentist_vco_freq_khz = 3600000;
692 }
693
694 base->min_display_clk_threshold_khz =
695 disp_clk->dentist_vco_freq_khz / 64;
696
697 if (bp->integrated_info == NULL)
698 return false;
699
700 /*update the maximum display clock for each power state*/
701 for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
702 enum clocks_state clk_state = CLOCKS_STATE_INVALID;
703
704 switch (i) {
705 case 0:
706 clk_state = CLOCKS_STATE_ULTRA_LOW;
707 break;
708
709 case 1:
710 clk_state = CLOCKS_STATE_LOW;
711 break;
712
713 case 2:
714 clk_state = CLOCKS_STATE_NOMINAL;
715 break;
716
717 case 3:
718 clk_state = CLOCKS_STATE_PERFORMANCE;
719 break;
720
721 default:
722 clk_state = CLOCKS_STATE_INVALID;
723 break;
724 }
725
726		/* Do not allow a bad VBIOS/SBIOS to override with invalid values;
727		 * check for >= 100 MHz. */
728 if (info.disp_clk_voltage[i].max_supported_clk >= 100000) {
729 max_clks_by_state[clk_state].display_clk_khz =
730 info.disp_clk_voltage[i].max_supported_clk;
731 }
732 }
733
734 disp_clk->dfs_bypass_enabled = false;
735 if (!debug->disable_dfs_bypass)
736 if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
737 disp_clk->dfs_bypass_enabled = true;
738
739 disp_clk->use_max_disp_clk = debug->max_disp_clk;
740
741 return true;
742}
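
/*
 * Editor's illustrative sketch (not part of this commit): the DENTIST VCO
 * frequency fallback chain used by display_clock_integrated_info_construct()
 * above -- integrated info first, then firmware info, then a 3.6 GHz
 * default -- with the minimum display clock threshold derived as VCO/64.
 * Names are made up for illustration.
 */
static uint32_t pick_dentist_vco_khz(uint32_t integrated_info_khz,
		uint32_t firmware_info_khz)
{
	if (integrated_info_khz)
		return integrated_info_khz;
	if (firmware_info_khz)
		return firmware_info_khz;
	return 3600000; /* 3.6 GHz default when VBIOS reports nothing */
}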
743
744static uint32_t get_clock(struct display_clock *dc)
745{
746 uint32_t disp_clock = get_validation_clock(dc);
747 uint32_t target_div = INVALID_DIVIDER;
748 uint32_t addr = mmDENTIST_DISPCLK_CNTL;
749 uint32_t value = 0;
750 uint32_t field = 0;
751 struct display_clock_dce110 *disp_clk = DCLCK110_FROM_BASE(dc);
752
753 if (disp_clk->dfs_bypass_enabled && disp_clk->dfs_bypass_disp_clk)
754 return disp_clk->dfs_bypass_disp_clk;
755
756 /* Read the mmDENTIST_DISPCLK_CNTL to get the currently programmed
757 DID DENTIST_DISPCLK_WDIVIDER.*/
758 value = dm_read_reg(dc->ctx, addr);
759 field = get_reg_field_value(
760 value, DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER);
761
762 /* Convert DENTIST_DISPCLK_WDIVIDER to actual divider*/
763 target_div = dal_divider_range_get_divider(
764 divider_ranges,
765 DIVIDER_RANGE_MAX,
766 field);
767
768 if (target_div != INVALID_DIVIDER)
769 /* Calculate the current DFS clock in KHz.
770 Should be okay up to 42.9 THz before overflowing.*/
771 disp_clock = (DIVIDER_RANGE_SCALE_FACTOR
772 * disp_clk->dentist_vco_freq_khz) / target_div;
773 return disp_clock;
774}
775
776static enum clocks_state get_required_clocks_state(
777 struct display_clock *dc,
778 struct state_dependent_clocks *req_clocks)
779{
780 int32_t i;
781 struct display_clock_dce110 *disp_clk = DCLCK110_FROM_BASE(dc);
782 enum clocks_state low_req_clk = disp_clk->max_clks_state;
783
784 if (!req_clocks) {
785 /* NULL pointer*/
786 dm_logger_write(dc->ctx->logger, LOG_WARNING,
787 "%s: Invalid parameter",
788 __func__);
789 return CLOCKS_STATE_INVALID;
790 }
791
792 /* Iterate from highest supported to lowest valid state, and update
793 * lowest RequiredState with the lowest state that satisfies
794 * all required clocks
795 */
796 for (i = disp_clk->max_clks_state; i >= CLOCKS_STATE_ULTRA_LOW; --i) {
797 if ((req_clocks->display_clk_khz <=
798 max_clks_by_state[i].display_clk_khz) &&
799 (req_clocks->pixel_clk_khz <=
800 max_clks_by_state[i].pixel_clk_khz))
801 low_req_clk = i;
802 }
803 return low_req_clk;
804}
805
806static void psr_wait_loop(struct dc_context *ctx, unsigned int display_clk_khz)
807{
808 unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
809 unsigned int dmcu_wait_reg_ready_interval = 100;
810 unsigned int regValue;
811 uint32_t masterCmd;
812 uint32_t masterComCntl;
813 union dce110_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
814
815 /* waitDMCUReadyForCmd */
816 do {
817 dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
818 regValue = dm_read_reg(ctx, mmMASTER_COMM_CNTL_REG);
819 dmcu_max_retry_on_wait_reg_ready--;
820 } while
821 /* expected value is 0, loop while not 0*/
822 ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
823 dmcu_max_retry_on_wait_reg_ready > 0);
824
825 masterCmdData1.u32All = 0;
826 masterCmdData1.bits.waitLoop = display_clk_khz / 1000 / 7;
827 dm_write_reg(ctx, mmMASTER_COMM_DATA_REG1, masterCmdData1.u32All);
828
829 /* setDMCUParam_Cmd */
830 masterCmd = dm_read_reg(ctx, mmMASTER_COMM_CMD_REG);
831 set_reg_field_value(
832 masterCmd,
833 PSR_SET_WAITLOOP,
834 MASTER_COMM_CMD_REG,
835 MASTER_COMM_CMD_REG_BYTE0);
836
837 dm_write_reg(ctx, mmMASTER_COMM_CMD_REG, masterCmd);
838
839 /* notifyDMCUMsg */
840 masterComCntl = dm_read_reg(ctx, mmMASTER_COMM_CNTL_REG);
841 set_reg_field_value(
842 masterComCntl,
843 1,
844 MASTER_COMM_CNTL_REG,
845 MASTER_COMM_INTERRUPT);
846 dm_write_reg(ctx, mmMASTER_COMM_CNTL_REG, masterComCntl);
847}
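
/*
 * Editor's illustrative sketch (not part of this commit): the wait-loop
 * value written to MASTER_COMM_DATA_REG1 by psr_wait_loop() above is simply
 * the display clock expressed in MHz divided by 7, using integer math.
 */
static unsigned int psr_wait_loop_value(unsigned int display_clk_khz)
{
	return display_clk_khz / 1000 / 7;
}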
848
849static void set_clock(
850 struct display_clock *base,
851 uint32_t requested_clk_khz)
852{
853 struct bp_pixel_clock_parameters pxl_clk_params;
854 struct display_clock_dce110 *dc = DCLCK110_FROM_BASE(base);
855 struct dc_bios *bp = base->ctx->dc_bios;
856
857 /* Prepare to program display clock*/
858 memset(&pxl_clk_params, 0, sizeof(pxl_clk_params));
859
860 /* Make sure requested clock isn't lower than minimum threshold*/
861 if (requested_clk_khz > 0)
862 requested_clk_khz = dm_max(requested_clk_khz,
863 base->min_display_clk_threshold_khz);
864
865 pxl_clk_params.target_pixel_clock = requested_clk_khz;
866 pxl_clk_params.pll_id = base->id;
867
868 bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
869
870 if (dc->dfs_bypass_enabled) {
871
872 /* Cache the fixed display clock*/
873 dc->dfs_bypass_disp_clk =
874 pxl_clk_params.dfs_bypass_display_clock;
875 }
876
877	/* Coming out of power down (HW reset), mark the clock state as
878	 * ClocksStateNominal so that on resume we call the PPLIB voltage regulator. */
879 if (requested_clk_khz == 0)
880 base->cur_min_clks_state = CLOCKS_STATE_NOMINAL;
881
882 psr_wait_loop(base->ctx, requested_clk_khz);
883}
884
885static void set_clock_state(
886 struct display_clock *dc,
887 struct display_clock_state clk_state)
888{
889 struct display_clock_dce110 *disp_clk = DCLCK110_FROM_BASE(dc);
890
891 disp_clk->clock_state = clk_state;
892}
893
894static struct display_clock_state get_clock_state(
895 struct display_clock *dc)
896{
897 struct display_clock_dce110 *disp_clk = DCLCK110_FROM_BASE(dc);
898
899 return disp_clk->clock_state;
900}
901
902static uint32_t get_dfs_bypass_threshold(struct display_clock *dc)
903{
904 return DCE110_DFS_BYPASS_THRESHOLD_KHZ;
905}
906
907static const struct display_clock_funcs funcs = {
908 .destroy = destroy,
909 .calculate_min_clock = calculate_min_clock,
910 .get_clock = get_clock,
911 .get_clock_state = get_clock_state,
912 .get_dfs_bypass_threshold = get_dfs_bypass_threshold,
913 .get_dp_ref_clk_frequency = get_dp_ref_clk_frequency,
914 .get_min_clocks_state = get_min_clocks_state,
915 .get_required_clocks_state = get_required_clocks_state,
916 .get_validation_clock = get_validation_clock,
917 .set_clock = set_clock,
918 .set_clock_state = set_clock_state,
919 .set_dp_ref_clock_source = NULL,
920 .set_min_clocks_state = set_min_clocks_state,
921 .store_max_clocks_state = store_max_clocks_state,
922 .validate = NULL,
923};
924
925static bool dal_display_clock_dce110_construct(
926 struct display_clock_dce110 *dc110,
927 struct dc_context *ctx)
928{
929 struct display_clock *dc_base = &dc110->disp_clk_base;
930 struct dc_bios *bp = ctx->dc_bios;
931
932 if (!dal_display_clock_construct_base(dc_base, ctx))
933 return false;
934
935 dc_base->funcs = &funcs;
936
937 dc110->dfs_bypass_disp_clk = 0;
938
939 if (!display_clock_integrated_info_construct(dc110))
940 dm_logger_write(dc_base->ctx->logger, LOG_WARNING,
941 "Cannot obtain VBIOS integrated info\n");
942
943 dc110->gpu_pll_ss_percentage = 0;
944 dc110->gpu_pll_ss_divider = 1000;
945 dc110->ss_on_gpu_pll = false;
946
947 dc_base->id = CLOCK_SOURCE_ID_DFS;
948/* Initially set the max clocks state to nominal. This should be updated via
949 * a PPLIB call to DAL IRI, eventually calling
950 * DisplayEngineClock_Dce110::StoreMaxClocksState(). That call comes in on
951 * PPLIB init. Carried over from DCE5x, in case HW wants to use the mixed method. */
952 dc110->max_clks_state = CLOCKS_STATE_NOMINAL;
953
954 dal_divider_range_construct(
955 &divider_ranges[DIVIDER_RANGE_01],
956 DIVIDER_RANGE_01_START,
957 DIVIDER_RANGE_01_STEP_SIZE,
958 DIVIDER_RANGE_01_BASE_DIVIDER_ID,
959 DIVIDER_RANGE_02_BASE_DIVIDER_ID);
960 dal_divider_range_construct(
961 &divider_ranges[DIVIDER_RANGE_02],
962 DIVIDER_RANGE_02_START,
963 DIVIDER_RANGE_02_STEP_SIZE,
964 DIVIDER_RANGE_02_BASE_DIVIDER_ID,
965 DIVIDER_RANGE_03_BASE_DIVIDER_ID);
966 dal_divider_range_construct(
967 &divider_ranges[DIVIDER_RANGE_03],
968 DIVIDER_RANGE_03_START,
969 DIVIDER_RANGE_03_STEP_SIZE,
970 DIVIDER_RANGE_03_BASE_DIVIDER_ID,
971 DIVIDER_RANGE_MAX_DIVIDER_ID);
972
973 {
974 uint32_t ss_info_num =
975 bp->funcs->get_ss_entry_number(bp,
976 AS_SIGNAL_TYPE_GPU_PLL);
977
978 if (ss_info_num) {
979 struct spread_spectrum_info info;
980 enum bp_result result;
981
982 memset(&info, 0, sizeof(info));
983
984 result = bp->funcs->get_spread_spectrum_info(bp,
985 AS_SIGNAL_TYPE_GPU_PLL,
986 0,
987 &info);
988
989			/* VBIOS keeps an entry for GPU PLL SS even if SS is
990			 * not enabled; in that case a non-zero
991			 * SSInfo.spreadSpectrumPercentage is the sign that
992			 * SS is actually enabled.
993			 */
994 if (result == BP_RESULT_OK &&
995 info.spread_spectrum_percentage != 0) {
996 dc110->ss_on_gpu_pll = true;
997 dc110->gpu_pll_ss_divider =
998 info.spread_percentage_divider;
999
1000 if (info.type.CENTER_MODE == 0) {
1001 /* Currently for DP Reference clock we
1002 * need only SS percentage for
1003 * downspread */
1004 dc110->gpu_pll_ss_percentage =
1005 info.spread_spectrum_percentage;
1006 }
1007 }
1008
1009 }
1010 }
1011
1012 return true;
1013}
1014
1015/*****************************************************************************
1016 * public functions
1017 *****************************************************************************/
1018
1019struct display_clock *dal_display_clock_dce110_create(
1020 struct dc_context *ctx)
1021{
1022 struct display_clock_dce110 *dc110;
1023
1024 dc110 = dm_alloc(sizeof(struct display_clock_dce110));
1025
1026 if (dc110 == NULL)
1027 return NULL;
1028
1029 if (dal_display_clock_dce110_construct(dc110, ctx))
1030 return &dc110->disp_clk_base;
1031
1032 dm_free(dc110);
1033
1034 return NULL;
1035}
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.h b/drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.h
new file mode 100644
index 000000000000..0cdc7b52a09f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/dce110/display_clock_dce110.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DAL_DISPLAY_CLOCK_DCE110_H__
26#define __DAL_DISPLAY_CLOCK_DCE110_H__
27
28#include "gpu/display_clock.h"
29
30struct display_clock_dce110 {
31 struct display_clock disp_clk_base;
32 /* Max display block clocks state*/
33 enum clocks_state max_clks_state;
34 bool use_max_disp_clk;
35 uint32_t dentist_vco_freq_khz;
36 /* Cache the status of DFS-bypass feature*/
37 bool dfs_bypass_enabled;
38 /* GPU PLL SS percentage (if down-spread enabled) */
39 uint32_t gpu_pll_ss_percentage;
40 /* GPU PLL SS percentage Divider (100 or 1000) */
41 uint32_t gpu_pll_ss_divider;
42 /* Flag for Enabled SS on GPU PLL */
43 bool ss_on_gpu_pll;
44 /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
45 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
46 uint32_t dfs_bypass_disp_clk;
47 struct display_clock_state clock_state;
48};
49
50#define DCLCK110_FROM_BASE(dc_base) \
51 container_of(dc_base, struct display_clock_dce110, disp_clk_base)
52
53#endif /* __DAL_DISPLAY_CLOCK_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.c b/drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.c
new file mode 100644
index 000000000000..9b7c9755e316
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.c
@@ -0,0 +1,964 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_11_2_d.h"
29#include "dce/dce_11_2_sh_mask.h"
30
31#include "include/bios_parser_interface.h"
32#include "include/fixed32_32.h"
33#include "include/logger_interface.h"
34
35#include "../divider_range.h"
36
37#include "display_clock_dce112.h"
38
39#define FROM_DISPLAY_CLOCK(base) \
40 container_of(base, struct display_clock_dce112, disp_clk_base)
41
42static struct state_dependent_clocks max_clks_by_state[] = {
43/*ClocksStateInvalid - should not be used*/
44{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
45/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
46{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
47/*ClocksStateLow*/
48{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
49/*ClocksStateNominal*/
50{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
51/*ClocksStatePerformance*/
52{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
53
54/* Ranges for divider identifiers (Divider ID or DID)
55 mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
56enum divider_id_register_setting {
57 DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
58 DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
59 DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
60 DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
61};
62
63/* Step size between each divider within a range.
64 Incrementing the DENTIST_DISPCLK_WDIVIDER by one
65 will increment the divider by this much.*/
66enum divider_range_step_size {
67 DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
68 DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
69 DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
70};
71
72static struct divider_range divider_ranges[DIVIDER_RANGE_MAX];
73
74#define DCE112_DFS_BYPASS_THRESHOLD_KHZ 400000
75/*****************************************************************************
76 * static functions
77 *****************************************************************************/
78
79/*
80 * store_max_clocks_state
81 *
82 * @brief
83 * Cache the clock state
84 *
85 * @param
86 * struct display_clock *base - [out] cache the state in this structure
87 * enum clocks_state max_clocks_state - [in] state to be stored
88 */
89void dispclk_dce112_store_max_clocks_state(
90 struct display_clock *base,
91 enum clocks_state max_clocks_state)
92{
93 struct display_clock_dce112 *dc = DCLCK112_FROM_BASE(base);
94
95 switch (max_clocks_state) {
96 case CLOCKS_STATE_LOW:
97 case CLOCKS_STATE_NOMINAL:
98 case CLOCKS_STATE_PERFORMANCE:
99 case CLOCKS_STATE_ULTRA_LOW:
100 dc->max_clks_state = max_clocks_state;
101 break;
102
103 case CLOCKS_STATE_INVALID:
104 default:
105 /*Invalid Clocks State!*/
106 ASSERT_CRITICAL(false);
107 break;
108 }
109}
110
111enum clocks_state dispclk_dce112_get_min_clocks_state(
112 struct display_clock *base)
113{
114 return base->cur_min_clks_state;
115}
116
117bool dispclk_dce112_set_min_clocks_state(
118 struct display_clock *base,
119 enum clocks_state clocks_state)
120{
121 struct display_clock_dce112 *dc = DCLCK112_FROM_BASE(base);
122 struct dm_pp_power_level_change_request level_change_req = {
123 DM_PP_POWER_LEVEL_INVALID};
124
125 if (clocks_state > dc->max_clks_state) {
126 /*Requested state exceeds max supported state.*/
127 dm_logger_write(base->ctx->logger, LOG_WARNING,
128 "Requested state exceeds max supported state");
129 return false;
130 } else if (clocks_state == base->cur_min_clks_state) {
131 /*if we're trying to set the same state, we can just return
132 * since nothing needs to be done*/
133 return true;
134 }
135
136 switch (clocks_state) {
137 case CLOCKS_STATE_ULTRA_LOW:
138 level_change_req.power_level = DM_PP_POWER_LEVEL_ULTRA_LOW;
139 break;
140 case CLOCKS_STATE_LOW:
141 level_change_req.power_level = DM_PP_POWER_LEVEL_LOW;
142 break;
143 case CLOCKS_STATE_NOMINAL:
144 level_change_req.power_level = DM_PP_POWER_LEVEL_NOMINAL;
145 break;
146 case CLOCKS_STATE_PERFORMANCE:
147 level_change_req.power_level = DM_PP_POWER_LEVEL_PERFORMANCE;
148 break;
149 case CLOCKS_STATE_INVALID:
150 default:
151 dm_logger_write(base->ctx->logger, LOG_WARNING,
152				"Requested clock state is invalid");
153 return false;
154 }
155
156 /* get max clock state from PPLIB */
157 if (dm_pp_apply_power_level_change_request(
158 base->ctx, &level_change_req))
159 base->cur_min_clks_state = clocks_state;
160
161 return true;
162}
163
164static uint32_t get_dp_ref_clk_frequency(struct display_clock *dc)
165{
166 uint32_t dispclk_cntl_value;
167 uint32_t dp_ref_clk_cntl_value;
168 uint32_t dp_ref_clk_cntl_src_sel_value;
169 uint32_t dp_ref_clk_khz = 600000;
170 uint32_t target_div = INVALID_DIVIDER;
171 struct display_clock_dce112 *disp_clk = FROM_DISPLAY_CLOCK(dc);
172
173 /* ASSERT DP Reference Clock source is from DFS*/
174 dp_ref_clk_cntl_value = dm_read_reg(dc->ctx,
175 mmDPREFCLK_CNTL);
176
177 dp_ref_clk_cntl_src_sel_value =
178 get_reg_field_value(
179 dp_ref_clk_cntl_value,
180 DPREFCLK_CNTL, DPREFCLK_SRC_SEL);
181
182 ASSERT(dp_ref_clk_cntl_src_sel_value == 0);
183
184 /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
185 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
186 dispclk_cntl_value = dm_read_reg(dc->ctx,
187 mmDENTIST_DISPCLK_CNTL);
188
189	/* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider */
190 target_div = dal_divider_range_get_divider(
191 divider_ranges,
192 DIVIDER_RANGE_MAX,
193 get_reg_field_value(dispclk_cntl_value,
194 DENTIST_DISPCLK_CNTL,
195 DENTIST_DPREFCLK_WDIVIDER));
196
197 if (target_div != INVALID_DIVIDER) {
198 /* Calculate the current DFS clock, in kHz.*/
199 dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
200 * disp_clk->dentist_vco_freq_khz) / target_div;
201 }
202
203	/* SW will adjust the DP REF Clock average value for all purposes
204	 * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread.
205	 * This covers all cases:
206	 * - SS enabled on DP Ref clock and HW de-spreading enabled with SW
207	 *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
208	 * - SS enabled on DP Ref clock and HW de-spreading enabled with HW
209	 *   calculations (not planned to be used, but the average clock should
210	 *   still be valid)
211	 * - SS enabled on DP Ref clock and HW de-spreading disabled
212	 *   (should not be the case with CIK); SW should then program all rates
213	 *   according to the average value (as with previous ASICs)
214	 */
215 if ((disp_clk->ss_on_gpu_pll) && (disp_clk->gpu_pll_ss_divider != 0)) {
216 struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
217 dal_fixed32_32_from_fraction(
218 disp_clk->gpu_pll_ss_percentage,
219 disp_clk->gpu_pll_ss_divider), 200);
220 struct fixed32_32 adj_dp_ref_clk_khz;
221
222 ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
223 ss_percentage);
224 adj_dp_ref_clk_khz =
225 dal_fixed32_32_mul_int(
226 ss_percentage,
227 dp_ref_clk_khz);
228 dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
229 }
230
231 return dp_ref_clk_khz;
232}
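
/*
 * Editor's illustrative sketch (not part of this commit): the down-spread
 * adjustment above reduces the average DP reference clock by half of the
 * spread percentage, i.e. khz * (1 - pct / (divider * 200)). 64-bit integer
 * math stands in for fixed32_32 here, so rounding may differ by up to 1 kHz;
 * the helper name is made up for illustration.
 */
static uint32_t adjust_dp_ref_clk_for_ss_khz(uint32_t dp_ref_clk_khz,
		uint32_t ss_pct, uint32_t ss_divider)
{
	uint64_t reduction_khz;

	if (ss_divider == 0)
		return dp_ref_clk_khz;

	reduction_khz = (uint64_t)dp_ref_clk_khz * ss_pct /
			((uint64_t)ss_divider * 200);

	return dp_ref_clk_khz - (uint32_t)reduction_khz;
}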
233
234void dispclk_dce112_destroy(struct display_clock **base)
235{
236 struct display_clock_dce112 *dc112;
237
238 dc112 = DCLCK112_FROM_BASE(*base);
239
240 dm_free(dc112);
241
242 *base = NULL;
243}
244
245uint32_t dispclk_dce112_get_validation_clock(struct display_clock *dc)
246{
247 uint32_t clk = 0;
248 struct display_clock_dce112 *disp_clk = DCLCK112_FROM_BASE(dc);
249
250 switch (disp_clk->max_clks_state) {
251 case CLOCKS_STATE_ULTRA_LOW:
252 clk = (disp_clk->max_clks_by_state + CLOCKS_STATE_ULTRA_LOW)->
253						display_clk_khz;
254		break;

255 case CLOCKS_STATE_LOW:
256 clk = (disp_clk->max_clks_by_state + CLOCKS_STATE_LOW)->
257 display_clk_khz;
258 break;
259
260 case CLOCKS_STATE_NOMINAL:
261 clk = (disp_clk->max_clks_by_state + CLOCKS_STATE_NOMINAL)->
262 display_clk_khz;
263 break;
264
265 case CLOCKS_STATE_PERFORMANCE:
266 clk = (disp_clk->max_clks_by_state + CLOCKS_STATE_PERFORMANCE)->
267 display_clk_khz;
268 break;
269
270 case CLOCKS_STATE_INVALID:
271 default:
272 /*Invalid Clocks State*/
273 dm_logger_write(dc->ctx->logger, LOG_WARNING,
274 "Invalid clock state");
275 /* just return the display engine clock for
276 * lowest supported state*/
277 clk = (disp_clk->max_clks_by_state + CLOCKS_STATE_LOW)->
278 display_clk_khz;
279 break;
280 }
281 return clk;
282}
283
284static struct fixed32_32 get_deep_color_factor(struct min_clock_params *params)
285{
286 /* DeepColorFactor = IF (HDMI = True, bpp / 24, 1)*/
287 struct fixed32_32 deep_color_factor = dal_fixed32_32_from_int(1);
288
289 if (params->signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
290 return deep_color_factor;
291
292 switch (params->deep_color_depth) {
293 case COLOR_DEPTH_101010:
294 /*deep color ratio for 30bpp is 30/24 = 1.25*/
295 deep_color_factor = dal_fixed32_32_from_fraction(30, 24);
296 break;
297
298 case COLOR_DEPTH_121212:
299 /* deep color ratio for 36bpp is 36/24 = 1.5*/
300 deep_color_factor = dal_fixed32_32_from_fraction(36, 24);
301 break;
302
303 case COLOR_DEPTH_161616:
304 /* deep color ratio for 48bpp is 48/24 = 2.0 */
305 deep_color_factor = dal_fixed32_32_from_fraction(48, 24);
306 break;
307 default:
308 break;
309 }
310 return deep_color_factor;
311}
312
313static struct fixed32_32 get_scaler_efficiency(
314 struct dc_context *ctx,
315 struct min_clock_params *params)
316{
317 struct fixed32_32 scaler_efficiency = dal_fixed32_32_from_int(3);
318
319 if (params->scaler_efficiency == V_SCALER_EFFICIENCY_LB18BPP) {
320 scaler_efficiency =
321 dal_fixed32_32_add(
322 dal_fixed32_32_from_fraction(35555, 10000),
323 dal_fixed32_32_from_fraction(
324 55556,
325 100000 * 10000));
326 } else if (params->scaler_efficiency == V_SCALER_EFFICIENCY_LB24BPP) {
327 scaler_efficiency =
328 dal_fixed32_32_add(
329 dal_fixed32_32_from_fraction(34285, 10000),
330 dal_fixed32_32_from_fraction(
331 71429,
332 100000 * 10000));
333 } else if (params->scaler_efficiency == V_SCALER_EFFICIENCY_LB30BPP)
334 scaler_efficiency = dal_fixed32_32_from_fraction(32, 10);
335
336 return scaler_efficiency;
337}
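
/*
 * Editor's note (hedged, not part of this commit): the decimal-fraction sums
 * above work out numerically to roughly 32/9 (~3.5555556) for LB18BPP,
 * 24/7 (~3.4285714) for LB24BPP and 16/5 (3.2) for LB30BPP. An equivalent
 * formulation, to within fixed-point rounding, could use this commit's own
 * fixed32_32 helpers directly; the function name below is made up.
 */
static struct fixed32_32 scaler_efficiency_as_ratio(
	struct min_clock_params *params)
{
	switch (params->scaler_efficiency) {
	case V_SCALER_EFFICIENCY_LB18BPP:
		return dal_fixed32_32_from_fraction(32, 9);
	case V_SCALER_EFFICIENCY_LB24BPP:
		return dal_fixed32_32_from_fraction(24, 7);
	case V_SCALER_EFFICIENCY_LB30BPP:
		return dal_fixed32_32_from_fraction(32, 10);
	default:
		return dal_fixed32_32_from_int(3);
	}
}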
338
339static struct fixed32_32 get_lb_lines_in_per_line_out(
340 struct min_clock_params *params,
341 struct fixed32_32 v_scale_ratio)
342{
343 struct fixed32_32 two = dal_fixed32_32_from_int(2);
344 struct fixed32_32 four = dal_fixed32_32_from_int(4);
345 struct fixed32_32 f4_to_3 = dal_fixed32_32_from_fraction(4, 3);
346 struct fixed32_32 f6_to_4 = dal_fixed32_32_from_fraction(6, 4);
347
348 if (params->line_buffer_prefetch_enabled)
349 return dal_fixed32_32_max(v_scale_ratio, dal_fixed32_32_one);
350 else if (dal_fixed32_32_le(v_scale_ratio, dal_fixed32_32_one))
351 return dal_fixed32_32_one;
352 else if (dal_fixed32_32_le(v_scale_ratio, f4_to_3))
353 return f4_to_3;
354 else if (dal_fixed32_32_le(v_scale_ratio, f6_to_4))
355 return f6_to_4;
356 else if (dal_fixed32_32_le(v_scale_ratio, two))
357 return two;
358 else if (dal_fixed32_32_le(v_scale_ratio, dal_fixed32_32_from_int(3)))
359 return four;
360 else
361 return dal_fixed32_32_zero;
362}
363
364static uint32_t get_actual_required_display_clk(
365 struct display_clock_dce112 *disp_clk,
366 uint32_t target_clk_khz)
367{
368 uint32_t disp_clk_khz = target_clk_khz;
369 uint32_t div = INVALID_DIVIDER;
370 uint32_t did = INVALID_DID;
371 uint32_t scaled_vco =
372 disp_clk->dentist_vco_freq_khz * DIVIDER_RANGE_SCALE_FACTOR;
373
374 ASSERT_CRITICAL(!!disp_clk_khz);
375
376 if (disp_clk_khz)
377 div = scaled_vco / disp_clk_khz;
378
379 did = dal_divider_range_get_did(divider_ranges, DIVIDER_RANGE_MAX, div);
380
381 if (did != INVALID_DID) {
382 div = dal_divider_range_get_divider(
383 divider_ranges, DIVIDER_RANGE_MAX, did);
384
385 if ((div != INVALID_DIVIDER) &&
386 (did > DIVIDER_RANGE_01_BASE_DIVIDER_ID))
387 if (disp_clk_khz > (scaled_vco / div))
388 div = dal_divider_range_get_divider(
389 divider_ranges, DIVIDER_RANGE_MAX,
390 did - 1);
391
392 if (div != INVALID_DIVIDER)
393 disp_clk_khz = scaled_vco / div;
394
395 }
396	/* Add 10 kHz to this value because the accuracy in VBIOS is in 10 kHz
397	   units, so we always round the last digit up in order to reach the
398	   next divider level. */
399 return disp_clk_khz + 10;
400}
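
/*
 * Editor's illustrative sketch (not part of this commit): the DID-to-divider
 * mapping the function above relies on, using the range constants declared
 * in this file -- DIDs 0x08..0x3F step by 0.25 from 2.00, 0x40..0x5F by 0.50
 * from 16.00, 0x60..0x7F by 1.00 from 32.00, with results scaled by
 * DIVIDER_RANGE_SCALE_FACTOR (100). The driver itself goes through
 * dal_divider_range_get_divider(); the helper name here is made up.
 */
static uint32_t did_to_scaled_divider(uint32_t did)
{
	if (did >= 0x08 && did < 0x40)	/* 2.00 .. 15.75 */
		return 200 + (did - 0x08) * 25;
	if (did >= 0x40 && did < 0x60)	/* 16.00 .. 31.50 */
		return 1600 + (did - 0x40) * 50;
	if (did >= 0x60 && did < 0x80)	/* 32.00 .. 63.00 */
		return 3200 + (did - 0x60) * 100;
	return 0;	/* out-of-range DID */
}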
401
402static uint32_t calc_single_display_min_clks(
403 struct display_clock *base,
404 struct min_clock_params *params,
405 bool set_clk)
406{
407 struct fixed32_32 h_scale_ratio = dal_fixed32_32_one;
408 struct fixed32_32 v_scale_ratio = dal_fixed32_32_one;
409 uint32_t pix_clk_khz = 0;
410 uint32_t lb_source_width = 0;
411 struct fixed32_32 deep_color_factor;
412 struct fixed32_32 scaler_efficiency;
413 struct fixed32_32 v_filter_init;
414 uint32_t v_filter_init_trunc;
415 uint32_t num_lines_at_frame_start = 3;
416 struct fixed32_32 v_filter_init_ceil;
417 struct fixed32_32 lines_per_lines_out_at_frame_start;
418 struct fixed32_32 lb_lines_in_per_line_out; /* in middle of the frame*/
419 uint32_t src_wdth_rnd_to_chunks;
420 struct fixed32_32 scaling_coeff;
421 struct fixed32_32 h_blank_granularity_factor =
422 dal_fixed32_32_one;
423 struct fixed32_32 fx_disp_clk_mhz;
424 struct fixed32_32 line_time;
425 struct fixed32_32 disp_pipe_pix_throughput;
426 struct fixed32_32 fx_alt_disp_clk_mhz;
427 uint32_t disp_clk_khz;
428 uint32_t alt_disp_clk_khz;
429 struct display_clock_dce112 *disp_clk_110 = DCLCK112_FROM_BASE(base);
430 uint32_t max_clk_khz = dispclk_dce112_get_validation_clock(base);
431 bool panning_allowed = false; /* TODO: receive this value from AS */
432
433 if (params == NULL) {
434 dm_logger_write(base->ctx->logger, LOG_WARNING,
435 "Invalid input parameter in %s",
436 __func__);
437 return 0;
438 }
439
440 deep_color_factor = get_deep_color_factor(params);
441 scaler_efficiency = get_scaler_efficiency(base->ctx, params);
442 pix_clk_khz = params->requested_pixel_clock;
443 lb_source_width = params->source_view.width;
444
445 if (0 != params->dest_view.height && 0 != params->dest_view.width) {
446
447 h_scale_ratio = dal_fixed32_32_from_fraction(
448 params->source_view.width,
449 params->dest_view.width);
450 v_scale_ratio = dal_fixed32_32_from_fraction(
451 params->source_view.height,
452 params->dest_view.height);
453 } else {
454 dm_logger_write(base->ctx->logger, LOG_WARNING,
455 "Destination height or width is 0!\n");
456 }
457
458 v_filter_init =
459 dal_fixed32_32_add(
460 v_scale_ratio,
461 dal_fixed32_32_add_int(
462 dal_fixed32_32_div_int(
463 dal_fixed32_32_mul_int(
464 v_scale_ratio,
465 params->timing_info.INTERLACED),
466 2),
467 params->scaling_info.v_taps + 1));
468 v_filter_init = dal_fixed32_32_div_int(v_filter_init, 2);
469
470 v_filter_init_trunc = dal_fixed32_32_floor(v_filter_init);
471
472 v_filter_init_ceil = dal_fixed32_32_from_fraction(
473 v_filter_init_trunc, 2);
474 v_filter_init_ceil = dal_fixed32_32_from_int(
475 dal_fixed32_32_ceil(v_filter_init_ceil));
476 v_filter_init_ceil = dal_fixed32_32_mul_int(v_filter_init_ceil, 2);
477
478 lines_per_lines_out_at_frame_start =
479 dal_fixed32_32_div_int(v_filter_init_ceil,
480 num_lines_at_frame_start);
481 lb_lines_in_per_line_out =
482 get_lb_lines_in_per_line_out(params, v_scale_ratio);
483
484 if (panning_allowed)
485 src_wdth_rnd_to_chunks =
486 ((lb_source_width - 1) / 128) * 128 + 256;
487 else
488 src_wdth_rnd_to_chunks =
489 ((lb_source_width + 127) / 128) * 128;
490
491 scaling_coeff =
492 dal_fixed32_32_div(
493 dal_fixed32_32_from_int(params->scaling_info.v_taps),
494 scaler_efficiency);
495
496 if (dal_fixed32_32_le(h_scale_ratio, dal_fixed32_32_one))
497 scaling_coeff = dal_fixed32_32_max(
498 dal_fixed32_32_from_int(
499 dal_fixed32_32_ceil(
500 dal_fixed32_32_from_fraction(
501 params->scaling_info.h_taps,
502 4))),
503 dal_fixed32_32_max(
504 dal_fixed32_32_mul(
505 scaling_coeff,
506 h_scale_ratio),
507 dal_fixed32_32_one));
508
509 if (!params->line_buffer_prefetch_enabled &&
510 dal_fixed32_32_floor(lb_lines_in_per_line_out) != 2 &&
511 dal_fixed32_32_floor(lb_lines_in_per_line_out) != 4) {
512 uint32_t line_total_pixel =
513 params->timing_info.h_total + lb_source_width - 256;
514 h_blank_granularity_factor = dal_fixed32_32_div(
515 dal_fixed32_32_from_int(params->timing_info.h_total),
516 dal_fixed32_32_div(
517 dal_fixed32_32_from_fraction(
518 line_total_pixel, 2),
519 h_scale_ratio));
520 }
521
522 /* Calculate display clock with ramping. Ramping factor is 1.1*/
523 fx_disp_clk_mhz =
524 dal_fixed32_32_div_int(
525 dal_fixed32_32_mul_int(scaling_coeff, 11),
526 10);
527 line_time = dal_fixed32_32_from_fraction(
528 params->timing_info.h_total * 1000, pix_clk_khz);
529
530 disp_pipe_pix_throughput = dal_fixed32_32_mul(
531 lb_lines_in_per_line_out, h_blank_granularity_factor);
532 disp_pipe_pix_throughput = dal_fixed32_32_max(
533 disp_pipe_pix_throughput,
534 lines_per_lines_out_at_frame_start);
535 disp_pipe_pix_throughput = dal_fixed32_32_div(dal_fixed32_32_mul_int(
536 disp_pipe_pix_throughput, src_wdth_rnd_to_chunks),
537 line_time);
538
539 if (0 != params->timing_info.h_total) {
540 fx_disp_clk_mhz =
541 dal_fixed32_32_max(
542 dal_fixed32_32_div_int(
543 dal_fixed32_32_mul_int(
544 scaling_coeff, pix_clk_khz),
545 1000),
546 disp_pipe_pix_throughput);
547 fx_disp_clk_mhz =
548 dal_fixed32_32_mul(
549 fx_disp_clk_mhz,
550 dal_fixed32_32_from_fraction(11, 10));
551 }
552
553 fx_disp_clk_mhz = dal_fixed32_32_max(fx_disp_clk_mhz,
554 dal_fixed32_32_mul(deep_color_factor,
555 dal_fixed32_32_from_fraction(11, 10)));
556
557 /* Calculate display clock without ramping */
558 fx_alt_disp_clk_mhz = scaling_coeff;
559
560 if (0 != params->timing_info.h_total) {
561 fx_alt_disp_clk_mhz = dal_fixed32_32_max(
562 dal_fixed32_32_div_int(dal_fixed32_32_mul_int(
563 scaling_coeff, pix_clk_khz),
564 1000),
565 dal_fixed32_32_div_int(dal_fixed32_32_mul_int(
566 disp_pipe_pix_throughput, 105),
567 100));
568 }
569
570 if (set_clk && disp_clk_110->ss_on_gpu_pll &&
571 disp_clk_110->gpu_pll_ss_divider)
572 fx_alt_disp_clk_mhz = dal_fixed32_32_mul(fx_alt_disp_clk_mhz,
573 dal_fixed32_32_add_int(
574 dal_fixed32_32_div_int(
575 dal_fixed32_32_div_int(
576 dal_fixed32_32_from_fraction(
577 disp_clk_110->gpu_pll_ss_percentage,
578 disp_clk_110->gpu_pll_ss_divider), 100),
579 2),
580 1));
581
582 /* convert to integer */
583 disp_clk_khz = dal_fixed32_32_round(
584 dal_fixed32_32_mul_int(fx_disp_clk_mhz, 1000));
585 alt_disp_clk_khz = dal_fixed32_32_round(
586 dal_fixed32_32_mul_int(fx_alt_disp_clk_mhz, 1000));
587
588 if ((disp_clk_khz > max_clk_khz) && (alt_disp_clk_khz <= max_clk_khz))
589 disp_clk_khz = alt_disp_clk_khz;
590
591 if (set_clk) { /* only compensate clock if we are going to set it.*/
592 disp_clk_khz = get_actual_required_display_clk(
593 disp_clk_110, disp_clk_khz);
594 }
595
596 disp_clk_khz = disp_clk_khz > max_clk_khz ? max_clk_khz : disp_clk_khz;
597
598 return disp_clk_khz;
599}
600
601uint32_t dispclk_dce112_calculate_min_clock(
602 struct display_clock *base,
603 uint32_t path_num,
604 struct min_clock_params *params)
605{
606 uint32_t i;
607 uint32_t validation_clk_khz =
608 dispclk_dce112_get_validation_clock(base);
609 uint32_t min_clk_khz = validation_clk_khz;
610 uint32_t max_clk_khz = 0;
611 struct display_clock_dce112 *dc = DCLCK112_FROM_BASE(base);
612
613 if (dc->use_max_disp_clk)
614 return min_clk_khz;
615
616 if (params != NULL) {
617 uint32_t disp_clk_khz = 0;
618
619 for (i = 0; i < path_num; ++i) {
620
621 disp_clk_khz = calc_single_display_min_clks(
622 base, params, true);
623
624 /* update the max required clock found*/
625 if (disp_clk_khz > max_clk_khz)
626 max_clk_khz = disp_clk_khz;
627
628 params++;
629 }
630 }
631
632 min_clk_khz = max_clk_khz;
633
634 if (min_clk_khz > validation_clk_khz)
635 min_clk_khz = validation_clk_khz;
636 else if (min_clk_khz < base->min_display_clk_threshold_khz)
637 min_clk_khz = base->min_display_clk_threshold_khz;
638
639 if (dc->use_max_disp_clk)
640 min_clk_khz = dispclk_dce112_get_validation_clock(base);
641
642 return min_clk_khz;
643}
644
645static bool display_clock_integrated_info_construct(
646 struct display_clock_dce112 *disp_clk)
647{
648 struct integrated_info info;
649 uint32_t i;
650 struct display_clock *base = &disp_clk->disp_clk_base;
651
652 memset(&info, 0, sizeof(struct integrated_info));
653
654 disp_clk->dentist_vco_freq_khz = info.dentist_vco_freq;
655 if (disp_clk->dentist_vco_freq_khz == 0)
656 disp_clk->dentist_vco_freq_khz = 3600000;
657
658 disp_clk->crystal_freq_khz = 100000;
659
660 base->min_display_clk_threshold_khz =
661 disp_clk->dentist_vco_freq_khz / 64;
662
663 /*update the maximum display clock for each power state*/
664 for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
665 enum clocks_state clk_state = CLOCKS_STATE_INVALID;
666
667 switch (i) {
668 case 0:
669 clk_state = CLOCKS_STATE_ULTRA_LOW;
670 break;
671
672 case 1:
673 clk_state = CLOCKS_STATE_LOW;
674 break;
675
676 case 2:
677 clk_state = CLOCKS_STATE_NOMINAL;
678 break;
679
680 case 3:
681 clk_state = CLOCKS_STATE_PERFORMANCE;
682 break;
683
684 default:
685 clk_state = CLOCKS_STATE_INVALID;
686 break;
687 }
688
689		/* Do not allow a bad VBIOS/SBIOS to override with invalid values;
690		 * check for >= 100 MHz. */
691 if (info.disp_clk_voltage[i].max_supported_clk >= 100000) {
692 (disp_clk->max_clks_by_state + clk_state)->
693 display_clk_khz =
694 info.disp_clk_voltage[i].max_supported_clk;
695 }
696 }
697
698 return true;
699}
700
701static uint32_t get_clock(struct display_clock *dc)
702{
703 uint32_t disp_clock = dispclk_dce112_get_validation_clock(dc);
704 uint32_t target_div = INVALID_DIVIDER;
705 uint32_t addr = mmDENTIST_DISPCLK_CNTL;
706 uint32_t value = 0;
707 uint32_t field = 0;
708 struct display_clock_dce112 *disp_clk = DCLCK112_FROM_BASE(dc);
709
710 /* Read the mmDENTIST_DISPCLK_CNTL to get the currently programmed
711 DID DENTIST_DISPCLK_WDIVIDER.*/
712 value = dm_read_reg(dc->ctx, addr);
713 field = get_reg_field_value(
714 value, DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER);
715
716 /* Convert DENTIST_DISPCLK_WDIVIDER to actual divider*/
717 target_div = dal_divider_range_get_divider(
718 divider_ranges,
719 DIVIDER_RANGE_MAX,
720 field);
721
722 if (target_div != INVALID_DIVIDER)
723 /* Calculate the current DFS clock in KHz.
724 Should be okay up to 42.9 THz before overflowing.*/
725 disp_clock = (DIVIDER_RANGE_SCALE_FACTOR
726 * disp_clk->dentist_vco_freq_khz) / target_div;
727 return disp_clock;
728}
729
730enum clocks_state dispclk_dce112_get_required_clocks_state(
731 struct display_clock *dc,
732 struct state_dependent_clocks *req_clocks)
733{
734 int32_t i;
735 struct display_clock_dce112 *disp_clk = DCLCK112_FROM_BASE(dc);
736 enum clocks_state low_req_clk = disp_clk->max_clks_state;
737
738 if (!req_clocks) {
739 /* NULL pointer*/
740 dm_logger_write(dc->ctx->logger, LOG_WARNING,
741 "%s: Invalid parameter",
742 __func__);
743 return CLOCKS_STATE_INVALID;
744 }
745
746 /* Iterate from highest supported to lowest valid state, and update
747 * lowest RequiredState with the lowest state that satisfies
748 * all required clocks
749 */
750 for (i = disp_clk->max_clks_state; i >= CLOCKS_STATE_ULTRA_LOW; --i) {
751 if ((req_clocks->display_clk_khz <=
752 (disp_clk->max_clks_by_state + i)->
753 display_clk_khz) &&
754 (req_clocks->pixel_clk_khz <=
755 (disp_clk->max_clks_by_state + i)->
756 pixel_clk_khz))
757 low_req_clk = i;
758 }
759 return low_req_clk;
760}
761
762void dispclk_dce112_set_clock(
763 struct display_clock *base,
764 uint32_t requested_clk_khz)
765{
766 struct bp_set_dce_clock_parameters dce_clk_params;
767 struct display_clock_dce112 *dc = DCLCK112_FROM_BASE(base);
768 struct dc_bios *bp = base->ctx->dc_bios;
769
770 /* Prepare to program display clock*/
771 memset(&dce_clk_params, 0, sizeof(dce_clk_params));
772
773 /* Make sure requested clock isn't lower than minimum threshold*/
774 if (requested_clk_khz > 0)
775 requested_clk_khz = dm_max(requested_clk_khz,
776 base->min_display_clk_threshold_khz);
777
778 dce_clk_params.target_clock_frequency = requested_clk_khz;
779 dce_clk_params.pll_id = dc->disp_clk_base.id;
780 dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
781
782 bp->funcs->set_dce_clock(bp, &dce_clk_params);
783
784	/* Coming out of power down (HW reset), mark the clock state as
785	 * ClocksStateNominal so that on resume we call the PPLIB voltage regulator. */
786 if (requested_clk_khz == 0)
787 base->cur_min_clks_state = CLOCKS_STATE_NOMINAL;
788
789 /*Program DP ref Clock*/
790 /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
791 dce_clk_params.target_clock_frequency = 0;
792 dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
793 dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
794 (dce_clk_params.pll_id ==
795 CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
796
797 bp->funcs->set_dce_clock(bp, &dce_clk_params);
798}
799
800void dispclk_dce112_set_clock_state(
801 struct display_clock *dc,
802 struct display_clock_state clk_state)
803{
804 struct display_clock_dce112 *disp_clk = DCLCK112_FROM_BASE(dc);
805
806 disp_clk->clock_state = clk_state;
807}
808
809struct display_clock_state dispclk_dce112_get_clock_state(
810 struct display_clock *dc)
811{
812 struct display_clock_dce112 *disp_clk = DCLCK112_FROM_BASE(dc);
813
814 return disp_clk->clock_state;
815}
816
817uint32_t dispclk_dce112_get_dfs_bypass_threshold(
818 struct display_clock *dc)
819{
820	return DCE112_DFS_BYPASS_THRESHOLD_KHZ;
821}
822
823static const struct display_clock_funcs funcs = {
824 .destroy = dispclk_dce112_destroy,
825 .calculate_min_clock = dispclk_dce112_calculate_min_clock,
826 .get_clock = get_clock,
827 .get_clock_state = dispclk_dce112_get_clock_state,
828 .get_dfs_bypass_threshold = dispclk_dce112_get_dfs_bypass_threshold,
829 .get_dp_ref_clk_frequency = get_dp_ref_clk_frequency,
830 .get_min_clocks_state = dispclk_dce112_get_min_clocks_state,
831 .get_required_clocks_state = dispclk_dce112_get_required_clocks_state,
832 .get_validation_clock = dispclk_dce112_get_validation_clock,
833 .set_clock = dispclk_dce112_set_clock,
834 .set_clock_state = dispclk_dce112_set_clock_state,
835 .set_dp_ref_clock_source = NULL,
836 .set_min_clocks_state = dispclk_dce112_set_min_clocks_state,
837 .store_max_clocks_state = dispclk_dce112_store_max_clocks_state,
838 .validate = NULL,
839};
840
841bool dal_display_clock_dce112_construct(
842 struct display_clock_dce112 *dc112,
843 struct dc_context *ctx)
844{
845 struct display_clock *dc_base = &dc112->disp_clk_base;
846
847 /*if (NULL == as)
848 return false;*/
849
850 if (!dal_display_clock_construct_base(dc_base, ctx))
851 return false;
852
853 dc_base->funcs = &funcs;
854
855 dc112->dfs_bypass_disp_clk = 0;
856
857 if (!display_clock_integrated_info_construct(dc112))
858 dm_logger_write(dc_base->ctx->logger, LOG_WARNING,
859 "Cannot obtain VBIOS integrated info\n");
860
861 dc112->gpu_pll_ss_percentage = 0;
862 dc112->gpu_pll_ss_divider = 1000;
863 dc112->ss_on_gpu_pll = false;
864
865 dc_base->id = CLOCK_SOURCE_ID_DFS;
866/* Initially set the max clocks state to nominal. This should be updated via
867 * a PPLIB call to DAL IRI, eventually calling
868 * DisplayEngineClock_dce112::StoreMaxClocksState(). That call comes in on
869 * PPLIB init. Carried over from DCE5x, in case HW wants to use the mixed method. */
870 dc112->max_clks_state = CLOCKS_STATE_NOMINAL;
871
872 dc112->disp_clk_base.min_display_clk_threshold_khz =
873 dc112->crystal_freq_khz;
874
875 if (dc112->disp_clk_base.min_display_clk_threshold_khz <
876 (dc112->dentist_vco_freq_khz / 62))
877 dc112->disp_clk_base.min_display_clk_threshold_khz =
878 (dc112->dentist_vco_freq_khz / 62);
879
880 dal_divider_range_construct(
881 &divider_ranges[DIVIDER_RANGE_01],
882 DIVIDER_RANGE_01_START,
883 DIVIDER_RANGE_01_STEP_SIZE,
884 DIVIDER_RANGE_01_BASE_DIVIDER_ID,
885 DIVIDER_RANGE_02_BASE_DIVIDER_ID);
886 dal_divider_range_construct(
887 &divider_ranges[DIVIDER_RANGE_02],
888 DIVIDER_RANGE_02_START,
889 DIVIDER_RANGE_02_STEP_SIZE,
890 DIVIDER_RANGE_02_BASE_DIVIDER_ID,
891 DIVIDER_RANGE_03_BASE_DIVIDER_ID);
892 dal_divider_range_construct(
893 &divider_ranges[DIVIDER_RANGE_03],
894 DIVIDER_RANGE_03_START,
895 DIVIDER_RANGE_03_STEP_SIZE,
896 DIVIDER_RANGE_03_BASE_DIVIDER_ID,
897 DIVIDER_RANGE_MAX_DIVIDER_ID);
898
899 {
900 uint32_t ss_info_num =
901 ctx->dc_bios->funcs->
902 get_ss_entry_number(ctx->dc_bios, AS_SIGNAL_TYPE_GPU_PLL);
903
904 if (ss_info_num) {
905 struct spread_spectrum_info info;
906 bool result;
907
908 memset(&info, 0, sizeof(info));
909
910 result =
911 (BP_RESULT_OK == ctx->dc_bios->funcs->
912 get_spread_spectrum_info(ctx->dc_bios,
913 AS_SIGNAL_TYPE_GPU_PLL, 0, &info)) ? true : false;
914
915
916			/* VBIOS keeps an entry for GPU PLL SS even if SS is
917			 * not enabled; in that case a non-zero
918			 * SSInfo.spreadSpectrumPercentage is the sign that
919			 * SS is actually enabled.
920			 */
921 if (result && info.spread_spectrum_percentage != 0) {
922 dc112->ss_on_gpu_pll = true;
923 dc112->gpu_pll_ss_divider =
924 info.spread_percentage_divider;
925
926 if (info.type.CENTER_MODE == 0) {
927 /* Currently for DP Reference clock we
928 * need only SS percentage for
929 * downspread */
930 dc112->gpu_pll_ss_percentage =
931 info.spread_spectrum_percentage;
932 }
933 }
934
935 }
936 }
937
938 dc112->use_max_disp_clk = true;
939 dc112->max_clks_by_state = max_clks_by_state;
940
941 return true;
942}
943
944/*****************************************************************************
945 * public functions
946 *****************************************************************************/
947
948struct display_clock *dal_display_clock_dce112_create(
949 struct dc_context *ctx)
950{
951 struct display_clock_dce112 *dc112;
952
953 dc112 = dm_alloc(sizeof(struct display_clock_dce112));
954
955 if (dc112 == NULL)
956 return NULL;
957
958 if (dal_display_clock_dce112_construct(dc112, ctx))
959 return &dc112->disp_clk_base;
960
961 dm_free(dc112);
962
963 return NULL;
964}
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.h b/drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.h
new file mode 100644
index 000000000000..937e17929b7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/dce112/display_clock_dce112.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DAL_DISPLAY_CLOCK_DCE112_H__
26#define __DAL_DISPLAY_CLOCK_DCE112_H__
27
28#include "gpu/display_clock.h"
29
30struct display_clock_dce112 {
31 struct display_clock disp_clk_base;
32 /* Max display block clocks state*/
33 enum clocks_state max_clks_state;
34 bool use_max_disp_clk;
35 uint32_t crystal_freq_khz;
36 uint32_t dentist_vco_freq_khz;
37 /* Cache the status of DFS-bypass feature*/
38 bool dfs_bypass_enabled;
39 /* GPU PLL SS percentage (if down-spread enabled) */
40 uint32_t gpu_pll_ss_percentage;
41 /* GPU PLL SS percentage Divider (100 or 1000) */
42 uint32_t gpu_pll_ss_divider;
43 /* Flag for Enabled SS on GPU PLL */
44 bool ss_on_gpu_pll;
45 /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
46 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
47 uint32_t dfs_bypass_disp_clk;
48 struct display_clock_state clock_state;
49 struct state_dependent_clocks *max_clks_by_state;
50
51};
52
53#define DCLCK112_FROM_BASE(dc_base) \
54 container_of(dc_base, struct display_clock_dce112, disp_clk_base)
55
56/* Array identifiers and count for the divider ranges.*/
57enum divider_range_count {
58 DIVIDER_RANGE_01 = 0,
59 DIVIDER_RANGE_02,
60 DIVIDER_RANGE_03,
61 DIVIDER_RANGE_MAX /* == 3*/
62};
63
64/* Starting point for each divider range.*/
65enum divider_range_start {
66 DIVIDER_RANGE_01_START = 200, /* 2.00*/
67 DIVIDER_RANGE_02_START = 1600, /* 16.00*/
68 DIVIDER_RANGE_03_START = 3200, /* 32.00*/
69 DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
70};
71
72bool dal_display_clock_dce112_construct(
73 struct display_clock_dce112 *dc112,
74 struct dc_context *ctx);
75
76void dispclk_dce112_destroy(struct display_clock **base);
77
78uint32_t dispclk_dce112_calculate_min_clock(
79 struct display_clock *base,
80 uint32_t path_num,
81 struct min_clock_params *params);
82
83struct display_clock_state dispclk_dce112_get_clock_state(
84 struct display_clock *dc);
85
86uint32_t dispclk_dce112_get_dfs_bypass_threshold(
87 struct display_clock *dc);
88
89enum clocks_state dispclk_dce112_get_min_clocks_state(
90 struct display_clock *base);
91
92enum clocks_state dispclk_dce112_get_required_clocks_state(
93 struct display_clock *dc,
94 struct state_dependent_clocks *req_clocks);
95
96uint32_t dispclk_dce112_get_validation_clock(struct display_clock *dc);
97
98void dispclk_dce112_set_clock(
99 struct display_clock *base,
100 uint32_t requested_clk_khz);
101
102void dispclk_dce112_set_clock_state(
103 struct display_clock *dc,
104 struct display_clock_state clk_state);
105
106bool dispclk_dce112_set_min_clocks_state(
107 struct display_clock *base,
108 enum clocks_state clocks_state);
109
110void dispclk_dce112_store_max_clocks_state(
111 struct display_clock *base,
112 enum clocks_state max_clocks_state);
113
114#endif /* __DAL_DISPLAY_CLOCK_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.c b/drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.c
new file mode 100644
index 000000000000..eedcfd6232fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.c
@@ -0,0 +1,934 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_8_0_d.h"
29#include "dce/dce_8_0_sh_mask.h"
30
31#include "include/bios_parser_interface.h"
32#include "include/fixed32_32.h"
33#include "include/logger_interface.h"
34
35#include "../divider_range.h"
36#include "display_clock_dce80.h"
37#include "dc.h"
38
39#define DCE80_DFS_BYPASS_THRESHOLD_KHZ 100000
40
41/* Max clock values for each state indexed by "enum clocks_state": */
42static struct state_dependent_clocks max_clks_by_state[] = {
43/* ClocksStateInvalid - should not be used */
44{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
45/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
46{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
47/* ClocksStateLow */
48{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
49/* ClocksStateNominal */
50{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
51/* ClocksStatePerformance */
52{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
53
54/* Starting point for each divider range.*/
55enum divider_range_start {
56 DIVIDER_RANGE_01_START = 200, /* 2.00*/
57 DIVIDER_RANGE_02_START = 1600, /* 16.00*/
58 DIVIDER_RANGE_03_START = 3200, /* 32.00*/
59 DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
60};
61
62/* Ranges for divider identifiers (Divider ID or DID)
63 mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
64enum divider_id_register_setting {
65 DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
66 DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
67 DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
68 DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
69};
70
71/* Step size between each divider within a range.
72 Incrementing the DENTIST_DISPCLK_WDIVIDER by one
73 will increment the divider by this much.*/
74enum divider_range_step_size {
75 DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
76 DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
77 DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
78};
79
80/* Array identifiers and count for the divider ranges.*/
81enum divider_range_count {
82 DIVIDER_RANGE_01 = 0,
83 DIVIDER_RANGE_02,
84 DIVIDER_RANGE_03,
85 DIVIDER_RANGE_MAX /* == 3*/
86};
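Taken together, the constant blocks above encode the DENTIST_DISPCLK_WDIVIDER DID space: dividers 2.00 to 15.75 in 0.25 steps (DIDs 0x08..0x3F), 16.00 to 31.50 in 0.50 steps (DIDs 0x40..0x5F) and 32.00 to 63.00 in 1.00 steps (DIDs 0x60..0x7F), all scaled up by 100. The standalone sketch below is not part of the patch; the driver performs this mapping generically through dal_divider_range_get_divider() later in the series, but a hard-coded version makes the encoding easy to see.

/*
 * Standalone sketch: maps a DENTIST_DISPCLK_WDIVIDER DID to a divider
 * scaled by DIVIDER_RANGE_SCALE_FACTOR (100), using the DCE8 constants
 * defined above.  Illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t did_to_scaled_divider(uint32_t did)
{
	if (did >= 0x08 && did < 0x40)	/* 2.00 .. 15.75, step 0.25 */
		return 200 + (did - 0x08) * 25;
	if (did >= 0x40 && did < 0x60)	/* 16.00 .. 31.50, step 0.50 */
		return 1600 + (did - 0x40) * 50;
	if (did >= 0x60 && did < 0x80)	/* 32.00 .. 63.00, step 1.00 */
		return 3200 + (did - 0x60) * 100;
	return 0;			/* out of range: invalid DID */
}

int main(void)
{
	/* DID 0x0A lies in the first range: 2.00 + 2 * 0.25 = 2.50 */
	printf("DID 0x0A -> divider x100 = %u\n",
	       (unsigned)did_to_scaled_divider(0x0A));
	return 0;
}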
87
88static struct divider_range divider_ranges[DIVIDER_RANGE_MAX];
89
90#define FROM_DISPLAY_CLOCK(base) \
91 container_of(base, struct display_clock_dce80, disp_clk)
92
93static struct fixed32_32 get_deep_color_factor(struct min_clock_params *params)
94{
95 /* DeepColorFactor = IF (HDMI = True, bpp / 24, 1)*/
96 struct fixed32_32 deep_color_factor = dal_fixed32_32_from_int(1);
97
98 if (params->signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
99 return deep_color_factor;
100
101 switch (params->deep_color_depth) {
102 case COLOR_DEPTH_101010:
103 /*deep color ratio for 30bpp is 30/24 = 1.25*/
104 deep_color_factor = dal_fixed32_32_from_fraction(30, 24);
105 break;
106
107 case COLOR_DEPTH_121212:
108 /* deep color ratio for 36bpp is 36/24 = 1.5*/
109 deep_color_factor = dal_fixed32_32_from_fraction(36, 24);
110 break;
111
112 case COLOR_DEPTH_161616:
113 /* deep color ratio for 48bpp is 48/24 = 2.0 */
114 deep_color_factor = dal_fixed32_32_from_fraction(48, 24);
115 break;
116 default:
117 break;
118 }
119 return deep_color_factor;
120}
121
122static uint32_t get_scaler_efficiency(struct min_clock_params *params)
123{
124 uint32_t scaler_efficiency = 3;
125
126 switch (params->scaler_efficiency) {
127 case V_SCALER_EFFICIENCY_LB18BPP:
128 case V_SCALER_EFFICIENCY_LB24BPP:
129 scaler_efficiency = 4;
130 break;
131
132 case V_SCALER_EFFICIENCY_LB30BPP:
133 case V_SCALER_EFFICIENCY_LB36BPP:
134 scaler_efficiency = 3;
135 break;
136
137 default:
138 break;
139 }
140
141 return scaler_efficiency;
142}
143
144static uint32_t get_actual_required_display_clk(
145 struct display_clock_dce80 *disp_clk,
146 uint32_t target_clk_khz)
147{
148 uint32_t disp_clk_khz = target_clk_khz;
149 uint32_t div = INVALID_DIVIDER;
150 uint32_t did = INVALID_DID;
151 uint32_t scaled_vco =
152 disp_clk->dentist_vco_freq_khz * DIVIDER_RANGE_SCALE_FACTOR;
153
154 ASSERT(disp_clk_khz);
155
156 if (disp_clk_khz)
157 div = scaled_vco / disp_clk_khz;
158
159 did = dal_divider_range_get_did(divider_ranges, DIVIDER_RANGE_MAX, div);
160
161 if (did != INVALID_DID) {
162 div = dal_divider_range_get_divider(
163 divider_ranges, DIVIDER_RANGE_MAX, did);
164
165 if ((div != INVALID_DIVIDER) &&
166 (did > DIVIDER_RANGE_01_BASE_DIVIDER_ID))
167 if (disp_clk_khz > (scaled_vco / div))
168 div = dal_divider_range_get_divider(
169 divider_ranges, DIVIDER_RANGE_MAX,
170 did - 1);
171
172 if (div != INVALID_DIVIDER)
173 disp_clk_khz = scaled_vco / div;
174
175 }
176 /* We need to add 10KHz to this value because the accuracy in VBIOS is
177 in 10KHz units. So we need to always round the last digit up in order
178 to reach the next div level.*/
179 return disp_clk_khz + 10;
180}
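To make the snapping above concrete (the numbers are assumed, purely for illustration): with a DENTIST VCO of 3,600,000 kHz and a requested clock of 301,000 kHz, scaled_vco is 360,000,000 and the raw divider is 360,000,000 / 301,000 = 1196 (11.96). The DID lookup rounds this up to the next supported step, 12.00, which would yield only 300,000 kHz; since that falls below the request, the code steps back one DID to 11.75, giving 360,000,000 / 1175 ≈ 306,382 kHz, and finally adds the 10 kHz VBIOS-granularity margin, returning roughly 306,392 kHz.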
181
182static uint32_t get_validation_clock(struct display_clock *dc)
183{
184 uint32_t clk = 0;
185 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
186
187 switch (disp_clk->max_clks_state) {
188 case CLOCKS_STATE_ULTRA_LOW:
189 /*Currently not supported, it has 0 in table entry*/
190 case CLOCKS_STATE_LOW:
191 clk = max_clks_by_state[CLOCKS_STATE_LOW].
192 display_clk_khz;
193 break;
194
195 case CLOCKS_STATE_NOMINAL:
196 clk = max_clks_by_state[CLOCKS_STATE_NOMINAL].
197 display_clk_khz;
198 break;
199
200 case CLOCKS_STATE_PERFORMANCE:
201 clk = max_clks_by_state[CLOCKS_STATE_PERFORMANCE].
202 display_clk_khz;
203 break;
204
205 case CLOCKS_STATE_INVALID:
206 default:
207 /*Invalid Clocks State*/
208 BREAK_TO_DEBUGGER();
209 /* just return the display engine clock for
210 * lowest supported state*/
211 clk = max_clks_by_state[CLOCKS_STATE_LOW].
212 display_clk_khz;
213 break;
214 }
215 return clk;
216}
217
218static uint32_t calc_single_display_min_clks(
219 struct display_clock *base,
220 struct min_clock_params *params,
221 bool set_clk)
222{
223 struct fixed32_32 h_scale = dal_fixed32_32_from_int(1);
224 struct fixed32_32 v_scale = dal_fixed32_32_from_int(1);
225 uint32_t pix_clk_khz = params->requested_pixel_clock;
226 uint32_t line_total = params->timing_info.h_total;
227 uint32_t max_clk_khz = get_validation_clock(base);
228 struct fixed32_32 deep_color_factor = get_deep_color_factor(params);
229 uint32_t scaler_efficiency = get_scaler_efficiency(params);
230 struct fixed32_32 v_filter_init;
231 uint32_t v_filter_init_trunc;
232 struct fixed32_32 v_filter_init_ceil;
233 struct fixed32_32 src_lines_per_dst_line;
234 uint32_t src_wdth_rnd_to_chunks;
235 struct fixed32_32 scaling_coeff;
236 struct fixed32_32 fx_disp_clk_khz;
237 struct fixed32_32 fx_alt_disp_clk_khz;
238 uint32_t disp_clk_khz;
239 uint32_t alt_disp_clk_khz;
240 struct display_clock_dce80 *dc = FROM_DISPLAY_CLOCK(base);
241
242 if (0 != params->dest_view.height && 0 != params->dest_view.width) {
243
244 h_scale = dal_fixed32_32_from_fraction(
245 params->source_view.width,
246 params->dest_view.width);
247 v_scale = dal_fixed32_32_from_fraction(
248 params->source_view.height,
249 params->dest_view.height);
250 }
251
252 v_filter_init = dal_fixed32_32_from_fraction(
253 params->scaling_info.v_taps, 2u);
254 v_filter_init = dal_fixed32_32_add(v_filter_init,
255 dal_fixed32_32_div_int(v_scale, 2));
256 v_filter_init = dal_fixed32_32_add(v_filter_init,
257 dal_fixed32_32_from_fraction(15, 10));
258
259 v_filter_init_trunc = dal_fixed32_32_floor(v_filter_init);
260
261 v_filter_init_ceil = dal_fixed32_32_from_fraction(
262 v_filter_init_trunc, 2);
263 v_filter_init_ceil = dal_fixed32_32_from_int(
264 dal_fixed32_32_ceil(v_filter_init_ceil));
265 v_filter_init_ceil = dal_fixed32_32_mul_int(v_filter_init_ceil, 2);
266 v_filter_init_ceil = dal_fixed32_32_div_int(v_filter_init_ceil, 3);
267 v_filter_init_ceil = dal_fixed32_32_from_int(
268 dal_fixed32_32_ceil(v_filter_init_ceil));
269
270 src_lines_per_dst_line = dal_fixed32_32_max(
271 dal_fixed32_32_from_int(dal_fixed32_32_ceil(v_scale)),
272 v_filter_init_ceil);
273
274 src_wdth_rnd_to_chunks =
275 ((params->source_view.width - 1) / 128) * 128 + 256;
276
277 scaling_coeff = dal_fixed32_32_max(
278 dal_fixed32_32_from_fraction(params->scaling_info.h_taps, 4),
279 dal_fixed32_32_mul(
280 dal_fixed32_32_from_fraction(
281 params->scaling_info.v_taps,
282 scaler_efficiency),
283 h_scale));
284
285 scaling_coeff = dal_fixed32_32_max(scaling_coeff, h_scale);
286
287 fx_disp_clk_khz = dal_fixed32_32_mul(
288 scaling_coeff, dal_fixed32_32_from_fraction(11, 10));
289 if (0 != line_total) {
290 struct fixed32_32 d_clk = dal_fixed32_32_mul_int(
291 src_lines_per_dst_line, src_wdth_rnd_to_chunks);
292 d_clk = dal_fixed32_32_div_int(d_clk, line_total);
293 d_clk = dal_fixed32_32_mul(d_clk,
294 dal_fixed32_32_from_fraction(11, 10));
295 fx_disp_clk_khz = dal_fixed32_32_max(fx_disp_clk_khz, d_clk);
296 }
297
298 fx_disp_clk_khz = dal_fixed32_32_max(fx_disp_clk_khz,
299 dal_fixed32_32_mul(deep_color_factor,
300 dal_fixed32_32_from_fraction(11, 10)));
301
302 fx_disp_clk_khz = dal_fixed32_32_mul_int(fx_disp_clk_khz, pix_clk_khz);
303 fx_disp_clk_khz = dal_fixed32_32_mul(fx_disp_clk_khz,
304 dal_fixed32_32_from_fraction(1005, 1000));
305
306 fx_alt_disp_clk_khz = scaling_coeff;
307
308 if (0 != line_total) {
309 struct fixed32_32 d_clk = dal_fixed32_32_mul_int(
310 src_lines_per_dst_line, src_wdth_rnd_to_chunks);
311 d_clk = dal_fixed32_32_div_int(d_clk, line_total);
312 d_clk = dal_fixed32_32_mul(d_clk,
313 dal_fixed32_32_from_fraction(105, 100));
314 fx_alt_disp_clk_khz = dal_fixed32_32_max(
315 fx_alt_disp_clk_khz, d_clk);
316 }
317 fx_alt_disp_clk_khz = dal_fixed32_32_max(
318 fx_alt_disp_clk_khz, fx_alt_disp_clk_khz);
319
320 fx_alt_disp_clk_khz = dal_fixed32_32_mul_int(
321 fx_alt_disp_clk_khz, pix_clk_khz);
322
323 /* convert to integer*/
324 disp_clk_khz = dal_fixed32_32_floor(fx_disp_clk_khz);
325 alt_disp_clk_khz = dal_fixed32_32_floor(fx_alt_disp_clk_khz);
326
327 if (set_clk) { /* only compensate clock if we are going to set it.*/
328 disp_clk_khz = get_actual_required_display_clk(
329 dc, disp_clk_khz);
330 alt_disp_clk_khz = get_actual_required_display_clk(
331 dc, alt_disp_clk_khz);
332 }
333
334 if ((disp_clk_khz > max_clk_khz) && (alt_disp_clk_khz <= max_clk_khz))
335 disp_clk_khz = alt_disp_clk_khz;
336
337 return disp_clk_khz;
338
339}
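For readability, here is a hedged floating-point restatement of the calculation above. The helper name min_dispclk_khz() and the sample numbers are illustrative only, the dal_fixed32_32_* fixed-point rounding is replaced by doubles, and the divider compensation (get_actual_required_display_clk()) and the alternative-clock fallback are omitted, so treat it as a reading aid rather than a substitute for the function above.

/* Hedged floating-point restatement of calc_single_display_min_clks();
 * names and numbers are illustrative, not part of the driver. */
#include <math.h>
#include <stdio.h>

static double min_dispclk_khz(double src_w, double src_h,
			      double dst_w, double dst_h,
			      double h_taps, double v_taps,
			      double scaler_eff, double deep_color_factor,
			      double pix_clk_khz, double h_total)
{
	double h_scale = dst_w ? src_w / dst_w : 1.0;
	double v_scale = dst_h ? src_h / dst_h : 1.0;

	/* v_filter_init = VTAPS/2 + VSR/2 + 1.5, then
	 * ceil(ceil(floor(init)/2) * 2 / 3), as in the code above */
	double v_init = v_taps / 2.0 + v_scale / 2.0 + 1.5;
	double v_init_ceil = ceil(ceil(floor(v_init) / 2.0) * 2.0 / 3.0);

	double src_lines_per_dst_line = fmax(ceil(v_scale), v_init_ceil);
	double src_w_chunks = floor((src_w - 1.0) / 128.0) * 128.0 + 256.0;

	double scaling_coeff = fmax(h_taps / 4.0,
				    (v_taps / scaler_eff) * h_scale);
	scaling_coeff = fmax(scaling_coeff, h_scale);

	double per_line = h_total ?
		src_lines_per_dst_line * src_w_chunks / h_total : 0.0;

	double factor = fmax(scaling_coeff * 1.1, per_line * 1.1);
	factor = fmax(factor, deep_color_factor * 1.1);

	return factor * pix_clk_khz * 1.005;
}

int main(void)
{
	/* 1080p -> 1080p, 4-tap scaler, no deep color, 148.5 MHz pixel clock */
	printf("%.0f kHz\n",
	       min_dispclk_khz(1920, 1080, 1920, 1080, 4, 4, 3, 1.0,
			       148500, 2200));
	return 0;
}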
340
341static uint32_t calc_cursor_bw_for_min_clks(struct min_clock_params *params)
342{
343
344 struct fixed32_32 v_scale = dal_fixed32_32_from_int(1);
345 struct fixed32_32 v_filter_ceiling;
346 struct fixed32_32 src_lines_per_dst_line;
347 struct fixed32_32 cursor_bw;
348
349 /* DCE8 Mode Support and Mode Set Architecture Specification Rev 1.3
350 6.3.3 Cursor data Throughput requirement on DISPCLK
351 The MCIF to DCP cursor data return throughput is one pixel per DISPCLK
352 shared among the display heads.
 353 If (Total Cursor Bandwidth in pixels for All heads > DISPCLK)
354 The mode is not supported
355 Cursor Bandwidth in Pixels = Cursor Width *
356 (SourceLinesPerDestinationLine / Line Time)
357 Assuming that Cursor Width = 128
358 */
 359 /*The hardware doc mentions an Interlace Factor.
 360 It is not applied here because it was already accounted for when
 361 calculating the destination view*/
362 if (0 != params->dest_view.height)
363 v_scale = dal_fixed32_32_from_fraction(
364 params->source_view.height,
365 params->dest_view.height);
366
367 {
368 /*Do: Vertical Filter Init = 0.5 + VTAPS/2 + VSR/2 * Interlace Factor*/
369 /*Interlace Factor is included in verticalScaleRatio*/
370 struct fixed32_32 v_filter = dal_fixed32_32_add(
371 dal_fixed32_32_from_fraction(params->scaling_info.v_taps, 2),
372 dal_fixed32_32_div_int(v_scale, 2));
373 /*Do : Ceiling (Vertical Filter Init, 2)/3 )*/
374 v_filter_ceiling = dal_fixed32_32_div_int(v_filter, 2);
375 v_filter_ceiling = dal_fixed32_32_mul_int(
376 dal_fixed32_32_from_int(dal_fixed32_32_ceil(v_filter_ceiling)),
377 2);
378 v_filter_ceiling = dal_fixed32_32_div_int(v_filter_ceiling, 3);
379 }
 380 /*Do : SourceLinesPerDestinationLine =
 381 * MAX( Ceiling (VSR),
 382 * Ceiling (Vertical Filter Init, 2)/3 )*/
383 src_lines_per_dst_line = dal_fixed32_32_max(v_scale, v_filter_ceiling);
384
385 if ((params->requested_pixel_clock != 0) &&
386 (params->timing_info.h_total != 0)) {
 387 /* pixelClock is in units of KHz. Calc the inverse of line time*/
388 struct fixed32_32 inv_line_time = dal_fixed32_32_from_fraction(
389 params->requested_pixel_clock,
390 params->timing_info.h_total);
391 cursor_bw = dal_fixed32_32_mul(
392 dal_fixed32_32_mul_int(inv_line_time, 128),
393 src_lines_per_dst_line);
394 }
395
396 /* convert to integer*/
397 return dal_fixed32_32_floor(cursor_bw);
398}
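A worked example with assumed numbers: for a 148,500 kHz pixel clock, an h_total of 2,200 and 4 vertical taps at 1:1 scaling, the inverse line time is 148,500 / 2,200 = 67.5, the filter term evaluates to 4/3, and the cursor bandwidth comes out to 128 × 67.5 × 4/3 = 11,520.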
399
400static bool validate(
401 struct display_clock *dc,
402 struct min_clock_params *params)
403{
404 uint32_t max_clk_khz = get_validation_clock(dc);
405 uint32_t req_clk_khz;
406
407 if (params == NULL)
408 return false;
409
410 req_clk_khz = calc_single_display_min_clks(dc, params, false);
411
412 return (req_clk_khz <= max_clk_khz);
413}
414
415static uint32_t calculate_min_clock(
416 struct display_clock *dc,
417 uint32_t path_num,
418 struct min_clock_params *params)
419{
420 uint32_t i;
421 uint32_t validation_clk_khz = get_validation_clock(dc);
422 uint32_t min_clk_khz = validation_clk_khz;
423 uint32_t max_clk_khz = 0;
424 uint32_t total_cursor_bw = 0;
425 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
426
427 if (disp_clk->use_max_disp_clk)
428 return min_clk_khz;
429
430 if (params != NULL) {
431 uint32_t disp_clk_khz = 0;
432
433 for (i = 0; i < path_num; ++i) {
434 disp_clk_khz = calc_single_display_min_clks(
435 dc, params, true);
436
437 /* update the max required clock found*/
438 if (disp_clk_khz > max_clk_khz)
439 max_clk_khz = disp_clk_khz;
440
441 disp_clk_khz = calc_cursor_bw_for_min_clks(params);
442
443 total_cursor_bw += disp_clk_khz;
444
445 params++;
446
447 }
448 }
449
450 max_clk_khz = (total_cursor_bw > max_clk_khz) ? total_cursor_bw :
451 max_clk_khz;
452
453 min_clk_khz = max_clk_khz;
454
455 /*"Cursor data Throughput requirement on DISPCLK is now a factor,
456 * need to change the code */
457 ASSERT(total_cursor_bw < validation_clk_khz);
458
459 if (min_clk_khz > validation_clk_khz)
460 min_clk_khz = validation_clk_khz;
461 else if (min_clk_khz < dc->min_display_clk_threshold_khz)
462 min_clk_khz = dc->min_display_clk_threshold_khz;
463
464 return min_clk_khz;
465}
466
467static void set_clock(
468 struct display_clock *dc,
469 uint32_t requested_clk_khz)
470{
471 struct bp_pixel_clock_parameters pxl_clk_params;
472 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
473 struct dc_bios *bp = dc->ctx->dc_bios;
474
475 /* Prepare to program display clock*/
476 memset(&pxl_clk_params, 0, sizeof(pxl_clk_params));
477
478 pxl_clk_params.target_pixel_clock = requested_clk_khz;
479 pxl_clk_params.pll_id = dc->id;
480
481 bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
482
483 if (disp_clk->dfs_bypass_enabled) {
484
485 /* Cache the fixed display clock*/
486 disp_clk->dfs_bypass_disp_clk =
487 pxl_clk_params.dfs_bypass_display_clock;
488 }
489
 490 /* Coming out of power down (HW reset), mark the clock state as
 491 * ClocksStateNominal so that on resume we call the pplib voltage regulator.*/
492 if (requested_clk_khz == 0)
493 disp_clk->cur_min_clks_state = CLOCKS_STATE_NOMINAL;
494}
495
496static uint32_t get_clock(struct display_clock *dc)
497{
498 uint32_t disp_clock = get_validation_clock(dc);
499 uint32_t target_div = INVALID_DIVIDER;
500 uint32_t addr = mmDENTIST_DISPCLK_CNTL;
501 uint32_t value = 0;
502 uint32_t field = 0;
503 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
504
505 if (disp_clk->dfs_bypass_enabled && disp_clk->dfs_bypass_disp_clk)
506 return disp_clk->dfs_bypass_disp_clk;
507
508 /* Read the mmDENTIST_DISPCLK_CNTL to get the currently programmed
509 DID DENTIST_DISPCLK_WDIVIDER.*/
510 value = dm_read_reg(dc->ctx, addr);
511 field = get_reg_field_value(
512 value, DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER);
513
514 /* Convert DENTIST_DISPCLK_WDIVIDER to actual divider*/
515 target_div = dal_divider_range_get_divider(
516 divider_ranges,
517 DIVIDER_RANGE_MAX,
518 field);
519
520 if (target_div != INVALID_DIVIDER)
521 /* Calculate the current DFS clock in KHz.
522 Should be okay up to 42.9 THz before overflowing.*/
523 disp_clock = (DIVIDER_RANGE_SCALE_FACTOR
524 * disp_clk->dentist_vco_freq_khz) / target_div;
525 return disp_clock;
526}
527
528static void set_clock_state(
529 struct display_clock *dc,
530 struct display_clock_state clk_state)
531{
532 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
533
534 disp_clk->clock_state = clk_state;
535}
536static struct display_clock_state get_clock_state(
537 struct display_clock *dc)
538{
539 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
540
541 return disp_clk->clock_state;
542}
543
544static enum clocks_state get_min_clocks_state(struct display_clock *dc)
545{
546 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
547
548 return disp_clk->cur_min_clks_state;
549}
550
551static enum clocks_state get_required_clocks_state
552 (struct display_clock *dc,
553 struct state_dependent_clocks *req_clocks)
554{
555 int32_t i;
556 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
557 enum clocks_state low_req_clk = disp_clk->max_clks_state;
558
559 if (!req_clocks) {
560 /* NULL pointer*/
561 BREAK_TO_DEBUGGER();
562 return CLOCKS_STATE_INVALID;
563 }
564
565 /* Iterate from highest supported to lowest valid state, and update
566 * lowest RequiredState with the lowest state that satisfies
567 * all required clocks
568 */
569 for (i = disp_clk->max_clks_state; i >= CLOCKS_STATE_ULTRA_LOW; --i) {
570 if ((req_clocks->display_clk_khz <=
571 max_clks_by_state[i].display_clk_khz) &&
572 (req_clocks->pixel_clk_khz <=
573 max_clks_by_state[i].pixel_clk_khz))
574 low_req_clk = i;
575 }
576 return low_req_clk;
577}
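For example, with the default table above, a request of 500,000 kHz display clock and 350,000 kHz pixel clock fails ClocksStateLow (352,000 / 330,000) but is satisfied by both Nominal and Performance (600,000 / 400,000 each); because the loop walks from the highest supported state downward and overwrites low_req_clk with every state that still satisfies the request, it returns CLOCKS_STATE_NOMINAL, the lowest state that works.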
578
579static bool set_min_clocks_state(
580 struct display_clock *dc,
581 enum clocks_state clocks_state)
582{
583 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
584
585 struct dm_pp_power_level_change_request level_change_req = {
586 DM_PP_POWER_LEVEL_INVALID};
587
588 if (clocks_state > disp_clk->max_clks_state) {
589 /*Requested state exceeds max supported state.*/
590 dm_logger_write(dc->ctx->logger, LOG_WARNING,
591 "Requested state exceeds max supported state");
592 return false;
593 } else if (clocks_state == dc->cur_min_clks_state) {
594 /*if we're trying to set the same state, we can just return
595 * since nothing needs to be done*/
596 return true;
597 }
598
599 switch (clocks_state) {
600 case CLOCKS_STATE_ULTRA_LOW:
601 level_change_req.power_level = DM_PP_POWER_LEVEL_ULTRA_LOW;
602 break;
603 case CLOCKS_STATE_LOW:
604 level_change_req.power_level = DM_PP_POWER_LEVEL_LOW;
605 break;
606 case CLOCKS_STATE_NOMINAL:
607 level_change_req.power_level = DM_PP_POWER_LEVEL_NOMINAL;
608 break;
609 case CLOCKS_STATE_PERFORMANCE:
610 level_change_req.power_level = DM_PP_POWER_LEVEL_PERFORMANCE;
611 break;
612 case CLOCKS_STATE_INVALID:
613 default:
614 dm_logger_write(dc->ctx->logger, LOG_WARNING,
615 "Requested state invalid state");
616 return false;
617 }
618
619 /* get max clock state from PPLIB */
620 if (dm_pp_apply_power_level_change_request(dc->ctx, &level_change_req))
621 dc->cur_min_clks_state = clocks_state;
622
623 return true;
624}
625
626static uint32_t get_dp_ref_clk_frequency(struct display_clock *dc)
627{
628 uint32_t dispclk_cntl_value;
629 uint32_t dp_ref_clk_cntl_value;
630 uint32_t dp_ref_clk_cntl_src_sel_value;
631 uint32_t dp_ref_clk_khz = 600000;
632 uint32_t target_div = INVALID_DIVIDER;
633 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
634
635 /* ASSERT DP Reference Clock source is from DFS*/
636 dp_ref_clk_cntl_value = dm_read_reg(dc->ctx,
637 mmDPREFCLK_CNTL);
638
639 dp_ref_clk_cntl_src_sel_value =
640 get_reg_field_value(
641 dp_ref_clk_cntl_value,
642 DPREFCLK_CNTL, DPREFCLK_SRC_SEL);
643
644 ASSERT(dp_ref_clk_cntl_src_sel_value == 0);
645
646 /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
647 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
648 dispclk_cntl_value = dm_read_reg(dc->ctx,
649 mmDENTIST_DISPCLK_CNTL);
650
 651 /* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider*/
652 target_div = dal_divider_range_get_divider(
653 divider_ranges,
654 DIVIDER_RANGE_MAX,
655 get_reg_field_value(dispclk_cntl_value,
656 DENTIST_DISPCLK_CNTL,
657 DENTIST_DPREFCLK_WDIVIDER));
658
659 if (target_div != INVALID_DIVIDER) {
660 /* Calculate the current DFS clock, in kHz.*/
661 dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
662 * disp_clk->dentist_vco_freq_khz) / target_div;
663 }
664
 665 /* SW will adjust the DP REF Clock average value for all purposes
 666 * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, in all cases:
 667 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
 668 * with SW calculations for DS_INCR/DS_MODULO (planned to be the
 669 * default case)
 670 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
 671 * with HW calculations (not planned to be used, but the average clock
 672 * should still be valid)
 673 * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled
 674 * (should not be the case with CIK), then SW should program all rates
 675 * generated according to the average value (as with previous ASICs)
 676 */
677 if ((disp_clk->ss_on_gpu_pll) && (disp_clk->gpu_pll_ss_divider != 0)) {
678 struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
679 dal_fixed32_32_from_fraction(
680 disp_clk->gpu_pll_ss_percentage,
681 disp_clk->gpu_pll_ss_divider), 200);
682 struct fixed32_32 adj_dp_ref_clk_khz;
683
684 ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
685 ss_percentage);
686 adj_dp_ref_clk_khz =
687 dal_fixed32_32_mul_int(
688 ss_percentage,
689 dp_ref_clk_khz);
690 dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
691 }
692
693 return dp_ref_clk_khz;
694}
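A worked example with assumed values: a 0.5% down-spread reported as gpu_pll_ss_percentage = 50 with gpu_pll_ss_divider = 100 gives ss_percentage = (50 / 100) / 200 = 0.0025 (the divide by 200 converts the percentage to a fraction and halves it to obtain the average of a down-spread clock), so a nominal 600,000 kHz DP reference clock is reported as 600,000 × (1 − 0.0025) = 598,500 kHz.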
695
696static void store_max_clocks_state(
697 struct display_clock *dc,
698 enum clocks_state max_clocks_state)
699{
700 struct display_clock_dce80 *disp_clk = FROM_DISPLAY_CLOCK(dc);
701
702 switch (max_clocks_state) {
703 case CLOCKS_STATE_LOW:
704 case CLOCKS_STATE_NOMINAL:
705 case CLOCKS_STATE_PERFORMANCE:
706 case CLOCKS_STATE_ULTRA_LOW:
707 disp_clk->max_clks_state = max_clocks_state;
708 break;
709
710 case CLOCKS_STATE_INVALID:
711 default:
712 /*Invalid Clocks State!*/
713 BREAK_TO_DEBUGGER();
714 break;
715 }
716}
717
718static void display_clock_ss_construct(
719 struct display_clock_dce80 *disp_clk)
720{
721 struct dc_bios *bp = disp_clk->disp_clk.ctx->dc_bios;
722 uint32_t ss_entry_num = bp->funcs->get_ss_entry_number(bp,
723 AS_SIGNAL_TYPE_GPU_PLL);
724
725 /*Read SS Info from VBIOS SS Info table for DP Reference Clock spread.*/
726 if (ss_entry_num > 0) {/* Should be only one entry */
727 struct spread_spectrum_info ss_info;
728 enum bp_result res;
729
730 memset(&ss_info, 0, sizeof(struct spread_spectrum_info));
731
732 res = bp->funcs->get_spread_spectrum_info(bp,
733 AS_SIGNAL_TYPE_GPU_PLL, 0, &ss_info);
734
 735 /* VBIOS keeps an entry for GPU PLL SS even when SS is not
 736 * enabled, so the presence of the entry alone is not enough;
 737 * a non-zero SSInfo.spreadSpectrumPercentage is the actual
 738 * sign that SS is enabled*/
739 if (res == BP_RESULT_OK && ss_info.spread_spectrum_percentage != 0) {
740 disp_clk->ss_on_gpu_pll = true;
741 disp_clk->gpu_pll_ss_divider =
742 ss_info.spread_percentage_divider;
743 if (ss_info.type.CENTER_MODE == 0)
744 /* Currently we need only SS
745 * percentage for down-spread*/
746 disp_clk->gpu_pll_ss_percentage =
747 ss_info.spread_spectrum_percentage;
748 }
749 }
750}
751
752static bool display_clock_integrated_info_construct(
753 struct display_clock_dce80 *disp_clk)
754{
755 struct dc_debug *debug = &disp_clk->disp_clk.ctx->dc->debug;
756 struct dc_bios *bp = disp_clk->disp_clk.ctx->dc_bios;
757 struct integrated_info info = { { { 0 } } };
758 struct firmware_info fw_info = { { 0 } };
759 uint32_t i;
760
761 if (bp->integrated_info)
762 info = *bp->integrated_info;
763
764 disp_clk->dentist_vco_freq_khz = info.dentist_vco_freq;
765 if (disp_clk->dentist_vco_freq_khz == 0) {
766 bp->funcs->get_firmware_info(bp, &fw_info);
767 disp_clk->dentist_vco_freq_khz =
768 fw_info.smu_gpu_pll_output_freq;
769 if (disp_clk->dentist_vco_freq_khz == 0)
770 disp_clk->dentist_vco_freq_khz = 3600000;
771 }
772 disp_clk->disp_clk.min_display_clk_threshold_khz =
773 disp_clk->dentist_vco_freq_khz / 64;
774
775 /* TODO: initialise disp_clk->dfs_bypass_disp_clk */
776
777 /*update the maximum display clock for each power state*/
778 for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
779 enum clocks_state clk_state = CLOCKS_STATE_INVALID;
780
781 switch (i) {
782 case 0:
783 clk_state = CLOCKS_STATE_ULTRA_LOW;
784 break;
785
786 case 1:
787 clk_state = CLOCKS_STATE_LOW;
788 break;
789
790 case 2:
791 clk_state = CLOCKS_STATE_NOMINAL;
792 break;
793
794 case 3:
795 clk_state = CLOCKS_STATE_PERFORMANCE;
796 break;
797
798 default:
799 clk_state = CLOCKS_STATE_INVALID;
800 break;
801 }
802
803 /*Do not allow bad VBIOS/SBIOS to override with invalid values,
 804 * check for >= 100MHz*/
805 if (info.disp_clk_voltage[i].max_supported_clk >= 100000) {
806 max_clks_by_state[clk_state].display_clk_khz =
807 info.disp_clk_voltage[i].max_supported_clk;
808 }
809 }
810
811 disp_clk->dfs_bypass_enabled = false;
812 if (!debug->disable_dfs_bypass)
813 if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
814 disp_clk->dfs_bypass_enabled = true;
815
816 disp_clk->use_max_disp_clk = debug->max_disp_clk;
817
818 return true;
819}
820
821static uint32_t get_dfs_bypass_threshold(struct display_clock *dc)
822{
823 return DCE80_DFS_BYPASS_THRESHOLD_KHZ;
824}
825
826static void destroy(struct display_clock **dc)
827{
828 struct display_clock_dce80 *disp_clk;
829
830 disp_clk = FROM_DISPLAY_CLOCK(*dc);
831 dm_free(disp_clk);
832 *dc = NULL;
833}
834
835static const struct display_clock_funcs funcs = {
836 .calculate_min_clock = calculate_min_clock,
837 .destroy = destroy,
838 .get_clock = get_clock,
839 .get_clock_state = get_clock_state,
840 .get_dfs_bypass_threshold = get_dfs_bypass_threshold,
841 .get_dp_ref_clk_frequency = get_dp_ref_clk_frequency,
842 .get_min_clocks_state = get_min_clocks_state,
843 .get_required_clocks_state = get_required_clocks_state,
844 .get_validation_clock = get_validation_clock,
845 .set_clock = set_clock,
846 .set_clock_state = set_clock_state,
847 .set_dp_ref_clock_source =
848 dal_display_clock_base_set_dp_ref_clock_source,
849 .set_min_clocks_state = set_min_clocks_state,
850 .store_max_clocks_state = store_max_clocks_state,
851 .validate = validate,
852};
853
854static bool display_clock_construct(
855 struct dc_context *ctx,
856 struct display_clock_dce80 *disp_clk)
857{
858 struct display_clock *dc_base = &disp_clk->disp_clk;
859
860 if (!dal_display_clock_construct_base(dc_base, ctx))
861 return false;
862
863 dc_base->funcs = &funcs;
864 /*
865 * set_dp_ref_clock_source
866 * set_clock_state
867 * get_clock_state
868 * get_dfs_bypass_threshold
869 */
870
871 disp_clk->gpu_pll_ss_percentage = 0;
872 disp_clk->gpu_pll_ss_divider = 1000;
873 disp_clk->ss_on_gpu_pll = false;
874 disp_clk->dfs_bypass_enabled = false;
875 disp_clk->dfs_bypass_disp_clk = 0;
876 disp_clk->use_max_disp_clk = true;/* false will hang the system! */
877
878 disp_clk->disp_clk.id = CLOCK_SOURCE_ID_DFS;
 879/* Initially set max clocks state to nominal. This should be updated via
 880 * a pplib call to DAL IRI, eventually calling
 881 * DisplayEngineClock_Dce50::StoreMaxClocksState(). This call will come in on
 882 * PPLIB init. Carried over from DCE5x, in case HW wants to use a mixed method.*/
883 disp_clk->max_clks_state = CLOCKS_STATE_NOMINAL;
884/* Initially set current min clocks state to invalid since we
885 * cannot make any assumption about PPLIB's initial state. This will be updated
886 * by HWSS via SetMinClocksState() on first mode set prior to programming
887 * state dependent clocks.*/
888 disp_clk->cur_min_clks_state = CLOCKS_STATE_INVALID;
889
890 display_clock_ss_construct(disp_clk);
891
892 if (!display_clock_integrated_info_construct(disp_clk)) {
893 dm_logger_write(dc_base->ctx->logger, LOG_WARNING,
894 "Cannot obtain VBIOS integrated info");
895 }
896
897 dal_divider_range_construct(
898 &divider_ranges[DIVIDER_RANGE_01],
899 DIVIDER_RANGE_01_START,
900 DIVIDER_RANGE_01_STEP_SIZE,
901 DIVIDER_RANGE_01_BASE_DIVIDER_ID,
902 DIVIDER_RANGE_02_BASE_DIVIDER_ID);
903 dal_divider_range_construct(
904 &divider_ranges[DIVIDER_RANGE_02],
905 DIVIDER_RANGE_02_START,
906 DIVIDER_RANGE_02_STEP_SIZE,
907 DIVIDER_RANGE_02_BASE_DIVIDER_ID,
908 DIVIDER_RANGE_03_BASE_DIVIDER_ID);
909 dal_divider_range_construct(
910 &divider_ranges[DIVIDER_RANGE_03],
911 DIVIDER_RANGE_03_START,
912 DIVIDER_RANGE_03_STEP_SIZE,
913 DIVIDER_RANGE_03_BASE_DIVIDER_ID,
914 DIVIDER_RANGE_MAX_DIVIDER_ID);
915 return true;
916}
917
918struct display_clock *dal_display_clock_dce80_create(
919 struct dc_context *ctx)
920{
921 struct display_clock_dce80 *disp_clk;
922
923 disp_clk = dm_alloc(sizeof(struct display_clock_dce80));
924
925 if (disp_clk == NULL)
926 return NULL;
927
928 if (display_clock_construct(ctx, disp_clk))
929 return &disp_clk->disp_clk;
930
931 dm_free(disp_clk);
932 return NULL;
933}
934
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.h b/drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.h
new file mode 100644
index 000000000000..944dd0380413
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/dce80/display_clock_dce80.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DAL_DISPLAY_CLOCK_DCE80_H__
26#define __DAL_DISPLAY_CLOCK_DCE80_H__
27
28#include "gpu/display_clock.h"
29
30struct display_clock_dce80 {
31 struct display_clock disp_clk;
32 /* DFS input - GPUPLL VCO frequency - from VBIOS Firmware info. */
33 uint32_t dentist_vco_freq_khz;
34 /* GPU PLL SS percentage (if down-spread enabled)*/
35 uint32_t gpu_pll_ss_percentage;
36 /* GPU PLL SS percentage Divider (100 or 1000)*/
37 uint32_t gpu_pll_ss_divider;
38 /* Flag for Enabled SS on GPU PLL*/
39 bool ss_on_gpu_pll;
40 /* Max display block clocks state*/
41 enum clocks_state max_clks_state;
42 /* Current minimum display block clocks state*/
43 enum clocks_state cur_min_clks_state;
 44 /* DFS-bypass feature flag:
 45 caches whether the DFS-bypass feature is enabled*/
46 bool dfs_bypass_enabled;
47 /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
48 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
49 uint32_t dfs_bypass_disp_clk;
50 bool use_max_disp_clk;
51 struct display_clock_state clock_state;
52};
53
54struct display_clock *dal_display_clock_dce80_create(
55 struct dc_context *ctx);
56
57#endif /* __DAL_DISPLAY_CLOCK_DCE80_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/display_clock.c b/drivers/gpu/drm/amd/display/dc/gpu/display_clock.c
new file mode 100644
index 000000000000..bcc0a5132600
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/display_clock.c
@@ -0,0 +1,217 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "display_clock.h"
28
29void dal_display_clock_base_set_dp_ref_clock_source(
30 struct display_clock *disp_clk,
31 enum clock_source_id clk_src)
32{/*must be implemented in derived*/
33
34}
35
36void dal_display_clock_base_set_clock_state(struct display_clock *disp_clk,
37 struct display_clock_state clk_state)
38{
39 /*Implemented only in DCE81*/
40}
41struct display_clock_state dal_display_clock_base_get_clock_state(
42 struct display_clock *disp_clk)
43{
44 /*Implemented only in DCE81*/
45 struct display_clock_state state = {0};
46 return state;
47}
48uint32_t dal_display_clock_base_get_dfs_bypass_threshold(
49 struct display_clock *disp_clk)
50{
51 /*Implemented only in DCE81*/
52 return 0;
53}
54
55bool dal_display_clock_construct_base(
56 struct display_clock *base,
57 struct dc_context *ctx)
58{
59 base->ctx = ctx;
60 base->id = CLOCK_SOURCE_ID_DCPLL;
61 base->min_display_clk_threshold_khz = 0;
62
63/* Initially set current min clocks state to invalid since we
64 * cannot make any assumption about PPLIB's initial state. This will be updated
65 * by HWSS via SetMinClocksState() on first mode set prior to programming
66 * state dependent clocks.*/
67 base->cur_min_clks_state = CLOCKS_STATE_INVALID;
68
69 return true;
70}
71
72void dal_display_clock_destroy(struct display_clock **disp_clk)
73{
74 if (!disp_clk || !*disp_clk) {
75 BREAK_TO_DEBUGGER();
76 return;
77 }
78
79 (*disp_clk)->funcs->destroy(disp_clk);
80
81 *disp_clk = NULL;
82}
83
84bool dal_display_clock_validate(
85 struct display_clock *disp_clk,
86 struct min_clock_params *params)
87{
88 return disp_clk->funcs->validate(disp_clk, params);
89}
90
91uint32_t dal_display_clock_calculate_min_clock(
92 struct display_clock *disp_clk,
93 uint32_t path_num,
94 struct min_clock_params *params)
95{
96 return disp_clk->funcs->calculate_min_clock(disp_clk, path_num, params);
97}
98
99uint32_t dal_display_clock_get_validation_clock(struct display_clock *disp_clk)
100{
101 return disp_clk->funcs->get_validation_clock(disp_clk);
102}
103
104void dal_display_clock_set_clock(
105 struct display_clock *disp_clk,
106 uint32_t requested_clock_khz)
107{
108 disp_clk->funcs->set_clock(disp_clk, requested_clock_khz);
109}
110
111uint32_t dal_display_clock_get_clock(struct display_clock *disp_clk)
112{
113 return disp_clk->funcs->get_clock(disp_clk);
114}
115
116bool dal_display_clock_get_min_clocks_state(
117 struct display_clock *disp_clk,
118 enum clocks_state *clocks_state)
119{
120 if (!disp_clk->funcs->get_min_clocks_state)
121 return false;
122
123 *clocks_state = disp_clk->funcs->get_min_clocks_state(disp_clk);
124 return true;
125}
126
127bool dal_display_clock_get_required_clocks_state(
128 struct display_clock *disp_clk,
129 struct state_dependent_clocks *req_clocks,
130 enum clocks_state *clocks_state)
131{
132 if (!disp_clk->funcs->get_required_clocks_state)
133 return false;
134
135 *clocks_state = disp_clk->funcs->get_required_clocks_state(
136 disp_clk, req_clocks);
137 return true;
138}
139
140bool dal_display_clock_set_min_clocks_state(
141 struct display_clock *disp_clk,
142 enum clocks_state clocks_state)
143{
144 if (!disp_clk->funcs->set_min_clocks_state)
145 return false;
146
147 disp_clk->funcs->set_min_clocks_state(disp_clk, clocks_state);
148 return true;
149}
150
151uint32_t dal_display_clock_get_dp_ref_clk_frequency(
152 struct display_clock *disp_clk)
153{
154 return disp_clk->funcs->get_dp_ref_clk_frequency(disp_clk);
155}
156
 157/* The second parameter of dal_display_clock_switch_reference_clock()
 158 * is a dummy argument for all pre-DCE 6.0 versions*/
159
160void dal_display_clock_switch_reference_clock(
161 struct display_clock *disp_clk,
162 bool use_external_ref_clk,
163 uint32_t requested_clk_khz)
164{
165 /* TODO: requires Asic Control*/
166 /*
167 struct ac_pixel_clk_params params;
168 struct asic_control *ac =
169 dal_adapter_service_get_asic_control(disp_clk->as);
170 dc_service_memset(&params, 0, sizeof(struct ac_pixel_clk_params));
171
172 params.tgt_pixel_clk_khz = requested_clk_khz;
173 params.flags.SET_EXTERNAL_REF_DIV_SRC = use_external_ref_clk;
174 params.pll_id = disp_clk->id;
175 dal_asic_control_program_display_engine_pll(ac, &params);
176 */
177}
178
179void dal_display_clock_set_dp_ref_clock_source(
180 struct display_clock *disp_clk,
181 enum clock_source_id clk_src)
182{
183 disp_clk->funcs->set_dp_ref_clock_source(disp_clk, clk_src);
184}
185
186void dal_display_clock_store_max_clocks_state(
187 struct display_clock *disp_clk,
188 enum clocks_state max_clocks_state)
189{
190 disp_clk->funcs->store_max_clocks_state(disp_clk, max_clocks_state);
191}
192
193void dal_display_clock_set_clock_state(
194 struct display_clock *disp_clk,
195 struct display_clock_state clk_state)
196{
197 disp_clk->funcs->set_clock_state(disp_clk, clk_state);
198}
199
200struct display_clock_state dal_display_clock_get_clock_state(
201 struct display_clock *disp_clk)
202{
203 return disp_clk->funcs->get_clock_state(disp_clk);
204}
205
206uint32_t dal_display_clock_get_dfs_bypass_threshold(
207 struct display_clock *disp_clk)
208{
209 return disp_clk->funcs->get_dfs_bypass_threshold(disp_clk);
210}
211
212void dal_display_clock_invalid_clock_state(
213 struct display_clock *disp_clk)
214{
215 disp_clk->cur_min_clks_state = CLOCKS_STATE_INVALID;
216}
217
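As a rough illustration of how these wrappers fit together with the DCE8 implementation earlier in the patch, here is a minimal usage sketch. It assumes a valid struct dc_context *ctx supplied by the DM layer and the headers above; example_dispclk_bringup() and the particular call sequence are illustrative, not taken from the driver.

/* Minimal usage sketch (not part of the patch) of the wrappers above,
 * assuming a valid struct dc_context *ctx supplied by the DM layer. */
static void example_dispclk_bringup(struct dc_context *ctx)
{
	struct display_clock *disp_clk = dal_display_clock_dce80_create(ctx);

	if (!disp_clk)
		return;

	/* PPLIB tells us the highest clocks state we may use. */
	dal_display_clock_store_max_clocks_state(disp_clk,
						 CLOCKS_STATE_PERFORMANCE);

	/* Raise the minimum clocks state before programming DISPCLK. */
	if (dal_display_clock_set_min_clocks_state(disp_clk,
						   CLOCKS_STATE_NOMINAL))
		dal_display_clock_set_clock(disp_clk,
			dal_display_clock_get_validation_clock(disp_clk));

	dal_display_clock_destroy(&disp_clk);
}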
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/display_clock.h b/drivers/gpu/drm/amd/display/dc/gpu/display_clock.h
new file mode 100644
index 000000000000..663580d18a09
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/display_clock.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_DISPLAY_CLOCK_H__
27#define __DAL_DISPLAY_CLOCK_H__
28
29#include "include/display_clock_interface.h"
30
31struct display_clock_funcs {
32 void (*destroy)(struct display_clock **to_destroy);
33 bool (*validate)(struct display_clock *disp_clk,
34 struct min_clock_params *params);
35 uint32_t (*calculate_min_clock)(struct display_clock *disp_clk,
36 uint32_t path_num, struct min_clock_params *params);
37 uint32_t (*get_validation_clock)(struct display_clock *disp_clk);
38 void (*set_clock)(struct display_clock *disp_clk,
39 uint32_t requested_clock_khz);
40 uint32_t (*get_clock)(struct display_clock *disp_clk);
41 enum clocks_state (*get_min_clocks_state)(
42 struct display_clock *disp_clk);
43 enum clocks_state (*get_required_clocks_state)(
44 struct display_clock *disp_clk,
45 struct state_dependent_clocks *req_clocks);
46 bool (*set_min_clocks_state)(struct display_clock *disp_clk,
47 enum clocks_state clocks_state);
48 uint32_t (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
49 void (*set_dp_ref_clock_source)(struct display_clock *disp_clk,
50 enum clock_source_id clk_src);
51 void (*store_max_clocks_state)(struct display_clock *disp_clk,
52 enum clocks_state max_clocks_state);
53 void (*set_clock_state)(struct display_clock *disp_clk,
54 struct display_clock_state clk_state);
55 struct display_clock_state (*get_clock_state)(
56 struct display_clock *disp_clk);
57 uint32_t (*get_dfs_bypass_threshold)(struct display_clock *disp_clk);
58
59};
60
61struct display_clock {
62 struct dc_context *ctx;
63 const struct display_clock_funcs *funcs;
64 uint32_t min_display_clk_threshold_khz;
65 enum clock_source_id id;
66
67 enum clocks_state cur_min_clks_state;
68};
69void dal_display_clock_base_set_dp_ref_clock_source(
70 struct display_clock *disp_clk,
71 enum clock_source_id clk_src);
72struct display_clock_state dal_display_clock_base_get_clock_state(
73 struct display_clock *disp_clk);
74uint32_t dal_display_clock_base_get_dfs_bypass_threshold(
75 struct display_clock *disp_clk);
76void dal_display_clock_base_set_clock_state(struct display_clock *disp_clk,
77 struct display_clock_state clk_state);
78bool dal_display_clock_construct_base(
79 struct display_clock *base,
80 struct dc_context *ctx);
81
82uint32_t dal_display_clock_get_validation_clock(struct display_clock *disp_clk);
83
84void dal_display_clock_store_max_clocks_state(
85 struct display_clock *disp_clk,
86 enum clocks_state max_clocks_state);
87
88
89#endif /* __DAL_DISPLAY_CLOCK_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/divider_range.c b/drivers/gpu/drm/amd/display/dc/gpu/divider_range.c
new file mode 100644
index 000000000000..59d44004411b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/divider_range.c
@@ -0,0 +1,127 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26#include "divider_range.h"
27
28bool dal_divider_range_construct(
29 struct divider_range *div_range,
30 uint32_t range_start,
31 uint32_t range_step,
32 uint32_t did_min,
33 uint32_t did_max)
34{
35 div_range->div_range_start = range_start;
36 div_range->div_range_step = range_step;
37 div_range->did_min = did_min;
38 div_range->did_max = did_max;
39
40 if (div_range->div_range_step == 0) {
41 div_range->div_range_step = 1;
42 /*div_range_step cannot be zero*/
43 BREAK_TO_DEBUGGER();
44 }
45 /* Calculate this based on the other inputs.*/
46 /* See DividerRange.h for explanation of */
47 /* the relationship between divider id (DID) and a divider.*/
48 /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
49 /* Maximum divider identified in this range =
50 * (Number of Divider IDs)*Step size between dividers
51 * + The start of this range.*/
52 div_range->div_range_end = (did_max - did_min) * range_step
53 + range_start;
54 return true;
55}
56
57static uint32_t dal_divider_range_calc_divider(
58 struct divider_range *div_range,
59 uint32_t did)
60{
61 /* Is this DID within our range?*/
62 if ((did < div_range->did_min) || (did >= div_range->did_max))
63 return INVALID_DIVIDER;
64
65 return ((did - div_range->did_min) * div_range->div_range_step)
66 + div_range->div_range_start;
67
68}
69
70static uint32_t dal_divider_range_calc_did(
71 struct divider_range *div_range,
72 uint32_t div)
73{
74 uint32_t did;
75 /* Check before dividing.*/
76 if (div_range->div_range_step == 0) {
77 div_range->div_range_step = 1;
78 /*div_range_step cannot be zero*/
79 BREAK_TO_DEBUGGER();
80 }
81 /* Is this divider within our range?*/
82 if ((div < div_range->div_range_start)
83 || (div >= div_range->div_range_end))
84 return INVALID_DID;
 85/* did = ((divider - range_start + (range_step-1)) / range_step) + did_min*/
86 did = div - div_range->div_range_start;
87 did += div_range->div_range_step - 1;
88 did /= div_range->div_range_step;
89 did += div_range->did_min;
90 return did;
91}
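For example, a raw divider of 1196 (11.96) in the first DCE8 range (start 200, step 25, DIDs 0x08..0x3F) gives did = (1196 − 200 + 24) / 25 + 0x08 = 40 + 8 = 0x30, and mapping 0x30 back through dal_divider_range_calc_divider() yields 200 + 40 × 25 = 1200, i.e. the divider is rounded up to the next supported step.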
92
93uint32_t dal_divider_range_get_divider(
94 struct divider_range *div_range,
95 uint32_t ranges_num,
96 uint32_t did)
97{
98 uint32_t div = INVALID_DIVIDER;
99 uint32_t i;
100
101 for (i = 0; i < ranges_num; i++) {
102 /* Calculate divider with given divider ID*/
103 div = dal_divider_range_calc_divider(&div_range[i], did);
104 /* Found a valid return divider*/
105 if (div != INVALID_DIVIDER)
106 break;
107 }
108 return div;
109}
110uint32_t dal_divider_range_get_did(
111 struct divider_range *div_range,
112 uint32_t ranges_num,
113 uint32_t divider)
114{
115 uint32_t did = INVALID_DID;
116 uint32_t i;
117
118 for (i = 0; i < ranges_num; i++) {
119 /* CalcDid returns InvalidDid if a divider ID isn't found*/
120 did = dal_divider_range_calc_did(&div_range[i], divider);
121 /* Found a valid return did*/
122 if (did != INVALID_DID)
123 break;
124 }
125 return did;
126}
127
diff --git a/drivers/gpu/drm/amd/display/dc/gpu/divider_range.h b/drivers/gpu/drm/amd/display/dc/gpu/divider_range.h
new file mode 100644
index 000000000000..e53522f652cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpu/divider_range.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_DIVIDER_RANGE_H__
27#define __DAL_DIVIDER_RANGE_H__
28
29enum divider_error_types {
30 INVALID_DID = 0,
31 INVALID_DIVIDER = 1
32};
33
34struct divider_range {
35 uint32_t div_range_start;
36 /* The end of this range of dividers.*/
37 uint32_t div_range_end;
38 /* The distance between each divider in this range.*/
39 uint32_t div_range_step;
40 /* The divider id for the lowest divider.*/
41 uint32_t did_min;
42 /* The divider id for the highest divider.*/
43 uint32_t did_max;
44};
45
46bool dal_divider_range_construct(
47 struct divider_range *div_range,
48 uint32_t range_start,
49 uint32_t range_step,
50 uint32_t did_min,
51 uint32_t did_max);
52
53uint32_t dal_divider_range_get_divider(
54 struct divider_range *div_range,
55 uint32_t ranges_num,
56 uint32_t did);
57uint32_t dal_divider_range_get_did(
58 struct divider_range *div_range,
59 uint32_t ranges_num,
60 uint32_t divider);
61
62#endif /* __DAL_DIVIDER_RANGE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
new file mode 100644
index 000000000000..83dfc437aae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -0,0 +1,58 @@
1#
2# Makefile for the 'i2c' sub-component of DAL.
 3# It provides control and status of the HW i2c engine of the adapter.
4
5I2CAUX = aux_engine.o engine_base.o i2caux.o i2c_engine.o \
6 i2c_generic_hw_engine.o i2c_hw_engine.o i2c_sw_engine.o
7
8AMD_DAL_I2CAUX = $(addprefix $(AMDDALPATH)/dc/i2caux/,$(I2CAUX))
9
10AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX)
11
12###############################################################################
13# DCE 8x family
14###############################################################################
15I2CAUX_DCE80 = i2caux_dce80.o i2c_hw_engine_dce80.o \
16 i2c_sw_engine_dce80.o
17
18AMD_DAL_I2CAUX_DCE80 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce80/,$(I2CAUX_DCE80))
19
20AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE80)
21
22###############################################################################
23# DCE 100 family
24###############################################################################
25I2CAUX_DCE100 = i2caux_dce100.o
26
27AMD_DAL_I2CAUX_DCE100 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce100/,$(I2CAUX_DCE100))
28
29AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE100)
30
31###############################################################################
32# DCE 110 family
33###############################################################################
34I2CAUX_DCE110 = i2caux_dce110.o i2c_sw_engine_dce110.o i2c_hw_engine_dce110.o \
35 aux_engine_dce110.o
36
37AMD_DAL_I2CAUX_DCE110 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce110/,$(I2CAUX_DCE110))
38
39AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE110)
40
41###############################################################################
42# DCE 112 family
43###############################################################################
44I2CAUX_DCE112 = i2caux_dce112.o
45
46AMD_DAL_I2CAUX_DCE112 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce112/,$(I2CAUX_DCE112))
47
48AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
49
50###############################################################################
51# Diagnostics on FPGA
52###############################################################################
53I2CAUX_DIAG = i2caux_diag.o
54
55AMD_DAL_I2CAUX_DIAG = $(addprefix $(AMDDALPATH)/dc/i2caux/diagnostics/,$(I2CAUX_DIAG))
56
57AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DIAG)
58
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
new file mode 100644
index 000000000000..667660f3fa26
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -0,0 +1,567 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "aux_engine.h"
39
40/*
41 * Post-requisites: headers required by this unit
42 */
43
44#include "include/link_service_types.h"
45
46/*
47 * This unit
48 */
49
50enum {
51 AUX_INVALID_REPLY_RETRY_COUNTER = 1,
52 AUX_TIMED_OUT_RETRY_COUNTER = 2,
53 AUX_DEFER_RETRY_COUNTER = 6
54};
55
56#define FROM_ENGINE(ptr) \
57 container_of((ptr), struct aux_engine, base)
58
59enum i2caux_engine_type dal_aux_engine_get_engine_type(
60 const struct engine *engine)
61{
62 return I2CAUX_ENGINE_TYPE_AUX;
63}
64
65bool dal_aux_engine_acquire(
66 struct engine *engine,
67 struct ddc *ddc)
68{
69 struct aux_engine *aux_engine = FROM_ENGINE(engine);
70
71 enum gpio_result result;
72
73 result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
74 GPIO_DDC_CONFIG_TYPE_MODE_AUX);
75
76 if (result != GPIO_RESULT_OK)
77 return false;
78
79 if (!aux_engine->funcs->acquire_engine(aux_engine)) {
80 dal_ddc_close(ddc);
81 return false;
82 }
83
84 engine->ddc = ddc;
85
86 return true;
87}
88
89struct read_command_context {
90 uint8_t *buffer;
91 uint32_t current_read_length;
92 uint32_t offset;
93 enum i2caux_transaction_status status;
94
95 struct aux_request_transaction_data request;
96 struct aux_reply_transaction_data reply;
97
98 uint8_t returned_byte;
99
100 uint32_t timed_out_retry_aux;
101 uint32_t invalid_reply_retry_aux;
102 uint32_t defer_retry_aux;
103 uint32_t defer_retry_i2c;
104 uint32_t invalid_reply_retry_aux_on_ack;
105
106 bool transaction_complete;
107 bool operation_succeeded;
108};
109
110static void process_read_reply(
111 struct aux_engine *engine,
112 struct read_command_context *ctx)
113{
114 engine->funcs->process_channel_reply(engine, &ctx->reply);
115
116 switch (ctx->reply.status) {
117 case AUX_TRANSACTION_REPLY_AUX_ACK:
118 ctx->defer_retry_aux = 0;
119 if (ctx->returned_byte > ctx->current_read_length) {
120 ctx->status =
121 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
122 ctx->operation_succeeded = false;
123 } else if (ctx->returned_byte < ctx->current_read_length) {
124 ctx->current_read_length -= ctx->returned_byte;
125
126 ctx->offset += ctx->returned_byte;
127
128 ++ctx->invalid_reply_retry_aux_on_ack;
129
130 if (ctx->invalid_reply_retry_aux_on_ack >
131 AUX_INVALID_REPLY_RETRY_COUNTER) {
132 ctx->status =
133 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
134 ctx->operation_succeeded = false;
135 }
136 } else {
137 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
138 ctx->transaction_complete = true;
139 ctx->operation_succeeded = true;
140 }
141 break;
142 case AUX_TRANSACTION_REPLY_AUX_NACK:
143 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
144 ctx->operation_succeeded = false;
145 break;
146 case AUX_TRANSACTION_REPLY_AUX_DEFER:
147 ++ctx->defer_retry_aux;
148
149 if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
150 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
151 ctx->operation_succeeded = false;
152 }
153 break;
154 case AUX_TRANSACTION_REPLY_I2C_DEFER:
155 ctx->defer_retry_aux = 0;
156
157 ++ctx->defer_retry_i2c;
158
159 if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
160 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
161 ctx->operation_succeeded = false;
162 }
163 break;
164 default:
165 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
166 ctx->operation_succeeded = false;
167 }
168}
169
170static void process_read_request(
171 struct aux_engine *engine,
172 struct read_command_context *ctx)
173{
174 enum aux_channel_operation_result operation_result;
175
176 engine->funcs->submit_channel_request(engine, &ctx->request);
177
178 operation_result = engine->funcs->get_channel_status(
179 engine, &ctx->returned_byte);
180
181 switch (operation_result) {
182 case AUX_CHANNEL_OPERATION_SUCCEEDED:
183 if (ctx->returned_byte > ctx->current_read_length) {
184 ctx->status =
185 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
186 ctx->operation_succeeded = false;
187 } else {
188 ctx->timed_out_retry_aux = 0;
189 ctx->invalid_reply_retry_aux = 0;
190
191 ctx->reply.length = ctx->returned_byte;
192 ctx->reply.data = ctx->buffer;
193
194 process_read_reply(engine, ctx);
195 }
196 break;
197 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
198 ++ctx->invalid_reply_retry_aux;
199
200 if (ctx->invalid_reply_retry_aux >
201 AUX_INVALID_REPLY_RETRY_COUNTER) {
202 ctx->status =
203 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
204 ctx->operation_succeeded = false;
205 } else
206 udelay(400);
207 break;
208 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
209 ++ctx->timed_out_retry_aux;
210
211 if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
212 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
213 ctx->operation_succeeded = false;
214 } else {
215 /* DP 1.2a, table 2-58:
216 * "S3: AUX Request CMD PENDING:
217 * retry 3 times, with 400usec wait on each"
218 * The HW timeout is set to 550usec,
219 * so we should not wait here */
220 }
221 break;
222 default:
223 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
224 ctx->operation_succeeded = false;
225 }
226}
227
228static bool read_command(
229 struct aux_engine *engine,
230 struct i2caux_transaction_request *request,
231 bool middle_of_transaction)
232{
233 struct read_command_context ctx;
234
235 ctx.buffer = request->payload.data;
236 ctx.current_read_length = request->payload.length;
237 ctx.offset = 0;
238 ctx.timed_out_retry_aux = 0;
239 ctx.invalid_reply_retry_aux = 0;
240 ctx.defer_retry_aux = 0;
241 ctx.defer_retry_i2c = 0;
242 ctx.invalid_reply_retry_aux_on_ack = 0;
243 ctx.transaction_complete = false;
244 ctx.operation_succeeded = true;
245
246 if (request->payload.address_space ==
247 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
248 ctx.request.type = AUX_TRANSACTION_TYPE_DP;
249 ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
250 ctx.request.address = request->payload.address;
251 } else if (request->payload.address_space ==
252 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
253 ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
254 ctx.request.action = middle_of_transaction ?
255 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
256 I2CAUX_TRANSACTION_ACTION_I2C_READ;
257 ctx.request.address = request->payload.address >> 1;
258 } else {
259 /* in DAL2, there was no return in such a case */
260 BREAK_TO_DEBUGGER();
261 return false;
262 }
263
264 ctx.request.delay = 0;
265
266 do {
267 memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
268
269 ctx.request.data = ctx.buffer + ctx.offset;
270 ctx.request.length = ctx.current_read_length;
271
272 process_read_request(engine, &ctx);
273
274 request->status = ctx.status;
275
276 if (ctx.operation_succeeded && !ctx.transaction_complete)
277 if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
278 msleep(engine->delay);
279 } while (ctx.operation_succeeded && !ctx.transaction_complete);
280
281 return ctx.operation_succeeded;
282}
283
284struct write_command_context {
285 bool mot;
286
287 uint8_t *buffer;
288 uint32_t current_write_length;
289 enum i2caux_transaction_status status;
290
291 struct aux_request_transaction_data request;
292 struct aux_reply_transaction_data reply;
293
294 uint8_t returned_byte;
295
296 uint32_t timed_out_retry_aux;
297 uint32_t invalid_reply_retry_aux;
298 uint32_t defer_retry_aux;
299 uint32_t defer_retry_i2c;
300 uint32_t max_defer_retry;
301 uint32_t ack_m_retry;
302
303 uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
304
305 bool transaction_complete;
306 bool operation_succeeded;
307};
308
309static void process_write_reply(
310 struct aux_engine *engine,
311 struct write_command_context *ctx)
312{
313 engine->funcs->process_channel_reply(engine, &ctx->reply);
314
315 switch (ctx->reply.status) {
316 case AUX_TRANSACTION_REPLY_AUX_ACK:
317 ctx->operation_succeeded = true;
318
319 if (ctx->returned_byte) {
320 ctx->request.action = ctx->mot ?
321 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
322 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
323
324 ctx->current_write_length = 0;
325
326 ++ctx->ack_m_retry;
327
328 if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
329 ctx->status =
330 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
331 ctx->operation_succeeded = false;
332 } else
333 udelay(300);
334 } else {
335 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
336 ctx->defer_retry_aux = 0;
337 ctx->ack_m_retry = 0;
338 ctx->transaction_complete = true;
339 }
340 break;
341 case AUX_TRANSACTION_REPLY_AUX_NACK:
342 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
343 ctx->operation_succeeded = false;
344 break;
345 case AUX_TRANSACTION_REPLY_AUX_DEFER:
346 ++ctx->defer_retry_aux;
347
348 if (ctx->defer_retry_aux > ctx->max_defer_retry) {
349 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
350 ctx->operation_succeeded = false;
351 }
352 break;
353 case AUX_TRANSACTION_REPLY_I2C_DEFER:
354 ctx->defer_retry_aux = 0;
355 ctx->current_write_length = 0;
356
357 ctx->request.action = ctx->mot ?
358 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
359 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
360
361 ++ctx->defer_retry_i2c;
362
363 if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
364 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
365 ctx->operation_succeeded = false;
366 }
367 break;
368 default:
369 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
370 ctx->operation_succeeded = false;
371 }
372}
373
374static void process_write_request(
375 struct aux_engine *engine,
376 struct write_command_context *ctx)
377{
378 enum aux_channel_operation_result operation_result;
379
380 engine->funcs->submit_channel_request(engine, &ctx->request);
381
382 operation_result = engine->funcs->get_channel_status(
383 engine, &ctx->returned_byte);
384
385 switch (operation_result) {
386 case AUX_CHANNEL_OPERATION_SUCCEEDED:
387 ctx->timed_out_retry_aux = 0;
388 ctx->invalid_reply_retry_aux = 0;
389
390 ctx->reply.length = ctx->returned_byte;
391 ctx->reply.data = ctx->reply_data;
392
393 process_write_reply(engine, ctx);
394 break;
395 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
396 ++ctx->invalid_reply_retry_aux;
397
398 if (ctx->invalid_reply_retry_aux >
399 AUX_INVALID_REPLY_RETRY_COUNTER) {
400 ctx->status =
401 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
402 ctx->operation_succeeded = false;
403 } else
404 udelay(400);
405 break;
406 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
407 ++ctx->timed_out_retry_aux;
408
409 if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
410 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
411 ctx->operation_succeeded = false;
412 } else {
413 /* DP 1.2a, table 2-58:
414 * "S3: AUX Request CMD PENDING:
415 * retry 3 times, with 400usec wait on each"
416 * The HW timeout is set to 550usec,
417 * so we should not wait here */
418 }
419 break;
420 default:
421 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
422 ctx->operation_succeeded = false;
423 }
424}
425
426static bool write_command(
427 struct aux_engine *engine,
428 struct i2caux_transaction_request *request,
429 bool middle_of_transaction)
430{
431 struct write_command_context ctx;
432
433 ctx.mot = middle_of_transaction;
434 ctx.buffer = request->payload.data;
435 ctx.current_write_length = request->payload.length;
436 ctx.timed_out_retry_aux = 0;
437 ctx.invalid_reply_retry_aux = 0;
438 ctx.defer_retry_aux = 0;
439 ctx.defer_retry_i2c = 0;
440 ctx.ack_m_retry = 0;
441 ctx.transaction_complete = false;
442 ctx.operation_succeeded = true;
443
444 if (request->payload.address_space ==
445 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
446 ctx.request.type = AUX_TRANSACTION_TYPE_DP;
447 ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
448 ctx.request.address = request->payload.address;
449 } else if (request->payload.address_space ==
450 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
451 ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
452 ctx.request.action = middle_of_transaction ?
453 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
454 I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
455 ctx.request.address = request->payload.address >> 1;
456 } else {
457 /* in DAL2, there was no return in such a case */
458 BREAK_TO_DEBUGGER();
459 return false;
460 }
461
462 ctx.request.delay = 0;
463
464 ctx.max_defer_retry =
465 (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
466 engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
467
468 do {
469 ctx.request.data = ctx.buffer;
470 ctx.request.length = ctx.current_write_length;
471
472 process_write_request(engine, &ctx);
473
474 request->status = ctx.status;
475
476 if (ctx.operation_succeeded && !ctx.transaction_complete)
477 if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
478 msleep(engine->delay);
479 } while (ctx.operation_succeeded && !ctx.transaction_complete);
480
481 return ctx.operation_succeeded;
482}
483
484static bool end_of_transaction_command(
485 struct aux_engine *engine,
486 struct i2caux_transaction_request *request)
487{
488 struct i2caux_transaction_request dummy_request;
489 uint8_t dummy_data;
490
491 /* [tcheng] We only need to send the stop (read with MOT = 0)
492 * for I2C-over-Aux, not native AUX */
493
494 if (request->payload.address_space !=
495 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
496 return false;
497
498 dummy_request.operation = request->operation;
499 dummy_request.payload.address_space = request->payload.address_space;
500 dummy_request.payload.address = request->payload.address;
501
502 /*
503 * Add a dummy byte due to some receiver quirk
504 * where one byte is sent along with MOT = 0.
505 * Ideally this should be 0.
506 */
507
508 dummy_request.payload.length = 0;
509 dummy_request.payload.data = &dummy_data;
510
511 if (request->operation == I2CAUX_TRANSACTION_READ)
512 return read_command(engine, &dummy_request, false);
513 else
514 return write_command(engine, &dummy_request, false);
515
516 /* according to Syed, DoDummyMOT is not needed now */
517}
518
519bool dal_aux_engine_submit_request(
520 struct engine *engine,
521 struct i2caux_transaction_request *request,
522 bool middle_of_transaction)
523{
524 struct aux_engine *aux_engine = FROM_ENGINE(engine);
525
526 bool result;
527 bool mot_used = true;
528
529 switch (request->operation) {
530 case I2CAUX_TRANSACTION_READ:
531 result = read_command(aux_engine, request, mot_used);
532 break;
533 case I2CAUX_TRANSACTION_WRITE:
534 result = write_command(aux_engine, request, mot_used);
535 break;
536 default:
537 result = false;
538 }
539
540 /* [tcheng]
541 * need to send a stop for the last transaction to free up the AUX;
542 * if the above command fails, this is treated as the last transaction */
543
544 if (!middle_of_transaction || !result)
545 end_of_transaction_command(aux_engine, request);
546
547 /* mask AUX interrupt */
548
549 return result;
550}
551
552bool dal_aux_engine_construct(
553 struct aux_engine *engine,
554 struct dc_context *ctx)
555{
556 if (!dal_i2caux_construct_engine(&engine->base, ctx))
557 return false;
558 engine->delay = 0;
559 engine->max_defer_write_retry = 0;
560 return true;
561}
562
563void dal_aux_engine_destruct(
564 struct aux_engine *engine)
565{
566 dal_i2caux_destruct_engine(&engine->base);
567}
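
For clarity on how the submit path above is driven, here is a hedged caller-side sketch (the function name, the 0xA0 DDC address and the 128-byte length are illustrative, and the engine is assumed to be already acquired). read_command() shifts the 8-bit I2C address right by one, and passing middle_of_transaction = false makes dal_aux_engine_submit_request() follow up with the MOT = 0 stop via end_of_transaction_command():

	/* illustrative sketch, not part of the driver */
	static bool example_edid_read(struct engine *eng, uint8_t *edid)
	{
		struct i2caux_transaction_request req = {
			.operation = I2CAUX_TRANSACTION_READ,
			.payload = {
				.address_space =
					I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C,
				.address = 0xA0, /* 8-bit DDC address; >> 1 gives 0x50 */
				.length = 128,
				.data = edid,
			},
		};

		/* not middle of transaction: a stop (read, MOT = 0) follows */
		return dal_aux_engine_submit_request(eng, &req, false);
	}
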
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
new file mode 100644
index 000000000000..b5d6c79eb029
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_AUX_ENGINE_H__
27#define __DAL_AUX_ENGINE_H__
28
29enum aux_transaction_type {
30 AUX_TRANSACTION_TYPE_DP,
31 AUX_TRANSACTION_TYPE_I2C
32};
33
34struct aux_request_transaction_data {
35 enum aux_transaction_type type;
36 enum i2caux_transaction_action action;
37 /* 20-bit AUX channel transaction address */
38 uint32_t address;
39 /* delay, in 100-microsecond units */
40 uint8_t delay;
41 uint32_t length;
42 uint8_t *data;
43};
44
45enum aux_transaction_reply {
46 AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
47 AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
48 AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
49
50 AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
51 AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
52 AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
53
54 AUX_TRANSACTION_REPLY_INVALID = 0xFF
55};
56
57struct aux_reply_transaction_data {
58 enum aux_transaction_reply status;
59 uint32_t length;
60 uint8_t *data;
61};
62
63enum aux_channel_operation_result {
64 AUX_CHANNEL_OPERATION_SUCCEEDED,
65 AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
66 AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
67 AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
68};
69
70struct aux_engine;
71
72struct aux_engine_funcs {
73 void (*destroy)(
74 struct aux_engine **ptr);
75 bool (*acquire_engine)(
76 struct aux_engine *engine);
77 void (*configure)(
78 struct aux_engine *engine,
79 union aux_config cfg);
80 void (*submit_channel_request)(
81 struct aux_engine *engine,
82 struct aux_request_transaction_data *request);
83 void (*process_channel_reply)(
84 struct aux_engine *engine,
85 struct aux_reply_transaction_data *reply);
86 enum aux_channel_operation_result (*get_channel_status)(
87 struct aux_engine *engine,
88 uint8_t *returned_bytes);
89};
90
91struct aux_engine {
92 struct engine base;
93 const struct aux_engine_funcs *funcs;
94 /* following values are expressed in milliseconds */
95 uint32_t delay;
96 uint32_t max_defer_write_retry;
97
98 bool acquire_reset;
99};
100
101bool dal_aux_engine_construct(
102 struct aux_engine *engine,
103 struct dc_context *ctx);
104
105void dal_aux_engine_destruct(
106 struct aux_engine *engine);
107bool dal_aux_engine_submit_request(
108 struct engine *ptr,
109 struct i2caux_transaction_request *request,
110 bool middle_of_transaction);
111bool dal_aux_engine_acquire(
112 struct engine *ptr,
113 struct ddc *ddc);
114enum i2caux_engine_type dal_aux_engine_get_engine_type(
115 const struct engine *engine);
116
117#endif
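
As a quick reference for the request structure declared above, a native-AUX DPCD read as consumed by submit_channel_request() could be filled in as follows (a hedged sketch: the DPCD offset 0x00000 and the 1-byte length are illustrative):

	/* illustrative sketch, not part of the driver */
	static void example_fill_dpcd_read(
		struct aux_request_transaction_data *req,
		uint8_t *dest)
	{
		req->type = AUX_TRANSACTION_TYPE_DP;
		req->action = I2CAUX_TRANSACTION_ACTION_DP_READ;
		req->address = 0x00000; /* 20-bit DPCD address (DPCD_REV) */
		req->delay = 0;         /* start delay, in 100 us units */
		req->length = 1;
		req->data = dest;
	}
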
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
new file mode 100644
index 000000000000..0712cafb4c42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/i2caux_interface.h"
29#include "../i2caux.h"
30#include "../engine.h"
31#include "../i2c_engine.h"
32#include "../i2c_sw_engine.h"
33#include "../i2c_hw_engine.h"
34
35#include "../dce110/aux_engine_dce110.h"
36#include "../dce110/i2c_hw_engine_dce110.h"
37#include "../dce110/i2caux_dce110.h"
38
39#include "dce/dce_10_0_d.h"
40#include "dce/dce_10_0_sh_mask.h"
41
42/* set register offset */
43#define SR(reg_name)\
44 .reg_name = mm ## reg_name
45
46/* set register offset with instance */
47#define SRI(reg_name, block, id)\
48 .reg_name = mm ## block ## id ## _ ## reg_name
49
50#define aux_regs(id)\
51[id] = {\
52 AUX_COMMON_REG_LIST(id), \
53 .AUX_RESET_MASK = 0 \
54}
55
56#define hw_engine_regs(id)\
57{\
58 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
59}
60
61static const struct dce110_aux_registers dce100_aux_regs[] = {
62 aux_regs(0),
63 aux_regs(1),
64 aux_regs(2),
65 aux_regs(3),
66 aux_regs(4),
67 aux_regs(5),
68};
69
70static const struct dce110_i2c_hw_engine_registers dce100_hw_engine_regs[] = {
71 hw_engine_regs(1),
72 hw_engine_regs(2),
73 hw_engine_regs(3),
74 hw_engine_regs(4),
75 hw_engine_regs(5),
76 hw_engine_regs(6)
77};
78
79static const struct dce110_i2c_hw_engine_shift i2c_shift = {
80 I2C_COMMON_MASK_SH_LIST_DCE100(__SHIFT)
81};
82
83static const struct dce110_i2c_hw_engine_mask i2c_mask = {
84 I2C_COMMON_MASK_SH_LIST_DCE100(_MASK)
85};
86
87struct i2caux *dal_i2caux_dce100_create(
88 struct dc_context *ctx)
89{
90 struct i2caux_dce110 *i2caux_dce110 =
91 dm_alloc(sizeof(struct i2caux_dce110));
92
93 if (!i2caux_dce110) {
94 ASSERT_CRITICAL(false);
95 return NULL;
96 }
97
98 if (dal_i2caux_dce110_construct(
99 i2caux_dce110,
100 ctx,
101 dce100_aux_regs,
102 dce100_hw_engine_regs,
103 &i2c_shift,
104 &i2c_mask))
105 return &i2caux_dce110->base;
106
107 ASSERT_CRITICAL(false);
108
109 dm_free(i2caux_dce110);
110
111 return NULL;
112}
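
For readability, this is roughly what the aux_regs(0) entry above expands to, given the SR()/SRI() macros at the top of this file and AUX_COMMON_REG_LIST() from aux_engine_dce110.h (an illustration of the expansion, not a verbatim preprocessor dump; the mm* tokens are the DCE10 register offsets from dce/dce_10_0_d.h):

	[0] = {
		.AUX_CONTROL           = mmDP_AUX0_AUX_CONTROL,
		.AUX_ARB_CONTROL       = mmDP_AUX0_AUX_ARB_CONTROL,
		.AUX_SW_DATA           = mmDP_AUX0_AUX_SW_DATA,
		.AUX_SW_CONTROL        = mmDP_AUX0_AUX_SW_CONTROL,
		.AUX_INTERRUPT_CONTROL = mmDP_AUX0_AUX_INTERRUPT_CONTROL,
		.AUX_SW_STATUS         = mmDP_AUX0_AUX_SW_STATUS,
		.AUXN_IMPCAL           = mmAUXN_IMPCAL,
		.AUXP_IMPCAL           = mmAUXP_IMPCAL,
		.AUX_RESET_MASK        = 0,
	},
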
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
new file mode 100644
index 000000000000..2b508d3e0ef4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCE100_H__
27#define __DAL_I2C_AUX_DCE100_H__
28
29struct i2caux *dal_i2caux_dce100_create(
30 struct dc_context *ctx);
31
32#endif /* __DAL_I2C_AUX_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
new file mode 100644
index 000000000000..f49fd1ad3807
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -0,0 +1,456 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../engine.h"
33#include "../aux_engine.h"
34
35/*
36 * Header of this unit
37 */
38
39#include "aux_engine_dce110.h"
40
41/*
42 * Post-requisites: headers required by this unit
43 */
44#include "dce/dce_11_0_sh_mask.h"
45
46#define CTX \
47 aux110->base.base.ctx
48#define REG(reg_name)\
49 (aux110->regs->reg_name)
50#include "reg_helper.h"
51
52/*
53 * This unit
54 */
55
56/*
57 * @brief
58 * Cast 'struct aux_engine *'
59 * to 'struct aux_engine_dce110 *'
60 */
61#define FROM_AUX_ENGINE(ptr) \
62 container_of((ptr), struct aux_engine_dce110, base)
63
64/*
65 * @brief
66 * Cast 'struct engine *'
67 * to 'struct aux_engine_dce110 *'
68 */
69#define FROM_ENGINE(ptr) \
70 FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
71
72static void release_engine(
73 struct engine *engine)
74{
75 struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
76
77 REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
78}
79
80static void destruct(
81 struct aux_engine_dce110 *engine);
82
83static void destroy(
84 struct aux_engine **aux_engine)
85{
86 struct aux_engine_dce110 *engine = FROM_AUX_ENGINE(*aux_engine);
87
88 destruct(engine);
89
90 dm_free(engine);
91
92 *aux_engine = NULL;
93}
94
95#define SW_CAN_ACCESS_AUX 1
96
97static bool acquire_engine(
98 struct aux_engine *engine)
99{
100 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
101
102 /* enable AUX before requesting SW access to AUX */
103 uint32_t value = REG_READ(AUX_CONTROL);
104 uint32_t field = get_reg_field_value(value,
105 AUX_CONTROL,
106 AUX_EN);
107
108 if (field == 0) {
109 set_reg_field_value(
110 value,
111 1,
112 AUX_CONTROL,
113 AUX_EN);
114
115 if (REG(AUX_RESET_MASK)) {
116 /* reset the DP_AUX block as part of the enable sequence */
117 set_reg_field_value(
118 value,
119 1,
120 AUX_CONTROL,
121 AUX_RESET);
122 }
123
124 REG_WRITE(AUX_CONTROL, value);
125
126 if (REG(AUX_RESET_MASK)) {
127 /* poll HW to make sure the reset is done */
128
129 REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
130 1, 11);
131
132 set_reg_field_value(
133 value,
134 0,
135 AUX_CONTROL,
136 AUX_RESET);
137
138 REG_WRITE(AUX_CONTROL, value);
139
140 REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
141 1, 11);
142 }
143 } /*if (field)*/
144
145 /* request SW to access AUX */
146 REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
147
148 value = REG_READ(AUX_ARB_CONTROL);
149 field = get_reg_field_value(
150 value,
151 AUX_ARB_CONTROL,
152 AUX_REG_RW_CNTL_STATUS);
153
154 return (field == SW_CAN_ACCESS_AUX);
155}
156
157#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
158 ((command) | ((0xF0000 & (address)) >> 16))
159
160#define COMPOSE_AUX_SW_DATA_8_15(address) \
161 ((0xFF00 & (address)) >> 8)
162
163#define COMPOSE_AUX_SW_DATA_0_7(address) \
164 (0xFF & (address))
165
166static void submit_channel_request(
167 struct aux_engine *engine,
168 struct aux_request_transaction_data *request)
169{
170 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
171 uint32_t value;
172 uint32_t length;
173
174 bool is_write =
175 ((request->type == AUX_TRANSACTION_TYPE_DP) &&
176 (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
177 ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
178 ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
179 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
180
181 /* clear_aux_error */
182 REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
183 1,
184 0);
185
186 REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
187 1,
188 0);
189
190 /* force_default_calibrate */
191 REG_UPDATE_1BY1_2(AUXN_IMPCAL,
192 AUXN_IMPCAL_ENABLE, 1,
193 AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
194
195 /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP only toggles OVERRIDE? */
196
197 REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
198 1,
199 0);
200
201 /* set the delay and the number of bytes to write */
202
203 /* The length includes
204 * the 4-bit header and the 20-bit address
205 * (that is, 3 bytes).
206 * If the requested length is non-zero, an additional
207 * byte specifying the length is required. */
208
209 length = request->length ? 4 : 3;
210 if (is_write)
211 length += request->length;
212
213 REG_UPDATE_2(AUX_SW_CONTROL,
214 AUX_SW_START_DELAY, request->delay,
215 AUX_SW_WR_BYTES, length);
216
217 /* program action and address and payload data (if 'is_write') */
218 value = REG_UPDATE_4(AUX_SW_DATA,
219 AUX_SW_INDEX, 0,
220 AUX_SW_DATA_RW, 0,
221 AUX_SW_AUTOINCREMENT_DISABLE, 1,
222 AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
223
224 value = REG_SET_2(AUX_SW_DATA, value,
225 AUX_SW_AUTOINCREMENT_DISABLE, 0,
226 AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
227
228 value = REG_SET(AUX_SW_DATA, value,
229 AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
230
231 if (request->length) {
232 value = REG_SET(AUX_SW_DATA, value,
233 AUX_SW_DATA, request->length - 1);
234 }
235
236 if (is_write) {
237 /* Load the HW buffer with the data to be sent.
238 * This is only relevant for a write operation.
239 * For a read, the received data will be
240 * processed in process_channel_reply(). */
241 uint32_t i = 0;
242
243 while (i < request->length) {
244 value = REG_SET(AUX_SW_DATA, value,
245 AUX_SW_DATA, request->data[i]);
246
247 ++i;
248 }
249 }
250
251 REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
252 REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
253 10, aux110->timeout_period/10);
254 REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
255}
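/* Worked example for the length programming above (illustrative numbers):
 * a 16-byte native AUX write to DPCD address 0x00100 needs the 3-byte
 * header (4-bit command + 20-bit address) plus one byte carrying
 * "length - 1", so AUX_SW_WR_BYTES = 4 + 16 = 20, and the command and
 * address bytes compose as:
 *   COMPOSE_AUX_SW_DATA_16_20(cmd, 0x00100) -> cmd | 0x0
 *   COMPOSE_AUX_SW_DATA_8_15(0x00100)       -> 0x01
 *   COMPOSE_AUX_SW_DATA_0_7(0x00100)        -> 0x00
 */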
256
257static void process_channel_reply(
258 struct aux_engine *engine,
259 struct aux_reply_transaction_data *reply)
260{
261 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
262
263 /* Need to do a read to get the number of bytes to process.
264 * Alternatively, this information could be passed in,
265 * but that would cause coupling, which isn't good either. */
266
267 uint32_t bytes_replied;
268 uint32_t value;
269
270 value = REG_GET(AUX_SW_STATUS,
271 AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
272
273 if (bytes_replied) {
274 uint32_t reply_result;
275
276 REG_UPDATE_1BY1_3(AUX_SW_DATA,
277 AUX_SW_INDEX, 0,
278 AUX_SW_AUTOINCREMENT_DISABLE, 1,
279 AUX_SW_DATA_RW, 1);
280
281 REG_GET(AUX_SW_DATA,
282 AUX_SW_DATA, &reply_result);
283
284 reply_result = reply_result >> 4;
285
286 switch (reply_result) {
287 case 0: /* ACK */ {
288 uint32_t i = 0;
289
290 /* first byte was already used
291 * to get the command status */
292 --bytes_replied;
293
294 while (i < bytes_replied) {
295 uint32_t aux_sw_data_val;
296
297 REG_GET(AUX_SW_DATA,
298 AUX_SW_DATA, &aux_sw_data_val);
299
300 reply->data[i] = aux_sw_data_val;
301 ++i;
302 }
303
304 reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
305 }
306 break;
307 case 1: /* NACK */
308 reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
309 break;
310 case 2: /* DEFER */
311 reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
312 break;
313 case 4: /* AUX ACK / I2C NACK */
314 reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
315 break;
316 case 8: /* AUX ACK / I2C DEFER */
317 reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
318 break;
319 default:
320 reply->status = AUX_TRANSACTION_REPLY_INVALID;
321 }
322 } else {
323 /* Need to handle an error case...
324 * Ideally the upper layer won't call this function
325 * if the number of bytes in the reply was 0,
326 * because an error should already have been asserted
327 * and handled by then.
328 * For the hot-plug case, however, this can happen. */
329 if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
330 ASSERT_CRITICAL(false);
331 }
332}
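/* Worked decode example (illustrative): the reply command sits in the
 * upper nibble of the first byte read back from AUX_SW_DATA, so a raw
 * byte of 0x20 gives reply_result = 0x20 >> 4 = 2 -> AUX DEFER, while
 * 0x40 gives 4 -> AUX ACK / I2C NACK, matching the switch above. */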
333
334static enum aux_channel_operation_result get_channel_status(
335 struct aux_engine *engine,
336 uint8_t *returned_bytes)
337{
338 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
339
340 uint32_t value;
341
342 if (returned_bytes == NULL) {
343 /* caller passed a NULL pointer */
344 ASSERT_CRITICAL(false);
345 return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
346 }
347 *returned_bytes = 0;
348
349 /* poll to make sure that SW_DONE is asserted */
350 value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
351 10, aux110->timeout_period/10);
352
353 /* Note that the following bits are set in 'status.bits'
354 * during CTS 4.2.1.2:
355 * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
356 * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
357 *
358 * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
359 * HW debugging bit and should be ignored. */
360 if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
361 if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
362 (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
363 return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
364
365 else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
366 (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
367 (value &
368 AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
369 (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
370 return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
371
372 *returned_bytes = get_reg_field_value(value,
373 AUX_SW_STATUS,
374 AUX_SW_REPLY_BYTE_COUNT);
375
376 if (*returned_bytes == 0)
377 return
378 AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
379 else {
380 *returned_bytes -= 1;
381 return AUX_CHANNEL_OPERATION_SUCCEEDED;
382 }
383 } else {
384 /*time_elapsed >= aux_engine->timeout_period */
385 if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
386 ASSERT_CRITICAL(false);
387
388 return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
389 }
390}
391
392static const struct aux_engine_funcs aux_engine_funcs = {
393 .destroy = destroy,
394 .acquire_engine = acquire_engine,
395 .submit_channel_request = submit_channel_request,
396 .process_channel_reply = process_channel_reply,
397 .get_channel_status = get_channel_status,
398};
399
400static const struct engine_funcs engine_funcs = {
401 .release_engine = release_engine,
402 .submit_request = dal_aux_engine_submit_request,
403 .get_engine_type = dal_aux_engine_get_engine_type,
404 .acquire = dal_aux_engine_acquire,
405};
406
407static bool construct(
408 struct aux_engine_dce110 *engine,
409 const struct aux_engine_dce110_init_data *aux_init_data)
410{
411 if (!dal_aux_engine_construct(
412 &engine->base, aux_init_data->ctx)) {
413 ASSERT_CRITICAL(false);
414 return false;
415 }
416 engine->base.base.funcs = &engine_funcs;
417 engine->base.funcs = &aux_engine_funcs;
418
419 engine->timeout_period = aux_init_data->timeout_period;
420 engine->regs = aux_init_data->regs;
421
422 return true;
423}
424
425static void destruct(
426 struct aux_engine_dce110 *engine)
427{
428 dal_aux_engine_destruct(&engine->base);
429}
430
431struct aux_engine *dal_aux_engine_dce110_create(
432 const struct aux_engine_dce110_init_data *aux_init_data)
433{
434 struct aux_engine_dce110 *engine;
435
436 if (!aux_init_data) {
437 ASSERT_CRITICAL(false);
438 return NULL;
439 }
440
441 engine = dm_alloc(sizeof(*engine));
442
443 if (!engine) {
444 ASSERT_CRITICAL(false);
445 return NULL;
446 }
447
448 if (construct(engine, aux_init_data))
449 return &engine->base;
450
451 ASSERT_CRITICAL(false);
452
453 dm_free(engine);
454
455 return NULL;
456}
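
Putting the pieces of this file together, a hedged creation/usage sketch (ctx, aux_regs and the timeout value are assumed to come from the caller; error handling is trimmed):

	/* illustrative sketch, not part of the driver */
	struct aux_engine_dce110_init_data init = {
		.engine_id = 0,
		.timeout_period = 1000,	/* illustrative timeout */
		.ctx = ctx,
		.regs = aux_regs,	/* one dce110_aux_registers entry */
	};
	struct aux_engine *aux = dal_aux_engine_dce110_create(&init);

	if (aux) {
		if (aux->funcs->acquire_engine(aux)) {
			/* ... aux->base.funcs->submit_request(...) ... */
			aux->base.funcs->release_engine(&aux->base);
		}
		aux->funcs->destroy(&aux);
	}
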
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
new file mode 100644
index 000000000000..85ee82162590
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_AUX_ENGINE_DCE110_H__
27#define __DAL_AUX_ENGINE_DCE110_H__
28
29#include "../aux_engine.h"
30
31#define AUX_COMMON_REG_LIST(id)\
32 SRI(AUX_CONTROL, DP_AUX, id), \
33 SRI(AUX_ARB_CONTROL, DP_AUX, id), \
34 SRI(AUX_SW_DATA, DP_AUX, id), \
35 SRI(AUX_SW_CONTROL, DP_AUX, id), \
36 SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
37 SRI(AUX_SW_STATUS, DP_AUX, id), \
38 SR(AUXN_IMPCAL), \
39 SR(AUXP_IMPCAL)
40
41struct dce110_aux_registers {
42 uint32_t AUX_CONTROL;
43 uint32_t AUX_ARB_CONTROL;
44 uint32_t AUX_SW_DATA;
45 uint32_t AUX_SW_CONTROL;
46 uint32_t AUX_INTERRUPT_CONTROL;
47 uint32_t AUX_SW_STATUS;
48 uint32_t AUXN_IMPCAL;
49 uint32_t AUXP_IMPCAL;
50
51 uint32_t AUX_RESET_MASK;
52};
53
54struct aux_engine_dce110 {
55 struct aux_engine base;
56 const struct dce110_aux_registers *regs;
57 struct {
58 uint32_t aux_control;
59 uint32_t aux_arb_control;
60 uint32_t aux_sw_data;
61 uint32_t aux_sw_control;
62 uint32_t aux_interrupt_control;
63 uint32_t aux_sw_status;
64 } addr;
65 uint32_t timeout_period;
66};
67
68struct aux_engine_dce110_init_data {
69 uint32_t engine_id;
70 uint32_t timeout_period;
71 struct dc_context *ctx;
72 const struct dce110_aux_registers *regs;
73};
74
75struct aux_engine *dal_aux_engine_dce110_create(
76 const struct aux_engine_dce110_init_data *aux_init_data);
77
78#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
new file mode 100644
index 000000000000..80d06ad78e07
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -0,0 +1,577 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/logger_interface.h"
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31
32#include "include/i2caux_interface.h"
33#include "../engine.h"
34#include "../i2c_engine.h"
35#include "../i2c_hw_engine.h"
36#include "../i2c_generic_hw_engine.h"
37/*
38 * Header of this unit
39 */
40
41#include "i2c_hw_engine_dce110.h"
42
43/*
44 * Post-requisites: headers required by this unit
45 */
46#include "reg_helper.h"
47
48/*
49 * This unit
50 */
51
52enum dc_i2c_status {
53 DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
54 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
55 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
56};
57
58enum dc_i2c_arbitration {
59 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
60 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
61};
62
63enum {
64 /* No timeout in HW
65 * (timeout implemented in SW by querying status) */
66 I2C_SETUP_TIME_LIMIT = 255,
67 I2C_HW_BUFFER_SIZE = 538
68};
69
70/*
71 * @brief
72 * Cast pointer to 'struct i2c_hw_engine *'
73 * to pointer 'struct i2c_hw_engine_dce110 *'
74 */
75#define FROM_I2C_HW_ENGINE(ptr) \
76 container_of((ptr), struct i2c_hw_engine_dce110, base)
77/*
78 * @brief
79 * Cast pointer to 'struct i2c_engine *'
80 * to pointer to 'struct i2c_hw_engine_dce110 *'
81 */
82#define FROM_I2C_ENGINE(ptr) \
83 FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
84
85/*
86 * @brief
87 * Cast pointer to 'struct engine *'
88 * to 'pointer to struct i2c_hw_engine_dce110 *'
89 */
90#define FROM_ENGINE(ptr) \
91 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
92
93#define CTX \
94 hw_engine->base.base.base.ctx
95
96#define REG(reg_name)\
97 (hw_engine->regs->reg_name)
98
99#undef FN
100#define FN(reg_name, field_name) \
101 hw_engine->i2c_shift->field_name, hw_engine->i2c_mask->field_name
102
103#include "reg_helper.h"
104
105static void disable_i2c_hw_engine(
106 struct i2c_hw_engine_dce110 *hw_engine)
107{
108 REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 0);
109}
110
111static void release_engine(
112 struct engine *engine)
113{
114 struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
115
116 struct i2c_engine *base = NULL;
117 bool safe_to_reset;
118
119 base = &hw_engine->base.base;
120
121 /* Restore original HW engine speed */
122
123 base->funcs->set_speed(base, hw_engine->base.original_speed);
124
125 /* Release I2C */
126 REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
127
128 /* Reset HW engine */
129 {
130 uint32_t i2c_sw_status = 0;
131 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
132 /* if used by SW, safe to reset */
133 safe_to_reset = (i2c_sw_status == 1);
134 }
135
136 if (safe_to_reset)
137 REG_UPDATE_2(
138 DC_I2C_CONTROL,
139 DC_I2C_SOFT_RESET, 1,
140 DC_I2C_SW_STATUS_RESET, 1);
141 else
142 REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
143
144 /* HW I2c engine - clock gating feature */
145 if (!hw_engine->engine_keep_power_up_count)
146 disable_i2c_hw_engine(hw_engine);
147}
148
149static bool setup_engine(
150 struct i2c_engine *i2c_engine)
151{
152 struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
153
154 /* Program pin select */
155 REG_UPDATE_6(
156 DC_I2C_CONTROL,
157 DC_I2C_GO, 0,
158 DC_I2C_SOFT_RESET, 0,
159 DC_I2C_SEND_RESET, 0,
160 DC_I2C_SW_STATUS_RESET, 1,
161 DC_I2C_TRANSACTION_COUNT, 0,
162 DC_I2C_DDC_SELECT, hw_engine->engine_id);
163
164 /* Program time limit */
165 REG_UPDATE_N(
166 SETUP, 2,
167 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
168 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
169
170 /* Program HW priority
171 * set to High - interrupt software I2C at any time
172 * Enable restart of SW I2C that was interrupted by HW
173 * disable queuing of software while I2C is in use by HW */
174 REG_UPDATE_2(
175 DC_I2C_ARBITRATION,
176 DC_I2C_NO_QUEUED_SW_GO, 0,
177 DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
178
179 return true;
180}
181
182static uint32_t get_speed(
183 const struct i2c_engine *i2c_engine)
184{
185 const struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
186 uint32_t pre_scale = 0;
187
188 REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
189
190 /* [anaumov] it seems the following is unnecessary */
191 /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
192 return pre_scale ?
193 hw_engine->reference_frequency / pre_scale :
194 hw_engine->base.default_speed;
195}
196
197static void set_speed(
198 struct i2c_engine *i2c_engine,
199 uint32_t speed)
200{
201 struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
202
203 if (speed) {
204 if (hw_engine->i2c_mask->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
205 REG_UPDATE_N(
206 SPEED, 3,
207 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
208 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
209 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
210 else
211 REG_UPDATE_N(
212 SPEED, 2,
213 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
214 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
215 }
216}
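/* Illustrative prescale math for set_speed()/get_speed(): with a
 * 48000 kHz reference frequency and a requested speed of 100 kHz,
 * DC_I2C_DDC1_PRESCALE is programmed to 48000 / 100 = 480 and
 * get_speed() recovers 48000 / 480 = 100 kHz; START_STOP_TIMING_CNTL
 * is set to 2 for speeds above 50 kHz and to 1 otherwise. */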
217
218static inline void reset_hw_engine(struct engine *engine)
219{
220 struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
221
222 REG_UPDATE_2(
223 DC_I2C_CONTROL,
224 DC_I2C_SW_STATUS_RESET, 1,
225 DC_I2C_SW_STATUS_RESET, 1);
226}
227
228static bool is_hw_busy(struct engine *engine)
229{
230 struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
231 uint32_t i2c_sw_status = 0;
232
233 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
234 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
235 return false;
236
237 reset_hw_engine(engine);
238
239 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
240 return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
241}
242
243
244#define STOP_TRANS_PREDICAT \
245 ((hw_engine->transaction_count == 3) || \
246 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || \
247 (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ))
248
249#define SET_I2C_TRANSACTION(id) \
250 do { \
251 REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \
252 FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \
253 FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \
254 FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \
255 FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)), \
256 FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \
257 if (STOP_TRANS_PREDICAT) \
258 last_transaction = true; \
259 } while (false)
260
261
262static bool process_transaction(
263 struct i2c_hw_engine_dce110 *hw_engine,
264 struct i2c_request_transaction_data *request)
265{
266 uint32_t length = request->length;
267 uint8_t *buffer = request->data;
268 uint32_t value = 0;
269
270 bool last_transaction = false;
271
272 struct dc_context *ctx = NULL;
273
274 ctx = hw_engine->base.base.base.ctx;
275
276
277
278 switch (hw_engine->transaction_count) {
279 case 0:
280 SET_I2C_TRANSACTION(0);
281 break;
282 case 1:
283 SET_I2C_TRANSACTION(1);
284 break;
285 case 2:
286 SET_I2C_TRANSACTION(2);
287 break;
288 case 3:
289 SET_I2C_TRANSACTION(3);
290 break;
291 default:
292 /* TODO Warning ? */
293 break;
294 }
295
296
297 /* Write the I2C address and I2C data
298 * into the hardware circular buffer, one byte per entry.
299 * As an example, the 7-bit I2C slave address of a CRT monitor
300 * for reading DDC/EDID information is 0b1010001.
301 * For an I2C send operation, the LSB must be programmed to 0;
302 * for an I2C receive operation, the LSB must be programmed to 1. */
303 if (hw_engine->transaction_count == 0) {
304 value = REG_SET_4(DC_I2C_DATA, 0,
305 DC_I2C_DATA_RW, false,
306 DC_I2C_DATA, request->address,
307 DC_I2C_INDEX, 0,
308 DC_I2C_INDEX_WRITE, 1);
309 hw_engine->buffer_used_write = 0;
310 } else
311 value = REG_SET_2(DC_I2C_DATA, 0,
312 DC_I2C_DATA_RW, false,
313 DC_I2C_DATA, request->address);
314
315 hw_engine->buffer_used_write++;
316
317 if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
318 while (length) {
319 REG_SET_2(DC_I2C_DATA, value,
320 DC_I2C_INDEX_WRITE, 0,
321 DC_I2C_DATA, *buffer++);
322 hw_engine->buffer_used_write++;
323 --length;
324 }
325 }
326
327 ++hw_engine->transaction_count;
328 hw_engine->buffer_used_bytes += length + 1;
329
330 return last_transaction;
331}
332
333static void execute_transaction(
334 struct i2c_hw_engine_dce110 *hw_engine)
335{
336 REG_UPDATE_N(SETUP, 5,
337 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
338 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
339 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
340 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
341 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
342
343
344 REG_UPDATE_5(DC_I2C_CONTROL,
345 DC_I2C_SOFT_RESET, 0,
346 DC_I2C_SW_STATUS_RESET, 0,
347 DC_I2C_SEND_RESET, 0,
348 DC_I2C_GO, 0,
349 DC_I2C_TRANSACTION_COUNT, hw_engine->transaction_count - 1);
350
351 /* start I2C transfer */
352 REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
353
354 /* all transactions were executed and the HW buffer became empty
355 * (though that actually happens only once the status becomes DONE) */
356 hw_engine->transaction_count = 0;
357 hw_engine->buffer_used_bytes = 0;
358}
359
360static void submit_channel_request(
361 struct i2c_engine *engine,
362 struct i2c_request_transaction_data *request)
363{
364 request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
365
366 if (!process_transaction(FROM_I2C_ENGINE(engine), request))
367 return;
368
369 if (is_hw_busy(&engine->base)) {
370 request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
371 return;
372 }
373
374 execute_transaction(FROM_I2C_ENGINE(engine));
375}
376
377static void process_channel_reply(
378 struct i2c_engine *engine,
379 struct i2c_reply_transaction_data *reply)
380{
381 uint32_t length = reply->length;
382 uint8_t *buffer = reply->data;
383
384 struct i2c_hw_engine_dce110 *hw_engine =
385 FROM_I2C_ENGINE(engine);
386
387
388 REG_SET_3(DC_I2C_DATA, 0,
389 DC_I2C_INDEX, hw_engine->buffer_used_write,
390 DC_I2C_DATA_RW, 1,
391 DC_I2C_INDEX_WRITE, 1);
392
393 while (length) {
394 /* after reading the status,
395 * if the I2C operation executed successfully
396 * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
397 * should read data bytes from I2C circular data buffer */
398
399 uint32_t i2c_data;
400
401 REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
402 *buffer++ = i2c_data;
403
404 --length;
405 }
406}
407
408static enum i2c_channel_operation_result get_channel_status(
409 struct i2c_engine *i2c_engine,
410 uint8_t *returned_bytes)
411{
412 uint32_t i2c_sw_status = 0;
413 struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
414 uint32_t value =
415 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
416
417 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
418 return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
419 else if (value & hw_engine->i2c_mask->DC_I2C_SW_STOPPED_ON_NACK)
420 return I2C_CHANNEL_OPERATION_NO_RESPONSE;
421 else if (value & hw_engine->i2c_mask->DC_I2C_SW_TIMEOUT)
422 return I2C_CHANNEL_OPERATION_TIMEOUT;
423 else if (value & hw_engine->i2c_mask->DC_I2C_SW_ABORTED)
424 return I2C_CHANNEL_OPERATION_FAILED;
425 else if (value & hw_engine->i2c_mask->DC_I2C_SW_DONE)
426 return I2C_CHANNEL_OPERATION_SUCCEEDED;
427
428 /*
429 * this is the case when HW is used for the communication;
430 * I2C_SW_STATUS could be zero
431 */
432 return I2C_CHANNEL_OPERATION_SUCCEEDED;
433}
434
435static uint32_t get_hw_buffer_available_size(
436 const struct i2c_hw_engine *engine)
437{
438 return I2C_HW_BUFFER_SIZE -
439 FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
440}
441
442static uint32_t get_transaction_timeout(
443 const struct i2c_hw_engine *engine,
444 uint32_t length)
445{
446 uint32_t speed = engine->base.funcs->get_speed(&engine->base);
447
448 uint32_t period_timeout;
449 uint32_t num_of_clock_stretches;
450
451 if (!speed)
452 return 0;
453
454 period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
455
456 num_of_clock_stretches = 1 + (length << 3) + 1;
457 num_of_clock_stretches +=
458 (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
459 (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
460
461 return period_timeout * num_of_clock_stretches;
462}
463
464static void destroy(
465 struct i2c_engine **i2c_engine)
466{
467 struct i2c_hw_engine_dce110 *engine_dce110 =
468 FROM_I2C_ENGINE(*i2c_engine);
469
470 dal_i2c_hw_engine_destruct(&engine_dce110->base);
471
472 dm_free(engine_dce110);
473
474 *i2c_engine = NULL;
475}
476
477static const struct i2c_engine_funcs i2c_engine_funcs = {
478 .destroy = destroy,
479 .get_speed = get_speed,
480 .set_speed = set_speed,
481 .setup_engine = setup_engine,
482 .submit_channel_request = submit_channel_request,
483 .process_channel_reply = process_channel_reply,
484 .get_channel_status = get_channel_status,
485 .acquire_engine = dal_i2c_hw_engine_acquire_engine,
486};
487
488static const struct engine_funcs engine_funcs = {
489 .release_engine = release_engine,
490 .get_engine_type = dal_i2c_hw_engine_get_engine_type,
491 .acquire = dal_i2c_engine_acquire,
492 .submit_request = dal_i2c_hw_engine_submit_request,
493};
494
495static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
496 .get_hw_buffer_available_size = get_hw_buffer_available_size,
497 .get_transaction_timeout = get_transaction_timeout,
498 .wait_on_operation_result = dal_i2c_hw_engine_wait_on_operation_result,
499};
500
501bool i2c_hw_engine_dce110_construct(
502 struct i2c_hw_engine_dce110 *hw_engine,
503 const struct i2c_hw_engine_dce110_create_arg *arg)
504{
505 uint32_t xtal_ref_div = 0;
506
507 if (!arg->reference_frequency)
508 return false;
509
510 if (!dal_i2c_hw_engine_construct(&hw_engine->base, arg->ctx))
511 return false;
512
513 hw_engine->base.base.base.funcs = &engine_funcs;
514 hw_engine->base.base.funcs = &i2c_engine_funcs;
515 hw_engine->base.funcs = &i2c_hw_engine_funcs;
516 hw_engine->base.default_speed = arg->default_speed;
517
518 hw_engine->regs = arg->regs;
519 hw_engine->i2c_shift = arg->i2c_shift;
520 hw_engine->i2c_mask = arg->i2c_mask;
521
522 hw_engine->engine_id = arg->engine_id;
523
524 hw_engine->buffer_used_bytes = 0;
525 hw_engine->transaction_count = 0;
526 hw_engine->engine_keep_power_up_count = 1;
527
528
529 REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
530
531 if (xtal_ref_div == 0) {
532 dm_logger_write(
533 hw_engine->base.base.base.ctx->logger, LOG_WARNING,
534 "Invalid base timer divider [%s]\n",
535 __func__);
536 xtal_ref_div = 2;
537 }
538
539 /* Calculate the reference clock by dividing the original frequency by
540 * XTAL_REF_DIV.
541 * At the upper level, uint32_t reference_frequency =
542 * dal_i2caux_get_reference_clock(as) >> 1,
543 * which is already divided by 2. So we need to multiply by 2 to get
544 * the original reference clock from ppll_info.
545 */
546 hw_engine->reference_frequency =
547 (arg->reference_frequency * 2) / xtal_ref_div;
548
549 return true;
550}
551
552struct i2c_engine *dal_i2c_hw_engine_dce110_create(
553 const struct i2c_hw_engine_dce110_create_arg *arg)
554{
555 struct i2c_hw_engine_dce110 *engine_dce10;
556
557 if (!arg) {
558 ASSERT_CRITICAL(false);
559 return NULL;
560 }
561
562 engine_dce10 = dm_alloc(sizeof(struct i2c_hw_engine_dce110));
563
564 if (!engine_dce10) {
565 ASSERT_CRITICAL(false);
566 return NULL;
567 }
568
569 if (i2c_hw_engine_dce110_construct(engine_dce10, arg))
570 return &engine_dce10->base.base;
571
572 ASSERT_CRITICAL(false);
573
574 dm_free(engine_dce10);
575
576 return NULL;
577}
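
To make the reference-clock handling in i2c_hw_engine_dce110_construct() concrete, here is the arithmetic with illustrative numbers (a 100000 kHz PLL reference and XTAL_REF_DIV = 2 are assumed purely for the example):

	/* upper layer: reference_frequency = dal_i2caux_get_reference_clock(as) >> 1 */
	uint32_t arg_reference_frequency = 100000 / 2;	/* 50000 kHz */
	uint32_t xtal_ref_div = 2;			/* read from MICROSECOND_TIME_BASE_DIV */
	uint32_t hw_reference_frequency =
		(arg_reference_frequency * 2) / xtal_ref_div;	/* 50000 kHz */
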
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
new file mode 100644
index 000000000000..c573c6459ec6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -0,0 +1,214 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_HW_ENGINE_DCE110_H__
27#define __DAL_I2C_HW_ENGINE_DCE110_H__
28
29#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\
30 SRI(SETUP, DC_I2C_DDC, id),\
31 SRI(SPEED, DC_I2C_DDC, id),\
32 SR(DC_I2C_ARBITRATION),\
33 SR(DC_I2C_CONTROL),\
34 SR(DC_I2C_SW_STATUS),\
35 SR(DC_I2C_TRANSACTION0),\
36 SR(DC_I2C_TRANSACTION1),\
37 SR(DC_I2C_TRANSACTION2),\
38 SR(DC_I2C_TRANSACTION3),\
39 SR(DC_I2C_DATA),\
40 SR(MICROSECOND_TIME_BASE_DIV)
41
42#define I2C_SF(reg_name, field_name, post_fix)\
43 .field_name = reg_name ## __ ## field_name ## post_fix
44
45#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
46 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
47 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\
48 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\
49 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\
50 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\
51 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\
52 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\
53 I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\
54 I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\
55 I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\
56 I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\
57 I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\
58 I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\
59 I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\
60 I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\
61 I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\
62 I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\
63 I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\
64 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\
65 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\
66 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\
67 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_DONE, mask_sh),\
68 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\
69 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\
70 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\
71 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\
72 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\
73 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\
74 I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\
75 I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
76 I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
77 I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
78 I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
79
80#define I2C_COMMON_MASK_SH_LIST_DCE100(mask_sh)\
81 I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
82
83#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
84 I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
85 I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh)
86
87struct dce110_i2c_hw_engine_shift {
88 uint8_t DC_I2C_DDC1_ENABLE;
89 uint8_t DC_I2C_DDC1_TIME_LIMIT;
90 uint8_t DC_I2C_DDC1_DATA_DRIVE_EN;
91 uint8_t DC_I2C_DDC1_CLK_DRIVE_EN;
92 uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL;
93 uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
94 uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
95 uint8_t DC_I2C_SW_DONE_USING_I2C_REG;
96 uint8_t DC_I2C_NO_QUEUED_SW_GO;
97 uint8_t DC_I2C_SW_PRIORITY;
98 uint8_t DC_I2C_SOFT_RESET;
99 uint8_t DC_I2C_SW_STATUS_RESET;
100 uint8_t DC_I2C_GO;
101 uint8_t DC_I2C_SEND_RESET;
102 uint8_t DC_I2C_TRANSACTION_COUNT;
103 uint8_t DC_I2C_DDC_SELECT;
104 uint8_t DC_I2C_DDC1_PRESCALE;
105 uint8_t DC_I2C_DDC1_THRESHOLD;
106 uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
107 uint8_t DC_I2C_SW_STOPPED_ON_NACK;
108 uint8_t DC_I2C_SW_TIMEOUT;
109 uint8_t DC_I2C_SW_ABORTED;
110 uint8_t DC_I2C_SW_DONE;
111 uint8_t DC_I2C_SW_STATUS;
112 uint8_t DC_I2C_STOP_ON_NACK0;
113 uint8_t DC_I2C_START0;
114 uint8_t DC_I2C_RW0;
115 uint8_t DC_I2C_STOP0;
116 uint8_t DC_I2C_COUNT0;
117 uint8_t DC_I2C_DATA_RW;
118 uint8_t DC_I2C_DATA;
119 uint8_t DC_I2C_INDEX;
120 uint8_t DC_I2C_INDEX_WRITE;
121 uint8_t XTAL_REF_DIV;
122};
123
124struct dce110_i2c_hw_engine_mask {
125 uint32_t DC_I2C_DDC1_ENABLE;
126 uint32_t DC_I2C_DDC1_TIME_LIMIT;
127 uint32_t DC_I2C_DDC1_DATA_DRIVE_EN;
128 uint32_t DC_I2C_DDC1_CLK_DRIVE_EN;
129 uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL;
130 uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
131 uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
132 uint32_t DC_I2C_SW_DONE_USING_I2C_REG;
133 uint32_t DC_I2C_NO_QUEUED_SW_GO;
134 uint32_t DC_I2C_SW_PRIORITY;
135 uint32_t DC_I2C_SOFT_RESET;
136 uint32_t DC_I2C_SW_STATUS_RESET;
137 uint32_t DC_I2C_GO;
138 uint32_t DC_I2C_SEND_RESET;
139 uint32_t DC_I2C_TRANSACTION_COUNT;
140 uint32_t DC_I2C_DDC_SELECT;
141 uint32_t DC_I2C_DDC1_PRESCALE;
142 uint32_t DC_I2C_DDC1_THRESHOLD;
143 uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
144 uint32_t DC_I2C_SW_STOPPED_ON_NACK;
145 uint32_t DC_I2C_SW_TIMEOUT;
146 uint32_t DC_I2C_SW_ABORTED;
147 uint32_t DC_I2C_SW_DONE;
148 uint32_t DC_I2C_SW_STATUS;
149 uint32_t DC_I2C_STOP_ON_NACK0;
150 uint32_t DC_I2C_START0;
151 uint32_t DC_I2C_RW0;
152 uint32_t DC_I2C_STOP0;
153 uint32_t DC_I2C_COUNT0;
154 uint32_t DC_I2C_DATA_RW;
155 uint32_t DC_I2C_DATA;
156 uint32_t DC_I2C_INDEX;
157 uint32_t DC_I2C_INDEX_WRITE;
158 uint32_t XTAL_REF_DIV;
159};
160
161struct dce110_i2c_hw_engine_registers {
162 uint32_t SETUP;
163 uint32_t SPEED;
164 uint32_t DC_I2C_ARBITRATION;
165 uint32_t DC_I2C_CONTROL;
166 uint32_t DC_I2C_SW_STATUS;
167 uint32_t DC_I2C_TRANSACTION0;
168 uint32_t DC_I2C_TRANSACTION1;
169 uint32_t DC_I2C_TRANSACTION2;
170 uint32_t DC_I2C_TRANSACTION3;
171 uint32_t DC_I2C_DATA;
172 uint32_t MICROSECOND_TIME_BASE_DIV;
173};
174
175struct i2c_hw_engine_dce110 {
176 struct i2c_hw_engine base;
177 const struct dce110_i2c_hw_engine_registers *regs;
178 const struct dce110_i2c_hw_engine_shift *i2c_shift;
179 const struct dce110_i2c_hw_engine_mask *i2c_mask;
180 struct {
181 uint32_t DC_I2C_DDCX_SETUP;
182 uint32_t DC_I2C_DDCX_SPEED;
183 } addr;
184 uint32_t engine_id;
185 /* expressed in kilohertz */
186 uint32_t reference_frequency;
187 /* number of bytes currently used in HW buffer */
188 uint32_t buffer_used_bytes;
	189	/* number of bytes used for the write transaction in the HW buffer
	190	 * - this will be used as the index to read from */
191 uint32_t buffer_used_write;
192 /* number of pending transactions (before GO) */
193 uint32_t transaction_count;
194 uint32_t engine_keep_power_up_count;
195};
196
197struct i2c_hw_engine_dce110_create_arg {
198 uint32_t engine_id;
199 uint32_t reference_frequency;
200 uint32_t default_speed;
201 struct dc_context *ctx;
202 const struct dce110_i2c_hw_engine_registers *regs;
203 const struct dce110_i2c_hw_engine_shift *i2c_shift;
204 const struct dce110_i2c_hw_engine_mask *i2c_mask;
205};
206
207struct i2c_engine *dal_i2c_hw_engine_dce110_create(
208 const struct i2c_hw_engine_dce110_create_arg *arg);
209
210bool i2c_hw_engine_dce110_construct(
211 struct i2c_hw_engine_dce110 *engine_dce110,
212 const struct i2c_hw_engine_dce110_create_arg *arg);
213
214#endif
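
The I2C_COMMON_MASK_SH_LIST_* macros above are expanded twice by the users of
this header, once with __SHIFT and once with _MASK, so a single field list can
fill both the dce110_i2c_hw_engine_shift and dce110_i2c_hw_engine_mask tables.
A minimal standalone sketch of the same token-pasting pattern, using made-up
register and field names instead of the real DCE registers, is:

#include <stdint.h>
#include <stdio.h>

/* hypothetical values as they would come from a register header */
#define DEMO_SETUP__DEMO_ENABLE__SHIFT 0x0
#define DEMO_SETUP__DEMO_ENABLE_MASK   0x1
#define DEMO_SETUP__DEMO_TIME__SHIFT   0x8
#define DEMO_SETUP__DEMO_TIME_MASK     0xff00

#define DEMO_SF(reg_name, field_name, post_fix)\
	.field_name = reg_name ## __ ## field_name ## post_fix

#define DEMO_MASK_SH_LIST(mask_sh)\
	DEMO_SF(DEMO_SETUP, DEMO_ENABLE, mask_sh),\
	DEMO_SF(DEMO_SETUP, DEMO_TIME, mask_sh)

struct demo_shift { uint8_t DEMO_ENABLE; uint8_t DEMO_TIME; };
struct demo_mask { uint32_t DEMO_ENABLE; uint32_t DEMO_TIME; };

/* one field list, two tables */
static const struct demo_shift demo_shift = { DEMO_MASK_SH_LIST(__SHIFT) };
static const struct demo_mask demo_mask = { DEMO_MASK_SH_LIST(_MASK) };

int main(void)
{
	printf("ENABLE: shift %d, mask 0x%x\n",
		demo_shift.DEMO_ENABLE, (unsigned int)demo_mask.DEMO_ENABLE);
	printf("TIME:   shift %d, mask 0x%x\n",
		demo_shift.DEMO_TIME, (unsigned int)demo_mask.DEMO_TIME);
	return 0;
}

The real lists are used the same way: i2caux_dce110.c below initializes
i2c_shift with I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) and i2c_mask with
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK).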
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
new file mode 100644
index 000000000000..996813d9165f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../engine.h"
33#include "../i2c_engine.h"
34#include "../i2c_sw_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "i2c_sw_engine_dce110.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46/*
47 * This unit
48 */
49
50/*
51 * @brief
52 * Cast 'struct i2c_sw_engine *'
53 * to 'struct i2c_sw_engine_dce110 *'
54 */
55#define FROM_I2C_SW_ENGINE(ptr) \
56 container_of((ptr), struct i2c_sw_engine_dce110, base)
57/*
58 * @brief
59 * Cast 'struct i2c_engine *'
60 * to 'struct i2c_sw_engine_dce110 *'
61 */
62#define FROM_I2C_ENGINE(ptr) \
63 FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
64
65/*
66 * @brief
67 * Cast 'struct engine *'
68 * to 'struct i2c_sw_engine_dce110 *'
69 */
70#define FROM_ENGINE(ptr) \
71 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
72
73static void release_engine(
74 struct engine *engine)
75{
76}
77
78static void destruct(
79 struct i2c_sw_engine_dce110 *engine)
80{
81 dal_i2c_sw_engine_destruct(&engine->base);
82}
83
84static void destroy(
85 struct i2c_engine **engine)
86{
87 struct i2c_sw_engine_dce110 *sw_engine = FROM_I2C_ENGINE(*engine);
88
89 destruct(sw_engine);
90
91 dm_free(sw_engine);
92
93 *engine = NULL;
94}
95
96static bool acquire_engine(
97 struct i2c_engine *engine,
98 struct ddc *ddc_handle)
99{
100 return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
101}
102
103static const struct i2c_engine_funcs i2c_engine_funcs = {
104 .acquire_engine = acquire_engine,
105 .destroy = destroy,
106 .get_speed = dal_i2c_sw_engine_get_speed,
107 .set_speed = dal_i2c_sw_engine_set_speed,
108 .setup_engine = dal_i2c_engine_setup_i2c_engine,
109 .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
110 .process_channel_reply = dal_i2c_engine_process_channel_reply,
111 .get_channel_status = dal_i2c_sw_engine_get_channel_status,
112};
113
114static const struct engine_funcs engine_funcs = {
115 .release_engine = release_engine,
116 .get_engine_type = dal_i2c_sw_engine_get_engine_type,
117 .acquire = dal_i2c_engine_acquire,
118 .submit_request = dal_i2c_sw_engine_submit_request,
119};
120
121static bool construct(
122 struct i2c_sw_engine_dce110 *engine_dce110,
123 const struct i2c_sw_engine_dce110_create_arg *arg_dce110)
124{
125 struct i2c_sw_engine_create_arg arg_base;
126
127 arg_base.ctx = arg_dce110->ctx;
128 arg_base.default_speed = arg_dce110->default_speed;
129
130 if (!dal_i2c_sw_engine_construct(
131 &engine_dce110->base, &arg_base)) {
132 ASSERT_CRITICAL(false);
133 return false;
134 }
135
136	/* struct engine -> struct engine_funcs */
137 engine_dce110->base.base.base.funcs = &engine_funcs;
138	/* struct i2c_engine -> struct i2c_engine_funcs */
139 engine_dce110->base.base.funcs = &i2c_engine_funcs;
140 engine_dce110->base.default_speed = arg_dce110->default_speed;
141 engine_dce110->engine_id = arg_dce110->engine_id;
142
143 return true;
144}
145
146struct i2c_engine *dal_i2c_sw_engine_dce110_create(
147 const struct i2c_sw_engine_dce110_create_arg *arg)
148{
149 struct i2c_sw_engine_dce110 *engine_dce110;
150
151 if (!arg) {
152 ASSERT_CRITICAL(false);
153 return NULL;
154 }
155
156 engine_dce110 = dm_alloc(sizeof(struct i2c_sw_engine_dce110));
157
158 if (!engine_dce110) {
159 ASSERT_CRITICAL(false);
160 return NULL;
161 }
162
163 if (construct(engine_dce110, arg))
164 return &engine_dce110->base.base;
165
166 ASSERT_CRITICAL(false);
167
168 dm_free(engine_dce110);
169
170 return NULL;
171}
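
The FROM_* macros above recover the DCE110-specific object from a pointer to
one of its embedded base structs, applying one container_of() per level of the
engine hierarchy. A self-contained sketch of that downcast chain, collapsed to
two base levels and using invented struct names in place of the real engine
types, is:

#include <stddef.h>
#include <stdio.h>

/* simplified container_of: pointer to member -> pointer to enclosing struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct engine_base { int type; };
struct i2c_base { struct engine_base base; };
struct i2c_derived { struct i2c_base base; int engine_id; };

#define FROM_I2C_BASE(p) container_of((p), struct i2c_derived, base)
#define FROM_ENGINE_BASE(p) \
	FROM_I2C_BASE(container_of((p), struct i2c_base, base))

int main(void)
{
	struct i2c_derived d = { .engine_id = 3 };
	struct engine_base *ep = &d.base.base;

	/* walk back up from the innermost base to the derived object */
	printf("engine_id = %d\n", FROM_ENGINE_BASE(ep)->engine_id);
	return 0;
}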
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
new file mode 100644
index 000000000000..c48c61f540a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_SW_ENGINE_DCE110_H__
27#define __DAL_I2C_SW_ENGINE_DCE110_H__
28
29struct i2c_sw_engine_dce110 {
30 struct i2c_sw_engine base;
31 uint32_t engine_id;
32};
33
34struct i2c_sw_engine_dce110_create_arg {
35 uint32_t engine_id;
36 uint32_t default_speed;
37 struct dc_context *ctx;
38};
39
40struct i2c_engine *dal_i2c_sw_engine_dce110_create(
41 const struct i2c_sw_engine_dce110_create_arg *arg);
42
43#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
new file mode 100644
index 000000000000..1c00ed0010d9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -0,0 +1,323 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../i2caux.h"
33#include "../engine.h"
34#include "../i2c_engine.h"
35#include "../i2c_sw_engine.h"
36#include "../i2c_hw_engine.h"
37
38/*
39 * Header of this unit
40 */
41#include "i2caux_dce110.h"
42
43#include "i2c_sw_engine_dce110.h"
44#include "i2c_hw_engine_dce110.h"
45#include "aux_engine_dce110.h"
46
47/*
48 * Post-requisites: headers required by this unit
49 */
50
51/*
52 * This unit
53 */
54/*cast pointer to struct i2caux TO pointer to struct i2caux_dce110*/
55#define FROM_I2C_AUX(ptr) \
56 container_of((ptr), struct i2caux_dce110, base)
57
58static void destruct(
59 struct i2caux_dce110 *i2caux_dce110)
60{
61 dal_i2caux_destruct(&i2caux_dce110->base);
62}
63
64static void destroy(
65 struct i2caux **i2c_engine)
66{
67 struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(*i2c_engine);
68
69 destruct(i2caux_dce110);
70
71 dm_free(i2caux_dce110);
72
73 *i2c_engine = NULL;
74}
75
76static struct i2c_engine *acquire_i2c_hw_engine(
77 struct i2caux *i2caux,
78 struct ddc *ddc)
79{
80 struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
81
82 struct i2c_engine *engine = NULL;
83	/* The generic HW engine is not used for EDID reads.
84	 * It may be needed for an external I2C device, like a thermal chip;
85	 * TODO: implement when needed.
86	 * See the dce80 'non_generic' bool for the generic HW engine.
87 */
88
89 if (!ddc)
90 return NULL;
91
92 if (ddc->hw_info.hw_supported) {
93 enum gpio_ddc_line line = dal_ddc_get_line(ddc);
94
95 if (line < GPIO_DDC_LINE_COUNT)
96 engine = i2caux->i2c_hw_engines[line];
97 }
98
99 if (!engine)
100 return NULL;
101
102 if (!i2caux_dce110->i2c_hw_buffer_in_use &&
103 engine->base.funcs->acquire(&engine->base, ddc)) {
104 i2caux_dce110->i2c_hw_buffer_in_use = true;
105 return engine;
106 }
107
108 return NULL;
109}
110
111static void release_engine(
112 struct i2caux *i2caux,
113 struct engine *engine)
114{
115 struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
116
117 if (engine->funcs->get_engine_type(engine) ==
118 I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
119 i2caux_dce110->i2c_hw_buffer_in_use = false;
120
121 dal_i2caux_release_engine(i2caux, engine);
122}
123
124static const enum gpio_ddc_line hw_ddc_lines[] = {
125 GPIO_DDC_LINE_DDC1,
126 GPIO_DDC_LINE_DDC2,
127 GPIO_DDC_LINE_DDC3,
128 GPIO_DDC_LINE_DDC4,
129 GPIO_DDC_LINE_DDC5,
130 GPIO_DDC_LINE_DDC6,
131};
132
133static const enum gpio_ddc_line hw_aux_lines[] = {
134 GPIO_DDC_LINE_DDC1,
135 GPIO_DDC_LINE_DDC2,
136 GPIO_DDC_LINE_DDC3,
137 GPIO_DDC_LINE_DDC4,
138 GPIO_DDC_LINE_DDC5,
139 GPIO_DDC_LINE_DDC6,
140};
141
142/* function table */
143static const struct i2caux_funcs i2caux_funcs = {
144 .destroy = destroy,
145 .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
146 .release_engine = release_engine,
147 .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
148 .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
149};
150
151#include "dce/dce_11_0_d.h"
152#include "dce/dce_11_0_sh_mask.h"
153
154/* set register offset */
155#define SR(reg_name)\
156 .reg_name = mm ## reg_name
157
158/* set register offset with instance */
159#define SRI(reg_name, block, id)\
160 .reg_name = mm ## block ## id ## _ ## reg_name
161
162#define aux_regs(id)\
163[id] = {\
164 AUX_COMMON_REG_LIST(id), \
165 .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
166}
167
168#define hw_engine_regs(id)\
169{\
170 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
171}
172
173static const struct dce110_aux_registers dce110_aux_regs[] = {
174 aux_regs(0),
175 aux_regs(1),
176 aux_regs(2),
177 aux_regs(3),
178 aux_regs(4),
179 aux_regs(5)
180};
181
182static const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[] = {
183 hw_engine_regs(1),
184 hw_engine_regs(2),
185 hw_engine_regs(3),
186 hw_engine_regs(4),
187 hw_engine_regs(5),
188 hw_engine_regs(6)
189};
190
191static const struct dce110_i2c_hw_engine_shift i2c_shift = {
192 I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
193};
194
195static const struct dce110_i2c_hw_engine_mask i2c_mask = {
196 I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
197};
198
199bool dal_i2caux_dce110_construct(
200 struct i2caux_dce110 *i2caux_dce110,
201 struct dc_context *ctx,
202 const struct dce110_aux_registers aux_regs[],
203 const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
204 const struct dce110_i2c_hw_engine_shift *i2c_shift,
205 const struct dce110_i2c_hw_engine_mask *i2c_mask)
206{
207 uint32_t i = 0;
208 uint32_t reference_frequency = 0;
209 bool use_i2c_sw_engine = false;
210 struct i2caux *base = NULL;
211	/*TODO: For CZ bring-up, if dal_i2caux_get_reference_clock
212	 * does not return 48 kHz, we need to hard-code 48 kHz;
213	 * some incorrect BIOS settings can cause this.
214	 * For production, we always get the value from the BIOS.*/
215 reference_frequency =
216 dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
217
218 base = &i2caux_dce110->base;
219
220 if (!dal_i2caux_construct(base, ctx)) {
221 ASSERT_CRITICAL(false);
222 return false;
223 }
224
225 i2caux_dce110->base.funcs = &i2caux_funcs;
226 i2caux_dce110->i2c_hw_buffer_in_use = false;
227	/* Create I2C engines (one per connector DDC line) for the
228	 * different I2C/AUX use cases: DDC, generic GPIO, AUX.
229 */
230 do {
231 enum gpio_ddc_line line_id = hw_ddc_lines[i];
232
233 struct i2c_hw_engine_dce110_create_arg hw_arg_dce110;
234
235 if (use_i2c_sw_engine) {
236 struct i2c_sw_engine_dce110_create_arg sw_arg;
237
238 sw_arg.engine_id = i;
239 sw_arg.default_speed = base->default_i2c_sw_speed;
240 sw_arg.ctx = ctx;
241 base->i2c_sw_engines[line_id] =
242 dal_i2c_sw_engine_dce110_create(&sw_arg);
243 }
244
245 hw_arg_dce110.engine_id = i;
246 hw_arg_dce110.reference_frequency = reference_frequency;
247 hw_arg_dce110.default_speed = base->default_i2c_hw_speed;
248 hw_arg_dce110.ctx = ctx;
249 hw_arg_dce110.regs = &i2c_hw_engine_regs[i];
250 hw_arg_dce110.i2c_shift = i2c_shift;
251 hw_arg_dce110.i2c_mask = i2c_mask;
252
253 base->i2c_hw_engines[line_id] =
254 dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
255
256 ++i;
257 } while (i < ARRAY_SIZE(hw_ddc_lines));
258
259	/* Create AUX engines for all lines which have assisted HW AUX.
260	 * 'i' (loop counter) is used as the DDC/AUX engine_id */
261
262 i = 0;
263
264 do {
265 enum gpio_ddc_line line_id = hw_aux_lines[i];
266
267 struct aux_engine_dce110_init_data aux_init_data;
268
269 aux_init_data.engine_id = i;
270 aux_init_data.timeout_period = base->aux_timeout_period;
271 aux_init_data.ctx = ctx;
272 aux_init_data.regs = &aux_regs[i];
273
274 base->aux_engines[line_id] =
275 dal_aux_engine_dce110_create(&aux_init_data);
276
277 ++i;
278 } while (i < ARRAY_SIZE(hw_aux_lines));
279
280 /*TODO Generic I2C SW and HW*/
281
282 return true;
283}
284
285/*
286 * dal_i2caux_dce110_create
287 *
288 * @brief
289 * public interface to allocate memory for DCE11 I2CAUX
290 *
291 * @param
292 * struct dc_context *ctx - [in]
293 * display core context
294 *
295 * @return
296 * pointer to the base struct of DCE11 I2CAUX
297 */
298struct i2caux *dal_i2caux_dce110_create(
299 struct dc_context *ctx)
300{
301 struct i2caux_dce110 *i2caux_dce110 =
302 dm_alloc(sizeof(struct i2caux_dce110));
303
304 if (!i2caux_dce110) {
305 ASSERT_CRITICAL(false);
306 return NULL;
307 }
308
309 if (dal_i2caux_dce110_construct(
310 i2caux_dce110,
311 ctx,
312 dce110_aux_regs,
313 i2c_hw_engine_regs,
314 &i2c_shift,
315 &i2c_mask))
316 return &i2caux_dce110->base;
317
318 ASSERT_CRITICAL(false);
319
320 dm_free(i2caux_dce110);
321
322 return NULL;
323}
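
The construct loop above walks a fixed table of supported DDC lines, reuses the
loop counter as the engine_id, and stores each created engine in a per-line
array indexed by the GPIO line enum, so unsupported lines simply stay NULL. A
small standalone sketch of that indexing pattern, with invented names and plain
structs instead of the real engine types, is:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum demo_ddc_line {
	DEMO_DDC_LINE1, DEMO_DDC_LINE2, DEMO_DDC_LINE3,
	DEMO_DDC_LINE_COUNT
};

struct demo_engine { unsigned int engine_id; };

/* only some lines have a HW engine behind them */
static const enum demo_ddc_line supported_lines[] = {
	DEMO_DDC_LINE1, DEMO_DDC_LINE3,
};

int main(void)
{
	struct demo_engine pool[ARRAY_SIZE(supported_lines)];
	struct demo_engine *by_line[DEMO_DDC_LINE_COUNT] = { NULL };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(supported_lines); ++i) {
		pool[i].engine_id = i;          /* 'i' doubles as engine_id */
		by_line[supported_lines[i]] = &pool[i];
	}

	for (i = 0; i < DEMO_DDC_LINE_COUNT; ++i)
		printf("line %u -> %s\n", i, by_line[i] ? "engine" : "none");

	return 0;
}

Lookup then mirrors acquire_i2c_hw_engine(): translate the ddc handle to a line
enum, index the array, and return NULL when no engine was created for that line.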
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
new file mode 100644
index 000000000000..fd1cc23c3d70
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCE110_H__
27#define __DAL_I2C_AUX_DCE110_H__
28
29#include "../i2caux.h"
30
31struct i2caux_dce110 {
32 struct i2caux base;
33 /* indicate the I2C HW circular buffer is in use */
34 bool i2c_hw_buffer_in_use;
35};
36
37struct dce110_aux_registers;
38struct dce110_i2c_hw_engine_registers;
39struct dce110_i2c_hw_engine_shift;
40struct dce110_i2c_hw_engine_mask;
41
42struct i2caux *dal_i2caux_dce110_create(
43 struct dc_context *ctx);
44
45bool dal_i2caux_dce110_construct(
46 struct i2caux_dce110 *i2caux_dce110,
47 struct dc_context *ctx,
48 const struct dce110_aux_registers *aux_regs,
49 const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
50 const struct dce110_i2c_hw_engine_shift *i2c_shift,
51 const struct dce110_i2c_hw_engine_mask *i2c_mask);
52
53#endif /* __DAL_I2C_AUX_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
new file mode 100644
index 000000000000..d74f3f15d600
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/i2caux_interface.h"
29#include "../i2caux.h"
30#include "../engine.h"
31#include "../i2c_engine.h"
32#include "../i2c_sw_engine.h"
33#include "../i2c_hw_engine.h"
34
35#include "../dce110/i2caux_dce110.h"
36#include "i2caux_dce112.h"
37
38#include "../dce110/aux_engine_dce110.h"
39
40#include "../dce110/i2c_hw_engine_dce110.h"
41
42#include "dce/dce_11_2_d.h"
43#include "dce/dce_11_2_sh_mask.h"
44
45/* set register offset */
46#define SR(reg_name)\
47 .reg_name = mm ## reg_name
48
49/* set register offset with instance */
50#define SRI(reg_name, block, id)\
51 .reg_name = mm ## block ## id ## _ ## reg_name
52
53#define aux_regs(id)\
54[id] = {\
55 AUX_COMMON_REG_LIST(id), \
56 .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
57}
58
59#define hw_engine_regs(id)\
60{\
61 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
62}
63
64static const struct dce110_aux_registers dce112_aux_regs[] = {
65 aux_regs(0),
66 aux_regs(1),
67 aux_regs(2),
68 aux_regs(3),
69 aux_regs(4),
70 aux_regs(5),
71};
72
73static const struct dce110_i2c_hw_engine_registers dce112_hw_engine_regs[] = {
74 hw_engine_regs(1),
75 hw_engine_regs(2),
76 hw_engine_regs(3),
77 hw_engine_regs(4),
78 hw_engine_regs(5),
79 hw_engine_regs(6)
80};
81
82static const struct dce110_i2c_hw_engine_shift i2c_shift = {
83 I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
84};
85
86static const struct dce110_i2c_hw_engine_mask i2c_mask = {
87 I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
88};
89
90static bool construct(
91 struct i2caux_dce110 *i2caux_dce110,
92 struct dc_context *ctx)
93{
94 if (!dal_i2caux_dce110_construct(
95 i2caux_dce110,
96 ctx,
97 dce112_aux_regs,
98 dce112_hw_engine_regs,
99 &i2c_shift,
100 &i2c_mask)) {
101 ASSERT_CRITICAL(false);
102 return false;
103 }
104
105 return true;
106}
107
108/*
109 * dal_i2caux_dce112_create
110 *
111 * @brief
112 * public interface to allocate memory for DCE11.2 I2CAUX
113 *
114 * @param
115 * struct dc_context *ctx - [in]
116 * display core context
117 *
118 * @return
119 * pointer to the base struct of DCE11.2 I2CAUX
120 */
121struct i2caux *dal_i2caux_dce112_create(
122 struct dc_context *ctx)
123{
124 struct i2caux_dce110 *i2caux_dce110 =
125 dm_alloc(sizeof(struct i2caux_dce110));
126
127 if (!i2caux_dce110) {
128 ASSERT_CRITICAL(false);
129 return NULL;
130 }
131
132 if (construct(i2caux_dce110, ctx))
133 return &i2caux_dce110->base;
134
135 ASSERT_CRITICAL(false);
136
137 dm_free(i2caux_dce110);
138
139 return NULL;
140}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
new file mode 100644
index 000000000000..8d35453c25b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCE112_H__
27#define __DAL_I2C_AUX_DCE112_H__
28
29struct i2caux *dal_i2caux_dce112_create(
30 struct dc_context *ctx);
31
32#endif /* __DAL_I2C_AUX_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
new file mode 100644
index 000000000000..423c38ac880c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
@@ -0,0 +1,885 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../engine.h"
33#include "../i2c_engine.h"
34#include "../i2c_hw_engine.h"
35#include "../i2c_generic_hw_engine.h"
36/*
37 * Header of this unit
38 */
39
40#include "i2c_hw_engine_dce80.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46#include "dce/dce_8_0_d.h"
47#include "dce/dce_8_0_sh_mask.h"
48/*
49 * This unit
50 */
51
52enum dc_i2c_status {
53 DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
54 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
55 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
56};
57
58enum dc_i2c_arbitration {
59 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
60 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
61};
62
63enum {
64 /* No timeout in HW
65 * (timeout implemented in SW by querying status) */
66 I2C_SETUP_TIME_LIMIT = 255,
67 I2C_HW_BUFFER_SIZE = 144
68};
69
70/*
71 * @brief
72 * Cast 'struct i2c_hw_engine *'
73 * to 'struct i2c_hw_engine_dce80 *'
74 */
75#define FROM_I2C_HW_ENGINE(ptr) \
76 container_of((ptr), struct i2c_hw_engine_dce80, base)
77
78/*
79 * @brief
80 * Cast pointer to 'struct i2c_engine *'
81 * to pointer to 'struct i2c_hw_engine_dce80 *'
82 */
83#define FROM_I2C_ENGINE(ptr) \
84 FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
85
86/*
87 * @brief
88 * Cast pointer to 'struct engine *'
89 * to 'pointer to struct i2c_hw_engine_dce80 *'
90 */
91#define FROM_ENGINE(ptr) \
92 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
93
94static void disable_i2c_hw_engine(
95 struct i2c_hw_engine_dce80 *engine)
96{
97 const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
98 uint32_t value = 0;
99
100 struct dc_context *ctx = NULL;
101
102 ctx = engine->base.base.base.ctx;
103
104 value = dm_read_reg(ctx, addr);
105
106 set_reg_field_value(
107 value,
108 0,
109 DC_I2C_DDC1_SETUP,
110 DC_I2C_DDC1_ENABLE);
111
112 dm_write_reg(ctx, addr, value);
113}
114
115static void release_engine(
116 struct engine *engine)
117{
118 struct i2c_hw_engine_dce80 *hw_engine = FROM_ENGINE(engine);
119
120 struct i2c_engine *base = NULL;
121 bool safe_to_reset;
122 uint32_t value = 0;
123
124 base = &hw_engine->base.base;
125
126 /* Restore original HW engine speed */
127
128 base->funcs->set_speed(base, hw_engine->base.original_speed);
129
130 /* Release I2C */
131 {
132 value = dm_read_reg(engine->ctx, mmDC_I2C_ARBITRATION);
133
134 set_reg_field_value(
135 value,
136 1,
137 DC_I2C_ARBITRATION,
138 DC_I2C_SW_DONE_USING_I2C_REG);
139
140 dm_write_reg(engine->ctx, mmDC_I2C_ARBITRATION, value);
141 }
142
143 /* Reset HW engine */
144 {
145 uint32_t i2c_sw_status = 0;
146
147 value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
148
149 i2c_sw_status = get_reg_field_value(
150 value,
151 DC_I2C_SW_STATUS,
152 DC_I2C_SW_STATUS);
153 /* if used by SW, safe to reset */
154 safe_to_reset = (i2c_sw_status == 1);
155 }
156 {
157 value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
158
159 if (safe_to_reset)
160 set_reg_field_value(
161 value,
162 1,
163 DC_I2C_CONTROL,
164 DC_I2C_SOFT_RESET);
165
166 set_reg_field_value(
167 value,
168 1,
169 DC_I2C_CONTROL,
170 DC_I2C_SW_STATUS_RESET);
171
172 dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
173 }
174
175 /* HW I2c engine - clock gating feature */
176 if (!hw_engine->engine_keep_power_up_count)
177 disable_i2c_hw_engine(hw_engine);
178}
179
180static void destruct(
181 struct i2c_hw_engine_dce80 *engine)
182{
183 dal_i2c_hw_engine_destruct(&engine->base);
184}
185
186static void destroy(
187 struct i2c_engine **i2c_engine)
188{
189 struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(*i2c_engine);
190
191 destruct(engine);
192
193 dm_free(engine);
194
195 *i2c_engine = NULL;
196}
197
198static bool setup_engine(
199 struct i2c_engine *i2c_engine)
200{
201 uint32_t value = 0;
202 struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
203
204 /* Program pin select */
205 {
206 const uint32_t addr = mmDC_I2C_CONTROL;
207
208 value = dm_read_reg(i2c_engine->base.ctx, addr);
209
210 set_reg_field_value(
211 value,
212 0,
213 DC_I2C_CONTROL,
214 DC_I2C_GO);
215
216 set_reg_field_value(
217 value,
218 0,
219 DC_I2C_CONTROL,
220 DC_I2C_SOFT_RESET);
221
222 set_reg_field_value(
223 value,
224 0,
225 DC_I2C_CONTROL,
226 DC_I2C_SEND_RESET);
227
228 set_reg_field_value(
229 value,
230 0,
231 DC_I2C_CONTROL,
232 DC_I2C_SW_STATUS_RESET);
233
234 set_reg_field_value(
235 value,
236 0,
237 DC_I2C_CONTROL,
238 DC_I2C_TRANSACTION_COUNT);
239
240 set_reg_field_value(
241 value,
242 engine->engine_id,
243 DC_I2C_CONTROL,
244 DC_I2C_DDC_SELECT);
245
246 dm_write_reg(i2c_engine->base.ctx, addr, value);
247 }
248
249 /* Program time limit */
250 {
251 const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
252
253 value = dm_read_reg(i2c_engine->base.ctx, addr);
254
255 set_reg_field_value(
256 value,
257 I2C_SETUP_TIME_LIMIT,
258 DC_I2C_DDC1_SETUP,
259 DC_I2C_DDC1_TIME_LIMIT);
260
261 set_reg_field_value(
262 value,
263 1,
264 DC_I2C_DDC1_SETUP,
265 DC_I2C_DDC1_ENABLE);
266
267 dm_write_reg(i2c_engine->base.ctx, addr, value);
268 }
269
270 /* Program HW priority
271 * set to High - interrupt software I2C at any time
272 * Enable restart of SW I2C that was interrupted by HW
273 * disable queuing of software while I2C is in use by HW */
274 {
275 value = dm_read_reg(i2c_engine->base.ctx,
276 mmDC_I2C_ARBITRATION);
277
278 set_reg_field_value(
279 value,
280 0,
281 DC_I2C_ARBITRATION,
282 DC_I2C_NO_QUEUED_SW_GO);
283
284 set_reg_field_value(
285 value,
286 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
287 DC_I2C_ARBITRATION,
288 DC_I2C_SW_PRIORITY);
289
290 dm_write_reg(i2c_engine->base.ctx,
291 mmDC_I2C_ARBITRATION, value);
292 }
293
294 return true;
295}
296
297static uint32_t get_speed(
298 const struct i2c_engine *i2c_engine)
299{
300 const struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
301
302 const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
303
304 uint32_t pre_scale = 0;
305
306 uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
307
308 pre_scale = get_reg_field_value(
309 value,
310 DC_I2C_DDC1_SPEED,
311 DC_I2C_DDC1_PRESCALE);
312
313 /* [anaumov] it seems following is unnecessary */
314 /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
315
316 return pre_scale ?
317 engine->reference_frequency / pre_scale :
318 engine->base.default_speed;
319}
320
321static void set_speed(
322 struct i2c_engine *i2c_engine,
323 uint32_t speed)
324{
325 struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
326
327 if (speed) {
328 const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
329
330 uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
331
332 set_reg_field_value(
333 value,
334 engine->reference_frequency / speed,
335 DC_I2C_DDC1_SPEED,
336 DC_I2C_DDC1_PRESCALE);
337
338 set_reg_field_value(
339 value,
340 2,
341 DC_I2C_DDC1_SPEED,
342 DC_I2C_DDC1_THRESHOLD);
343
344 dm_write_reg(i2c_engine->base.ctx, addr, value);
345 }
346}
347
348static inline void reset_hw_engine(struct engine *engine)
349{
350 uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
351
352 set_reg_field_value(
353 value,
354 1,
355 DC_I2C_CONTROL,
356 DC_I2C_SOFT_RESET);
357
358 set_reg_field_value(
359 value,
360 1,
361 DC_I2C_CONTROL,
362 DC_I2C_SW_STATUS_RESET);
363
364 dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
365}
366
367static bool is_hw_busy(struct engine *engine)
368{
369 uint32_t i2c_sw_status = 0;
370
371 uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
372
373 i2c_sw_status = get_reg_field_value(
374 value,
375 DC_I2C_SW_STATUS,
376 DC_I2C_SW_STATUS);
377
378 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
379 return false;
380
381 reset_hw_engine(engine);
382
383 value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
384
385 i2c_sw_status = get_reg_field_value(
386 value,
387 DC_I2C_SW_STATUS,
388 DC_I2C_SW_STATUS);
389
390 return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
391}
392
393/*
394 * @brief
395 * DC_GPIO_DDC MM register offsets
396 */
397static const uint32_t transaction_addr[] = {
398 mmDC_I2C_TRANSACTION0,
399 mmDC_I2C_TRANSACTION1,
400 mmDC_I2C_TRANSACTION2,
401 mmDC_I2C_TRANSACTION3
402};
403
404static bool process_transaction(
405 struct i2c_hw_engine_dce80 *engine,
406 struct i2c_request_transaction_data *request)
407{
408 uint32_t length = request->length;
409 uint8_t *buffer = request->data;
410
411 bool last_transaction = false;
412 uint32_t value = 0;
413
414 struct dc_context *ctx = NULL;
415
416 ctx = engine->base.base.base.ctx;
417
418 {
419 const uint32_t addr =
420 transaction_addr[engine->transaction_count];
421
422 value = dm_read_reg(ctx, addr);
423
424 set_reg_field_value(
425 value,
426 1,
427 DC_I2C_TRANSACTION0,
428 DC_I2C_STOP_ON_NACK0);
429
430 set_reg_field_value(
431 value,
432 1,
433 DC_I2C_TRANSACTION0,
434 DC_I2C_START0);
435
436 if ((engine->transaction_count == 3) ||
437 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
438 (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
439
440 set_reg_field_value(
441 value,
442 1,
443 DC_I2C_TRANSACTION0,
444 DC_I2C_STOP0);
445
446 last_transaction = true;
447 } else
448 set_reg_field_value(
449 value,
450 0,
451 DC_I2C_TRANSACTION0,
452 DC_I2C_STOP0);
453
454 set_reg_field_value(
455 value,
456 (0 != (request->action &
457 I2CAUX_TRANSACTION_ACTION_I2C_READ)),
458 DC_I2C_TRANSACTION0,
459 DC_I2C_RW0);
460
461 set_reg_field_value(
462 value,
463 length,
464 DC_I2C_TRANSACTION0,
465 DC_I2C_COUNT0);
466
467 dm_write_reg(ctx, addr, value);
468 }
469
470 /* Write the I2C address and I2C data
471 * into the hardware circular buffer, one byte per entry.
472	 * As an example, the 7-bit I2C slave address of a CRT monitor's
473	 * DDC/EDID interface is 0b1010000 (0x50).
474	 * For an I2C send operation, the LSB (R/W bit) must be programmed to 0;
475	 * for an I2C receive operation, the LSB must be programmed to 1. */
476
477 {
478 value = 0;
479
480 set_reg_field_value(
481 value,
482 false,
483 DC_I2C_DATA,
484 DC_I2C_DATA_RW);
485
486 set_reg_field_value(
487 value,
488 request->address,
489 DC_I2C_DATA,
490 DC_I2C_DATA);
491
492 if (engine->transaction_count == 0) {
493 set_reg_field_value(
494 value,
495 0,
496 DC_I2C_DATA,
497 DC_I2C_INDEX);
498
499 /*enable index write*/
500 set_reg_field_value(
501 value,
502 1,
503 DC_I2C_DATA,
504 DC_I2C_INDEX_WRITE);
505 }
506
507 dm_write_reg(ctx, mmDC_I2C_DATA, value);
508
509 if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
510
511 set_reg_field_value(
512 value,
513 0,
514 DC_I2C_DATA,
515 DC_I2C_INDEX_WRITE);
516
517 while (length) {
518
519 set_reg_field_value(
520 value,
521 *buffer++,
522 DC_I2C_DATA,
523 DC_I2C_DATA);
524
525 dm_write_reg(ctx, mmDC_I2C_DATA, value);
526 --length;
527 }
528 }
529 }
530
531 ++engine->transaction_count;
532 engine->buffer_used_bytes += length + 1;
533
534 return last_transaction;
535}
536
537static void execute_transaction(
538 struct i2c_hw_engine_dce80 *engine)
539{
540 uint32_t value = 0;
541 struct dc_context *ctx = NULL;
542
543 ctx = engine->base.base.base.ctx;
544
545 {
546 const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
547
548 value = dm_read_reg(ctx, addr);
549
550 set_reg_field_value(
551 value,
552 0,
553 DC_I2C_DDC1_SETUP,
554 DC_I2C_DDC1_DATA_DRIVE_EN);
555
556 set_reg_field_value(
557 value,
558 0,
559 DC_I2C_DDC1_SETUP,
560 DC_I2C_DDC1_CLK_DRIVE_EN);
561
562 set_reg_field_value(
563 value,
564 0,
565 DC_I2C_DDC1_SETUP,
566 DC_I2C_DDC1_DATA_DRIVE_SEL);
567
568 set_reg_field_value(
569 value,
570 0,
571 DC_I2C_DDC1_SETUP,
572 DC_I2C_DDC1_INTRA_TRANSACTION_DELAY);
573
574 set_reg_field_value(
575 value,
576 0,
577 DC_I2C_DDC1_SETUP,
578 DC_I2C_DDC1_INTRA_BYTE_DELAY);
579
580 dm_write_reg(ctx, addr, value);
581 }
582
583 {
584 const uint32_t addr = mmDC_I2C_CONTROL;
585
586 value = dm_read_reg(ctx, addr);
587
588 set_reg_field_value(
589 value,
590 0,
591 DC_I2C_CONTROL,
592 DC_I2C_SOFT_RESET);
593
594 set_reg_field_value(
595 value,
596 0,
597 DC_I2C_CONTROL,
598 DC_I2C_SW_STATUS_RESET);
599
600 set_reg_field_value(
601 value,
602 0,
603 DC_I2C_CONTROL,
604 DC_I2C_SEND_RESET);
605
606 set_reg_field_value(
607 value,
608 0,
609 DC_I2C_CONTROL,
610 DC_I2C_GO);
611
612 set_reg_field_value(
613 value,
614 engine->transaction_count - 1,
615 DC_I2C_CONTROL,
616 DC_I2C_TRANSACTION_COUNT);
617
618 dm_write_reg(ctx, addr, value);
619 }
620
621 /* start I2C transfer */
622 {
623 const uint32_t addr = mmDC_I2C_CONTROL;
624
625 value = dm_read_reg(ctx, addr);
626
627 set_reg_field_value(
628 value,
629 1,
630 DC_I2C_CONTROL,
631 DC_I2C_GO);
632
633 dm_write_reg(ctx, addr, value);
634 }
635
636 /* all transactions were executed and HW buffer became empty
637 * (even though it actually happens when status becomes DONE) */
638 engine->transaction_count = 0;
639 engine->buffer_used_bytes = 0;
640}
641
642static void submit_channel_request(
643 struct i2c_engine *engine,
644 struct i2c_request_transaction_data *request)
645{
646 request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
647
648 if (!process_transaction(FROM_I2C_ENGINE(engine), request))
649 return;
650
651 if (is_hw_busy(&engine->base)) {
652 request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
653 return;
654 }
655
656 execute_transaction(FROM_I2C_ENGINE(engine));
657}
658
659static void process_channel_reply(
660 struct i2c_engine *engine,
661 struct i2c_reply_transaction_data *reply)
662{
663 uint32_t length = reply->length;
664 uint8_t *buffer = reply->data;
665
666 uint32_t value = 0;
667
668 /*set index*/
669 set_reg_field_value(
670 value,
671 length - 1,
672 DC_I2C_DATA,
673 DC_I2C_INDEX);
674
675 set_reg_field_value(
676 value,
677 1,
678 DC_I2C_DATA,
679 DC_I2C_DATA_RW);
680
681 set_reg_field_value(
682 value,
683 1,
684 DC_I2C_DATA,
685 DC_I2C_INDEX_WRITE);
686
687 dm_write_reg(engine->base.ctx, mmDC_I2C_DATA, value);
688
689 while (length) {
690 /* after reading the status,
691 * if the I2C operation executed successfully
692 * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
693 * should read data bytes from I2C circular data buffer */
694
695 value = dm_read_reg(engine->base.ctx, mmDC_I2C_DATA);
696
697 *buffer++ = get_reg_field_value(
698 value,
699 DC_I2C_DATA,
700 DC_I2C_DATA);
701
702 --length;
703 }
704}
705
706static enum i2c_channel_operation_result get_channel_status(
707 struct i2c_engine *engine,
708 uint8_t *returned_bytes)
709{
710 uint32_t i2c_sw_status = 0;
711 uint32_t value = dm_read_reg(engine->base.ctx, mmDC_I2C_SW_STATUS);
712
713 i2c_sw_status = get_reg_field_value(
714 value,
715 DC_I2C_SW_STATUS,
716 DC_I2C_SW_STATUS);
717
718 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
719 return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
720 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK)
721 return I2C_CHANNEL_OPERATION_NO_RESPONSE;
722 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK)
723 return I2C_CHANNEL_OPERATION_TIMEOUT;
724 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK)
725 return I2C_CHANNEL_OPERATION_FAILED;
726 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK)
727 return I2C_CHANNEL_OPERATION_SUCCEEDED;
728
729 /*
730	 * this is the case when HW is used for communication;
731	 * I2C_SW_STATUS could be zero
732 */
733 return I2C_CHANNEL_OPERATION_SUCCEEDED;
734}
735
736static uint32_t get_hw_buffer_available_size(
737 const struct i2c_hw_engine *engine)
738{
739 return I2C_HW_BUFFER_SIZE -
740 FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
741}
742
743static uint32_t get_transaction_timeout(
744 const struct i2c_hw_engine *engine,
745 uint32_t length)
746{
747 uint32_t speed = engine->base.funcs->get_speed(&engine->base);
748
749 uint32_t period_timeout;
750 uint32_t num_of_clock_stretches;
751
752 if (!speed)
753 return 0;
754
755 period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
756
757 num_of_clock_stretches = 1 + (length << 3) + 1;
758 num_of_clock_stretches +=
759 (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
760 (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
761
762 return period_timeout * num_of_clock_stretches;
763}
764
765/*
766 * @brief
767 * DC_I2C_DDC1_SETUP MM register offsets
768 *
769 * @note
770 * The indices of this offset array are DDC engine IDs
771 */
772static const int32_t ddc_setup_offset[] = {
773
774 mmDC_I2C_DDC1_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 1 */
775 mmDC_I2C_DDC2_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 2 */
776 mmDC_I2C_DDC3_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 3 */
777 mmDC_I2C_DDC4_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 4 */
778 mmDC_I2C_DDC5_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 5 */
779 mmDC_I2C_DDC6_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 6 */
780 mmDC_I2C_DDCVGA_SETUP - mmDC_I2C_DDC1_SETUP /* DDC Engine 7 */
781};
782
783/*
784 * @brief
785 * DC_I2C_DDC1_SPEED MM register offsets
786 *
787 * @note
788 * The indices of this offset array are DDC engine IDs
789 */
790static const int32_t ddc_speed_offset[] = {
791 mmDC_I2C_DDC1_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 1 */
792 mmDC_I2C_DDC2_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 2 */
793 mmDC_I2C_DDC3_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 3 */
794 mmDC_I2C_DDC4_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 4 */
795 mmDC_I2C_DDC5_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 5 */
796 mmDC_I2C_DDC6_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 6 */
797 mmDC_I2C_DDCVGA_SPEED - mmDC_I2C_DDC1_SPEED /* DDC Engine 7 */
798};
799
800static const struct i2c_engine_funcs i2c_engine_funcs = {
801 .destroy = destroy,
802 .get_speed = get_speed,
803 .set_speed = set_speed,
804 .setup_engine = setup_engine,
805 .submit_channel_request = submit_channel_request,
806 .process_channel_reply = process_channel_reply,
807 .get_channel_status = get_channel_status,
808 .acquire_engine = dal_i2c_hw_engine_acquire_engine,
809};
810
811static const struct engine_funcs engine_funcs = {
812 .release_engine = release_engine,
813 .get_engine_type = dal_i2c_hw_engine_get_engine_type,
814 .acquire = dal_i2c_engine_acquire,
815 .submit_request = dal_i2c_hw_engine_submit_request,
816};
817
818static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
819 .get_hw_buffer_available_size =
820 get_hw_buffer_available_size,
821 .get_transaction_timeout =
822 get_transaction_timeout,
823 .wait_on_operation_result =
824 dal_i2c_hw_engine_wait_on_operation_result,
825};
826
827static bool construct(
828 struct i2c_hw_engine_dce80 *engine,
829 const struct i2c_hw_engine_dce80_create_arg *arg)
830{
831 if (arg->engine_id >= sizeof(ddc_setup_offset) / sizeof(int32_t))
832 return false;
833 if (arg->engine_id >= sizeof(ddc_speed_offset) / sizeof(int32_t))
834 return false;
835
836 if (!arg->reference_frequency)
837 return false;
838
839 if (!dal_i2c_hw_engine_construct(&engine->base, arg->ctx))
840 return false;
841
842 engine->base.base.base.funcs = &engine_funcs;
843 engine->base.base.funcs = &i2c_engine_funcs;
844 engine->base.funcs = &i2c_hw_engine_funcs;
845 engine->base.default_speed = arg->default_speed;
846 engine->addr.DC_I2C_DDCX_SETUP =
847 mmDC_I2C_DDC1_SETUP + ddc_setup_offset[arg->engine_id];
848 engine->addr.DC_I2C_DDCX_SPEED =
849 mmDC_I2C_DDC1_SPEED + ddc_speed_offset[arg->engine_id];
850
851 engine->engine_id = arg->engine_id;
852 engine->reference_frequency = arg->reference_frequency;
853 engine->buffer_used_bytes = 0;
854 engine->transaction_count = 0;
855 engine->engine_keep_power_up_count = 1;
856
857 return true;
858}
859
860struct i2c_engine *dal_i2c_hw_engine_dce80_create(
861 const struct i2c_hw_engine_dce80_create_arg *arg)
862{
863 struct i2c_hw_engine_dce80 *engine;
864
865 if (!arg) {
866 BREAK_TO_DEBUGGER();
867 return NULL;
868 }
869
870 engine = dm_alloc(sizeof(struct i2c_hw_engine_dce80));
871
872 if (!engine) {
873 BREAK_TO_DEBUGGER();
874 return NULL;
875 }
876
877 if (construct(engine, arg))
878 return &engine->base.base;
879
880 BREAK_TO_DEBUGGER();
881
882 dm_free(engine);
883
884 return NULL;
885}
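
Nearly every register access in this engine follows the same read-modify-write
shape: read the register, update one field through its mask and shift, then
write the whole value back. A minimal sketch of what the
set_reg_field_value()/get_reg_field_value() helpers boil down to, written as
plain functions over a made-up field layout rather than the real DC_I2C
registers, is:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static uint32_t set_field(uint32_t reg, uint32_t value,
		uint32_t mask, uint32_t shift)
{
	/* clear the field, then OR in the new value at its bit position */
	return (reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
	/* hypothetical field occupying bits [15:8] of a setup register */
	const uint32_t TIME_LIMIT_MASK = 0x0000ff00;
	const uint32_t TIME_LIMIT_SHIFT = 8;

	uint32_t reg = 0x12340078;	/* pretend this came from dm_read_reg() */

	reg = set_field(reg, 255, TIME_LIMIT_MASK, TIME_LIMIT_SHIFT);

	/* pretend reg is now handed back to dm_write_reg() */
	printf("reg = 0x%08x, time limit = %u\n",
		(unsigned int)reg,
		(unsigned int)get_field(reg, TIME_LIMIT_MASK, TIME_LIMIT_SHIFT));
	return 0;
}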
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
new file mode 100644
index 000000000000..5c6116fb5479
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_HW_ENGINE_DCE80_H__
27#define __DAL_I2C_HW_ENGINE_DCE80_H__
28
29struct i2c_hw_engine_dce80 {
30 struct i2c_hw_engine base;
31 struct {
32 uint32_t DC_I2C_DDCX_SETUP;
33 uint32_t DC_I2C_DDCX_SPEED;
34 } addr;
35 uint32_t engine_id;
36 /* expressed in kilohertz */
37 uint32_t reference_frequency;
38 /* number of bytes currently used in HW buffer */
39 uint32_t buffer_used_bytes;
40 /* number of pending transactions (before GO) */
41 uint32_t transaction_count;
42 uint32_t engine_keep_power_up_count;
43};
44
45struct i2c_hw_engine_dce80_create_arg {
46 uint32_t engine_id;
47 uint32_t reference_frequency;
48 uint32_t default_speed;
49 struct dc_context *ctx;
50};
51
52struct i2c_engine *dal_i2c_hw_engine_dce80_create(
53 const struct i2c_hw_engine_dce80_create_arg *arg);
54#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
new file mode 100644
index 000000000000..804a3266c578
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../engine.h"
33#include "../i2c_engine.h"
34#include "../i2c_sw_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "i2c_sw_engine_dce80.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46#include "dce/dce_8_0_d.h"
47#include "dce/dce_8_0_sh_mask.h"
48
49/*
50 * This unit
51 */
52
53static const uint32_t ddc_hw_status_addr[] = {
54 mmDC_I2C_DDC1_HW_STATUS,
55 mmDC_I2C_DDC2_HW_STATUS,
56 mmDC_I2C_DDC3_HW_STATUS,
57 mmDC_I2C_DDC4_HW_STATUS,
58 mmDC_I2C_DDC5_HW_STATUS,
59 mmDC_I2C_DDC6_HW_STATUS,
60 mmDC_I2C_DDCVGA_HW_STATUS
61};
62
63/*
64 * @brief
65 * Cast 'struct i2c_sw_engine *'
66 * to 'struct i2c_sw_engine_dce80 *'
67 */
68#define FROM_I2C_SW_ENGINE(ptr) \
69 container_of((ptr), struct i2c_sw_engine_dce80, base)
70
71/*
72 * @brief
73 * Cast 'struct i2c_engine *'
74 * to 'struct i2c_sw_engine_dce80 *'
75 */
76#define FROM_I2C_ENGINE(ptr) \
77 FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
78
79/*
80 * @brief
81 * Cast 'struct engine *'
82 * to 'struct i2c_sw_engine_dce80 *'
83 */
84#define FROM_ENGINE(ptr) \
85 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
86
87static void release_engine(
88 struct engine *engine)
89{
90
91}
92
93static void destruct(
94 struct i2c_sw_engine_dce80 *engine)
95{
96 dal_i2c_sw_engine_destruct(&engine->base);
97}
98
99static void destroy(
100 struct i2c_engine **engine)
101{
102 struct i2c_sw_engine_dce80 *sw_engine = FROM_I2C_ENGINE(*engine);
103
104 destruct(sw_engine);
105
106 dm_free(sw_engine);
107
108 *engine = NULL;
109}
110
111static bool acquire_engine(
112 struct i2c_engine *engine,
113 struct ddc *ddc_handle)
114{
115 return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
116}
117
118static const struct i2c_engine_funcs i2c_engine_funcs = {
119 .acquire_engine = acquire_engine,
120 .destroy = destroy,
121 .get_speed = dal_i2c_sw_engine_get_speed,
122 .set_speed = dal_i2c_sw_engine_set_speed,
123 .setup_engine = dal_i2c_engine_setup_i2c_engine,
124 .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
125 .process_channel_reply = dal_i2c_engine_process_channel_reply,
126 .get_channel_status = dal_i2c_sw_engine_get_channel_status,
127};
128
129static const struct engine_funcs engine_funcs = {
130 .release_engine = release_engine,
131 .get_engine_type = dal_i2c_sw_engine_get_engine_type,
132 .acquire = dal_i2c_engine_acquire,
133 .submit_request = dal_i2c_sw_engine_submit_request,
134};
135
136static bool construct(
137 struct i2c_sw_engine_dce80 *engine,
138 const struct i2c_sw_engine_dce80_create_arg *arg)
139{
140 struct i2c_sw_engine_create_arg arg_base;
141
142 arg_base.ctx = arg->ctx;
143 arg_base.default_speed = arg->default_speed;
144
145 if (!dal_i2c_sw_engine_construct(&engine->base, &arg_base)) {
146 BREAK_TO_DEBUGGER();
147 return false;
148 }
149
150 engine->base.base.base.funcs = &engine_funcs;
151 engine->base.base.funcs = &i2c_engine_funcs;
152 engine->base.default_speed = arg->default_speed;
153 engine->engine_id = arg->engine_id;
154
155 return true;
156}
157
158struct i2c_engine *dal_i2c_sw_engine_dce80_create(
159 const struct i2c_sw_engine_dce80_create_arg *arg)
160{
161 struct i2c_sw_engine_dce80 *engine;
162
163 if (!arg) {
164 BREAK_TO_DEBUGGER();
165 return NULL;
166 }
167
168 engine = dm_alloc(sizeof(struct i2c_sw_engine_dce80));
169
170 if (!engine) {
171 BREAK_TO_DEBUGGER();
172 return NULL;
173 }
174
175 if (construct(engine, arg))
176 return &engine->base.base;
177
178 BREAK_TO_DEBUGGER();
179
180 dm_free(engine);
181
182 return NULL;
183}
184
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
new file mode 100644
index 000000000000..26355c088746
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_SW_ENGINE_DCE80_H__
27#define __DAL_I2C_SW_ENGINE_DCE80_H__
28
29struct i2c_sw_engine_dce80 {
30 struct i2c_sw_engine base;
31 uint32_t engine_id;
32};
33
34struct i2c_sw_engine_dce80_create_arg {
35 uint32_t engine_id;
36 uint32_t default_speed;
37 struct dc_context *ctx;
38};
39
40struct i2c_engine *dal_i2c_sw_engine_dce80_create(
41 const struct i2c_sw_engine_dce80_create_arg *arg);
42
43#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
new file mode 100644
index 000000000000..5e71450c44e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../i2caux.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "i2caux_dce80.h"
39
40/*
41 * Post-requisites: headers required by this unit
42 */
43
44#include "../engine.h"
45#include "../i2c_engine.h"
46#include "../i2c_sw_engine.h"
47#include "i2c_sw_engine_dce80.h"
48#include "../i2c_hw_engine.h"
49#include "i2c_hw_engine_dce80.h"
50#include "../i2c_generic_hw_engine.h"
51#include "../aux_engine.h"
52
53
54#include "../dce110/aux_engine_dce110.h"
55#include "../dce110/i2caux_dce110.h"
56
57#include "dce/dce_8_0_d.h"
58#include "dce/dce_8_0_sh_mask.h"
59
60
61/* set register offset */
62#define SR(reg_name)\
63 .reg_name = mm ## reg_name
64
65/* set register offset with instance */
66#define SRI(reg_name, block, id)\
67 .reg_name = mm ## block ## id ## _ ## reg_name
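/* For illustration only (the register names below are hypothetical examples
 * of how the token pasting expands):
 *   SR(DC_I2C_CONTROL)          -> .DC_I2C_CONTROL = mmDC_I2C_CONTROL
 *   SRI(AUX_CONTROL, DP_AUX, 0) -> .AUX_CONTROL = mmDP_AUX0_AUX_CONTROL
 */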
68
69#define aux_regs(id)\
70[id] = {\
71 AUX_COMMON_REG_LIST(id), \
72 .AUX_RESET_MASK = 0 \
73}
74
75static const struct dce110_aux_registers dce80_aux_regs[] = {
76 aux_regs(0),
77 aux_regs(1),
78 aux_regs(2),
79 aux_regs(3),
80 aux_regs(4),
81 aux_regs(5)
82};
83
84/*
85 * This unit
86 */
87
88#define FROM_I2C_AUX(ptr) \
89 container_of((ptr), struct i2caux_dce80, base)
90
91static void destruct(
92 struct i2caux_dce80 *i2caux_dce80)
93{
94 dal_i2caux_destruct(&i2caux_dce80->base);
95}
96
97static void destroy(
98 struct i2caux **i2c_engine)
99{
100 struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(*i2c_engine);
101
102 destruct(i2caux_dce80);
103
104 dm_free(i2caux_dce80);
105
106 *i2c_engine = NULL;
107}
108
109static struct i2c_engine *acquire_i2c_hw_engine(
110 struct i2caux *i2caux,
111 struct ddc *ddc)
112{
113 struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(i2caux);
114
115 struct i2c_engine *engine = NULL;
116 bool non_generic;
117
118 if (!ddc)
119 return NULL;
120
121 if (ddc->hw_info.hw_supported) {
122 enum gpio_ddc_line line = dal_ddc_get_line(ddc);
123
124 if (line < GPIO_DDC_LINE_COUNT) {
125 non_generic = true;
126 engine = i2caux->i2c_hw_engines[line];
127 }
128 }
129
130 if (!engine) {
131 non_generic = false;
132 engine = i2caux->i2c_generic_hw_engine;
133 }
134
135 if (!engine)
136 return NULL;
137
138 if (non_generic) {
139 if (!i2caux_dce80->i2c_hw_buffer_in_use &&
140 engine->base.funcs->acquire(&engine->base, ddc)) {
141 i2caux_dce80->i2c_hw_buffer_in_use = true;
142 return engine;
143 }
144 } else {
145 if (engine->base.funcs->acquire(&engine->base, ddc))
146 return engine;
147 }
148
149 return NULL;
150}
151
152static void release_engine(
153 struct i2caux *i2caux,
154 struct engine *engine)
155{
156 if (engine->funcs->get_engine_type(engine) ==
157 I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
158 FROM_I2C_AUX(i2caux)->i2c_hw_buffer_in_use = false;
159
160 dal_i2caux_release_engine(i2caux, engine);
161}
162
163static const enum gpio_ddc_line hw_ddc_lines[] = {
164 GPIO_DDC_LINE_DDC1,
165 GPIO_DDC_LINE_DDC2,
166 GPIO_DDC_LINE_DDC3,
167 GPIO_DDC_LINE_DDC4,
168 GPIO_DDC_LINE_DDC5,
169 GPIO_DDC_LINE_DDC6,
170 GPIO_DDC_LINE_DDC_VGA
171};
172
173static const enum gpio_ddc_line hw_aux_lines[] = {
174 GPIO_DDC_LINE_DDC1,
175 GPIO_DDC_LINE_DDC2,
176 GPIO_DDC_LINE_DDC3,
177 GPIO_DDC_LINE_DDC4,
178 GPIO_DDC_LINE_DDC5,
179 GPIO_DDC_LINE_DDC6
180};
181
182static const struct i2caux_funcs i2caux_funcs = {
183 .destroy = destroy,
184 .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
185 .release_engine = release_engine,
186 .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
187 .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
188};
189
190static bool construct(
191 struct i2caux_dce80 *i2caux_dce80,
192 struct dc_context *ctx)
193{
194	/* The entire family has the I2C engine reference clock frequency
195	 * changed from XTALIN (27) to XTALIN/2 (13.5) */
196
197 struct i2caux *base = &i2caux_dce80->base;
198
199 uint32_t reference_frequency =
200 dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
201
202 /*bool use_i2c_sw_engine = dal_adapter_service_is_feature_supported(as,
203 FEATURE_RESTORE_USAGE_I2C_SW_ENGINE);*/
204
205	/* Use SW I2C for dce8 currently, since we have a bug with HW I2C */
206 bool use_i2c_sw_engine = true;
207
208 uint32_t i;
209
210 if (!dal_i2caux_construct(base, ctx)) {
211 BREAK_TO_DEBUGGER();
212 return false;
213 }
214
215 i2caux_dce80->base.funcs = &i2caux_funcs;
216 i2caux_dce80->i2c_hw_buffer_in_use = false;
217
218 /* Create I2C HW engines (HW + SW pairs)
219	 * for all lines which have assisted HW DDC
220 * 'i' (loop counter) used as DDC/AUX engine_id */
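	/* For example, the first iteration (i == 0) creates the engine(s)
	 * for GPIO_DDC_LINE_DDC1 with engine_id 0, the second for
	 * GPIO_DDC_LINE_DDC2 with engine_id 1, and so on. */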
221
222 i = 0;
223
224 do {
225 enum gpio_ddc_line line_id = hw_ddc_lines[i];
226
227 struct i2c_hw_engine_dce80_create_arg hw_arg;
228
229 if (use_i2c_sw_engine) {
230 struct i2c_sw_engine_dce80_create_arg sw_arg;
231
232 sw_arg.engine_id = i;
233 sw_arg.default_speed = base->default_i2c_sw_speed;
234 sw_arg.ctx = ctx;
235 base->i2c_sw_engines[line_id] =
236 dal_i2c_sw_engine_dce80_create(&sw_arg);
237 }
238
239 hw_arg.engine_id = i;
240 hw_arg.reference_frequency = reference_frequency;
241 hw_arg.default_speed = base->default_i2c_hw_speed;
242 hw_arg.ctx = ctx;
243
244 base->i2c_hw_engines[line_id] =
245 dal_i2c_hw_engine_dce80_create(&hw_arg);
246
247 ++i;
248 } while (i < ARRAY_SIZE(hw_ddc_lines));
249
250	/* Create AUX engines for all lines which have assisted HW AUX
251 * 'i' (loop counter) used as DDC/AUX engine_id */
252
253 i = 0;
254
255 do {
256 enum gpio_ddc_line line_id = hw_aux_lines[i];
257
258 struct aux_engine_dce110_init_data arg;
259
260 arg.engine_id = i;
261 arg.timeout_period = base->aux_timeout_period;
262 arg.ctx = ctx;
263 arg.regs = &dce80_aux_regs[i];
264
265 base->aux_engines[line_id] =
266 dal_aux_engine_dce110_create(&arg);
267
268 ++i;
269 } while (i < ARRAY_SIZE(hw_aux_lines));
270
271 /* TODO Generic I2C SW and HW */
272
273 return true;
274}
275
276struct i2caux *dal_i2caux_dce80_create(
277 struct dc_context *ctx)
278{
279 struct i2caux_dce80 *i2caux_dce80 =
280 dm_alloc(sizeof(struct i2caux_dce80));
281
282 if (!i2caux_dce80) {
283 BREAK_TO_DEBUGGER();
284 return NULL;
285 }
286
287 if (construct(i2caux_dce80, ctx))
288 return &i2caux_dce80->base;
289
290 BREAK_TO_DEBUGGER();
291
292 dm_free(i2caux_dce80);
293
294 return NULL;
295}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
new file mode 100644
index 000000000000..21908629e973
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCE80_H__
27#define __DAL_I2C_AUX_DCE80_H__
28
29struct i2caux_dce80 {
30 struct i2caux base;
31 /* indicate the I2C HW circular buffer is in use */
32 bool i2c_hw_buffer_in_use;
33};
34
35struct i2caux *dal_i2caux_dce80_create(
36 struct dc_context *ctx);
37
38#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
new file mode 100644
index 000000000000..029bf735036c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../i2caux.h"
33#include "../engine.h"
34#include "../i2c_engine.h"
35#include "../i2c_sw_engine.h"
36#include "../i2c_hw_engine.h"
37
38/*
39 * Header of this unit
40 */
41#include "i2caux_diag.h"
42
43/*
44 * Post-requisites: headers required by this unit
45 */
46
47/*
48 * This unit
49 */
50
51static void destruct(
52 struct i2caux *i2caux)
53{
54 dal_i2caux_destruct(i2caux);
55}
56
57static void destroy(
58 struct i2caux **i2c_engine)
59{
60 destruct(*i2c_engine);
61
62 dm_free(*i2c_engine);
63
64 *i2c_engine = NULL;
65}
66
67/* function table */
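/* Note: in this diagnostics (FPGA) variant only destroy() is populated;
 * the acquire/release hooks are left NULL here. */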
68static const struct i2caux_funcs i2caux_funcs = {
69 .destroy = destroy,
70 .acquire_i2c_hw_engine = NULL,
71 .release_engine = NULL,
72 .acquire_i2c_sw_engine = NULL,
73 .acquire_aux_engine = NULL,
74};
75
76static bool construct(
77 struct i2caux *i2caux,
78 struct dc_context *ctx)
79{
80 if (!dal_i2caux_construct(i2caux, ctx)) {
81 ASSERT_CRITICAL(false);
82 return false;
83 }
84
85 i2caux->funcs = &i2caux_funcs;
86
87 return true;
88}
89
90struct i2caux *dal_i2caux_diag_fpga_create(
91 struct dc_context *ctx)
92{
93 struct i2caux *i2caux = dm_alloc(sizeof(struct i2caux));
94
95 if (!i2caux) {
96 ASSERT_CRITICAL(false);
97 return NULL;
98 }
99
100 if (construct(i2caux, ctx))
101 return i2caux;
102
103 ASSERT_CRITICAL(false);
104
105 dm_free(i2caux);
106
107 return NULL;
108}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
new file mode 100644
index 000000000000..a83eeb748283
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DIAG_FPGA_H__
27#define __DAL_I2C_AUX_DIAG_FPGA_H__
28
29struct i2caux *dal_i2caux_diag_fpga_create(
30 struct dc_context *ctx);
31
32#endif /* __DAL_I2C_AUX_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
new file mode 100644
index 000000000000..76fe2df3c2f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -0,0 +1,120 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_ENGINE_H__
27#define __DAL_ENGINE_H__
28
29enum i2caux_transaction_operation {
30 I2CAUX_TRANSACTION_READ,
31 I2CAUX_TRANSACTION_WRITE
32};
33
34enum i2caux_transaction_address_space {
35 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
36 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
37};
38
39struct i2caux_transaction_payload {
40 enum i2caux_transaction_address_space address_space;
41 uint32_t address;
42 uint32_t length;
43 uint8_t *data;
44};
45
46enum i2caux_transaction_status {
47 I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
48 I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
49 I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
50 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
51 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
52 I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
53 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
54 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
55 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
56 I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
57};
58
59struct i2caux_transaction_request {
60 enum i2caux_transaction_operation operation;
61 struct i2caux_transaction_payload payload;
62 enum i2caux_transaction_status status;
63};
64
65enum i2caux_engine_type {
66 I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
67 I2CAUX_ENGINE_TYPE_AUX,
68 I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
69 I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
70 I2CAUX_ENGINE_TYPE_I2C_SW
71};
72
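/* Default I2C speeds below are expressed in kHz, consistent with the
 * speed fields of struct i2c_hw_engine (see i2c_hw_engine.h). */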
73enum i2c_default_speed {
74 I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
75 I2CAUX_DEFAULT_I2C_SW_SPEED = 50
76};
77
78enum i2caux_transaction_action {
79 I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
80 I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
81 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
82
83 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
84 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
85 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
86
87 I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
88 I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
89};
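/* Note (an observation, not something this header guarantees): the values
 * appear to mirror the DP AUX command nibble shifted left by four bits,
 * with 0x40 acting as the MOT (middle-of-transaction) flag for the I2C
 * actions. */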
90
91struct engine;
92
93struct engine_funcs {
94 enum i2caux_engine_type (*get_engine_type)(
95 const struct engine *engine);
96 bool (*acquire)(
97 struct engine *engine,
98 struct ddc *ddc);
99 bool (*submit_request)(
100 struct engine *engine,
101 struct i2caux_transaction_request *request,
102 bool middle_of_transaction);
103 void (*release_engine)(
104 struct engine *engine);
105};
106
107struct engine {
108 const struct engine_funcs *funcs;
109 struct ddc *ddc;
110 struct dc_context *ctx;
111};
112
113bool dal_i2caux_construct_engine(
114 struct engine *engine,
115 struct dc_context *ctx);
116
117void dal_i2caux_destruct_engine(
118 struct engine *engine);
119
120#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
new file mode 100644
index 000000000000..09da81379491
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32
33/*
34 * Header of this unit
35 */
36
37#include "engine.h"
38
39bool dal_i2caux_construct_engine(
40 struct engine *engine,
41 struct dc_context *ctx)
42{
43 engine->ddc = NULL;
44 engine->ctx = ctx;
45 return true;
46}
47
48void dal_i2caux_destruct_engine(
49 struct engine *engine)
50{
51 /* nothing to do */
52}
53
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
new file mode 100644
index 000000000000..144f51dc4523
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "i2c_engine.h"
39
40/*
41 * Post-requisites: headers required by this unit
42 */
43
44/*
45 * This unit
46 */
47
48#define FROM_ENGINE(ptr) \
49 container_of((ptr), struct i2c_engine, base)
50
51bool dal_i2c_engine_acquire(
52 struct engine *engine,
53 struct ddc *ddc_handle)
54{
55 struct i2c_engine *i2c_engine = FROM_ENGINE(engine);
56
57 uint32_t counter = 0;
58 bool result;
59
60 do {
61 result = i2c_engine->funcs->acquire_engine(
62 i2c_engine, ddc_handle);
63
64 if (result)
65 break;
66
67		/* i2c_engine is busy (held by VBIOS); let's wait and retry */
68
69 udelay(10);
70
71 ++counter;
72 } while (counter < 2);
73
74 if (result) {
75 if (!i2c_engine->funcs->setup_engine(i2c_engine)) {
76 engine->funcs->release_engine(engine);
77 result = false;
78 }
79 }
80
81 return result;
82}
83
84bool dal_i2c_engine_setup_i2c_engine(
85 struct i2c_engine *engine)
86{
87 /* Derivative classes do not have to override this */
88
89 return true;
90}
91
92void dal_i2c_engine_submit_channel_request(
93 struct i2c_engine *engine,
94 struct i2c_request_transaction_data *request)
95{
96
97}
98
99void dal_i2c_engine_process_channel_reply(
100 struct i2c_engine *engine,
101 struct i2c_reply_transaction_data *reply)
102{
103
104}
105
106bool dal_i2c_engine_construct(
107 struct i2c_engine *engine,
108 struct dc_context *ctx)
109{
110 if (!dal_i2caux_construct_engine(&engine->base, ctx))
111 return false;
112
113 engine->timeout_delay = 0;
114 return true;
115}
116
117void dal_i2c_engine_destruct(
118 struct i2c_engine *engine)
119{
120 dal_i2caux_destruct_engine(&engine->base);
121}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
new file mode 100644
index 000000000000..ce2c51ddea02
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_ENGINE_H__
27#define __DAL_I2C_ENGINE_H__
28
29enum i2c_channel_operation_result {
30 I2C_CHANNEL_OPERATION_SUCCEEDED,
31 I2C_CHANNEL_OPERATION_FAILED,
32 I2C_CHANNEL_OPERATION_NOT_GRANTED,
33 I2C_CHANNEL_OPERATION_IS_BUSY,
34 I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED,
35 I2C_CHANNEL_OPERATION_CHANNEL_IN_USE,
36 I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED,
37 I2C_CHANNEL_OPERATION_ENGINE_BUSY,
38 I2C_CHANNEL_OPERATION_TIMEOUT,
39 I2C_CHANNEL_OPERATION_NO_RESPONSE,
40 I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS,
41 I2C_CHANNEL_OPERATION_WRONG_PARAMETER,
42 I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES,
43 I2C_CHANNEL_OPERATION_NOT_STARTED
44};
45
46struct i2c_request_transaction_data {
47 enum i2caux_transaction_action action;
48 enum i2c_channel_operation_result status;
49 uint8_t address;
50 uint32_t length;
51 uint8_t *data;
52};
53
54struct i2c_reply_transaction_data {
55 uint32_t length;
56 uint8_t *data;
57};
58
59struct i2c_engine;
60
61struct i2c_engine_funcs {
62 void (*destroy)(
63 struct i2c_engine **ptr);
64 uint32_t (*get_speed)(
65 const struct i2c_engine *engine);
66 void (*set_speed)(
67 struct i2c_engine *engine,
68 uint32_t speed);
69 bool (*acquire_engine)(
70 struct i2c_engine *engine,
71 struct ddc *ddc);
72 bool (*setup_engine)(
73 struct i2c_engine *engine);
74 void (*submit_channel_request)(
75 struct i2c_engine *engine,
76 struct i2c_request_transaction_data *request);
77 void (*process_channel_reply)(
78 struct i2c_engine *engine,
79 struct i2c_reply_transaction_data *reply);
80 enum i2c_channel_operation_result (*get_channel_status)(
81 struct i2c_engine *engine,
82 uint8_t *returned_bytes);
83};
84
85struct i2c_engine {
86 struct engine base;
87 const struct i2c_engine_funcs *funcs;
88 uint32_t timeout_delay;
89};
90
91bool dal_i2c_engine_construct(
92 struct i2c_engine *engine,
93 struct dc_context *ctx);
94
95void dal_i2c_engine_destruct(
96 struct i2c_engine *engine);
97
98bool dal_i2c_engine_setup_i2c_engine(
99 struct i2c_engine *engine);
100
101void dal_i2c_engine_submit_channel_request(
102 struct i2c_engine *engine,
103 struct i2c_request_transaction_data *request);
104
105void dal_i2c_engine_process_channel_reply(
106 struct i2c_engine *engine,
107 struct i2c_reply_transaction_data *reply);
108
109bool dal_i2c_engine_acquire(
110 struct engine *ptr,
111 struct ddc *ddc_handle);
112
113#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
new file mode 100644
index 000000000000..521c4ec98632
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
@@ -0,0 +1,286 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33#include "i2c_engine.h"
34#include "i2c_hw_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "i2c_generic_hw_engine.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46/*
47 * This unit
48 */
49
50/*
51 * @brief
52 * Cast 'struct i2c_hw_engine *'
53 * to 'struct i2c_generic_hw_engine *'
54 */
55#define FROM_I2C_HW_ENGINE(ptr) \
56 container_of((ptr), struct i2c_generic_hw_engine, base)
57
58/*
59 * @brief
60 * Cast 'struct i2c_engine *'
61 * to 'struct i2c_generic_hw_engine *'
62 */
63#define FROM_I2C_ENGINE(ptr) \
64 FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
65
66/*
67 * @brief
68 * Cast 'struct engine *'
69 * to 'struct i2c_generic_hw_engine *'
70 */
71#define FROM_ENGINE(ptr) \
72 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
73
74enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
75 const struct engine *engine)
76{
77 return I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW;
78}
79
80/*
81 * @brief
82 * Single transaction handling.
83 * Since a transaction may be bigger than the HW buffer size,
84 * it divides the transaction into sub-transactions
85 * and uses the batch transaction feature of the engine.
86 */
87bool dal_i2c_generic_hw_engine_submit_request(
88 struct engine *engine,
89 struct i2caux_transaction_request *i2caux_request,
90 bool middle_of_transaction)
91{
92 struct i2c_generic_hw_engine *hw_engine = FROM_ENGINE(engine);
93
94 struct i2c_hw_engine *base = &hw_engine->base;
95
96 uint32_t max_payload_size =
97 base->funcs->get_hw_buffer_available_size(base);
98
99 bool initial_stop_bit = !middle_of_transaction;
100
101 struct i2c_generic_transaction_attributes attributes;
102
103 enum i2c_channel_operation_result operation_result =
104 I2C_CHANNEL_OPERATION_FAILED;
105
106 bool result = false;
107
108 /* setup transaction initial properties */
109
110 uint8_t address = i2caux_request->payload.address;
111 uint8_t *current_payload = i2caux_request->payload.data;
112 uint32_t remaining_payload_size = i2caux_request->payload.length;
113
114 bool first_iteration = true;
115
116 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
117 attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
118 else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
119 attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
120 else {
121 i2caux_request->status =
122 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
123 return false;
124 }
125
126 /* Do batch transaction.
127 * Divide read/write data into payloads which fit HW buffer size.
128 * 1. Single transaction:
129 * start_bit = 1, stop_bit depends on session state, ack_on_read = 0;
130 * 2. Start of batch transaction:
131 * start_bit = 1, stop_bit = 0, ack_on_read = 1;
132 * 3. Middle of batch transaction:
133 * start_bit = 0, stop_bit = 0, ack_on_read = 1;
134 * 4. End of batch transaction:
135 * start_bit = 0, stop_bit depends on session state, ack_on_read = 0.
136 * Session stop bit is set if 'middle_of_transaction' = 0. */
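	/* Illustrative example (the 16-byte buffer size is hypothetical):
	 * a 20-byte write through a 16-byte HW buffer becomes
	 *   sub-transaction 1: start_bit = 1, address byte + 15 data bytes,
	 *   sub-transaction 2: start_bit = 0, the remaining 5 data bytes,
	 * with the final stop bit controlled by 'middle_of_transaction'. */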
137
138 while (remaining_payload_size) {
139 uint32_t current_transaction_size;
140 uint32_t current_payload_size;
141
142 bool last_iteration;
143 bool stop_bit;
144
145 /* Calculate current transaction size and payload size.
146 * Transaction size = total number of bytes in transaction,
147 * including slave's address;
148 * Payload size = number of data bytes in transaction. */
149
150 if (first_iteration) {
151			/* In the first sub-transaction we send the slave's address,
152			 * so we need to reserve one byte for it */
153 current_transaction_size =
154 (remaining_payload_size > max_payload_size - 1) ?
155 max_payload_size :
156 remaining_payload_size + 1;
157
158 current_payload_size = current_transaction_size - 1;
159 } else {
160 /* Second and further sub-transactions will have
161			 * the entire buffer reserved for data */
162 current_transaction_size =
163 (remaining_payload_size > max_payload_size) ?
164 max_payload_size :
165 remaining_payload_size;
166
167 current_payload_size = current_transaction_size;
168 }
169
170 last_iteration =
171 (remaining_payload_size == current_payload_size);
172
173 stop_bit = last_iteration ? initial_stop_bit : false;
174
175 /* write slave device address */
176
177 if (first_iteration)
178 hw_engine->funcs->write_address(hw_engine, address);
179
180 /* write current portion of data, if requested */
181
182 if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
183 hw_engine->funcs->write_data(
184 hw_engine,
185 current_payload,
186 current_payload_size);
187
188 /* execute transaction */
189
190 attributes.start_bit = first_iteration;
191 attributes.stop_bit = stop_bit;
192 attributes.last_read = last_iteration;
193 attributes.transaction_size = current_transaction_size;
194
195 hw_engine->funcs->execute_transaction(hw_engine, &attributes);
196
197 /* wait until transaction is processed; if it fails - quit */
198
199 operation_result = base->funcs->wait_on_operation_result(
200 base,
201 base->funcs->get_transaction_timeout(
202 base, current_transaction_size),
203 I2C_CHANNEL_OPERATION_ENGINE_BUSY);
204
205 if (operation_result != I2C_CHANNEL_OPERATION_SUCCEEDED)
206 break;
207
208 /* read current portion of data, if requested */
209
210		/* the read offset should be 1 for the first sub-transaction,
211		 * and 0 for any subsequent one */
212
213 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
214 hw_engine->funcs->read_data(hw_engine, current_payload,
215 current_payload_size, first_iteration ? 1 : 0);
216
217 /* update loop variables */
218
219 first_iteration = false;
220 current_payload += current_payload_size;
221 remaining_payload_size -= current_payload_size;
222 }
223
224 /* update transaction status */
225
226 switch (operation_result) {
227 case I2C_CHANNEL_OPERATION_SUCCEEDED:
228 i2caux_request->status =
229 I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
230 result = true;
231 break;
232 case I2C_CHANNEL_OPERATION_NO_RESPONSE:
233 i2caux_request->status =
234 I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
235 break;
236 case I2C_CHANNEL_OPERATION_TIMEOUT:
237 i2caux_request->status =
238 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
239 break;
240 case I2C_CHANNEL_OPERATION_FAILED:
241 i2caux_request->status =
242 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
243 break;
244 default:
245 i2caux_request->status =
246 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
247 }
248
249 return result;
250}
251
252/*
253 * @brief
254 * Returns the number of microseconds to wait before declaring a timeout
255 */
256uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
257 const struct i2c_hw_engine *engine,
258 uint32_t length)
259{
260 const struct i2c_engine *base = &engine->base;
261
262 uint32_t speed = base->funcs->get_speed(base);
263
264 if (!speed)
265 return 0;
266
267 /* total timeout = period_timeout * (start + data bits count + stop) */
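	/* Worked example (values are illustrative): at the default 50 kHz and
	 * length == 1, (1000 * 32) / 50 = 640, times (1 + 8 + 1) bit slots
	 * gives a 6400 us timeout. */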
268
269 return ((1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed) *
270 (1 + (length << 3) + 1);
271}
272
273bool dal_i2c_generic_hw_engine_construct(
274 struct i2c_generic_hw_engine *engine,
275 struct dc_context *ctx)
276{
277 if (!dal_i2c_hw_engine_construct(&engine->base, ctx))
278 return false;
279 return true;
280}
281
282void dal_i2c_generic_hw_engine_destruct(
283 struct i2c_generic_hw_engine *engine)
284{
285 dal_i2c_hw_engine_destruct(&engine->base);
286}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
new file mode 100644
index 000000000000..083bb0dee9a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_GENERIC_HW_ENGINE_H__
27#define __DAL_I2C_GENERIC_HW_ENGINE_H__
28
29struct i2c_generic_transaction_attributes {
30 enum i2caux_transaction_action action;
31 uint32_t transaction_size;
32 bool start_bit;
33 bool stop_bit;
34 bool last_read;
35};
36
37struct i2c_generic_hw_engine;
38
39struct i2c_generic_hw_engine_funcs {
40 void (*write_address)(
41 struct i2c_generic_hw_engine *engine,
42 uint8_t address);
43 void (*write_data)(
44 struct i2c_generic_hw_engine *engine,
45 const uint8_t *buffer,
46 uint32_t length);
47 void (*read_data)(
48 struct i2c_generic_hw_engine *engine,
49 uint8_t *buffer,
50 uint32_t length,
51 uint32_t offset);
52 void (*execute_transaction)(
53 struct i2c_generic_hw_engine *engine,
54 struct i2c_generic_transaction_attributes *attributes);
55};
56
57struct i2c_generic_hw_engine {
58 struct i2c_hw_engine base;
59 const struct i2c_generic_hw_engine_funcs *funcs;
60};
61
62bool dal_i2c_generic_hw_engine_construct(
63 struct i2c_generic_hw_engine *engine,
64 struct dc_context *ctx);
65
66void dal_i2c_generic_hw_engine_destruct(
67 struct i2c_generic_hw_engine *engine);
68enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
69 const struct engine *engine);
70bool dal_i2c_generic_hw_engine_submit_request(
71 struct engine *ptr,
72 struct i2caux_transaction_request *i2caux_request,
73 bool middle_of_transaction);
74uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
75 const struct i2c_hw_engine *engine,
76 uint32_t length);
77#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
new file mode 100644
index 000000000000..00a8f07a74e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33#include "i2c_engine.h"
34
35/*
36 * Header of this unit
37 */
38
39#include "i2c_hw_engine.h"
40
41/*
42 * Post-requisites: headers required by this unit
43 */
44
45/*
46 * This unit
47 */
48
49/*
50 * @brief
51 * Cast 'struct i2c_engine *'
52 * to 'struct i2c_hw_engine *'
53 */
54#define FROM_I2C_ENGINE(ptr) \
55 container_of((ptr), struct i2c_hw_engine, base)
56
57/*
58 * @brief
59 * Cast 'struct engine *'
60 * to 'struct i2c_hw_engine *'
61 */
62#define FROM_ENGINE(ptr) \
63 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
64
65enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
66 const struct engine *engine)
67{
68 return I2CAUX_ENGINE_TYPE_I2C_DDC_HW;
69}
70
71bool dal_i2c_hw_engine_submit_request(
72 struct engine *engine,
73 struct i2caux_transaction_request *i2caux_request,
74 bool middle_of_transaction)
75{
76 struct i2c_hw_engine *hw_engine = FROM_ENGINE(engine);
77
78 struct i2c_request_transaction_data request;
79
80 uint32_t transaction_timeout;
81
82 enum i2c_channel_operation_result operation_result;
83
84 bool result = false;
85
86	/* The transaction length must not exceed the number of free
87	 * bytes in the HW buffer (one byte is reserved for the
88	 * slave address) */
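	/* For example, if the HW buffer reports 16 free bytes (an
	 * illustrative figure), at most 15 payload bytes fit; the last
	 * byte is reserved for the slave address. */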
89
90 if (i2caux_request->payload.length >=
91 hw_engine->funcs->get_hw_buffer_available_size(hw_engine)) {
92 i2caux_request->status =
93 I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW;
94 return false;
95 }
96
97 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
98 request.action = middle_of_transaction ?
99 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
100 I2CAUX_TRANSACTION_ACTION_I2C_READ;
101 else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
102 request.action = middle_of_transaction ?
103 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
104 I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
105 else {
106 i2caux_request->status =
107 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
108 /* [anaumov] in DAL2, there was no "return false" */
109 return false;
110 }
111
112 request.address = (uint8_t)i2caux_request->payload.address;
113 request.length = i2caux_request->payload.length;
114 request.data = i2caux_request->payload.data;
115
116 /* obtain timeout value before submitting request */
117
118 transaction_timeout = hw_engine->funcs->get_transaction_timeout(
119 hw_engine, i2caux_request->payload.length + 1);
120
121 hw_engine->base.funcs->submit_channel_request(
122 &hw_engine->base, &request);
123
124 if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
125 (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
126 i2caux_request->status =
127 I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
128 return false;
129 }
130
131	/* wait until the transaction is processed */
132
133 operation_result = hw_engine->funcs->wait_on_operation_result(
134 hw_engine,
135 transaction_timeout,
136 I2C_CHANNEL_OPERATION_ENGINE_BUSY);
137
138 /* update transaction status */
139
140 switch (operation_result) {
141 case I2C_CHANNEL_OPERATION_SUCCEEDED:
142 i2caux_request->status =
143 I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
144 result = true;
145 break;
146 case I2C_CHANNEL_OPERATION_NO_RESPONSE:
147 i2caux_request->status =
148 I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
149 break;
150 case I2C_CHANNEL_OPERATION_TIMEOUT:
151 i2caux_request->status =
152 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
153 break;
154 case I2C_CHANNEL_OPERATION_FAILED:
155 i2caux_request->status =
156 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
157 break;
158 default:
159 i2caux_request->status =
160 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
161 }
162
163 if (result && (i2caux_request->operation == I2CAUX_TRANSACTION_READ)) {
164 struct i2c_reply_transaction_data reply;
165
166 reply.data = i2caux_request->payload.data;
167 reply.length = i2caux_request->payload.length;
168
169 hw_engine->base.funcs->
170 process_channel_reply(&hw_engine->base, &reply);
171 }
172
173 return result;
174}
175
176bool dal_i2c_hw_engine_acquire_engine(
177 struct i2c_engine *engine,
178 struct ddc *ddc)
179{
180 enum gpio_result result;
181 uint32_t current_speed;
182
183 result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
184 GPIO_DDC_CONFIG_TYPE_MODE_I2C);
185
186 if (result != GPIO_RESULT_OK)
187 return false;
188
189 engine->base.ddc = ddc;
190
191 current_speed = engine->funcs->get_speed(engine);
192
193 if (current_speed)
194 FROM_I2C_ENGINE(engine)->original_speed = current_speed;
195
196 return true;
197}
198/*
199 * @brief
200 * Polls the engine status in a loop while it still matches
201 * 'expected_result' (typically ENGINE_BUSY); returns the first status
202 * that differs, or the last status read once the timeout expires.
203 * Timeout is in microseconds; the status is polled about once per microsecond.
204 */
205enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
206 struct i2c_hw_engine *engine,
207 uint32_t timeout,
208 enum i2c_channel_operation_result expected_result)
209{
210 enum i2c_channel_operation_result result;
211 uint32_t i = 0;
212
213 if (!timeout)
214 return I2C_CHANNEL_OPERATION_SUCCEEDED;
215
216 do {
217 result = engine->base.funcs->get_channel_status(
218 &engine->base, NULL);
219
220 if (result != expected_result)
221 break;
222
223 udelay(1);
224
225 ++i;
226 } while (i < timeout);
227
228 return result;
229}
230
231bool dal_i2c_hw_engine_construct(
232 struct i2c_hw_engine *engine,
233 struct dc_context *ctx)
234{
235 if (!dal_i2c_engine_construct(&engine->base, ctx))
236 return false;
237 engine->original_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
238 engine->default_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
239 return true;
240}
241
242void dal_i2c_hw_engine_destruct(
243 struct i2c_hw_engine *engine)
244{
245 dal_i2c_engine_destruct(&engine->base);
246}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
new file mode 100644
index 000000000000..f2df1749820e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
@@ -0,0 +1,80 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_HW_ENGINE_H__
27#define __DAL_I2C_HW_ENGINE_H__
28
29enum {
30 TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32
31};
32
33struct i2c_hw_engine;
34
35struct i2c_hw_engine_funcs {
36 uint32_t (*get_hw_buffer_available_size)(
37 const struct i2c_hw_engine *engine);
38 enum i2c_channel_operation_result (*wait_on_operation_result)(
39 struct i2c_hw_engine *engine,
40 uint32_t timeout,
41 enum i2c_channel_operation_result expected_result);
42 uint32_t (*get_transaction_timeout)(
43 const struct i2c_hw_engine *engine,
44 uint32_t length);
45};
46
47struct i2c_hw_engine {
48 struct i2c_engine base;
49 const struct i2c_hw_engine_funcs *funcs;
50
51 /* Values below are in kilohertz */
52 uint32_t original_speed;
53 uint32_t default_speed;
54};
55
56bool dal_i2c_hw_engine_construct(
57 struct i2c_hw_engine *engine,
58 struct dc_context *ctx);
59
60void dal_i2c_hw_engine_destruct(
61 struct i2c_hw_engine *engine);
62
63enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
64 struct i2c_hw_engine *engine,
65 uint32_t timeout,
66 enum i2c_channel_operation_result expected_result);
67
68bool dal_i2c_hw_engine_acquire_engine(
69 struct i2c_engine *engine,
70 struct ddc *ddc);
71
72bool dal_i2c_hw_engine_submit_request(
73 struct engine *ptr,
74 struct i2caux_transaction_request *i2caux_request,
75 bool middle_of_transaction);
76
77enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
78 const struct engine *engine);
79
80#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
new file mode 100644
index 000000000000..95bc4457d44b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
@@ -0,0 +1,610 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33#include "i2c_engine.h"
34
35/*
36 * Header of this unit
37 */
38
39#include "i2c_sw_engine.h"
40
41/*
42 * Post-requisites: headers required by this unit
43 */
44
45/*
46 * This unit
47 */
48
49#define SCL false
50#define SDA true
51
52static inline bool read_bit_from_ddc(
53 struct ddc *ddc,
54 bool data_nor_clock)
55{
56 uint32_t value = 0;
57
58 if (data_nor_clock)
59 dal_gpio_get_value(ddc->pin_data, &value);
60 else
61 dal_gpio_get_value(ddc->pin_clock, &value);
62
63 return (value != 0);
64}
65
66static inline void write_bit_to_ddc(
67 struct ddc *ddc,
68 bool data_nor_clock,
69 bool bit)
70{
71 uint32_t value = bit ? 1 : 0;
72
73 if (data_nor_clock)
74 dal_gpio_set_value(ddc->pin_data, value);
75 else
76 dal_gpio_set_value(ddc->pin_clock, value);
77}
78
79static bool wait_for_scl_high(
80 struct dc_context *ctx,
81 struct ddc *ddc,
82 uint16_t clock_delay_div_4)
83{
84 uint32_t scl_retry = 0;
85 uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4;
86
87 udelay(clock_delay_div_4);
88
89 /* 3 milliseconds delay
90 * to wake up some displays from "low power" state.
91 */
92
93 do {
94 if (read_bit_from_ddc(ddc, SCL))
95 return true;
96
97 udelay(clock_delay_div_4);
98
99 ++scl_retry;
100 } while (scl_retry <= scl_retry_max);
101
102 return false;
103}
104
105static bool start_sync(
106 struct dc_context *ctx,
107 struct ddc *ddc_handle,
108 uint16_t clock_delay_div_4)
109{
110 uint32_t retry = 0;
111
112 /* The I2C communications start signal is:
113 * the SDA going low from high, while the SCL is high. */
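	/* The loop below first releases SDA and checks that it reads back
	 * high (i.e. no other device is holding the bus low) before driving
	 * the falling SDA edge that forms the START, then pulls SCL low to
	 * begin clocking. */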
114
115 write_bit_to_ddc(ddc_handle, SCL, true);
116
117 udelay(clock_delay_div_4);
118
119 do {
120 write_bit_to_ddc(ddc_handle, SDA, true);
121
122 if (!read_bit_from_ddc(ddc_handle, SDA)) {
123 ++retry;
124 continue;
125 }
126
127 udelay(clock_delay_div_4);
128
129 write_bit_to_ddc(ddc_handle, SCL, true);
130
131 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
132 break;
133
134 write_bit_to_ddc(ddc_handle, SDA, false);
135
136 udelay(clock_delay_div_4);
137
138 write_bit_to_ddc(ddc_handle, SCL, false);
139
140 udelay(clock_delay_div_4);
141
142 return true;
143 } while (retry <= I2C_SW_RETRIES);
144
145 return false;
146}
147
148static bool stop_sync(
149 struct dc_context *ctx,
150 struct ddc *ddc_handle,
151 uint16_t clock_delay_div_4)
152{
153 uint32_t retry = 0;
154
155 /* The I2C communications stop signal is:
156 * the SDA going high from low, while the SCL is high. */
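	/* Sequence below: drive SCL low, then SDA low, release SCL and wait
	 * for it to go high, then release SDA; the rising SDA edge while SCL
	 * is high is the STOP condition. The final loop verifies that SDA
	 * actually reads back high. */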
157
158 write_bit_to_ddc(ddc_handle, SCL, false);
159
160 udelay(clock_delay_div_4);
161
162 write_bit_to_ddc(ddc_handle, SDA, false);
163
164 udelay(clock_delay_div_4);
165
166 write_bit_to_ddc(ddc_handle, SCL, true);
167
168 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
169 return false;
170
171 write_bit_to_ddc(ddc_handle, SDA, true);
172
173 do {
174 udelay(clock_delay_div_4);
175
176 if (read_bit_from_ddc(ddc_handle, SDA))
177 return true;
178
179 ++retry;
180 } while (retry <= 2);
181
182 return false;
183}
184
185static bool write_byte(
186 struct dc_context *ctx,
187 struct ddc *ddc_handle,
188 uint16_t clock_delay_div_4,
189 uint8_t byte)
190{
191 int32_t shift = 7;
192 bool ack;
193
194 /* bits are transmitted serially, starting from MSB */
195
196 do {
197 udelay(clock_delay_div_4);
198
199 write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1);
200
201 udelay(clock_delay_div_4);
202
203 write_bit_to_ddc(ddc_handle, SCL, true);
204
205 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
206 return false;
207
208 write_bit_to_ddc(ddc_handle, SCL, false);
209
210 --shift;
211 } while (shift >= 0);
212
213 /* The display sends ACK by preventing the SDA from going high
214 * after the SCL pulse we use to send our last data bit.
215 * If the SDA goes high after that bit, it's a NACK */
216
217 udelay(clock_delay_div_4);
218
219 write_bit_to_ddc(ddc_handle, SDA, true);
220
221 udelay(clock_delay_div_4);
222
223 write_bit_to_ddc(ddc_handle, SCL, true);
224
225 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
226 return false;
227
228 /* read ACK bit */
229
230 ack = !read_bit_from_ddc(ddc_handle, SDA);
231
232 udelay(clock_delay_div_4 << 1);
233
234 write_bit_to_ddc(ddc_handle, SCL, false);
235
236 udelay(clock_delay_div_4 << 1);
237
238 return ack;
239}
240
241static bool read_byte(
242 struct dc_context *ctx,
243 struct ddc *ddc_handle,
244 uint16_t clock_delay_div_4,
245 uint8_t *byte,
246 bool more)
247{
248 int32_t shift = 7;
249
250 uint8_t data = 0;
251
252 /* The data bits are read from MSB to LSB;
253 * bit is read while SCL is high */
254
255 do {
256 write_bit_to_ddc(ddc_handle, SCL, true);
257
258 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
259 return false;
260
261 if (read_bit_from_ddc(ddc_handle, SDA))
262 data |= (1 << shift);
263
264 write_bit_to_ddc(ddc_handle, SCL, false);
265
266 udelay(clock_delay_div_4 << 1);
267
268 --shift;
269 } while (shift >= 0);
270
271	/* only whole bytes are read */
272
273 *byte = data;
274
275 udelay(clock_delay_div_4);
276
277 /* send the acknowledge bit:
278 * SDA low means ACK, SDA high means NACK */
279
280 write_bit_to_ddc(ddc_handle, SDA, !more);
281
282 udelay(clock_delay_div_4);
283
284 write_bit_to_ddc(ddc_handle, SCL, true);
285
286 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
287 return false;
288
289 write_bit_to_ddc(ddc_handle, SCL, false);
290
291 udelay(clock_delay_div_4);
292
293 write_bit_to_ddc(ddc_handle, SDA, true);
294
295 udelay(clock_delay_div_4);
296
297 return true;
298}
299
300static bool i2c_write(
301 struct dc_context *ctx,
302 struct ddc *ddc_handle,
303 uint16_t clock_delay_div_4,
304 uint8_t address,
305 uint32_t length,
306 const uint8_t *data)
307{
308 uint32_t i = 0;
309
310 if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
311 return false;
312
313 while (i < length) {
314 if (!write_byte(ctx, ddc_handle, clock_delay_div_4, data[i]))
315 return false;
316 ++i;
317 }
318
319 return true;
320}
321
322static bool i2c_read(
323 struct dc_context *ctx,
324 struct ddc *ddc_handle,
325 uint16_t clock_delay_div_4,
326 uint8_t address,
327 uint32_t length,
328 uint8_t *data)
329{
330 uint32_t i = 0;
331
332 if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
333 return false;
334
335 while (i < length) {
336 if (!read_byte(ctx, ddc_handle, clock_delay_div_4, data + i,
337 i < length - 1))
338 return false;
339 ++i;
340 }
341
342 return true;
343}
344
345/*
346 * @brief
347 * Cast 'struct i2c_engine *'
348 * to 'struct i2c_sw_engine *'
349 */
350#define FROM_I2C_ENGINE(ptr) \
351 container_of((ptr), struct i2c_sw_engine, base)
352
353/*
354 * @brief
355 * Cast 'struct engine *'
356 * to 'struct i2c_sw_engine *'
357 */
358#define FROM_ENGINE(ptr) \
359 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
360
361enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
362 const struct engine *engine)
363{
364 return I2CAUX_ENGINE_TYPE_I2C_SW;
365}
366
367bool dal_i2c_sw_engine_submit_request(
368 struct engine *engine,
369 struct i2caux_transaction_request *i2caux_request,
370 bool middle_of_transaction)
371{
372 struct i2c_sw_engine *sw_engine = FROM_ENGINE(engine);
373
374 struct i2c_engine *base = &sw_engine->base;
375
376 struct i2c_request_transaction_data request;
377 bool operation_succeeded = false;
378
379 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
380 request.action = middle_of_transaction ?
381 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
382 I2CAUX_TRANSACTION_ACTION_I2C_READ;
383 else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
384 request.action = middle_of_transaction ?
385 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
386 I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
387 else {
388 i2caux_request->status =
389 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
390 /* in DAL2, there was no "return false" */
391 return false;
392 }
393
394 request.address = (uint8_t)i2caux_request->payload.address;
395 request.length = i2caux_request->payload.length;
396 request.data = i2caux_request->payload.data;
397
398 base->funcs->submit_channel_request(base, &request);
399
400 if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) ||
401 (request.status == I2C_CHANNEL_OPERATION_FAILED))
402 i2caux_request->status =
403 I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
404 else {
405 enum i2c_channel_operation_result operation_result;
406
407 do {
408 operation_result =
409 base->funcs->get_channel_status(base, NULL);
410
411 switch (operation_result) {
412 case I2C_CHANNEL_OPERATION_SUCCEEDED:
413 i2caux_request->status =
414 I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
415 operation_succeeded = true;
416 break;
417 case I2C_CHANNEL_OPERATION_NO_RESPONSE:
418 i2caux_request->status =
419 I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
420 break;
421 case I2C_CHANNEL_OPERATION_TIMEOUT:
422 i2caux_request->status =
423 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
424 break;
425 case I2C_CHANNEL_OPERATION_FAILED:
426 i2caux_request->status =
427 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
428 break;
429 default:
430 i2caux_request->status =
431 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
432 break;
433 }
434 } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY);
435 }
436
437 return operation_succeeded;
438}
439
440uint32_t dal_i2c_sw_engine_get_speed(
441 const struct i2c_engine *engine)
442{
443 return FROM_I2C_ENGINE(engine)->speed;
444}
445
446void dal_i2c_sw_engine_set_speed(
447 struct i2c_engine *engine,
448 uint32_t speed)
449{
450 struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
451
452 ASSERT(speed);
453
454 sw_engine->speed = speed ? speed : I2CAUX_DEFAULT_I2C_SW_SPEED;
455
456 sw_engine->clock_delay = 1000 / sw_engine->speed;
457
458 if (sw_engine->clock_delay < 12)
459 sw_engine->clock_delay = 12;
460}
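
As set up above, the speed is in kHz, so clock_delay works out to an approximate bit period in microseconds and the bit-banging helpers run on a quarter of it; the 12 us floor caps the software engine at roughly 83 kHz. An illustrative stand-alone sketch of the same arithmetic for the 50 kHz default:

/* Illustrative only: reproduces the delay math above for the
 * default 50 kHz software engine speed.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t speed_khz = 50;                 /* default SW speed (kHz) */
	uint32_t clock_delay = 1000 / speed_khz; /* ~20 us full period */

	if (clock_delay < 12)                    /* floor applied above */
		clock_delay = 12;

	printf("period ~%u us, quarter period ~%u us\n",
	       clock_delay, clock_delay >> 2);   /* 20 us and 5 us */
	return 0;
}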
461
462bool dal_i2caux_i2c_sw_engine_acquire_engine(
463 struct i2c_engine *engine,
464 struct ddc *ddc)
465{
466 enum gpio_result result;
467
468 result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT,
469 GPIO_DDC_CONFIG_TYPE_MODE_I2C);
470
471 if (result != GPIO_RESULT_OK)
472 return false;
473
474 engine->base.ddc = ddc;
475
476 return true;
477}
478
479void dal_i2c_sw_engine_submit_channel_request(
480 struct i2c_engine *engine,
481 struct i2c_request_transaction_data *req)
482{
483 struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
484
485 struct ddc *ddc = engine->base.ddc;
486 uint16_t clock_delay_div_4 = sw_engine->clock_delay >> 2;
487
488 /* send sync (start / repeated start) */
489
490 bool result = start_sync(engine->base.ctx, ddc, clock_delay_div_4);
491
492 /* process payload */
493
494 if (result) {
495 switch (req->action) {
496 case I2CAUX_TRANSACTION_ACTION_I2C_WRITE:
497 case I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT:
498 result = i2c_write(engine->base.ctx, ddc, clock_delay_div_4,
499 req->address, req->length, req->data);
500 break;
501 case I2CAUX_TRANSACTION_ACTION_I2C_READ:
502 case I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT:
503 result = i2c_read(engine->base.ctx, ddc, clock_delay_div_4,
504 req->address, req->length, req->data);
505 break;
506 default:
507 result = false;
508 break;
509 }
510 }
511
512 /* send stop if not 'mot' or operation failed */
513
514 if (!result ||
515 (req->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
516 (req->action == I2CAUX_TRANSACTION_ACTION_I2C_READ))
517 if (!stop_sync(engine->base.ctx, ddc, clock_delay_div_4))
518 result = false;
519
520 req->status = result ?
521 I2C_CHANNEL_OPERATION_SUCCEEDED :
522 I2C_CHANNEL_OPERATION_FAILED;
523}
524
525enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
526 struct i2c_engine *engine,
527 uint8_t *returned_bytes)
528{
529 /* No arbitration with VBIOS is performed since DCE 6.0 */
530 return I2C_CHANNEL_OPERATION_SUCCEEDED;
531}
532
533void dal_i2c_sw_engine_destruct(
534 struct i2c_sw_engine *engine)
535{
536 dal_i2c_engine_destruct(&engine->base);
537}
538
539static void destroy(
540 struct i2c_engine **ptr)
541{
542 dal_i2c_sw_engine_destruct(FROM_I2C_ENGINE(*ptr));
543
544 dm_free(*ptr);
545 *ptr = NULL;
546}
547
548static const struct i2c_engine_funcs i2c_engine_funcs = {
549 .acquire_engine = dal_i2caux_i2c_sw_engine_acquire_engine,
550 .destroy = destroy,
551 .get_speed = dal_i2c_sw_engine_get_speed,
552 .set_speed = dal_i2c_sw_engine_set_speed,
553 .setup_engine = dal_i2c_engine_setup_i2c_engine,
554 .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
555 .process_channel_reply = dal_i2c_engine_process_channel_reply,
556 .get_channel_status = dal_i2c_sw_engine_get_channel_status,
557};
558
559static void release_engine(
560 struct engine *engine)
561{
562
563}
564
565static const struct engine_funcs engine_funcs = {
566 .release_engine = release_engine,
567 .get_engine_type = dal_i2c_sw_engine_get_engine_type,
568 .acquire = dal_i2c_engine_acquire,
569 .submit_request = dal_i2c_sw_engine_submit_request,
570};
571
572bool dal_i2c_sw_engine_construct(
573 struct i2c_sw_engine *engine,
574 const struct i2c_sw_engine_create_arg *arg)
575{
576 if (!dal_i2c_engine_construct(&engine->base, arg->ctx))
577 return false;
578
579 dal_i2c_sw_engine_set_speed(&engine->base, arg->default_speed);
580 engine->base.funcs = &i2c_engine_funcs;
581 engine->base.base.funcs = &engine_funcs;
582 return true;
583}
584
585struct i2c_engine *dal_i2c_sw_engine_create(
586 const struct i2c_sw_engine_create_arg *arg)
587{
588 struct i2c_sw_engine *engine;
589
590 if (!arg) {
591 BREAK_TO_DEBUGGER();
592 return NULL;
593 }
594
595 engine = dm_alloc(sizeof(struct i2c_sw_engine));
596
597 if (!engine) {
598 BREAK_TO_DEBUGGER();
599 return NULL;
600 }
601
602 if (dal_i2c_sw_engine_construct(engine, arg))
603 return &engine->base;
604
605 BREAK_TO_DEBUGGER();
606
607 dm_free(engine);
608
609 return NULL;
610}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
new file mode 100644
index 000000000000..e0cb4c3d483d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_SW_ENGINE_H__
27#define __DAL_I2C_SW_ENGINE_H__
28
29enum {
30 I2C_SW_RETRIES = 10,
31 I2C_SW_SCL_READ_RETRIES = 128,
32 /* following value is in microseconds */
33 I2C_SW_TIMEOUT_DELAY = 3000
34};
35
36struct i2c_sw_engine;
37
38struct i2c_sw_engine {
39 struct i2c_engine base;
40 uint32_t clock_delay;
41 /* Values below are in KHz */
42 uint32_t speed;
43 uint32_t default_speed;
44};
45
46struct i2c_sw_engine_create_arg {
47 uint32_t default_speed;
48 struct dc_context *ctx;
49};
50
51bool dal_i2c_sw_engine_construct(
52 struct i2c_sw_engine *engine,
53 const struct i2c_sw_engine_create_arg *arg);
54
55bool dal_i2caux_i2c_sw_engine_acquire_engine(
56 struct i2c_engine *engine,
57 struct ddc *ddc_handle);
58
59void dal_i2c_sw_engine_destruct(
60 struct i2c_sw_engine *engine);
61
62struct i2c_engine *dal_i2c_sw_engine_create(
63 const struct i2c_sw_engine_create_arg *arg);
64enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
65 const struct engine *engine);
66bool dal_i2c_sw_engine_submit_request(
67 struct engine *ptr,
68 struct i2caux_transaction_request *i2caux_request,
69 bool middle_of_transaction);
70uint32_t dal_i2c_sw_engine_get_speed(
71 const struct i2c_engine *engine);
72void dal_i2c_sw_engine_set_speed(
73 struct i2c_engine *ptr,
74 uint32_t speed);
75void dal_i2c_sw_engine_submit_channel_request(
76 struct i2c_engine *ptr,
77 struct i2c_request_transaction_data *req);
78enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
79 struct i2c_engine *engine,
80 uint8_t *returned_bytes);
81#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
new file mode 100644
index 000000000000..5391655af23a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -0,0 +1,459 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "dc_bios_types.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "i2caux.h"
39
40/*
41 * Post-requisites: headers required by this unit
42 */
43
44#include "engine.h"
45#include "i2c_engine.h"
46#include "aux_engine.h"
47
48/*
49 * This unit
50 */
51
52#include "dce80/i2caux_dce80.h"
53
54#include "dce100/i2caux_dce100.h"
55
56#include "dce110/i2caux_dce110.h"
57
58#include "dce112/i2caux_dce112.h"
59
60#include "diagnostics/i2caux_diag.h"
61
62/*
63 * @brief
64 * Plain API, available publicly
65 */
66
67struct i2caux *dal_i2caux_create(
68 struct dc_context *ctx)
69{
70 if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
71 return dal_i2caux_diag_fpga_create(ctx);
72 }
73
74 switch (ctx->dce_version) {
75 case DCE_VERSION_8_0:
76 return dal_i2caux_dce80_create(ctx);
77 case DCE_VERSION_11_2:
78 return dal_i2caux_dce112_create(ctx);
79 case DCE_VERSION_11_0:
80 return dal_i2caux_dce110_create(ctx);
81 case DCE_VERSION_10_0:
82 return dal_i2caux_dce100_create(ctx);
83 default:
84 BREAK_TO_DEBUGGER();
85 return NULL;
86 }
87}
88
89bool dal_i2caux_submit_i2c_command(
90 struct i2caux *i2caux,
91 struct ddc *ddc,
92 struct i2c_command *cmd)
93{
94 struct i2c_engine *engine;
95 uint8_t index_of_payload = 0;
96 bool result;
97
98 if (!ddc) {
99 BREAK_TO_DEBUGGER();
100 return false;
101 }
102
103 if (!cmd) {
104 BREAK_TO_DEBUGGER();
105 return false;
106 }
107
108	/*
109	 * The default is the SW engine; however, a feature flag in the
110	 * adapter service determines whether a SW i2c_engine is available.
111	 * If SW i2c is not available we fall back to HW. Currently this
112	 * feature flag disables SW i2c engine creation for every DCE
113	 * except DCE 8.0.
114	 */
115 switch (cmd->engine) {
116 case I2C_COMMAND_ENGINE_DEFAULT:
117 case I2C_COMMAND_ENGINE_SW:
118 /* try to acquire SW engine first,
119 * acquire HW engine if SW engine not available */
120 engine = i2caux->funcs->acquire_i2c_sw_engine(i2caux, ddc);
121
122 if (!engine)
123 engine = i2caux->funcs->acquire_i2c_hw_engine(
124 i2caux, ddc);
125 break;
126 case I2C_COMMAND_ENGINE_HW:
127 default:
128 /* try to acquire HW engine first,
129 * acquire SW engine if HW engine not available */
130 engine = i2caux->funcs->acquire_i2c_hw_engine(i2caux, ddc);
131
132 if (!engine)
133 engine = i2caux->funcs->acquire_i2c_sw_engine(
134 i2caux, ddc);
135 }
136
137 if (!engine)
138 return false;
139
140 engine->funcs->set_speed(engine, cmd->speed);
141
142 result = true;
143
144 while (index_of_payload < cmd->number_of_payloads) {
145 bool mot = (index_of_payload != cmd->number_of_payloads - 1);
146
147 struct i2c_payload *payload = cmd->payloads + index_of_payload;
148
149 struct i2caux_transaction_request request = { 0 };
150
151 request.operation = payload->write ?
152 I2CAUX_TRANSACTION_WRITE :
153 I2CAUX_TRANSACTION_READ;
154
155 request.payload.address_space =
156 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
157 request.payload.address = (payload->address << 1) |
158 !payload->write;
159 request.payload.length = payload->length;
160 request.payload.data = payload->data;
161
162 if (!engine->base.funcs->submit_request(
163 &engine->base, &request, mot)) {
164 result = false;
165 break;
166 }
167
168 ++index_of_payload;
169 }
170
171 i2caux->funcs->release_engine(i2caux, &engine->base);
172
173 return result;
174}
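
As a usage sketch only: a hypothetical caller could express an offset write followed by a read as two payloads of one command, so a stop is only generated after the final payload and both stay in a single bus transaction. The 0x50 address, offset and buffer below are made-up illustration values, and the initializers assume the i2c_command/i2c_payload fields consumed above:

/* Hypothetical usage sketch; field names follow struct i2c_command /
 * struct i2c_payload as used above, values are illustrative only. */
static bool example_read_two_bytes(struct i2caux *i2caux, struct ddc *ddc)
{
	uint8_t offset = 0x00;
	uint8_t buffer[2] = { 0 };

	struct i2c_payload payloads[2] = {
		{ .write = true,  .address = 0x50, .length = 1, .data = &offset },
		{ .write = false, .address = 0x50, .length = 2, .data = buffer },
	};

	struct i2c_command cmd = {
		.payloads = payloads,
		.number_of_payloads = 2,
		.engine = I2C_COMMAND_ENGINE_DEFAULT,
		.speed = 50, /* kHz */
	};

	/* The first payload is submitted with 'mot' set, so no stop is
	 * sent until the second (last) payload completes. */
	return dal_i2caux_submit_i2c_command(i2caux, ddc, &cmd);
}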
175
176bool dal_i2caux_submit_aux_command(
177 struct i2caux *i2caux,
178 struct ddc *ddc,
179 struct aux_command *cmd)
180{
181 struct aux_engine *engine;
182 uint8_t index_of_payload = 0;
183 bool result;
184
185 if (!ddc) {
186 BREAK_TO_DEBUGGER();
187 return false;
188 }
189
190 if (!cmd) {
191 BREAK_TO_DEBUGGER();
192 return false;
193 }
194
195 engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc);
196
197 if (!engine)
198 return false;
199
200 engine->delay = cmd->defer_delay;
201 engine->max_defer_write_retry = cmd->max_defer_write_retry;
202
203 result = true;
204
205 while (index_of_payload < cmd->number_of_payloads) {
206 bool mot = (index_of_payload != cmd->number_of_payloads - 1);
207
208 struct aux_payload *payload = cmd->payloads + index_of_payload;
209
210 struct i2caux_transaction_request request = { 0 };
211
212 request.operation = payload->write ?
213 I2CAUX_TRANSACTION_WRITE :
214 I2CAUX_TRANSACTION_READ;
215
216 if (payload->i2c_over_aux) {
217 request.payload.address_space =
218 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
219
220 request.payload.address = (payload->address << 1) |
221 !payload->write;
222 } else {
223 request.payload.address_space =
224 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD;
225
226 request.payload.address = payload->address;
227 }
228
229 request.payload.length = payload->length;
230 request.payload.data = payload->data;
231
232 if (!engine->base.funcs->submit_request(
233 &engine->base, &request, mot)) {
234 result = false;
235 break;
236 }
237
238 ++index_of_payload;
239 }
240
241 i2caux->funcs->release_engine(i2caux, &engine->base);
242
243 return result;
244}
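
Similarly, a hypothetical native-DPCD access through this path would clear i2c_over_aux so the DPCD address is passed through unshifted; the register and buffer below are illustrative only:

/* Hypothetical usage sketch; field names follow struct aux_command /
 * struct aux_payload as consumed above, values are illustrative. */
static bool example_read_dpcd(struct i2caux *i2caux, struct ddc *ddc)
{
	uint8_t sink_count = 0;

	struct aux_payload payload = {
		.i2c_over_aux = false,  /* native AUX: DPCD address space */
		.write = false,
		.address = 0x200,       /* e.g. SINK_COUNT */
		.length = 1,
		.data = &sink_count,
	};

	struct aux_command cmd = {
		.payloads = &payload,
		.number_of_payloads = 1,
		.defer_delay = 0,
		.max_defer_write_retry = 0,
	};

	return dal_i2caux_submit_aux_command(i2caux, ddc, &cmd);
}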
245
246static bool get_hw_supported_ddc_line(
247 struct ddc *ddc,
248 enum gpio_ddc_line *line)
249{
250 enum gpio_ddc_line line_found;
251
252 if (!ddc) {
253 BREAK_TO_DEBUGGER();
254 return false;
255 }
256
257 if (!ddc->hw_info.hw_supported)
258 return false;
259
260 line_found = dal_ddc_get_line(ddc);
261
262 if (line_found >= GPIO_DDC_LINE_COUNT)
263 return false;
264
265 *line = line_found;
266
267 return true;
268}
269
270void dal_i2caux_configure_aux(
271 struct i2caux *i2caux,
272 struct ddc *ddc,
273 union aux_config cfg)
274{
275 struct aux_engine *engine =
276 i2caux->funcs->acquire_aux_engine(i2caux, ddc);
277
278 if (!engine)
279 return;
280
281 engine->funcs->configure(engine, cfg);
282
283 i2caux->funcs->release_engine(i2caux, &engine->base);
284}
285
286void dal_i2caux_destroy(
287 struct i2caux **i2caux)
288{
289 if (!i2caux || !*i2caux) {
290 BREAK_TO_DEBUGGER();
291 return;
292 }
293
294 (*i2caux)->funcs->destroy(i2caux);
295
296 *i2caux = NULL;
297}
298
299/*
300 * @brief
301 * A utility function used by 'struct i2caux' and its descendants
302 */
303
304uint32_t dal_i2caux_get_reference_clock(
305 struct dc_bios *bios)
306{
307 struct firmware_info info = { { 0 } };
308
309 if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
310 return 0;
311
312 return info.pll_info.crystal_frequency;
313}
314
315/*
316 * @brief
317 * i2caux
318 */
319
320enum {
321 /* following are expressed in KHz */
322 DEFAULT_I2C_SW_SPEED = 50,
323 DEFAULT_I2C_HW_SPEED = 50,
324
325 /* This is the timeout as defined in DP 1.2a,
326 * 2.3.4 "Detailed uPacket TX AUX CH State Description". */
327 AUX_TIMEOUT_PERIOD = 400,
328
329 /* Ideally, the SW timeout should be just above 550usec
330 * which is programmed in HW.
331 * But the SW timeout of 600usec is not reliable,
332 * because on some systems, delay_in_microseconds()
333 * returns faster than it should.
334 * EPR #379763: by trial-and-error on different systems,
335 * 700usec is the minimum reliable SW timeout for polling
336 * the AUX_SW_STATUS.AUX_SW_DONE bit.
337	 * This timeout expires *only* under
338 * AUX Error or AUX Timeout conditions - not during normal operation.
339 * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
340 * at most within ~240usec. That means,
341 * increasing this timeout will not affect normal operation,
342 * and we'll timeout after
343 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
344 * This timeout is especially important for
345 * resume from S3 and CTS. */
346 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
347};
348
349struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
350 struct i2caux *i2caux,
351 struct ddc *ddc)
352{
353 enum gpio_ddc_line line;
354 struct i2c_engine *engine = NULL;
355
356 if (get_hw_supported_ddc_line(ddc, &line))
357 engine = i2caux->i2c_sw_engines[line];
358
359 if (!engine)
360 engine = i2caux->i2c_generic_sw_engine;
361
362 if (!engine)
363 return NULL;
364
365 if (!engine->base.funcs->acquire(&engine->base, ddc))
366 return NULL;
367
368 return engine;
369}
370
371struct aux_engine *dal_i2caux_acquire_aux_engine(
372 struct i2caux *i2caux,
373 struct ddc *ddc)
374{
375 enum gpio_ddc_line line;
376 struct aux_engine *engine;
377
378 if (!get_hw_supported_ddc_line(ddc, &line))
379 return NULL;
380
381 engine = i2caux->aux_engines[line];
382
383 if (!engine)
384 return NULL;
385
386 if (!engine->base.funcs->acquire(&engine->base, ddc))
387 return NULL;
388
389 return engine;
390}
391
392void dal_i2caux_release_engine(
393 struct i2caux *i2caux,
394 struct engine *engine)
395{
396 engine->funcs->release_engine(engine);
397
398 dal_ddc_close(engine->ddc);
399
400 engine->ddc = NULL;
401}
402
403bool dal_i2caux_construct(
404 struct i2caux *i2caux,
405 struct dc_context *ctx)
406{
407 uint32_t i = 0;
408
409 i2caux->ctx = ctx;
410 do {
411 i2caux->i2c_sw_engines[i] = NULL;
412 i2caux->i2c_hw_engines[i] = NULL;
413 i2caux->aux_engines[i] = NULL;
414
415 ++i;
416 } while (i < GPIO_DDC_LINE_COUNT);
417
418 i2caux->i2c_generic_sw_engine = NULL;
419 i2caux->i2c_generic_hw_engine = NULL;
420
421 i2caux->aux_timeout_period =
422 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD;
423
424 i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED;
425 i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED;
426
427 return true;
428}
429
430void dal_i2caux_destruct(
431 struct i2caux *i2caux)
432{
433 uint32_t i = 0;
434
435 if (i2caux->i2c_generic_hw_engine)
436 i2caux->i2c_generic_hw_engine->funcs->destroy(
437 &i2caux->i2c_generic_hw_engine);
438
439 if (i2caux->i2c_generic_sw_engine)
440 i2caux->i2c_generic_sw_engine->funcs->destroy(
441 &i2caux->i2c_generic_sw_engine);
442
443 do {
444 if (i2caux->aux_engines[i])
445 i2caux->aux_engines[i]->funcs->destroy(
446 &i2caux->aux_engines[i]);
447
448 if (i2caux->i2c_hw_engines[i])
449 i2caux->i2c_hw_engines[i]->funcs->destroy(
450 &i2caux->i2c_hw_engines[i]);
451
452 if (i2caux->i2c_sw_engines[i])
453 i2caux->i2c_sw_engines[i]->funcs->destroy(
454 &i2caux->i2c_sw_engines[i]);
455
456 ++i;
457 } while (i < GPIO_DDC_LINE_COUNT);
458}
459
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
new file mode 100644
index 000000000000..bc20de3da1c4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_H__
27#define __DAL_I2C_AUX_H__
28
29uint32_t dal_i2caux_get_reference_clock(
30 struct dc_bios *bios);
31
32struct i2caux;
33
34struct engine;
35
36struct i2caux_funcs {
37 void (*destroy)(struct i2caux **ptr);
38 struct i2c_engine * (*acquire_i2c_sw_engine)(
39 struct i2caux *i2caux,
40 struct ddc *ddc);
41 struct i2c_engine * (*acquire_i2c_hw_engine)(
42 struct i2caux *i2caux,
43 struct ddc *ddc);
44 struct aux_engine * (*acquire_aux_engine)(
45 struct i2caux *i2caux,
46 struct ddc *ddc);
47 void (*release_engine)(
48 struct i2caux *i2caux,
49 struct engine *engine);
50};
51
52struct i2c_engine;
53struct aux_engine;
54
55struct i2caux {
56 struct dc_context *ctx;
57 const struct i2caux_funcs *funcs;
58	/* On an ASIC we have a certain number of lines with a HW DDC engine
59	 * (4, 6, or maybe more in the future).
60	 * For every such line we create a separate HW DDC engine
61	 * (since these engines exist in HW) and a separate SW DDC engine
62	 * (to allow concurrent use of several lines).
63	 * AUX engines are handled in a similar way. */
64
65 /* I2C SW engines, per DDC line.
66 * Only lines with HW DDC support will be initialized */
67 struct i2c_engine *i2c_sw_engines[GPIO_DDC_LINE_COUNT];
68
69 /* I2C HW engines, per DDC line.
70 * Only lines with HW DDC support will be initialized */
71 struct i2c_engine *i2c_hw_engines[GPIO_DDC_LINE_COUNT];
72
73 /* AUX engines, per DDC line.
74 * Only lines with HW AUX support will be initialized */
75 struct aux_engine *aux_engines[GPIO_DDC_LINE_COUNT];
76
77	/* For all other lines, we can use
78	 * a single instance of the generic I2C HW engine
79	 * (since there is a single instance of it in HW)
80	 * or a single instance of the generic I2C SW engine.
81	 * AUX is not supported on these other lines. */
82
83 /* General-purpose I2C SW engine.
84 * Can be assigned dynamically to any line per transaction */
85 struct i2c_engine *i2c_generic_sw_engine;
86
87 /* General-purpose I2C generic HW engine.
88 * Can be assigned dynamically to almost any line per transaction */
89 struct i2c_engine *i2c_generic_hw_engine;
90
91 /* [anaumov] in DAL2, there is a Mutex */
92
93 uint32_t aux_timeout_period;
94
95 /* expressed in KHz */
96 uint32_t default_i2c_sw_speed;
97 uint32_t default_i2c_hw_speed;
98};
99
100bool dal_i2caux_construct(
101 struct i2caux *i2caux,
102 struct dc_context *ctx);
103
104void dal_i2caux_release_engine(
105 struct i2caux *i2caux,
106 struct engine *engine);
107
108void dal_i2caux_destruct(
109 struct i2caux *i2caux);
110
111void dal_i2caux_destroy(
112 struct i2caux **ptr);
113
114struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
115 struct i2caux *i2caux,
116 struct ddc *ddc);
117
118struct aux_engine *dal_i2caux_acquire_aux_engine(
119 struct i2caux *i2caux,
120 struct ddc *ddc);
121
122#endif
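
Taken together, the interface above is used roughly as create, acquire/submit, destroy. A minimal sketch of that lifetime, using only entry points defined earlier in this patch; 'ctx', 'ddc' and 'cmd' are assumed to come from the surrounding dm/dc code:

/* Illustrative lifetime sketch; error handling is trimmed. */
static void example_i2caux_lifetime(struct dc_context *ctx,
				    struct ddc *ddc,
				    struct i2c_command *cmd)
{
	struct i2caux *i2caux = dal_i2caux_create(ctx);

	if (!i2caux)
		return;

	/* Engine selection, per-line acquisition and start/stop framing
	 * are handled internally by dal_i2caux_submit_i2c_command(). */
	if (!dal_i2caux_submit_i2c_command(i2caux, ddc, cmd))
		BREAK_TO_DEBUGGER();

	dal_i2caux_destroy(&i2caux);
}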
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bandwidth_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/bandwidth_calcs.h
new file mode 100644
index 000000000000..f9b871b6199b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/bandwidth_calcs.h
@@ -0,0 +1,503 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/**
27 * Bandwidth and Watermark calculations interface.
28 * (Refer to "DCEx_mode_support.xlsm" from Perforce.)
29 */
30#ifndef __BANDWIDTH_CALCS_H__
31#define __BANDWIDTH_CALCS_H__
32
33#include "bw_fixed.h"
34
35struct pipe_ctx;
36
37enum bw_calcs_version {
38 BW_CALCS_VERSION_INVALID,
39 BW_CALCS_VERSION_CARRIZO,
40 BW_CALCS_VERSION_POLARIS10,
41 BW_CALCS_VERSION_POLARIS11,
42 BW_CALCS_VERSION_STONEY,
43};
44
45/*******************************************************************************
46 * There are three types of input into the calculations:
47 * 1. per-DCE static values - "hardcoded" properties of the DCEIP
48 * 2. board-level values - these generally come from the VBIOS parser
49 * 3. mode/configuration values - these depend on the mode, scaling, number of displays, etc.
50 ******************************************************************************/
51
52enum bw_defines {
53 //Common
54 bw_def_no = 0,
55 bw_def_none = 0,
56 bw_def_yes = 1,
57 bw_def_ok = 1,
58 bw_def_high = 2,
59 bw_def_mid = 1,
60 bw_def_low = 0,
61
62 //Internal
63 bw_defs_start = 255,
64 bw_def_underlay422,
65 bw_def_underlay420_luma,
66 bw_def_underlay420_chroma,
67 bw_def_underlay444,
68 bw_def_graphics,
69 bw_def_display_write_back420_luma,
70 bw_def_display_write_back420_chroma,
71 bw_def_portrait,
72 bw_def_hsr_mtn_4,
73 bw_def_hsr_mtn_h_taps,
74 bw_def_ceiling__h_taps_div_4___meq_hsr,
75 bw_def_invalid_linear_or_stereo_mode,
76 bw_def_invalid_rotation_or_bpp_or_stereo,
77 bw_def_vsr_mtn_v_taps,
78 bw_def_vsr_mtn_4,
79 bw_def_auto,
80 bw_def_manual,
81 bw_def_exceeded_allowed_maximum_sclk,
82 bw_def_exceeded_allowed_page_close_open,
83 bw_def_exceeded_allowed_outstanding_pte_req_queue_size,
84 bw_def_exceeded_allowed_maximum_bw,
85 bw_def_landscape,
86
87 //Panning and bezel
88 bw_def_any_lines,
89
90 //Underlay mode
91 bw_def_underlay_only,
92 bw_def_blended,
93 bw_def_blend,
94
95 //Stereo mode
96 bw_def_mono,
97 bw_def_side_by_side,
98 bw_def_top_bottom,
99
100 //Underlay surface type
101 bw_def_420,
102 bw_def_422,
103 bw_def_444,
104
105 //Tiling mode
106 bw_def_linear,
107 bw_def_tiled,
108 bw_def_array_linear_general,
109 bw_def_array_linear_aligned,
110 bw_def_rotated_micro_tiling,
111 bw_def_display_micro_tiling,
112
113 //Memory type
114 bw_def_gddr5,
115 bw_def_hbm,
116
117 //Voltage
118 bw_def_high_no_nbp_state_change,
119 bw_def_0_72,
120 bw_def_0_8,
121 bw_def_0_9,
122
123 bw_def_notok = -1,
124 bw_def_na = -1
125};
126
127struct bw_calcs_dceip {
128 enum bw_calcs_version version;
129 bool large_cursor;
130 uint32_t cursor_max_outstanding_group_num;
131 bool dmif_pipe_en_fbc_chunk_tracker;
132 struct bw_fixed dmif_request_buffer_size;
133 uint32_t lines_interleaved_into_lb;
134 uint32_t low_power_tiling_mode;
135 uint32_t chunk_width;
136 uint32_t number_of_graphics_pipes;
137 uint32_t number_of_underlay_pipes;
138 bool display_write_back_supported;
139 bool argb_compression_support;
140 struct bw_fixed underlay_vscaler_efficiency6_bit_per_component;
141 struct bw_fixed underlay_vscaler_efficiency8_bit_per_component;
142 struct bw_fixed underlay_vscaler_efficiency10_bit_per_component;
143 struct bw_fixed underlay_vscaler_efficiency12_bit_per_component;
144 struct bw_fixed graphics_vscaler_efficiency6_bit_per_component;
145 struct bw_fixed graphics_vscaler_efficiency8_bit_per_component;
146 struct bw_fixed graphics_vscaler_efficiency10_bit_per_component;
147 struct bw_fixed graphics_vscaler_efficiency12_bit_per_component;
148 struct bw_fixed alpha_vscaler_efficiency;
149 uint32_t max_dmif_buffer_allocated;
150 uint32_t graphics_dmif_size;
151 uint32_t underlay_luma_dmif_size;
152 uint32_t underlay_chroma_dmif_size;
153 bool pre_downscaler_enabled;
154 bool underlay_downscale_prefetch_enabled;
155 struct bw_fixed lb_write_pixels_per_dispclk;
156 struct bw_fixed lb_size_per_component444;
157 bool graphics_lb_nodownscaling_multi_line_prefetching;
158 struct bw_fixed stutter_and_dram_clock_state_change_gated_before_cursor;
159 struct bw_fixed underlay420_luma_lb_size_per_component;
160 struct bw_fixed underlay420_chroma_lb_size_per_component;
161 struct bw_fixed underlay422_lb_size_per_component;
162 struct bw_fixed cursor_chunk_width;
163 struct bw_fixed cursor_dcp_buffer_lines;
164 struct bw_fixed underlay_maximum_width_efficient_for_tiling;
165 struct bw_fixed underlay_maximum_height_efficient_for_tiling;
166 struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
167 struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
168 struct bw_fixed minimum_outstanding_pte_request_limit;
169 struct bw_fixed maximum_total_outstanding_pte_requests_allowed_by_saw;
170 bool limit_excessive_outstanding_dmif_requests;
171 struct bw_fixed linear_mode_line_request_alternation_slice;
172 uint32_t scatter_gather_lines_of_pte_prefetching_in_linear_mode;
173 uint32_t display_write_back420_luma_mcifwr_buffer_size;
174 uint32_t display_write_back420_chroma_mcifwr_buffer_size;
175 struct bw_fixed request_efficiency;
176 struct bw_fixed dispclk_per_request;
177 struct bw_fixed dispclk_ramping_factor;
178 struct bw_fixed display_pipe_throughput_factor;
179 uint32_t scatter_gather_pte_request_rows_in_tiling_mode;
180 struct bw_fixed mcifwr_all_surfaces_burst_time;
181};
182
183struct bw_calcs_vbios {
184 enum bw_defines memory_type;
185 uint32_t dram_channel_width_in_bits;
186 uint32_t number_of_dram_channels;
187 uint32_t number_of_dram_banks;
188 struct bw_fixed low_yclk; /*m_hz*/
189 struct bw_fixed mid_yclk; /*m_hz*/
190 struct bw_fixed high_yclk; /*m_hz*/
191 struct bw_fixed low_sclk; /*m_hz*/
192 struct bw_fixed mid1_sclk; /*m_hz*/
193 struct bw_fixed mid2_sclk; /*m_hz*/
194 struct bw_fixed mid3_sclk; /*m_hz*/
195 struct bw_fixed mid4_sclk; /*m_hz*/
196 struct bw_fixed mid5_sclk; /*m_hz*/
197 struct bw_fixed mid6_sclk; /*m_hz*/
198 struct bw_fixed high_sclk; /*m_hz*/
199 struct bw_fixed low_voltage_max_dispclk; /*m_hz*/
200 struct bw_fixed mid_voltage_max_dispclk; /*m_hz*/
201 struct bw_fixed high_voltage_max_dispclk; /*m_hz*/
202 struct bw_fixed low_voltage_max_phyclk;
203 struct bw_fixed mid_voltage_max_phyclk;
204 struct bw_fixed high_voltage_max_phyclk;
205 struct bw_fixed data_return_bus_width;
206 struct bw_fixed trc;
207 struct bw_fixed dmifmc_urgent_latency;
208 struct bw_fixed stutter_self_refresh_exit_latency;
209 struct bw_fixed stutter_self_refresh_entry_latency;
210 struct bw_fixed nbp_state_change_latency;
211 struct bw_fixed mcifwrmc_urgent_latency;
212 bool scatter_gather_enable;
213 struct bw_fixed down_spread_percentage;
214 uint32_t cursor_width;
215 uint32_t average_compression_rate;
216 uint32_t number_of_request_slots_gmc_reserves_for_dmif_per_channel;
217 struct bw_fixed blackout_duration;
218 struct bw_fixed maximum_blackout_recovery_time;
219};
220
221/*******************************************************************************
222 * Temporary data structure(s).
223 ******************************************************************************/
224#define maximum_number_of_surfaces 12
225/*Units : MHz, us */
226
227struct bw_calcs_data {
228 /* data for all displays */
229 uint32_t number_of_displays;
230 enum bw_defines underlay_surface_type;
231 enum bw_defines panning_and_bezel_adjustment;
232 enum bw_defines graphics_tiling_mode;
233 uint32_t graphics_lb_bpc;
234 uint32_t underlay_lb_bpc;
235 enum bw_defines underlay_tiling_mode;
236 enum bw_defines d0_underlay_mode;
237 bool d1_display_write_back_dwb_enable;
238 enum bw_defines d1_underlay_mode;
239
240 bool cpup_state_change_enable;
241 bool cpuc_state_change_enable;
242 bool nbp_state_change_enable;
243 bool stutter_mode_enable;
244 uint32_t y_clk_level;
245 uint32_t sclk_level;
246 uint32_t number_of_underlay_surfaces;
247 uint32_t number_of_dram_wrchannels;
248 uint32_t chunk_request_delay;
249 uint32_t number_of_dram_channels;
250 enum bw_defines underlay_micro_tile_mode;
251 enum bw_defines graphics_micro_tile_mode;
252 struct bw_fixed max_phyclk;
253 struct bw_fixed dram_efficiency;
254 struct bw_fixed src_width_after_surface_type;
255 struct bw_fixed src_height_after_surface_type;
256 struct bw_fixed hsr_after_surface_type;
257 struct bw_fixed vsr_after_surface_type;
258 struct bw_fixed src_width_after_rotation;
259 struct bw_fixed src_height_after_rotation;
260 struct bw_fixed hsr_after_rotation;
261 struct bw_fixed vsr_after_rotation;
262 struct bw_fixed source_height_pixels;
263 struct bw_fixed hsr_after_stereo;
264 struct bw_fixed vsr_after_stereo;
265 struct bw_fixed source_width_in_lb;
266 struct bw_fixed lb_line_pitch;
267 struct bw_fixed underlay_maximum_source_efficient_for_tiling;
268 struct bw_fixed num_lines_at_frame_start;
269 struct bw_fixed min_dmif_size_in_time;
270 struct bw_fixed min_mcifwr_size_in_time;
271 struct bw_fixed total_requests_for_dmif_size;
272 struct bw_fixed peak_pte_request_to_eviction_ratio_limiting;
273 struct bw_fixed useful_pte_per_pte_request;
274 struct bw_fixed scatter_gather_pte_request_rows;
275 struct bw_fixed scatter_gather_row_height;
276 struct bw_fixed scatter_gather_pte_requests_in_vblank;
277 struct bw_fixed inefficient_linear_pitch_in_bytes;
278 struct bw_fixed cursor_total_data;
279 struct bw_fixed cursor_total_request_groups;
280 struct bw_fixed scatter_gather_total_pte_requests;
281 struct bw_fixed scatter_gather_total_pte_request_groups;
282 struct bw_fixed tile_width_in_pixels;
283 struct bw_fixed dmif_total_number_of_data_request_page_close_open;
284 struct bw_fixed mcifwr_total_number_of_data_request_page_close_open;
285 struct bw_fixed bytes_per_page_close_open;
286 struct bw_fixed mcifwr_total_page_close_open_time;
287 struct bw_fixed total_requests_for_adjusted_dmif_size;
288 struct bw_fixed total_dmifmc_urgent_trips;
289 struct bw_fixed total_dmifmc_urgent_latency;
290 struct bw_fixed total_display_reads_required_data;
291 struct bw_fixed total_display_reads_required_dram_access_data;
292 struct bw_fixed total_display_writes_required_data;
293 struct bw_fixed total_display_writes_required_dram_access_data;
294 struct bw_fixed display_reads_required_data;
295 struct bw_fixed display_reads_required_dram_access_data;
296 struct bw_fixed dmif_total_page_close_open_time;
297 struct bw_fixed min_cursor_memory_interface_buffer_size_in_time;
298 struct bw_fixed min_read_buffer_size_in_time;
299 struct bw_fixed display_reads_time_for_data_transfer;
300 struct bw_fixed display_writes_time_for_data_transfer;
301 struct bw_fixed dmif_required_dram_bandwidth;
302 struct bw_fixed mcifwr_required_dram_bandwidth;
303 struct bw_fixed required_dmifmc_urgent_latency_for_page_close_open;
304 struct bw_fixed required_mcifmcwr_urgent_latency;
305 struct bw_fixed required_dram_bandwidth_gbyte_per_second;
306 struct bw_fixed dram_bandwidth;
307 struct bw_fixed dmif_required_sclk;
308 struct bw_fixed mcifwr_required_sclk;
309 struct bw_fixed required_sclk;
310 struct bw_fixed downspread_factor;
311 struct bw_fixed v_scaler_efficiency;
312 struct bw_fixed scaler_limits_factor;
313 struct bw_fixed display_pipe_pixel_throughput;
314 struct bw_fixed total_dispclk_required_with_ramping;
315 struct bw_fixed total_dispclk_required_without_ramping;
316 struct bw_fixed total_read_request_bandwidth;
317 struct bw_fixed total_write_request_bandwidth;
318 struct bw_fixed dispclk_required_for_total_read_request_bandwidth;
319 struct bw_fixed total_dispclk_required_with_ramping_with_request_bandwidth;
320 struct bw_fixed total_dispclk_required_without_ramping_with_request_bandwidth;
321 struct bw_fixed dispclk;
322 struct bw_fixed blackout_recovery_time;
323 struct bw_fixed min_pixels_per_data_fifo_entry;
324 struct bw_fixed sclk_deep_sleep;
325 struct bw_fixed chunk_request_time;
326 struct bw_fixed cursor_request_time;
327 struct bw_fixed line_source_pixels_transfer_time;
328 struct bw_fixed dmifdram_access_efficiency;
329 struct bw_fixed mcifwrdram_access_efficiency;
330 struct bw_fixed total_average_bandwidth_no_compression;
331 struct bw_fixed total_average_bandwidth;
332 struct bw_fixed total_stutter_cycle_duration;
333 struct bw_fixed stutter_burst_time;
334 struct bw_fixed time_in_self_refresh;
335 struct bw_fixed stutter_efficiency;
336 struct bw_fixed worst_number_of_trips_to_memory;
337 struct bw_fixed immediate_flip_time;
338 struct bw_fixed latency_for_non_dmif_clients;
339 struct bw_fixed latency_for_non_mcifwr_clients;
340 struct bw_fixed dmifmc_urgent_latency_supported_in_high_sclk_and_yclk;
341 struct bw_fixed nbp_state_dram_speed_change_margin;
342 struct bw_fixed display_reads_time_for_data_transfer_and_urgent_latency;
343 struct bw_fixed dram_speed_change_margin;
344 struct bw_fixed min_vblank_dram_speed_change_margin;
345 struct bw_fixed min_stutter_refresh_duration;
346 uint32_t total_stutter_dmif_buffer_size;
347 uint32_t total_bytes_requested;
348 uint32_t min_stutter_dmif_buffer_size;
349 uint32_t num_stutter_bursts;
350 struct bw_fixed v_blank_nbp_state_dram_speed_change_latency_supported;
351 struct bw_fixed nbp_state_dram_speed_change_latency_supported;
352 bool fbc_en[maximum_number_of_surfaces];
353 bool lpt_en[maximum_number_of_surfaces];
354 bool displays_match_flag[maximum_number_of_surfaces];
355 bool use_alpha[maximum_number_of_surfaces];
356 bool orthogonal_rotation[maximum_number_of_surfaces];
357 bool enable[maximum_number_of_surfaces];
358 bool access_one_channel_only[maximum_number_of_surfaces];
359 bool scatter_gather_enable_for_pipe[maximum_number_of_surfaces];
360 bool interlace_mode[maximum_number_of_surfaces];
361 bool display_pstate_change_enable[maximum_number_of_surfaces];
362 bool line_buffer_prefetch[maximum_number_of_surfaces];
363 uint32_t bytes_per_pixel[maximum_number_of_surfaces];
364 uint32_t max_chunks_non_fbc_mode[maximum_number_of_surfaces];
365 uint32_t lb_bpc[maximum_number_of_surfaces];
366 uint32_t output_bpphdmi[maximum_number_of_surfaces];
367 uint32_t output_bppdp4_lane_hbr[maximum_number_of_surfaces];
368 uint32_t output_bppdp4_lane_hbr2[maximum_number_of_surfaces];
369 uint32_t output_bppdp4_lane_hbr3[maximum_number_of_surfaces];
370 enum bw_defines stereo_mode[maximum_number_of_surfaces];
371 struct bw_fixed dmif_buffer_transfer_time[maximum_number_of_surfaces];
372 struct bw_fixed displays_with_same_mode[maximum_number_of_surfaces];
373 struct bw_fixed stutter_dmif_buffer_size[maximum_number_of_surfaces];
374 struct bw_fixed stutter_refresh_duration[maximum_number_of_surfaces];
375 struct bw_fixed stutter_exit_watermark[maximum_number_of_surfaces];
376 struct bw_fixed stutter_entry_watermark[maximum_number_of_surfaces];
377 struct bw_fixed h_total[maximum_number_of_surfaces];
378 struct bw_fixed v_total[maximum_number_of_surfaces];
379 struct bw_fixed pixel_rate[maximum_number_of_surfaces];
380 struct bw_fixed src_width[maximum_number_of_surfaces];
381 struct bw_fixed pitch_in_pixels[maximum_number_of_surfaces];
382 struct bw_fixed pitch_in_pixels_after_surface_type[maximum_number_of_surfaces];
383 struct bw_fixed src_height[maximum_number_of_surfaces];
384 struct bw_fixed scale_ratio[maximum_number_of_surfaces];
385 struct bw_fixed h_taps[maximum_number_of_surfaces];
386 struct bw_fixed v_taps[maximum_number_of_surfaces];
387 struct bw_fixed h_scale_ratio[maximum_number_of_surfaces];
388 struct bw_fixed v_scale_ratio[maximum_number_of_surfaces];
389 struct bw_fixed rotation_angle[maximum_number_of_surfaces];
390 struct bw_fixed compression_rate[maximum_number_of_surfaces];
391 struct bw_fixed hsr[maximum_number_of_surfaces];
392 struct bw_fixed vsr[maximum_number_of_surfaces];
393 struct bw_fixed source_width_rounded_up_to_chunks[maximum_number_of_surfaces];
394 struct bw_fixed source_width_pixels[maximum_number_of_surfaces];
395 struct bw_fixed source_height_rounded_up_to_chunks[maximum_number_of_surfaces];
396 struct bw_fixed display_bandwidth[maximum_number_of_surfaces];
397 struct bw_fixed request_bandwidth[maximum_number_of_surfaces];
398 struct bw_fixed bytes_per_request[maximum_number_of_surfaces];
399 struct bw_fixed useful_bytes_per_request[maximum_number_of_surfaces];
400 struct bw_fixed lines_interleaved_in_mem_access[maximum_number_of_surfaces];
401 struct bw_fixed latency_hiding_lines[maximum_number_of_surfaces];
402 struct bw_fixed lb_partitions[maximum_number_of_surfaces];
403 struct bw_fixed lb_partitions_max[maximum_number_of_surfaces];
404 struct bw_fixed dispclk_required_with_ramping[maximum_number_of_surfaces];
405 struct bw_fixed dispclk_required_without_ramping[maximum_number_of_surfaces];
406 struct bw_fixed data_buffer_size[maximum_number_of_surfaces];
407 struct bw_fixed outstanding_chunk_request_limit[maximum_number_of_surfaces];
408 struct bw_fixed urgent_watermark[maximum_number_of_surfaces];
409 struct bw_fixed nbp_state_change_watermark[maximum_number_of_surfaces];
410 struct bw_fixed v_filter_init[maximum_number_of_surfaces];
411 struct bw_fixed stutter_cycle_duration[maximum_number_of_surfaces];
412 struct bw_fixed average_bandwidth[maximum_number_of_surfaces];
413 struct bw_fixed average_bandwidth_no_compression[maximum_number_of_surfaces];
414 struct bw_fixed scatter_gather_pte_request_limit[maximum_number_of_surfaces];
415 struct bw_fixed lb_size_per_component[maximum_number_of_surfaces];
416 struct bw_fixed memory_chunk_size_in_bytes[maximum_number_of_surfaces];
417 struct bw_fixed pipe_chunk_size_in_bytes[maximum_number_of_surfaces];
418 struct bw_fixed number_of_trips_to_memory_for_getting_apte_row[maximum_number_of_surfaces];
419 struct bw_fixed adjusted_data_buffer_size[maximum_number_of_surfaces];
420 struct bw_fixed adjusted_data_buffer_size_in_memory[maximum_number_of_surfaces];
421 struct bw_fixed pixels_per_data_fifo_entry[maximum_number_of_surfaces];
422 struct bw_fixed scatter_gather_pte_requests_in_row[maximum_number_of_surfaces];
423 struct bw_fixed pte_request_per_chunk[maximum_number_of_surfaces];
424 struct bw_fixed scatter_gather_page_width[maximum_number_of_surfaces];
425 struct bw_fixed scatter_gather_page_height[maximum_number_of_surfaces];
426 struct bw_fixed lb_lines_in_per_line_out_in_beginning_of_frame[maximum_number_of_surfaces];
427 struct bw_fixed lb_lines_in_per_line_out_in_middle_of_frame[maximum_number_of_surfaces];
428 struct bw_fixed cursor_width_pixels[maximum_number_of_surfaces];
429 struct bw_fixed minimum_latency_hiding[maximum_number_of_surfaces];
430 struct bw_fixed maximum_latency_hiding[maximum_number_of_surfaces];
431 struct bw_fixed minimum_latency_hiding_with_cursor[maximum_number_of_surfaces];
432 struct bw_fixed maximum_latency_hiding_with_cursor[maximum_number_of_surfaces];
433 struct bw_fixed src_pixels_for_first_output_pixel[maximum_number_of_surfaces];
434 struct bw_fixed src_pixels_for_last_output_pixel[maximum_number_of_surfaces];
435 struct bw_fixed src_data_for_first_output_pixel[maximum_number_of_surfaces];
436 struct bw_fixed src_data_for_last_output_pixel[maximum_number_of_surfaces];
437 struct bw_fixed active_time[maximum_number_of_surfaces];
438 struct bw_fixed horizontal_blank_and_chunk_granularity_factor[maximum_number_of_surfaces];
439 struct bw_fixed cursor_latency_hiding[maximum_number_of_surfaces];
440 struct bw_fixed v_blank_dram_speed_change_margin[maximum_number_of_surfaces];
441 uint32_t num_displays_with_margin[3][8];
442 struct bw_fixed dmif_burst_time[3][8];
443 struct bw_fixed mcifwr_burst_time[3][8];
444 struct bw_fixed line_source_transfer_time[maximum_number_of_surfaces][3][8];
445 struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
446 struct bw_fixed min_dram_speed_change_margin[3][8];
447 struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
448 struct bw_fixed blackout_duration_margin[3][8];
449 struct bw_fixed dispclk_required_for_blackout_duration[3][8];
450 struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
451 struct bw_fixed dmif_required_sclk_for_urgent_latency[6];
452};
453
454/*******************************************************************************
455 * Output data structures.
456 ******************************************************************************/
457struct bw_watermarks {
458 uint32_t a_mark;
459 uint32_t b_mark;
460 uint32_t c_mark;
461 uint32_t d_mark;
462};
463
464struct bw_calcs_output {
465 bool cpuc_state_change_enable;
466 bool cpup_state_change_enable;
467 bool stutter_mode_enable;
468 bool nbp_state_change_enable;
469 bool all_displays_in_sync;
470 struct bw_watermarks urgent_wm_ns[6];
471 struct bw_watermarks stutter_exit_wm_ns[6];
472 struct bw_watermarks nbp_state_change_wm_ns[6];
473 uint32_t required_sclk;
474 uint32_t required_sclk_deep_sleep;
475 uint32_t required_yclk;
476 uint32_t dispclk_khz;
477 int blackout_recovery_time_us;
478};
479
480/**
481 * Initialize structures with data which will NOT change at runtime.
482 */
483void bw_calcs_init(
484 struct bw_calcs_dceip *bw_dceip,
485 struct bw_calcs_vbios *bw_vbios,
486 enum bw_calcs_version version);
487
488/**
489 * Return:
490 * true - Display(s) configuration supported.
491 * In this case 'calcs_output' contains data for HW programming
492 * false - Display(s) configuration not supported (not enough bandwidth).
493 */
494bool bw_calcs(
495 struct dc_context *ctx,
496 const struct bw_calcs_dceip *dceip,
497 const struct bw_calcs_vbios *vbios,
498 const struct pipe_ctx *pipe,
499 int pipe_count,
500 struct bw_calcs_output *calcs_output);
501
502#endif /* __BANDWIDTH_CALCS_H__ */
503
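As a rough usage sketch of the two entry points above (the pipe context array is assumed to come from the resource/validation code and is hypothetical here):

/* Illustrative only: fill the static tables, then run a validation
 * pass that fills calcs_output on success. */
static bool example_validate_bandwidth(struct dc_context *ctx,
				       const struct pipe_ctx *pipes,
				       int pipe_count)
{
	static struct bw_calcs_dceip dceip;
	static struct bw_calcs_vbios vbios;
	struct bw_calcs_output output = { 0 };

	/* Per-ASIC constants and board-level values; in real code this
	 * is done once at construction rather than per call. */
	bw_calcs_init(&dceip, &vbios, BW_CALCS_VERSION_CARRIZO);

	/* Returns false when the configuration needs more bandwidth
	 * than the ASIC/board can provide. */
	if (!bw_calcs(ctx, &dceip, &vbios, pipes, pipe_count, &output))
		return false;

	/* output.urgent_wm_ns / stutter_exit_wm_ns / nbp_state_change_wm_ns
	 * and the required clocks are now ready for HW programming. */
	return true;
}
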
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
new file mode 100644
index 000000000000..b31d07a57c31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef BW_FIXED_H_
27#define BW_FIXED_H_
28
29struct bw_fixed {
30 int64_t value;
31};
32
33struct bw_fixed bw_min3(struct bw_fixed v1, struct bw_fixed v2, struct bw_fixed v3);
34
35struct bw_fixed bw_max3(struct bw_fixed v1, struct bw_fixed v2, struct bw_fixed v3);
36
37struct bw_fixed bw_int_to_fixed(int64_t value);
38
39int32_t bw_fixed_to_int(struct bw_fixed value);
40
41struct bw_fixed bw_frc_to_fixed(int64_t num, int64_t denum);
42
43struct bw_fixed fixed31_32_to_bw_fixed(int64_t raw);
44
45struct bw_fixed bw_add(const struct bw_fixed arg1, const struct bw_fixed arg2);
46struct bw_fixed bw_sub(const struct bw_fixed arg1, const struct bw_fixed arg2);
47struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2);
48struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw_fixed arg2);
49struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2);
50
51struct bw_fixed bw_min2(const struct bw_fixed arg1, const struct bw_fixed arg2);
52struct bw_fixed bw_max2(const struct bw_fixed arg1, const struct bw_fixed arg2);
53struct bw_fixed bw_floor2(const struct bw_fixed arg, const struct bw_fixed significance);
54struct bw_fixed bw_ceil2(const struct bw_fixed arg, const struct bw_fixed significance);
55
56bool bw_equ(const struct bw_fixed arg1, const struct bw_fixed arg2);
57bool bw_neq(const struct bw_fixed arg1, const struct bw_fixed arg2);
58bool bw_leq(const struct bw_fixed arg1, const struct bw_fixed arg2);
59bool bw_meq(const struct bw_fixed arg1, const struct bw_fixed arg2);
60bool bw_ltn(const struct bw_fixed arg1, const struct bw_fixed arg2);
61bool bw_mtn(const struct bw_fixed arg1, const struct bw_fixed arg2);
62
63#endif //BW_FIXED_H_
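bw_fixed is a plain 64-bit fixed-point wrapper, so a quick sketch of the intended arithmetic style using the helpers declared above (values are arbitrary):

/* Illustrative only: compute a scaled value with the bw_fixed
 * helpers instead of floating point. */
static int32_t example_scaled_value(void)
{
	struct bw_fixed pixels = bw_int_to_fixed(1920);
	struct bw_fixed ratio = bw_frc_to_fixed(4, 3);   /* 4/3 */
	struct bw_fixed scaled = bw_mul(pixels, ratio);  /* 2560 */

	/* Round down to a whole multiple of 2 before converting back. */
	scaled = bw_floor2(scaled, bw_int_to_fixed(2));

	return bw_fixed_to_int(scaled);
}
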
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
new file mode 100644
index 000000000000..c91c7815ec6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -0,0 +1,178 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_CLOCK_SOURCE_H__
27#define __DC_CLOCK_SOURCE_H__
28
29#include "dc_types.h"
30#include "include/grph_object_id.h"
31#include "include/bios_parser_types.h"
32
33struct clock_source;
34
35struct spread_spectrum_data {
36 uint32_t percentage; /*> In unit of 0.01% or 0.001%*/
37 uint32_t percentage_divider; /*> 100 or 1000 */
38 uint32_t freq_range_khz;
39 uint32_t modulation_freq_hz;
40
41 struct spread_spectrum_flags flags;
42};
43
44struct delta_sigma_data {
45 uint32_t feedback_amount;
46 uint32_t nfrac_amount;
47 uint32_t ds_frac_size;
48 uint32_t ds_frac_amount;
49};
50
51/**
52 * Pixel Clock Parameters structure
53 * These parameters are required as input
54 * when calculating Pixel Clock Dividers for requested Pixel Clock
55 */
56struct pixel_clk_flags {
57 uint32_t ENABLE_SS:1;
58 uint32_t DISPLAY_BLANKED:1;
59 uint32_t PROGRAM_PIXEL_CLOCK:1;
60 uint32_t PROGRAM_ID_CLOCK:1;
61 uint32_t SUPPORT_YCBCR420:1;
62};
63
64/**
65 * DisplayPort HW de-spread of reference clock related parameters structure.
66 * Stored once at boot for later use.
67 */
68struct csdp_ref_clk_ds_params {
69 bool hw_dso_n_dp_ref_clk;
70/* Flag indicating HW de-spread is enabled (i.e. SS applied to the DP reference clock) */
71 uint32_t avg_dp_ref_clk_khz;
72/* Average DP Reference clock (in KHz)*/
73 uint32_t ss_percentage_on_dp_ref_clk;
74/* DP Reference clock SS percentage
75 * (not to be mixed with DP IDCLK SS from PLL Settings)*/
76 uint32_t ss_percentage_divider;
77/* DP Reference clock SS percentage divider */
78};
79
80struct pixel_clk_params {
81 uint32_t requested_pix_clk; /* in KHz */
82/*> Requested Pixel Clock
83 * (based on Video Timing standard used for requested mode)*/
84 uint32_t requested_sym_clk; /* in KHz */
85/*> Requested Sym Clock (relevant only for display port)*/
86 uint32_t dp_ref_clk; /* in KHz */
87/*> DP reference clock - calculated only for DP signal for specific cases*/
88 struct graphics_object_id encoder_object_id;
89/*> Encoder object Id - needed by VBIOS Exec table*/
90 enum signal_type signal_type;
91/*> signalType -> Encoder Mode - needed by VBIOS Exec table*/
92 enum controller_id controller_id;
93/*> ControllerId - which controller using this PLL*/
94 enum dc_color_depth color_depth;
95 struct csdp_ref_clk_ds_params de_spread_params;
96/*> de-spread info, relevant only for on-the-fly tune-up pixel rate*/
97
98 struct pixel_clk_flags flags;
99};
100
101/**
102 * Pixel Clock Dividers structure with desired Pixel Clock
103 * (adjusted after VBIOS exec table),
104 * with actually calculated Clock and reference Crystal frequency
105 */
106struct pll_settings {
107 uint32_t actual_pix_clk;
108 uint32_t adjusted_pix_clk;
109 uint32_t calculated_pix_clk;
110 uint32_t vco_freq;
111 uint32_t reference_freq;
112 uint32_t reference_divider;
113 uint32_t feedback_divider;
114 uint32_t fract_feedback_divider;
115 uint32_t pix_clk_post_divider;
116 uint32_t ss_percentage;
117 bool use_external_clk;
118};
119
120struct calc_pll_clock_source_init_data {
121 struct dc_bios *bp;
122 uint32_t min_pix_clk_pll_post_divider;
123 uint32_t max_pix_clk_pll_post_divider;
124 uint32_t min_pll_ref_divider;
125 uint32_t max_pll_ref_divider;
126 uint32_t min_override_input_pxl_clk_pll_freq_khz;
127/* if not 0, override the firmware info */
128
129 uint32_t max_override_input_pxl_clk_pll_freq_khz;
130/* if not 0, override the firmware info */
131
132 uint32_t num_fract_fb_divider_decimal_point;
133/* number of decimal point for fractional feedback divider value */
134
135 uint32_t num_fract_fb_divider_decimal_point_precision;
136/* number of decimal point to round off for fractional feedback divider value*/
137 struct dc_context *ctx;
138
139};
140
141struct calc_pll_clock_source {
142 uint32_t ref_freq_khz;
143 uint32_t min_pix_clock_pll_post_divider;
144 uint32_t max_pix_clock_pll_post_divider;
145 uint32_t min_pll_ref_divider;
146 uint32_t max_pll_ref_divider;
147
148 uint32_t max_vco_khz;
149 uint32_t min_vco_khz;
150 uint32_t min_pll_input_freq_khz;
151 uint32_t max_pll_input_freq_khz;
152
153 uint32_t fract_fb_divider_decimal_points_num;
154 uint32_t fract_fb_divider_factor;
155 uint32_t fract_fb_divider_precision;
156 uint32_t fract_fb_divider_precision_factor;
157 struct dc_context *ctx;
158};
159
160struct clock_source_funcs {
161 bool (*cs_power_down)(
162 struct clock_source *);
163 bool (*program_pix_clk)(struct clock_source *,
164 struct pixel_clk_params *, struct pll_settings *);
165 uint32_t (*get_pix_clk_dividers)(
166 struct clock_source *,
167 struct pixel_clk_params *,
168 struct pll_settings *);
169};
170
171struct clock_source {
172 const struct clock_source_funcs *funcs;
173 struct dc_context *ctx;
174 enum clock_source_id id;
175 bool dp_clk_src;
176};
177
178#endif
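
A sketch of how a caller might drive the clock_source_funcs table above to program a pixel clock. It assumes a zero return from get_pix_clk_dividers() indicates failure; that is an assumption of the example, not something this header guarantees.

#include "clock_source.h"

/* Illustrative only: derive PLL dividers for the requested pixel clock,
 * then program them through the same function table. */
static bool program_pixel_clock_sketch(struct clock_source *cs,
		struct pixel_clk_params *params)
{
	struct pll_settings pll = { 0 };

	/* assumption: zero means the dividers could not be computed */
	if (cs->funcs->get_pix_clk_dividers(cs, params, &pll) == 0)
		return false;

	return cs->funcs->program_pix_clk(cs, params, &pll);
}
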
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
new file mode 100644
index 000000000000..af292596b101
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_COMPRESSOR_H__
27#define __DAL_COMPRESSOR_H__
28
29#include "include/grph_object_id.h"
30#include "bios_parser_interface.h"
31
32enum fbc_compress_ratio {
33 FBC_COMPRESS_RATIO_INVALID = 0,
34 FBC_COMPRESS_RATIO_1TO1 = 1,
35 FBC_COMPRESS_RATIO_2TO1 = 2,
36 FBC_COMPRESS_RATIO_4TO1 = 4,
37 FBC_COMPRESS_RATIO_8TO1 = 8,
38};
39
40union fbc_physical_address {
41 struct {
42 uint32_t low_part;
43 int32_t high_part;
44 } addr;
45};
46
47struct compr_addr_and_pitch_params {
48 uint32_t inst;
49 uint32_t source_view_width;
50 uint32_t source_view_height;
51};
52
53enum fbc_hw_max_resolution_supported {
54 FBC_MAX_X = 3840,
55 FBC_MAX_Y = 2400
56};
57
58struct compressor {
59 struct dc_context *ctx;
60 uint32_t attached_inst;
61 bool is_enabled;
62
63 union {
64 uint32_t raw;
65 struct {
66 uint32_t FBC_SUPPORT:1;
67 uint32_t FB_POOL:1;
68 uint32_t DYNAMIC_ALLOC:1;
69 uint32_t LPT_SUPPORT:1;
70 uint32_t LPT_MC_CONFIG:1;
71 uint32_t DUMMY_BACKEND:1;
72 uint32_t CLK_GATING_DISABLED:1;
73
74 } bits;
75 } options;
76
77 union fbc_physical_address compr_surface_address;
78
79 uint32_t embedded_panel_h_size;
80 uint32_t embedded_panel_v_size;
81 uint32_t memory_bus_width;
82 uint32_t banks_num;
83 uint32_t raw_size;
84 uint32_t channel_interleave_size;
85 uint32_t dram_channels_num;
86
87 uint32_t allocated_size;
88 uint32_t preferred_requested_size;
89 uint32_t lpt_channels_num;
90 enum fbc_compress_ratio min_compress_ratio;
91};
92
93#endif
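
A sketch of how the capability bits and the FBC_MAX_X/FBC_MAX_Y limits above might gate frame buffer compression; fbc_usable() is illustrative and not part of the patch.

#include "compressor.h"

/* Illustrative only: FBC is considered only when the hardware reports
 * support and the source view fits within the documented maximum. */
static bool fbc_usable(const struct compressor *compressor,
		const struct compr_addr_and_pitch_params *params)
{
	if (!compressor->options.bits.FBC_SUPPORT)
		return false;

	if (params->source_view_width > FBC_MAX_X ||
			params->source_view_height > FBC_MAX_Y)
		return false;

	return true;
}
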
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_dc.h b/drivers/gpu/drm/amd/display/dc/inc/core_dc.h
new file mode 100644
index 000000000000..7d6dc8ea75ab
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_dc.h
@@ -0,0 +1,50 @@
1/*
2 * core_dc.h
3 *
4 * Created on: Nov 13, 2015
5 * Author: yonsun
6 */
7
8#ifndef __CORE_DC_H__
9#define __CORE_DC_H__
10
11#include "core_types.h"
12#include "hw_sequencer.h"
13
14#define DC_TO_CORE(dc)\
15 container_of(dc, struct core_dc, public)
16
17struct core_dc {
18 struct dc public;
19 struct dc_context *ctx;
20
21 uint8_t link_count;
22 struct core_link *links[MAX_PIPES * 2];
23
24 /* TODO: determine max number of targets*/
25 struct validate_context *current_context;
26 struct validate_context *temp_flip_context;
27 struct resource_pool *res_pool;
28
29 /*Power State*/
30 enum dc_video_power_state previous_power_state;
31 enum dc_video_power_state current_power_state;
32
33 /* Display Engine Clock levels */
34 struct dm_pp_clock_levels sclk_lvls;
35
36 /* Inputs into BW and WM calculations. */
37 struct bw_calcs_dceip bw_dceip;
38 struct bw_calcs_vbios bw_vbios;
39
40 /* HW functions */
41 struct hw_sequencer_funcs hwss;
42 struct dce_hwseq *hwseq;
43
44 /* temp store of dm_pp_display_configuration
45 * to compare to see if display config changed
46 */
47 struct dm_pp_display_configuration prev_display_config;
48};
49
50#endif /* __CORE_DC_H__ */
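
The DC_TO_CORE() macro is the usual way back from the public struct dc to the containing core_dc. A sketch of that pattern; visit_link() is a hypothetical callback.

#include "core_dc.h"

/* Illustrative only: recover the core_dc from a public dc pointer and
 * walk the links discovered at creation time. */
static void for_each_link_sketch(struct dc *dc,
		void (*visit_link)(struct core_link *link))
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	uint8_t i;

	for (i = 0; i < core_dc->link_count; i++)
		visit_link(core_dc->links[i]);
}
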
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
new file mode 100644
index 000000000000..32a2cc712000
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef _CORE_STATUS_H_
27#define _CORE_STATUS_H_
28
29enum dc_status {
30 DC_OK = 1,
31
32 DC_NO_CONTROLLER_RESOURCE,
33 DC_NO_STREAM_ENG_RESOURCE,
34 DC_NO_CLOCK_SOURCE_RESOURCE,
35 DC_FAIL_CONTROLLER_VALIDATE,
36 DC_FAIL_ENC_VALIDATE,
37 DC_FAIL_ATTACH_SURFACES,
38 DC_FAIL_SURFACE_VALIDATE,
39 DC_NO_DP_LINK_BANDWIDTH,
40 DC_EXCEED_DONGLE_MAX_CLK,
41 DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED,
42 DC_FAIL_BANDWIDTH_VALIDATE, /* BW and Watermark validation */
43
44 DC_ERROR_UNEXPECTED = -1
45};
46
47#endif /* _CORE_STATUS_H_ */
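
Note that DC_OK is defined as 1 while DC_ERROR_UNEXPECTED is -1, so callers must compare against DC_OK rather than testing for zero/non-zero. A tiny sketch of the intended check:

#include "core_status.h"

/* Illustrative only: any value other than DC_OK means the requested
 * configuration cannot be committed. */
static bool dc_status_is_error(enum dc_status status)
{
	return status != DC_OK;
}
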
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
new file mode 100644
index 000000000000..f2eb8945d5c4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -0,0 +1,319 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef _CORE_TYPES_H_
27#define _CORE_TYPES_H_
28
29#include "dc.h"
30#include "bandwidth_calcs.h"
31#include "ddc_service_types.h"
32#include "dc_bios_types.h"
33
34struct core_stream;
35/********* core_target *************/
36
37#define CONST_DC_TARGET_TO_CORE(dc_target) \
38 container_of(dc_target, const struct core_target, public)
39#define DC_TARGET_TO_CORE(dc_target) \
40 container_of(dc_target, struct core_target, public)
41
42#define MAX_PIPES 6
43#define MAX_CLOCK_SOURCES 7
44
45struct core_target {
46 struct dc_target public;
47
48 struct dc_context *ctx;
49};
50
51/********* core_surface **********/
52#define DC_SURFACE_TO_CORE(dc_surface) \
53 container_of(dc_surface, struct core_surface, public)
54
55#define DC_GAMMA_TO_CORE(dc_gamma) \
56 container_of(dc_gamma, struct core_gamma, public)
57
58struct core_surface {
59 struct dc_surface public;
60 struct dc_surface_status status;
61 struct dc_context *ctx;
62};
63
64struct core_gamma {
65 struct dc_gamma public;
66 struct dc_context *ctx;
67};
68
69void enable_surface_flip_reporting(struct dc_surface *dc_surface,
70 uint32_t controller_id);
71
72/********* core_stream ************/
73#include "grph_object_id.h"
74#include "link_encoder.h"
75#include "stream_encoder.h"
76#include "clock_source.h"
77#include "audio.h"
78#include "hw_sequencer_types.h"
79#include "opp.h"
80
81#define DC_STREAM_TO_CORE(dc_stream) container_of( \
82 dc_stream, struct core_stream, public)
83
84struct core_stream {
85 struct dc_stream public;
86
87 /* field internal to DC */
88 struct dc_context *ctx;
89 const struct core_sink *sink;
90
91 /* used by DCP and FMT */
92 struct bit_depth_reduction_params bit_depth_params;
93 struct clamping_and_pixel_encoding_params clamping;
94
95 int phy_pix_clk;
96 enum signal_type signal;
97
98 struct dc_stream_status status;
99};
100
101/************ core_sink *****************/
102
103#define DC_SINK_TO_CORE(dc_sink) \
104 container_of(dc_sink, struct core_sink, public)
105
106struct core_sink {
107 /** The public, read-only (for DM) area of sink. **/
108 struct dc_sink public;
109 /** End-of-public area. **/
110
111 /** The 'protected' area - read/write access, for use only inside DC **/
112 /* not used for now */
113 struct core_link *link;
114 struct dc_context *ctx;
115 uint32_t dongle_max_pix_clk;
116 bool converter_disable_audio;
117};
118
119/************ link *****************/
120#define DC_LINK_TO_CORE(dc_link) container_of(dc_link, struct core_link, public)
121
122struct link_init_data {
123 const struct core_dc *dc;
124 struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
125 uint32_t connector_index; /* this will be mapped to the HPD pins */
126 uint32_t link_index; /* this is mapped to DAL display_index
127 TODO: remove it when DC is complete. */
128};
129
130/* DP MST stream allocation (payload bandwidth number) */
131struct link_mst_stream_allocation {
132 /* DIG front */
133 const struct stream_encoder *stream_enc;
134 /* associate DRM payload table with DC stream encoder */
135 uint8_t vcp_id;
136 /* number of slots required for the DP stream in transport packet */
137 uint8_t slot_count;
138};
139
140/* DP MST stream allocation table */
141struct link_mst_stream_allocation_table {
142 /* number of DP video streams */
143 int stream_count;
144 /* array of stream allocations */
145 struct link_mst_stream_allocation
146 stream_allocations[MAX_CONTROLLER_NUM];
147};
148
149struct core_link {
150 struct dc_link public;
151 const struct core_dc *dc;
152
153	struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete */
154
155 struct link_encoder *link_enc;
156 struct ddc_service *ddc;
157 struct graphics_object_id link_id;
158 union ddi_channel_mapping ddi_channel_mapping;
159 struct connector_device_tag_info device_tag;
160 struct dpcd_caps dpcd_caps;
161 unsigned int dpcd_sink_count;
162
163 enum edp_revision edp_revision;
164
165 /* MST record stream using this link */
166 struct link_flags {
167 bool dp_keep_receiver_powered;
168 } wa_flags;
169 struct link_mst_stream_allocation_table mst_stream_alloc_table;
170
171 struct dc_link_status link_status;
172};
173
174#define DC_LINK_TO_LINK(dc_link) container_of(dc_link, struct core_link, public)
175
176struct core_link *link_create(const struct link_init_data *init_params);
177void link_destroy(struct core_link **link);
178
179enum dc_status dc_link_validate_mode_timing(
180 const struct core_stream *stream,
181 struct core_link *link,
182 const struct dc_crtc_timing *timing);
183
184void core_link_resume(struct core_link *link);
185
186void core_link_enable_stream(struct pipe_ctx *pipe_ctx);
187
188void core_link_disable_stream(struct pipe_ctx *pipe_ctx);
189
190/********** DAL Core*********************/
191#include "display_clock_interface.h"
192#include "transform.h"
193
194struct resource_pool;
195struct validate_context;
196struct resource_context;
197
198struct resource_funcs {
199 void (*destroy)(struct resource_pool **pool);
200 struct link_encoder *(*link_enc_create)(
201 const struct encoder_init_data *init);
202 enum dc_status (*validate_with_context)(
203 const struct core_dc *dc,
204 const struct dc_validation_set set[],
205 int set_count,
206 struct validate_context *context);
207
208 enum dc_status (*validate_guaranteed)(
209 const struct core_dc *dc,
210 const struct dc_target *dc_target,
211 struct validate_context *context);
212
213 enum dc_status (*validate_bandwidth)(
214 const struct core_dc *dc,
215 struct validate_context *context);
216
217 struct validate_context *(*apply_clk_constraints)(
218 const struct core_dc *dc,
219 struct validate_context *context);
220
221 struct pipe_ctx *(*acquire_idle_pipe_for_layer)(
222 struct resource_context *res_ctx,
223 struct core_stream *stream);
224
225 void (*build_bit_depth_reduction_params)(
226 const struct core_stream *stream,
227 struct bit_depth_reduction_params *fmt_bit_depth);
228};
229
230struct audio_support{
231 bool dp_audio;
232 bool hdmi_audio_on_dongle;
233 bool hdmi_audio_native;
234};
235
236struct resource_pool {
237 struct mem_input *mis[MAX_PIPES];
238 struct input_pixel_processor *ipps[MAX_PIPES];
239 struct transform *transforms[MAX_PIPES];
240 struct output_pixel_processor *opps[MAX_PIPES];
241 struct timing_generator *timing_generators[MAX_PIPES];
242 struct stream_encoder *stream_enc[MAX_PIPES * 2];
243
244 unsigned int pipe_count;
245 unsigned int underlay_pipe_index;
246 unsigned int stream_enc_count;
247
248 /*
249 * reserved clock source for DP
250 */
251 struct clock_source *dp_clock_source;
252
253 struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
254 unsigned int clk_src_count;
255
256 struct audio *audios[MAX_PIPES];
257 unsigned int audio_count;
258 struct audio_support audio_support;
259
260 struct display_clock *display_clock;
261 struct irq_service *irqs;
262
263 const struct resource_funcs *funcs;
264 const struct resource_caps *res_cap;
265};
266
267struct pipe_ctx {
268 struct core_surface *surface;
269 struct core_stream *stream;
270
271 struct mem_input *mi;
272 struct input_pixel_processor *ipp;
273 struct transform *xfm;
274 struct output_pixel_processor *opp;
275 struct timing_generator *tg;
276
277 struct scaler_data scl_data;
278
279 struct stream_encoder *stream_enc;
280 struct display_clock *dis_clk;
281 struct clock_source *clock_source;
282
283 struct audio *audio;
284
285 struct pixel_clk_params pix_clk_params;
286 struct pll_settings pll_settings;
287
288 /*fmt*/
289 struct encoder_info_frame encoder_info_frame;
290
291 uint8_t pipe_idx;
292
293 struct pipe_ctx *top_pipe;
294 struct pipe_ctx *bottom_pipe;
295};
296
297struct resource_context {
298 const struct resource_pool *pool;
299 struct pipe_ctx pipe_ctx[MAX_PIPES];
300 bool is_stream_enc_acquired[MAX_PIPES * 2];
301 bool is_audio_acquired[MAX_PIPES];
302 uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
303 uint8_t dp_clock_source_ref_count;
304};
305
306struct validate_context {
307 struct core_target *targets[MAX_PIPES];
308 struct dc_target_status target_status[MAX_PIPES];
309 uint8_t target_count;
310
311 struct resource_context res_ctx;
312
313 /* The output from BW and WM calculations. */
314 struct bw_calcs_output bw_results;
315 /* Note: this is a big structure, do *not* put on stack! */
316 struct dm_pp_display_configuration pp_display_cfg;
317};
318
319#endif /* _CORE_TYPES_H_ */
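
A sketch of the kind of lookup the fixed-size pipe array in resource_context is meant for; find_pipe_for_stream() is illustrative, not part of the patch.

#include "core_types.h"

/* Illustrative only: locate the pipe_ctx currently bound to a stream by
 * scanning the MAX_PIPES entries of the resource context. */
static struct pipe_ctx *find_pipe_for_stream(struct resource_context *res_ctx,
		const struct core_stream *stream)
{
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (res_ctx->pipe_ctx[i].stream == stream)
			return &res_ctx->pipe_ctx[i];
	}

	return NULL;
}
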
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
new file mode 100644
index 000000000000..830fc3d039c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_DDC_SERVICE_H__
27#define __DAL_DDC_SERVICE_H__
28
29#include "include/ddc_service_types.h"
30#include "include/i2caux_interface.h"
31
32#define EDID_SEGMENT_SIZE 256
33
34struct ddc_service;
35struct graphics_object_id;
36enum ddc_result;
37struct av_sync_data;
38struct dp_receiver_id_info;
39
40struct i2c_payloads;
41struct aux_payloads;
42
43struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count);
44struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p);
45uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p);
46void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p);
47
48struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count);
49struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p);
50uint32_t dal_ddc_aux_payloads_get_count(struct aux_payloads *p);
51void dal_ddc_aux_payloads_destroy(struct aux_payloads **p);
52
53void dal_ddc_i2c_payloads_add(
54 struct i2c_payloads *payloads,
55 uint32_t address,
56 uint32_t len,
57 uint8_t *data,
58 bool write);
59
60void dal_ddc_aux_payloads_add(
61 struct aux_payloads *payloads,
62 uint32_t address,
63 uint32_t len,
64 uint8_t *data,
65 bool write);
66
67struct ddc_service_init_data {
68 struct graphics_object_id id;
69 struct dc_context *ctx;
70 struct core_link *link;
71};
72
73struct ddc_service *dal_ddc_service_create(
74 struct ddc_service_init_data *ddc_init_data);
75
76void dal_ddc_service_destroy(struct ddc_service **ddc);
77
78enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc);
79
80void dal_ddc_service_set_transaction_type(
81 struct ddc_service *ddc,
82 enum ddc_transaction_type type);
83
84bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc);
85
86uint32_t dal_ddc_service_edid_query(struct ddc_service *ddc);
87
88uint32_t dal_ddc_service_get_edid_buf_len(struct ddc_service *ddc);
89
90void dal_ddc_service_get_edid_buf(struct ddc_service *ddc, uint8_t *edid_buf);
91
92void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
93 struct ddc_service *ddc,
94 struct display_sink_capability *sink_cap);
95
96bool dal_ddc_service_query_ddc_data(
97 struct ddc_service *ddc,
98 uint32_t address,
99 uint8_t *write_buf,
100 uint32_t write_size,
101 uint8_t *read_buf,
102 uint32_t read_size);
103
104enum ddc_result dal_ddc_service_read_dpcd_data(
105 struct ddc_service *ddc,
106 uint32_t address,
107 uint8_t *data,
108 uint32_t len);
109
110enum ddc_result dal_ddc_service_write_dpcd_data(
111 struct ddc_service *ddc,
112 uint32_t address,
113 const uint8_t *data,
114 uint32_t len);
115
116void dal_ddc_service_write_scdc_data(
117 struct ddc_service *ddc_service,
118 uint32_t pix_clk,
119 bool lte_340_scramble);
120
121void dal_ddc_service_read_scdc_data(
122 struct ddc_service *ddc_service);
123
124void ddc_service_set_dongle_type(struct ddc_service *ddc,
125 enum display_dongle_type dongle_type);
126
127void dal_ddc_service_set_ddc_pin(
128 struct ddc_service *ddc_service,
129 struct ddc *ddc);
130
131struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service);
132
143
144#endif /* __DAL_DDC_SERVICE_H__ */
145
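
A sketch of reading one EDID block through dal_ddc_service_query_ddc_data(); the 0x50 slave address and zero start offset follow the usual E-DDC convention and are assumptions of the example, not requirements of this header.

#include "dc_link_ddc.h"

/* Illustrative only: write the start offset, then read a 128-byte block. */
static bool read_edid_block_sketch(struct ddc_service *ddc, uint8_t *buf)
{
	uint8_t offset = 0;

	return dal_ddc_service_query_ddc_data(ddc,
			0x50,          /* assumed EDID slave address */
			&offset, 1,    /* write phase: start offset */
			buf, 128);     /* read phase: one EDID block */
}
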
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
new file mode 100644
index 000000000000..b0cf8e00059c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_LINK_DP_H__
27#define __DC_LINK_DP_H__
28
29#define LINK_TRAINING_ATTEMPTS 4
30#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
31
32struct core_link;
33struct core_stream;
34struct dc_link_settings;
35
36bool dp_hbr_verify_link_cap(
37 struct core_link *link,
38 struct dc_link_settings *known_limit_link_setting);
39
40bool dp_validate_mode_timing(
41 struct core_link *link,
42 const struct dc_crtc_timing *timing);
43
44void decide_link_settings(
45 struct core_stream *stream,
46 struct dc_link_settings *link_setting);
47
48bool perform_link_training_with_retries(
49 struct core_link *link,
50 const struct dc_link_settings *link_setting,
51 bool skip_video_pattern,
52 int attempts);
53
54bool is_mst_supported(struct core_link *link);
55
56void detect_dp_sink_caps(struct core_link *link);
57
58bool is_dp_active_dongle(const struct core_link *link);
59
60#endif /* __DC_LINK_DP_H__ */
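
A sketch of the retry policy implied by LINK_TRAINING_ATTEMPTS; the wrapper below is illustrative and assumes the link settings were already decided, for example via decide_link_settings().

#include "dc_link_dp.h"

/* Illustrative only: train the link, letting the helper retry up to the
 * requested number of attempts, spaced LINK_TRAINING_RETRY_DELAY ms apart. */
static bool train_link_sketch(struct core_link *link,
		const struct dc_link_settings *settings)
{
	return perform_link_training_with_retries(
			link, settings,
			false, /* do not skip the video pattern */
			LINK_TRAINING_ATTEMPTS);
}
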
diff --git a/drivers/gpu/drm/amd/display/dc/inc/gamma_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/gamma_calcs.h
new file mode 100644
index 000000000000..e2c63fd4fe92
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/gamma_calcs.h
@@ -0,0 +1,19 @@
1/*
2 * gamma_calcs.h
3 *
4 * Created on: Feb 9, 2016
5 * Author: yonsun
6 */
7
8#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_GAMMA_CALCS_H_
9#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_GAMMA_CALCS_H_
10
11#include "opp.h"
12#include "core_types.h"
13#include "dc.h"
14
15bool calculate_regamma_params(struct pwl_params *params,
16 const struct core_gamma *ramp,
17 const struct core_surface *surface);
18
19#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_GAMMA_CALCS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/gamma_types.h b/drivers/gpu/drm/amd/display/dc/inc/gamma_types.h
new file mode 100644
index 000000000000..7948d2cc0715
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/gamma_types.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef GAMMA_TYPES_H_
26
27#define GAMMA_TYPES_H_
28
29#include "dc_types.h"
30
31/* TODO: Used in IPP and OPP */
32
33struct dev_c_lut16 {
34 uint16_t red;
35 uint16_t green;
36 uint16_t blue;
37};
38#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
new file mode 100644
index 000000000000..925204f49717
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_AUDIO_H__
27#define __DAL_AUDIO_H__
28
29#include "audio_types.h"
30
31struct audio;
32
33struct audio_funcs {
34
35 bool (*endpoint_valid)(struct audio *audio);
36
37 void (*hw_init)(struct audio *audio);
38
39 void (*az_enable)(struct audio *audio);
40
41 void (*az_disable)(struct audio *audio);
42
43 void (*az_configure)(struct audio *audio,
44 enum signal_type signal,
45 const struct audio_crtc_info *crtc_info,
46 const struct audio_info *audio_info);
47
48 void (*wall_dto_setup)(struct audio *audio,
49 enum signal_type signal,
50 const struct audio_crtc_info *crtc_info,
51 const struct audio_pll_info *pll_info);
52
53 void (*destroy)(struct audio **audio);
54};
55
56struct audio {
57 const struct audio_funcs *funcs;
58 struct dc_context *ctx;
59 unsigned int inst;
60};
61
62#endif /* __DAL_AUDIO_H__ */
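
A sketch of driving the audio function table above to bring up an Azalia endpoint; the ordering (validate, configure, enable) is an assumption of the example.

#include "audio.h"

/* Illustrative only: configure and enable an audio endpoint through its
 * function table. */
static bool enable_audio_endpoint_sketch(struct audio *audio,
		enum signal_type signal,
		const struct audio_crtc_info *crtc_info,
		const struct audio_info *audio_info)
{
	if (!audio->funcs->endpoint_valid(audio))
		return false;

	audio->funcs->az_configure(audio, signal, crtc_info, audio_info);
	audio->funcs->az_enable(audio);

	return true;
}
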
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
new file mode 100644
index 000000000000..90d0148430fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GPIO_H__
27#define __DAL_GPIO_H__
28
29#include "gpio_types.h"
30
31struct gpio {
32 struct gpio_service *service;
33 struct hw_gpio_pin *pin;
34 enum gpio_id id;
35 uint32_t en;
36 enum gpio_mode mode;
37 /* when GPIO comes from VBIOS, it has defined output state */
38 enum gpio_pin_output_state output_state;
39};
40
41#if 0
42struct gpio_funcs {
43
44 struct hw_gpio_pin *(*create_ddc_data)(
45 struct dc_context *ctx,
46 enum gpio_id id,
47 uint32_t en);
48 struct hw_gpio_pin *(*create_ddc_clock)(
49 struct dc_context *ctx,
50 enum gpio_id id,
51 uint32_t en);
52 struct hw_gpio_pin *(*create_generic)(
53 struct dc_context *ctx,
54 enum gpio_id id,
55 uint32_t en);
56 struct hw_gpio_pin *(*create_hpd)(
57 struct dc_context *ctx,
58 enum gpio_id id,
59 uint32_t en);
60 struct hw_gpio_pin *(*create_gpio_pad)(
61 struct dc_context *ctx,
62 enum gpio_id id,
63 uint32_t en);
64 struct hw_gpio_pin *(*create_sync)(
65 struct dc_context *ctx,
66 enum gpio_id id,
67 uint32_t en);
68 struct hw_gpio_pin *(*create_gsl)(
69 struct dc_context *ctx,
70 enum gpio_id id,
71 uint32_t en);
72
73 /* HW translation */
74 bool (*offset_to_id)(
75 uint32_t offset,
76 uint32_t mask,
77 enum gpio_id *id,
78 uint32_t *en);
79 bool (*id_to_offset)(
80 enum gpio_id id,
81 uint32_t en,
82 struct gpio_pin_info *info);
83};
84#endif
85
86#endif /* __DAL_GPIO_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
new file mode 100644
index 000000000000..3b0e616f7066
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_SHARED_H__
27#define __DAL_HW_SHARED_H__
28
29/******************************************************************************
30 * Data types shared between different Virtual HW blocks
31 ******************************************************************************/
32struct gamma_curve {
33 uint32_t offset;
34 uint32_t segments_num;
35};
36
37struct curve_points {
38 struct fixed31_32 x;
39 struct fixed31_32 y;
40 struct fixed31_32 offset;
41 struct fixed31_32 slope;
42
43 uint32_t custom_float_x;
44 uint32_t custom_float_y;
45 uint32_t custom_float_offset;
46 uint32_t custom_float_slope;
47};
48
49struct pwl_result_data {
50 struct fixed31_32 red;
51 struct fixed31_32 green;
52 struct fixed31_32 blue;
53
54 struct fixed31_32 delta_red;
55 struct fixed31_32 delta_green;
56 struct fixed31_32 delta_blue;
57
58 uint32_t red_reg;
59 uint32_t green_reg;
60 uint32_t blue_reg;
61
62 uint32_t delta_red_reg;
63 uint32_t delta_green_reg;
64 uint32_t delta_blue_reg;
65};
66
67struct pwl_params {
68 uint32_t *data;
69 struct gamma_curve arr_curve_points[16];
70 struct curve_points arr_points[3];
71 struct pwl_result_data rgb_resulted[256 + 3];
72 uint32_t hw_points_num;
73};
74#endif /* __DAL_HW_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
new file mode 100644
index 000000000000..7e5f3e02a719
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_IPP_H__
27#define __DAL_IPP_H__
28
29#include "hw_shared.h"
30
31#define MAXTRIX_COEFFICIENTS_NUMBER 12
32#define MAXTRIX_COEFFICIENTS_WRAP_NUMBER (MAXTRIX_COEFFICIENTS_NUMBER + 4)
33#define MAX_OVL_MATRIX_COUNT 12
34
35/* IPP RELATED */
36struct input_pixel_processor {
37 struct dc_context *ctx;
38 uint32_t inst;
39 const struct ipp_funcs *funcs;
40};
41
42enum ipp_prescale_mode {
43 IPP_PRESCALE_MODE_BYPASS,
44 IPP_PRESCALE_MODE_FIXED_SIGNED,
45 IPP_PRESCALE_MODE_FLOAT_SIGNED,
46 IPP_PRESCALE_MODE_FIXED_UNSIGNED,
47 IPP_PRESCALE_MODE_FLOAT_UNSIGNED
48};
49
50struct ipp_prescale_params {
51 enum ipp_prescale_mode mode;
52 uint16_t bias;
53 uint16_t scale;
54};
55
56enum ipp_degamma_mode {
57 IPP_DEGAMMA_MODE_BYPASS,
58 IPP_DEGAMMA_MODE_HW_sRGB,
59 IPP_DEGAMMA_MODE_HW_xvYCC,
60 IPP_DEGAMMA_MODE_USER_PWL
61};
62
63enum ovl_color_space {
64 OVL_COLOR_SPACE_UNKNOWN = 0,
65 OVL_COLOR_SPACE_RGB,
66 OVL_COLOR_SPACE_YUV601,
67 OVL_COLOR_SPACE_YUV709
68};
69
70enum expansion_mode {
71 EXPANSION_MODE_DYNAMIC,
72 EXPANSION_MODE_ZERO
73};
74
75enum ipp_output_format {
76 IPP_OUTPUT_FORMAT_12_BIT_FIX,
77 IPP_OUTPUT_FORMAT_16_BIT_BYPASS,
78 IPP_OUTPUT_FORMAT_FLOAT
79};
80
81struct ipp_funcs {
82
83 /*** cursor ***/
84 void (*ipp_cursor_set_position)(
85 struct input_pixel_processor *ipp,
86 const struct dc_cursor_position *position);
87
88 bool (*ipp_cursor_set_attributes)(
89 struct input_pixel_processor *ipp,
90 const struct dc_cursor_attributes *attributes);
91
92 /*** setup input pixel processing ***/
93
94 /* put the entire pixel processor to bypass */
95 void (*ipp_full_bypass)(
96 struct input_pixel_processor *ipp);
97
98 /* setup ipp to expand/convert input to pixel processor internal format */
99 void (*ipp_setup)(
100 struct input_pixel_processor *ipp,
101 enum surface_pixel_format input_format,
102 enum expansion_mode mode,
103 enum ipp_output_format output_format);
104
105 /* DCE function to setup IPP. TODO: see if we can consolidate to setup */
106 void (*ipp_program_prescale)(
107 struct input_pixel_processor *ipp,
108 struct ipp_prescale_params *params);
109
110 /*** DEGAMMA RELATED ***/
111 bool (*ipp_set_degamma)(
112 struct input_pixel_processor *ipp,
113 enum ipp_degamma_mode mode);
114
115 bool (*ipp_program_degamma_pwl)(
116 struct input_pixel_processor *ipp,
117 const struct pwl_params *params);
118
119};
120
121#endif /* __DAL_IPP_H__ */
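
A sketch of configuring the input pixel processor through ipp_funcs; the expansion mode, output format and degamma choices are illustrative assumptions rather than recommended settings.

#include "ipp.h"

/* Illustrative only: expand the surface into the internal format and
 * apply the hardware sRGB degamma curve. */
static bool setup_ipp_sketch(struct input_pixel_processor *ipp,
		enum surface_pixel_format format)
{
	ipp->funcs->ipp_setup(ipp, format,
			EXPANSION_MODE_DYNAMIC,
			IPP_OUTPUT_FORMAT_12_BIT_FIX);

	return ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_sRGB);
}
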
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
new file mode 100644
index 000000000000..77f8aa410898
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -0,0 +1,263 @@
1/*
2 * link_encoder.h
3 *
4 * Created on: Oct 6, 2015
5 * Author: yonsun
6 */
7
8#ifndef LINK_ENCODER_H_
9#define LINK_ENCODER_H_
10
11#include "grph_object_defs.h"
12#include "signal_types.h"
13#include "dc_types.h"
14
15struct dc_context;
16struct encoder_set_dp_phy_pattern_param;
17struct link_mst_stream_allocation_table;
18struct dc_link_settings;
19struct link_training_settings;
20struct core_stream;
21struct pipe_ctx;
22
23struct encoder_init_data {
24 enum channel_id channel;
25 struct graphics_object_id connector;
26 enum hpd_source_id hpd_source;
27 /* TODO: in DAL2, here was pointer to EventManagerInterface */
28 struct graphics_object_id encoder;
29 struct dc_context *ctx;
30 enum transmitter transmitter;
31};
32
33struct encoder_feature_support {
34 union {
35 struct {
36 /* 1 - external encoder; 0 - internal encoder */
37 uint32_t EXTERNAL_ENCODER:1;
38 uint32_t ANALOG_ENCODER:1;
39 uint32_t STEREO_SYNC:1;
40 /* check the DDC data pin
41 * when performing DP Sink detection */
42 uint32_t DP_SINK_DETECT_POLL_DATA_PIN:1;
43 /* CPLIB authentication
44 * for external DP chip supported */
45 uint32_t CPLIB_DP_AUTHENTICATION:1;
46 uint32_t IS_HBR2_CAPABLE:1;
47 uint32_t IS_HBR3_CAPABLE:1;
48 uint32_t IS_HBR2_VALIDATED:1;
49 uint32_t IS_TPS3_CAPABLE:1;
50 uint32_t IS_TPS4_CAPABLE:1;
51 uint32_t IS_AUDIO_CAPABLE:1;
52 uint32_t IS_VCE_SUPPORTED:1;
53 uint32_t IS_CONVERTER:1;
54 uint32_t IS_Y_ONLY_CAPABLE:1;
55 uint32_t IS_YCBCR_CAPABLE:1;
56 } bits;
57 uint32_t raw;
58 } flags;
59 /* maximum supported deep color depth */
60 enum dc_color_depth max_deep_color;
61 enum dc_color_depth max_hdmi_deep_color;
62 /* maximum supported clock */
63 unsigned int max_pixel_clock;
64 unsigned int max_hdmi_pixel_clock;
65 bool ycbcr420_supported;
66};
67
68enum physical_phy_id {
69 PHYLD_0,
70 PHYLD_1,
71 PHYLD_2,
72 PHYLD_3,
73 PHYLD_4,
74 PHYLD_5,
75 PHYLD_6,
76 PHYLD_7,
77 PHYLD_8,
78 PHYLD_9,
79 PHYLD_COUNT,
80 PHYLD_UNKNOWN = (-1L)
81};
82
83enum phy_type {
84 PHY_TYPE_UNKNOWN = 1,
85 PHY_TYPE_PCIE_PHY = 2,
86 PHY_TYPE_UNIPHY = 3,
87};
88
89union dmcu_psr_level {
90 struct {
91 unsigned int SKIP_CRC:1;
92 unsigned int SKIP_DP_VID_STREAM_DISABLE:1;
93 unsigned int SKIP_PHY_POWER_DOWN:1;
94 unsigned int SKIP_AUX_ACK_CHECK:1;
95 unsigned int SKIP_CRTC_DISABLE:1;
96 unsigned int SKIP_AUX_RFB_CAPTURE_CHECK:1;
97 unsigned int SKIP_SMU_NOTIFICATION:1;
98 unsigned int SKIP_AUTO_STATE_ADVANCE:1;
99 unsigned int DISABLE_PSR_ENTRY_ABORT:1;
100 unsigned int RESERVED:23;
101 } bits;
102 unsigned int u32all;
103};
104
105union dpcd_psr_configuration {
106 struct {
107 unsigned char ENABLE : 1;
108 unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
109 unsigned char CRC_VERIFICATION : 1;
110 unsigned char FRAME_CAPTURE_INDICATION : 1;
111 /* For eDP 1.4, PSR v2*/
112 unsigned char LINE_CAPTURE_INDICATION : 1;
113 /* For eDP 1.4, PSR v2*/
114 unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
115 unsigned char RESERVED : 2;
116 } bits;
117 unsigned char raw;
118};
119
120union psr_error_status {
121 struct {
122 unsigned char LINK_CRC_ERROR :1;
123 unsigned char RFB_STORAGE_ERROR :1;
124 unsigned char RESERVED :6;
125 } bits;
126 unsigned char raw;
127};
128
129union psr_sink_psr_status {
130 struct {
131 unsigned char SINK_SELF_REFRESH_STATUS :3;
132 unsigned char RESERVED :5;
133 } bits;
134 unsigned char raw;
135};
136
137struct psr_dmcu_context {
138 /* ddc line */
139 enum channel_id channel;
140 /* Transmitter id */
141 enum transmitter transmitterId;
142 /* Engine Id is used for Dig Be source select */
143 enum engine_id engineId;
144 /* Controller Id used for Dig Fe source select */
145 enum controller_id controllerId;
146 /* Pcie or Uniphy */
147 enum phy_type phyType;
148 /* Physical PHY Id used by SMU interpretation */
149 enum physical_phy_id smuPhyId;
150 /* Vertical total pixels from crtc timing.
151 * This is used for static screen detection.
152	 * i.e. if we want to detect half a frame,
153 * we use this to determine the hyst lines.
154 */
155 unsigned int crtcTimingVerticalTotal;
156 /* PSR supported from panel capabilities and
157 * current display configuration
158 */
159 bool psrSupportedDisplayConfig;
160 /* Whether fast link training is supported by the panel */
161 bool psrExitLinkTrainingRequired;
162 /* If RFB setup time is greater than the total VBLANK time,
163 * it is not possible for the sink to capture the video frame
164 * in the same frame the SDP is sent. In this case,
165 * the frame capture indication bit should be set and an extra
166 * static frame should be transmitted to the sink.
167 */
168 bool psrFrameCaptureIndicationReq;
169 /* Set the last possible line SDP may be transmitted without violating
170 * the RFB setup time or entering the active video frame.
171 */
172 unsigned int sdpTransmitLineNumDeadline;
173 /* The VSync rate in Hz used to calculate the
174 * step size for smooth brightness feature
175 */
176 unsigned int vsyncRateHz;
177 unsigned int skipPsrWaitForPllLock;
178 unsigned int numberOfControllers;
179 /* Unused, for future use. To indicate that first changed frame from
180 * state3 shouldn't result in psr_inactive, but rather to perform
181 * an automatic single frame rfb_update.
182 */
183 bool rfb_update_auto_en;
184 /* Number of frame before entering static screen */
185 unsigned int timehyst_frames;
186 /* Partial frames before entering static screen */
187 unsigned int hyst_lines;
188 /* # of repeated AUX transaction attempts to make before
189 * indicating failure to the driver
190 */
191 unsigned int aux_repeats;
192 /* Controls hw blocks to power down during PSR active state */
193 union dmcu_psr_level psr_level;
194 /* Controls additional delay after remote frame capture before
195	 * continuing power down
196 */
197 unsigned int frame_delay;
198};
199
200
201struct link_encoder {
202 const struct link_encoder_funcs *funcs;
203 int32_t aux_channel_offset;
204 struct dc_context *ctx;
205 struct graphics_object_id id;
206 struct graphics_object_id connector;
207 uint32_t input_signals;
208 uint32_t output_signals;
209 enum engine_id preferred_engine;
210 struct encoder_feature_support features;
211 enum transmitter transmitter;
212 enum hpd_source_id hpd_source;
213};
214
215struct link_encoder_funcs {
216 bool (*validate_output_with_stream)(
217 struct link_encoder *enc, struct pipe_ctx *pipe_ctx);
218 void (*hw_init)(struct link_encoder *enc);
219 void (*setup)(struct link_encoder *enc,
220 enum signal_type signal);
221 void (*enable_tmds_output)(struct link_encoder *enc,
222 enum clock_source_id clock_source,
223 enum dc_color_depth color_depth,
224 bool hdmi,
225 bool dual_link,
226 uint32_t pixel_clock);
227 void (*enable_dp_output)(struct link_encoder *enc,
228 const struct dc_link_settings *link_settings,
229 enum clock_source_id clock_source);
230 void (*enable_dp_mst_output)(struct link_encoder *enc,
231 const struct dc_link_settings *link_settings,
232 enum clock_source_id clock_source);
233 void (*disable_output)(struct link_encoder *link_enc,
234 enum signal_type signal);
235 void (*dp_set_lane_settings)(struct link_encoder *enc,
236 const struct link_training_settings *link_settings);
237 void (*dp_set_phy_pattern)(struct link_encoder *enc,
238 const struct encoder_set_dp_phy_pattern_param *para);
239 void (*update_mst_stream_allocation_table)(
240 struct link_encoder *enc,
241 const struct link_mst_stream_allocation_table *table);
242 void (*set_lcd_backlight_level) (struct link_encoder *enc,
243 uint32_t level);
244 void (*set_dmcu_backlight_level)(struct link_encoder *enc,
245 uint32_t level, uint32_t frame_ramp, uint32_t controller_id);
246 void (*init_dmcu_backlight_settings)(struct link_encoder *enc);
247 void (*set_dmcu_abm_level)(struct link_encoder *enc, uint32_t level);
248 void (*set_dmcu_psr_enable)(struct link_encoder *enc, bool enable);
249 void (*setup_dmcu_psr)(struct link_encoder *enc,
250 struct psr_dmcu_context *psr_context);
251 void (*backlight_control) (struct link_encoder *enc,
252 bool enable);
253 void (*power_control) (struct link_encoder *enc,
254 bool power_up);
255 void (*connect_dig_be_to_fe)(struct link_encoder *enc,
256 enum engine_id engine,
257 bool connect);
258 void (*enable_hpd)(struct link_encoder *enc);
259 void (*disable_hpd)(struct link_encoder *enc);
260 void (*destroy)(struct link_encoder **enc);
261};
262
263#endif /* LINK_ENCODER_H_ */
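
A sketch of enabling a physical output through link_encoder_funcs; SIGNAL_TYPE_DISPLAY_PORT comes from the driver's signal type definitions, and the hdmi/dual_link choices are assumptions of the example.

#include "link_encoder.h"

/* Illustrative only: set up the encoder for the signal, then take the DP
 * or TMDS enable path. */
static void enable_output_sketch(struct link_encoder *enc,
		enum signal_type signal,
		enum clock_source_id clk_src,
		const struct dc_link_settings *link_settings,
		enum dc_color_depth color_depth,
		uint32_t pixel_clock)
{
	enc->funcs->setup(enc, signal);

	if (signal == SIGNAL_TYPE_DISPLAY_PORT)
		enc->funcs->enable_dp_output(enc, link_settings, clk_src);
	else
		enc->funcs->enable_tmds_output(enc, clk_src, color_depth,
				true /* hdmi */, false /* dual_link */,
				pixel_clock);
}
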
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
new file mode 100644
index 000000000000..78dab74edc2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DAL_MEM_INPUT_H__
26#define __DAL_MEM_INPUT_H__
27
28#include "dc.h"
29#include "include/grph_object_id.h"
30#include "inc/bandwidth_calcs.h"
31
32#include "dce/dce_mem_input.h" /* temporary */
33
34struct stutter_modes {
35 bool enhanced;
36 bool quad_dmif_buffer;
37 bool watermark_nb_pstate;
38};
39
40struct mem_input {
41 struct mem_input_funcs *funcs;
42 struct dc_context *ctx;
43 struct dc_plane_address request_address;
44 struct dc_plane_address current_address;
45 uint32_t inst;
46 struct stutter_modes stutter_mode;
47
48 const struct dce_mem_input_registers *regs;
49 const struct dce_mem_input_shift *shifts;
50 const struct dce_mem_input_mask *masks;
51 struct dce_mem_input_wa wa;
52};
53
54struct mem_input_funcs {
55 void (*mem_input_program_display_marks)(
56 struct mem_input *mem_input,
57 struct bw_watermarks nbp,
58 struct bw_watermarks stutter,
59 struct bw_watermarks urgent,
60 uint32_t total_dest_line_time_ns);
61
62 void (*mem_input_program_chroma_display_marks)(
63 struct mem_input *mem_input,
64 struct bw_watermarks nbp,
65 struct bw_watermarks stutter,
66 struct bw_watermarks urgent,
67 uint32_t total_dest_line_time_ns);
68
69 void (*allocate_mem_input)(
70 struct mem_input *mem_input,
71 uint32_t h_total,/* for current target */
72 uint32_t v_total,/* for current target */
73 uint32_t pix_clk_khz,/* for current target */
74 uint32_t total_streams_num);
75
76 void (*free_mem_input)(
77 struct mem_input *mem_input,
78 uint32_t paths_num);
79
80 bool (*mem_input_program_surface_flip_and_addr)(
81 struct mem_input *mem_input,
82 const struct dc_plane_address *address,
83 bool flip_immediate);
84
85 bool (*mem_input_program_pte_vm)(
86 struct mem_input *mem_input,
87 enum surface_pixel_format format,
88 union dc_tiling_info *tiling_info,
89 enum dc_rotation_angle rotation);
90
91 bool (*mem_input_program_surface_config)(
92 struct mem_input *mem_input,
93 enum surface_pixel_format format,
94 union dc_tiling_info *tiling_info,
95 union plane_size *plane_size,
96 enum dc_rotation_angle rotation,
97 struct dc_plane_dcc_param *dcc,
98 bool horizontal_mirror);
99
100 bool (*mem_input_is_flip_pending)(struct mem_input *mem_input);
101
102 void (*mem_input_update_dchub)(struct mem_input *mem_input,
103 struct dchub_init_data *dh_data);
104};
105
106#endif
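
A sketch of submitting a surface flip through mem_input_funcs; treating mem_input_is_flip_pending() as a pollable "flip consumed" indicator is an assumption of the example.

#include "mem_input.h"

/* Illustrative only: program the new surface address, then report whether
 * the flip has already been consumed by the hardware. */
static bool flip_surface_sketch(struct mem_input *mi,
		const struct dc_plane_address *address,
		bool flip_immediate)
{
	if (!mi->funcs->mem_input_program_surface_flip_and_addr(
			mi, address, flip_immediate))
		return false;

	return !mi->funcs->mem_input_is_flip_pending(mi);
}
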
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
new file mode 100644
index 000000000000..e615997be20e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -0,0 +1,322 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_OPP_H__
27#define __DAL_OPP_H__
28
29#include "hw_shared.h"
30#include "transform.h"
31
32struct fixed31_32;
33struct gamma_parameters;
34
35/* TODO: Need cleanup */
36enum clamping_range {
37 CLAMPING_FULL_RANGE = 0, /* No Clamping */
38 CLAMPING_LIMITED_RANGE_8BPC, /* 8 bpc: Clamping 1 to FE */
39 CLAMPING_LIMITED_RANGE_10BPC, /* 10 bpc: Clamping 4 to 3FB */
40 CLAMPING_LIMITED_RANGE_12BPC, /* 12 bpc: Clamping 10 to FEF */
41	/* Use programmable clamping value on FMT_CLAMP_COMPONENT_R/G/B. */
42 CLAMPING_LIMITED_RANGE_PROGRAMMABLE
43};
44
45struct clamping_and_pixel_encoding_params {
46 enum dc_pixel_encoding pixel_encoding; /* Pixel Encoding */
47 enum clamping_range clamping_level; /* Clamping identifier */
48 enum dc_color_depth c_depth; /* Deep color use. */
49};
50
51struct bit_depth_reduction_params {
52 struct {
53 /* truncate/round */
54 /* trunc/round enabled*/
55 uint32_t TRUNCATE_ENABLED:1;
56 /* 2 bits: 0=6 bpc, 1=8 bpc, 2 = 10bpc*/
57 uint32_t TRUNCATE_DEPTH:2;
58 /* truncate or round*/
59 uint32_t TRUNCATE_MODE:1;
60
61 /* spatial dither */
62 /* Spatial Bit Depth Reduction enabled*/
63 uint32_t SPATIAL_DITHER_ENABLED:1;
64 /* 2 bits: 0=6 bpc, 1 = 8 bpc, 2 = 10bpc*/
65 uint32_t SPATIAL_DITHER_DEPTH:2;
66 /* 0-3 to select patterns*/
67 uint32_t SPATIAL_DITHER_MODE:2;
68 /* Enable RGB random dithering*/
69 uint32_t RGB_RANDOM:1;
70 /* Enable Frame random dithering*/
71 uint32_t FRAME_RANDOM:1;
72 /* Enable HighPass random dithering*/
73 uint32_t HIGHPASS_RANDOM:1;
74
75 /* temporal dither*/
76 /* frame modulation enabled*/
77 uint32_t FRAME_MODULATION_ENABLED:1;
78 /* same as for trunc/spatial*/
79 uint32_t FRAME_MODULATION_DEPTH:2;
80 /* 2/4 gray levels*/
81 uint32_t TEMPORAL_LEVEL:1;
82 uint32_t FRC25:2;
83 uint32_t FRC50:2;
84 uint32_t FRC75:2;
85 } flags;
86
87 uint32_t r_seed_value;
88 uint32_t b_seed_value;
89 uint32_t g_seed_value;
90};
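/* Illustrative sketch (not taken verbatim from the driver): a caller could
 * request 8 bpc spatial dithering by filling this struct per the field
 * comments above; "bdr" is a hypothetical local variable.
 *
 *	struct bit_depth_reduction_params bdr = { 0 };
 *
 *	bdr.flags.SPATIAL_DITHER_ENABLED = 1;
 *	bdr.flags.SPATIAL_DITHER_DEPTH = 1;      (1 = 8 bpc)
 *	bdr.flags.SPATIAL_DITHER_MODE = 0;       (pattern select, 0-3)
 *	bdr.r_seed_value = 0;
 *	bdr.g_seed_value = 0;
 *	bdr.b_seed_value = 0;
 */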
91
92enum wide_gamut_regamma_mode {
93 /* 0x0 - BITS2:0 Bypass */
94 WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_BYPASS,
95 /* 0x1 - Fixed curve sRGB 2.4 */
96 WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_SRGB24,
97 /* 0x2 - Fixed curve xvYCC 2.22 */
98 WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_XYYCC22,
99 /* 0x3 - Programmable control A */
100 WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_A,
101 /* 0x4 - Programmable control B */
102 WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_B,
103 /* 0x0 - BITS6:4 Bypass */
104 WIDE_GAMUT_REGAMMA_MODE_OVL_BYPASS,
105 /* 0x1 - Fixed curve sRGB 2.4 */
106 WIDE_GAMUT_REGAMMA_MODE_OVL_SRGB24,
107 /* 0x2 - Fixed curve xvYCC 2.22 */
108 WIDE_GAMUT_REGAMMA_MODE_OVL_XYYCC22,
109 /* 0x3 - Programmable control A */
110 WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_A,
111 /* 0x4 - Programmable control B */
112 WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_B
113};
114
115struct gamma_pixel {
116 struct fixed31_32 r;
117 struct fixed31_32 g;
118 struct fixed31_32 b;
119};
120
121enum channel_name {
122 CHANNEL_NAME_RED,
123 CHANNEL_NAME_GREEN,
124 CHANNEL_NAME_BLUE
125};
126
127struct custom_float_format {
128 uint32_t mantissa_bits;
129 uint32_t exponenta_bits;
130 bool sign;
131};
132
133struct custom_float_value {
134 uint32_t mantissa;
135 uint32_t exponenta;
136 uint32_t value;
137 bool negative;
138};
139
140struct hw_x_point {
141 uint32_t custom_float_x;
142 uint32_t custom_float_x_adjusted;
143 struct fixed31_32 x;
144 struct fixed31_32 adjusted_x;
145 struct fixed31_32 regamma_y_red;
146 struct fixed31_32 regamma_y_green;
147 struct fixed31_32 regamma_y_blue;
148
149};
150
151struct pwl_float_data_ex {
152 struct fixed31_32 r;
153 struct fixed31_32 g;
154 struct fixed31_32 b;
155 struct fixed31_32 delta_r;
156 struct fixed31_32 delta_g;
157 struct fixed31_32 delta_b;
158};
159
160enum hw_point_position {
161 /* hw point sits between left and right sw points */
162 HW_POINT_POSITION_MIDDLE,
 163	/* hw point lies to the left of the left (smaller) sw point */
164 HW_POINT_POSITION_LEFT,
 165	/* hw point lies to the right of the right (bigger) sw point */
166 HW_POINT_POSITION_RIGHT
167};
168
169struct gamma_point {
170 int32_t left_index;
171 int32_t right_index;
172 enum hw_point_position pos;
173 struct fixed31_32 coeff;
174};
175
176struct pixel_gamma_point {
177 struct gamma_point r;
178 struct gamma_point g;
179 struct gamma_point b;
180};
181
182struct gamma_coefficients {
183 struct fixed31_32 a0[3];
184 struct fixed31_32 a1[3];
185 struct fixed31_32 a2[3];
186 struct fixed31_32 a3[3];
187 struct fixed31_32 user_gamma[3];
188 struct fixed31_32 user_contrast;
189 struct fixed31_32 user_brightness;
190};
191
192struct pwl_float_data {
193 struct fixed31_32 r;
194 struct fixed31_32 g;
195 struct fixed31_32 b;
196};
197
198enum opp_regamma {
199 OPP_REGAMMA_BYPASS = 0,
200 OPP_REGAMMA_SRGB,
201 OPP_REGAMMA_3_6,
202 OPP_REGAMMA_USER,
203};
204
205struct output_pixel_processor {
206 struct dc_context *ctx;
207 uint32_t inst;
208 const struct opp_funcs *funcs;
209};
210
211enum fmt_stereo_action {
212 FMT_STEREO_ACTION_ENABLE = 0,
213 FMT_STEREO_ACTION_DISABLE,
214 FMT_STEREO_ACTION_UPDATE_POLARITY
215};
216
217enum graphics_csc_adjust_type {
218 GRAPHICS_CSC_ADJUST_TYPE_BYPASS = 0,
219 GRAPHICS_CSC_ADJUST_TYPE_HW, /* without adjustments */
 220	GRAPHICS_CSC_ADJUST_TYPE_SW /* use adjustments */
221};
222
223struct default_adjustment {
224 enum lb_pixel_depth lb_color_depth;
225 enum dc_color_space out_color_space;
226 enum dc_color_space in_color_space;
227 enum dc_color_depth color_depth;
228 enum pixel_format surface_pixel_format;
229 enum graphics_csc_adjust_type csc_adjust_type;
230 bool force_hw_default;
231};
232
233enum grph_color_adjust_option {
234 GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
235 GRPH_COLOR_MATRIX_SW
236};
237
238struct opp_grph_csc_adjustment {
239 enum grph_color_adjust_option color_adjust_option;
240 enum dc_color_space c_space;
241 enum dc_color_depth color_depth; /* clean up to uint32_t */
242 enum graphics_csc_adjust_type csc_adjust_type;
243 int32_t adjust_divider;
244 int32_t grph_cont;
245 int32_t grph_sat;
246 int32_t grph_bright;
247 int32_t grph_hue;
248};
249
250struct out_csc_color_matrix {
251 enum dc_color_space color_space;
252 uint16_t regval[12];
253};
254
255/* Underlay related types */
256
257struct hw_adjustment_range {
258 int32_t hw_default;
259 int32_t min;
260 int32_t max;
261 int32_t step;
 262	uint32_t divider; /* actual HW range is min/divider; divider != 0 */
263};
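/* Illustrative example of the divider semantics noted above: with
 * min = -100, max = 100, step = 1 and divider = 100, the effective HW
 * range would be -1.00 .. 1.00 in steps of 0.01 (values interpreted as
 * value/divider). These numbers are made up for illustration only.
 */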
264
265enum ovl_csc_adjust_item {
266 OVERLAY_BRIGHTNESS = 0,
267 OVERLAY_GAMMA,
268 OVERLAY_CONTRAST,
269 OVERLAY_SATURATION,
270 OVERLAY_HUE,
271 OVERLAY_ALPHA,
272 OVERLAY_ALPHA_PER_PIX,
273 OVERLAY_COLOR_TEMPERATURE
274};
275
276struct opp_funcs {
277 void (*opp_power_on_regamma_lut)(
278 struct output_pixel_processor *opp,
279 bool power_on);
280
281 bool (*opp_program_regamma_pwl)(
282 struct output_pixel_processor *opp,
283 const struct pwl_params *params);
284
285 void (*opp_set_regamma_mode)(struct output_pixel_processor *opp,
286 enum opp_regamma mode);
287
288 void (*opp_set_csc_adjustment)(
289 struct output_pixel_processor *opp,
290 const struct out_csc_color_matrix *tbl_entry);
291
292 void (*opp_set_csc_default)(
293 struct output_pixel_processor *opp,
294 const struct default_adjustment *default_adjust);
295
296 /* FORMATTER RELATED */
297
298 void (*opp_program_fmt)(
299 struct output_pixel_processor *opp,
300 struct bit_depth_reduction_params *fmt_bit_depth,
301 struct clamping_and_pixel_encoding_params *clamping);
302
303 void (*opp_set_dyn_expansion)(
304 struct output_pixel_processor *opp,
305 enum dc_color_space color_sp,
306 enum dc_color_depth color_dpth,
307 enum signal_type signal);
308
309 void (*opp_program_bit_depth_reduction)(
310 struct output_pixel_processor *opp,
311 const struct bit_depth_reduction_params *params);
312
313 /* underlay related */
314 void (*opp_get_underlay_adjustment_range)(
315 struct output_pixel_processor *opp,
316 enum ovl_csc_adjust_item overlay_adjust_item,
317 struct hw_adjustment_range *range);
318
319 void (*opp_destroy)(struct output_pixel_processor **opp);
320};
321
322#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
new file mode 100644
index 000000000000..9caf2b365420
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -0,0 +1,121 @@
1/*
2 * stream_encoder.h
3 *
4 */
5
6#ifndef STREAM_ENCODER_H_
7#define STREAM_ENCODER_H_
8
9#include "include/hw_sequencer_types.h"
10#include "audio_types.h"
11
12struct dc_bios;
13struct dc_context;
14struct dc_crtc_timing;
15
16struct encoder_info_packet {
17 bool valid;
18 uint8_t hb0;
19 uint8_t hb1;
20 uint8_t hb2;
21 uint8_t hb3;
22 uint8_t sb[28];
23};
24
25struct encoder_info_frame {
26 /* auxiliary video information */
27 struct encoder_info_packet avi;
28 struct encoder_info_packet gamut;
29 struct encoder_info_packet vendor;
30 /* source product description */
31 struct encoder_info_packet spd;
32 /* video stream configuration */
33 struct encoder_info_packet vsc;
34};
35
36struct encoder_unblank_param {
37 struct hw_crtc_timing crtc_timing;
38 struct dc_link_settings link_settings;
39};
40
41struct encoder_set_dp_phy_pattern_param {
42 enum dp_test_pattern dp_phy_pattern;
43 const uint8_t *custom_pattern;
44 uint32_t custom_pattern_size;
45 enum dp_panel_mode dp_panel_mode;
46};
47
48struct stream_encoder {
49 const struct stream_encoder_funcs *funcs;
50 struct dc_context *ctx;
51 struct dc_bios *bp;
52 enum engine_id id;
53};
54
55struct stream_encoder_funcs {
56 void (*dp_set_stream_attribute)(
57 struct stream_encoder *enc,
58 struct dc_crtc_timing *crtc_timing,
59 enum dc_color_space output_color_space);
60
61 void (*hdmi_set_stream_attribute)(
62 struct stream_encoder *enc,
63 struct dc_crtc_timing *crtc_timing,
64 int actual_pix_clk_khz,
65 bool enable_audio);
66
67 void (*dvi_set_stream_attribute)(
68 struct stream_encoder *enc,
69 struct dc_crtc_timing *crtc_timing,
70 bool is_dual_link);
71
72 void (*set_mst_bandwidth)(
73 struct stream_encoder *enc,
74 struct fixed31_32 avg_time_slots_per_mtp);
75
76 void (*update_hdmi_info_packets)(
77 struct stream_encoder *enc,
78 const struct encoder_info_frame *info_frame);
79
80 void (*stop_hdmi_info_packets)(
81 struct stream_encoder *enc);
82
83 void (*update_dp_info_packets)(
84 struct stream_encoder *enc,
85 const struct encoder_info_frame *info_frame);
86
87 void (*stop_dp_info_packets)(
88 struct stream_encoder *enc);
89
90 void (*dp_blank)(
91 struct stream_encoder *enc);
92
93 void (*dp_unblank)(
94 struct stream_encoder *enc,
95 const struct encoder_unblank_param *param);
96
97 void (*audio_mute_control)(
98 struct stream_encoder *enc, bool mute);
99
100 void (*dp_audio_setup)(
101 struct stream_encoder *enc,
102 unsigned int az_inst,
103 struct audio_info *info);
104
105 void (*dp_audio_enable) (
106 struct stream_encoder *enc);
107
108 void (*dp_audio_disable) (
109 struct stream_encoder *enc);
110
111 void (*hdmi_audio_setup)(
112 struct stream_encoder *enc,
113 unsigned int az_inst,
114 struct audio_info *info,
115 struct audio_crtc_info *audio_crtc_info);
116
117 void (*hdmi_audio_disable) (
118 struct stream_encoder *enc);
119};
120
121#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
new file mode 100644
index 000000000000..6ac609f6f89f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_TIMING_GENERATOR_TYPES_H__
27#define __DAL_TIMING_GENERATOR_TYPES_H__
28
29struct dc_bios;
30
31/**
 32 * These parameters are required as input when doing blanking/unblanking
 33 */
34#define MAX_TG_COLOR_VALUE 0x3FF
35
36struct tg_color {
37 /* Maximum 10 bits color value */
38 uint16_t color_r_cr;
39 uint16_t color_g_y;
40 uint16_t color_b_cb;
41};
42
43/* Contains CRTC vertical/horizontal pixel counters */
44struct crtc_position {
45 uint32_t vertical_count;
46 uint32_t horizontal_count;
47 uint32_t nominal_vcount;
48};
49
50struct dcp_gsl_params {
51 int gsl_group;
52 int gsl_master;
53};
54
55#define LEFT_EYE_3D_PRIMARY_SURFACE 1
56#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
57
58enum test_pattern_dyn_range {
59 TEST_PATTERN_DYN_RANGE_VESA = 0,
60 TEST_PATTERN_DYN_RANGE_CEA
61};
62
63enum test_pattern_mode {
64 TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
65 TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
66 TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
67 TEST_PATTERN_MODE_VERTICALBARS,
68 TEST_PATTERN_MODE_HORIZONTALBARS,
69 TEST_PATTERN_MODE_SINGLERAMP_RGB,
70 TEST_PATTERN_MODE_DUALRAMP_RGB
71};
72
73enum test_pattern_color_format {
74 TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
75 TEST_PATTERN_COLOR_FORMAT_BPC_8,
76 TEST_PATTERN_COLOR_FORMAT_BPC_10,
77 TEST_PATTERN_COLOR_FORMAT_BPC_12
78};
79
80enum controller_dp_test_pattern {
81 CONTROLLER_DP_TEST_PATTERN_D102 = 0,
82 CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
83 CONTROLLER_DP_TEST_PATTERN_PRBS7,
84 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
85 CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
86 CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
87 CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
88 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
89 CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
90 CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
91 CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
92 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
93};
94
95enum crtc_state {
96 CRTC_STATE_VBLANK = 0,
97 CRTC_STATE_VACTIVE
98};
99
100struct timing_generator {
101 const struct timing_generator_funcs *funcs;
102 struct dc_bios *bp;
103 struct dc_context *ctx;
104 int inst;
105};
106
107struct dc_crtc_timing;
108
109struct drr_params;
110
111struct timing_generator_funcs {
112 bool (*validate_timing)(struct timing_generator *tg,
113 const struct dc_crtc_timing *timing);
114 void (*program_timing)(struct timing_generator *tg,
115 const struct dc_crtc_timing *timing,
116 bool use_vbios);
117 bool (*enable_crtc)(struct timing_generator *tg);
118 bool (*disable_crtc)(struct timing_generator *tg);
119 bool (*is_counter_moving)(struct timing_generator *tg);
120 void (*get_position)(struct timing_generator *tg,
121 int32_t *h_position,
122 int32_t *v_position);
123 uint32_t (*get_frame_count)(struct timing_generator *tg);
124 uint32_t (*get_scanoutpos)(
125 struct timing_generator *tg,
126 uint32_t *vbl,
127 uint32_t *position);
128 void (*set_early_control)(struct timing_generator *tg,
129 uint32_t early_cntl);
130 void (*wait_for_state)(struct timing_generator *tg,
131 enum crtc_state state);
132 bool (*set_blank)(struct timing_generator *tg,
133 bool enable_blanking);
134 bool (*is_blanked)(struct timing_generator *tg);
135 void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
136 void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
137 void (*set_colors)(struct timing_generator *tg,
138 const struct tg_color *blank_color,
139 const struct tg_color *overscan_color);
140
141 void (*disable_vga)(struct timing_generator *tg);
142 bool (*did_triggered_reset_occur)(struct timing_generator *tg);
143 void (*setup_global_swap_lock)(struct timing_generator *tg,
144 const struct dcp_gsl_params *gsl_params);
145 void (*unlock)(struct timing_generator *tg);
146 void (*lock)(struct timing_generator *tg);
147 void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst);
148 void (*disable_reset_trigger)(struct timing_generator *tg);
149 void (*tear_down_global_swap_lock)(struct timing_generator *tg);
150 void (*enable_advanced_request)(struct timing_generator *tg,
151 bool enable, const struct dc_crtc_timing *timing);
152 void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
153 void (*set_static_screen_control)(struct timing_generator *tg,
154 uint32_t value);
155 void (*set_test_pattern)(
156 struct timing_generator *tg,
157 enum controller_dp_test_pattern test_pattern,
158 enum dc_color_depth color_depth);
159
160};
161
162#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
new file mode 100644
index 000000000000..ef743b70b3e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -0,0 +1,179 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_TRANSFORM_H__
27#define __DAL_TRANSFORM_H__
28
29#include "dc_hw_types.h"
30#include "fixed31_32.h"
31
32#define CSC_TEMPERATURE_MATRIX_SIZE 9
33
34struct bit_depth_reduction_params;
35
36struct transform {
37 const struct transform_funcs *funcs;
38 struct dc_context *ctx;
39 int inst;
40};
41
42/* Colorimetry */
43enum colorimetry {
44 COLORIMETRY_NO_DATA = 0,
45 COLORIMETRY_ITU601 = 1,
46 COLORIMETRY_ITU709 = 2,
47 COLORIMETRY_EXTENDED = 3
48};
49
50enum active_format_info {
51 ACTIVE_FORMAT_NO_DATA = 0,
52 ACTIVE_FORMAT_VALID = 1
53};
54
55/* Active format aspect ratio */
56enum active_format_aspect_ratio {
57 ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE = 8,
58 ACTIVE_FORMAT_ASPECT_RATIO_4_3 = 9,
59 ACTIVE_FORMAT_ASPECT_RATIO_16_9 = 0XA,
60 ACTIVE_FORMAT_ASPECT_RATIO_14_9 = 0XB
61};
62
63enum bar_info {
64 BAR_INFO_NOT_VALID = 0,
65 BAR_INFO_VERTICAL_VALID = 1,
66 BAR_INFO_HORIZONTAL_VALID = 2,
67 BAR_INFO_BOTH_VALID = 3
68};
69
70enum picture_scaling {
71 PICTURE_SCALING_UNIFORM = 0,
72 PICTURE_SCALING_HORIZONTAL = 1,
73 PICTURE_SCALING_VERTICAL = 2,
74 PICTURE_SCALING_BOTH = 3
75};
76
77/* RGB quantization range */
78enum rgb_quantization_range {
79 RGB_QUANTIZATION_DEFAULT_RANGE = 0,
80 RGB_QUANTIZATION_LIMITED_RANGE = 1,
81 RGB_QUANTIZATION_FULL_RANGE = 2,
82 RGB_QUANTIZATION_RESERVED = 3
83};
84
85/* YYC quantization range */
86enum yyc_quantization_range {
87 YYC_QUANTIZATION_LIMITED_RANGE = 0,
88 YYC_QUANTIZATION_FULL_RANGE = 1,
89 YYC_QUANTIZATION_RESERVED2 = 2,
90 YYC_QUANTIZATION_RESERVED3 = 3
91};
92
93enum graphics_gamut_adjust_type {
94 GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS = 0,
95 GRAPHICS_GAMUT_ADJUST_TYPE_HW, /* without adjustments */
96 GRAPHICS_GAMUT_ADJUST_TYPE_SW /* use adjustments */
97};
98
99struct xfm_grph_csc_adjustment {
100 struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
101 enum graphics_gamut_adjust_type gamut_adjust_type;
102};
103
104enum lb_pixel_depth {
 105	/* do not change the values; they are used as a bit vector */
106 LB_PIXEL_DEPTH_18BPP = 1,
107 LB_PIXEL_DEPTH_24BPP = 2,
108 LB_PIXEL_DEPTH_30BPP = 4,
109 LB_PIXEL_DEPTH_36BPP = 8
110};
111
112struct overscan_info {
113 int left;
114 int right;
115 int top;
116 int bottom;
117};
118
119struct scaling_ratios {
120 struct fixed31_32 horz;
121 struct fixed31_32 vert;
122 struct fixed31_32 horz_c;
123 struct fixed31_32 vert_c;
124};
125
126struct sharpness_adj {
127 int horz;
128 int vert;
129};
130
131struct line_buffer_params {
132 bool alpha_en;
133 bool pixel_expan_mode;
134 bool interleave_en;
135 int dynamic_pixel_depth;
136 enum lb_pixel_depth depth;
137};
138
139struct scaler_data {
140 int h_active;
141 int v_active;
142 struct scaling_taps taps;
143 struct rect viewport;
144 struct rect recout;
145 struct scaling_ratios ratios;
146 struct sharpness_adj sharpness;
147 enum pixel_format format;
148 struct line_buffer_params lb_params;
149};
150
151struct transform_funcs {
152 void (*transform_reset)(struct transform *xfm);
153
154 void (*transform_set_scaler)(struct transform *xfm,
155 const struct scaler_data *scl_data);
156
157 void (*transform_set_gamut_remap)(
158 struct transform *xfm,
159 const struct xfm_grph_csc_adjustment *adjust);
160
161 void (*transform_set_pixel_storage_depth)(
162 struct transform *xfm,
163 enum lb_pixel_depth depth,
164 const struct bit_depth_reduction_params *bit_depth_params);
165
166 bool (*transform_get_optimal_number_of_taps)(
167 struct transform *xfm,
168 struct scaler_data *scl_data,
169 const struct scaling_taps *in_taps);
170};
171
172extern const uint16_t filter_2tap_16p[18];
173extern const uint16_t filter_2tap_64p[66];
174const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio);
175const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio);
176const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio);
177const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio);
178
179#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
new file mode 100644
index 000000000000..35a556dd9054
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_HW_SEQUENCER_H__
27#define __DC_HW_SEQUENCER_H__
28#include "core_types.h"
29#include "timing_generator.h"
30
31struct gamma_parameters;
32
33enum pipe_gating_control {
34 PIPE_GATING_CONTROL_DISABLE = 0,
35 PIPE_GATING_CONTROL_ENABLE,
36 PIPE_GATING_CONTROL_INIT
37};
38
39enum pipe_lock_control {
40 PIPE_LOCK_CONTROL_GRAPHICS = 1 << 0,
41 PIPE_LOCK_CONTROL_BLENDER = 1 << 1,
42 PIPE_LOCK_CONTROL_SCL = 1 << 2,
43 PIPE_LOCK_CONTROL_SURFACE = 1 << 3,
44 PIPE_LOCK_CONTROL_MODE = 1 << 4
45};
46
47struct dce_hwseq;
48
49struct hw_sequencer_funcs {
50
51 void (*init_hw)(struct core_dc *dc);
52
53 enum dc_status (*apply_ctx_to_hw)(
54 struct core_dc *dc, struct validate_context *context);
55
56 void (*reset_hw_ctx_wrap)(
57 struct core_dc *dc, struct validate_context *context);
58
59 void (*prepare_pipe_for_context)(
60 struct core_dc *dc,
61 struct pipe_ctx *pipe_ctx,
62 struct validate_context *context);
63
64 void (*apply_ctx_for_surface)(
65 struct core_dc *dc,
66 struct core_surface *surface,
67 struct validate_context *context);
68
69 void (*set_plane_config)(
70 const struct core_dc *dc,
71 struct pipe_ctx *pipe_ctx,
72 struct resource_context *res_ctx);
73
74 void (*update_plane_addr)(
75 const struct core_dc *dc,
76 struct pipe_ctx *pipe_ctx);
77
78 void (*update_pending_status)(
79 struct pipe_ctx *pipe_ctx);
80
81 bool (*set_gamma_correction)(
82 struct input_pixel_processor *ipp,
83 struct output_pixel_processor *opp,
84 const struct core_gamma *ramp,
85 const struct core_surface *surface);
86
87 void (*power_down)(struct core_dc *dc);
88
89 void (*enable_accelerated_mode)(struct core_dc *dc);
90
91 void (*enable_timing_synchronization)(
92 struct core_dc *dc,
93 int group_index,
94 int group_size,
95 struct pipe_ctx *grouped_pipes[]);
96
97 /* backlight control */
98 void (*encoder_set_lcd_backlight_level)(
99 struct link_encoder *enc, uint32_t level);
100
101 void (*enable_display_pipe_clock_gating)(
102 struct dc_context *ctx,
103 bool clock_gating);
104
105 bool (*enable_display_power_gating)(
106 struct core_dc *dc,
107 uint8_t controller_id,
108 struct dc_bios *dcb,
109 enum pipe_gating_control power_gating);
110
111 void (*power_down_front_end)(struct core_dc *dc, struct pipe_ctx *pipe);
112 void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
113
114 void (*enable_stream)(struct pipe_ctx *pipe_ctx);
115
116 void (*disable_stream)(struct pipe_ctx *pipe_ctx);
117
118 void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
119 struct dc_link_settings *link_settings);
120
121 void (*pipe_control_lock)(
122 struct dce_hwseq *hwseq,
123 unsigned int blnd_inst,
124 enum pipe_lock_control control_mask,
125 bool lock);
126
127 void (*set_displaymarks)(
128 const struct core_dc *dc,
129 struct validate_context *context);
130
131 void (*increase_watermarks_for_pipe)(struct core_dc *dc,
132 struct pipe_ctx *pipe_ctx,
133 struct validate_context *context);
134
135 void (*set_display_clock)(struct validate_context *context);
136
137 void (*set_bandwidth)(struct core_dc *dc);
138
139 void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
140 int vmin, int vmax);
141
142 void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
143 int num_pipes, int value);
144
145 enum dc_status (*prog_pixclk_crtc_otg)(
146 struct pipe_ctx *pipe_ctx,
147 struct validate_context *context,
148 struct core_dc *dc);
149};
150
151void color_space_to_black_color(
152 const struct core_dc *dc,
153 enum dc_color_space colorspace,
154 struct tg_color *black_color);
155
156#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
new file mode 100644
index 000000000000..662fa30d45f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_LINK_HWSS_H__
27#define __DC_LINK_HWSS_H__
28
29#include "inc/core_status.h"
30
31enum dc_status core_link_read_dpcd(
32 struct core_link* link,
33 uint32_t address,
34 uint8_t *data,
35 uint32_t size);
36
37enum dc_status core_link_write_dpcd(
38 struct core_link* link,
39 uint32_t address,
40 const uint8_t *data,
41 uint32_t size);
42
43void dp_enable_link_phy(
44 struct core_link *link,
45 enum signal_type signal,
46 enum clock_source_id clock_source,
47 const struct dc_link_settings *link_settings);
48
49void dp_receiver_power_ctrl(struct core_link *link, bool on);
50
51void dp_disable_link_phy(struct core_link *link, enum signal_type signal);
52
53void dp_disable_link_phy_mst(struct core_link *link, enum signal_type signal);
54
55bool dp_set_hw_training_pattern(
56 struct core_link *link,
57 enum hw_dp_training_pattern pattern);
58
59void dp_set_hw_lane_settings(
60 struct core_link *link,
61 const struct link_training_settings *link_settings);
62
63void dp_set_hw_test_pattern(
64 struct core_link *link,
65 enum dp_test_pattern test_pattern,
66 uint8_t *custom_pattern,
67 uint32_t custom_pattern_size);
68
69enum dp_panel_mode dp_get_panel_mode(struct core_link *link);
70
71void dp_retrain_link(struct core_link *link);
72
73#endif /* __DC_LINK_HWSS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
new file mode 100644
index 000000000000..159b2c519f2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -0,0 +1,290 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
26#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
27
28#include "dm_services.h"
29
30/* macro for register read/write
 31 * users of these macros need to define
 32 *
 33 * CTX ==> macro resolving to a pointer to the dc_context
 34 * eg. aud110->base.ctx
 35 *
 36 * REG ==> macro resolving to the location of the register offset
 37 * eg. aud110->regs->reg
38 */
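/* Illustrative sketch of the two required macros for a hypothetical
 * hardware block "abc" (the names below are not part of this file):
 *
 *	#define CTX \
 *		abc->base.ctx
 *	#define REG(reg_name) \
 *		(abc->regs->reg_name)
 *
 * with which the block can then do, e.g.:
 *
 *	uint32_t v = REG_READ(DC_ABC_CONTROL);
 *	REG_WRITE(DC_ABC_CONTROL, v | 1);
 */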
39#define REG_READ(reg_name) \
40 dm_read_reg(CTX, REG(reg_name))
41
42#define REG_WRITE(reg_name, value) \
43 dm_write_reg(CTX, REG(reg_name), value)
44
45#ifdef REG_SET
46#undef REG_SET
47#endif
48
49#ifdef REG_GET
50#undef REG_GET
51#endif
52
53/* macro to set register fields. */
54#define REG_SET_N(reg_name, n, initial_val, ...) \
55 generic_reg_update_ex(CTX, \
56 REG(reg_name), \
57 initial_val, \
58 n, __VA_ARGS__)
59
60#define FN(reg_name, field) \
61 FD(reg_name##__##field)
62
63#define REG_SET(reg_name, initial_val, field, val) \
64 REG_SET_N(reg_name, 1, initial_val, \
65 FN(reg_name, field), val)
66
67#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \
68 REG_SET_N(reg, 2, init_value, \
69 FN(reg, f1), v1,\
70 FN(reg, f2), v2)
71
72#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \
73 REG_SET_N(reg, 3, init_value, \
74 FN(reg, f1), v1,\
75 FN(reg, f2), v2,\
76 FN(reg, f3), v3)
77
78#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \
79 REG_SET_N(reg, 4, init_value, \
80 FN(reg, f1), v1,\
81 FN(reg, f2), v2,\
82 FN(reg, f3), v3,\
83 FN(reg, f4), v4)
84
85#define REG_SET_5(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
86 f5, v5) \
 87	REG_SET_N(reg, 5, init_value, \
88 FN(reg, f1), v1,\
89 FN(reg, f2), v2,\
90 FN(reg, f3), v3,\
91 FN(reg, f4), v4,\
92 FN(reg, f5), v5)
93
94#define REG_SET_6(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
95 f5, v5, f6, v6) \
96 REG_SET_N(reg, 6, init_value, \
97 FN(reg, f1), v1,\
98 FN(reg, f2), v2,\
99 FN(reg, f3), v3,\
100 FN(reg, f4), v4,\
101 FN(reg, f5), v5,\
102 FN(reg, f6), v6)
103
104#define REG_SET_7(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
105 f5, v5, f6, v6, f7, v7) \
106 REG_SET_N(reg, 7, init_value, \
107 FN(reg, f1), v1,\
108 FN(reg, f2), v2,\
109 FN(reg, f3), v3,\
110 FN(reg, f4), v4,\
111 FN(reg, f5), v5,\
112 FN(reg, f6), v6,\
113 FN(reg, f7), v7)
114
115#define REG_SET_10(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, f5, \
116 v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10) \
117 REG_SET_N(reg, 10, init_value, \
118 FN(reg, f1), v1,\
119 FN(reg, f2), v2, \
120 FN(reg, f3), v3, \
121 FN(reg, f4), v4, \
122 FN(reg, f5), v5, \
123 FN(reg, f6), v6, \
124 FN(reg, f7), v7, \
125 FN(reg, f8), v8, \
126 FN(reg, f9), v9, \
127 FN(reg, f10), v10)
128
129/* macro to get register fields:
130 * read the given register and fill in the field value via the output parameter */
131#define REG_GET(reg_name, field, val) \
132 generic_reg_get(CTX, REG(reg_name), \
133 FN(reg_name, field), val)
134
135#define REG_GET_2(reg_name, f1, v1, f2, v2) \
136 generic_reg_get2(CTX, REG(reg_name), \
137 FN(reg_name, f1), v1, \
138 FN(reg_name, f2), v2)
139
140#define REG_GET_3(reg_name, f1, v1, f2, v2, f3, v3) \
141 generic_reg_get3(CTX, REG(reg_name), \
142 FN(reg_name, f1), v1, \
143 FN(reg_name, f2), v2, \
144 FN(reg_name, f3), v3)
145
146#define REG_GET_5(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
147 generic_reg_get5(CTX, REG(reg_name), \
148 FN(reg_name, f1), v1, \
149 FN(reg_name, f2), v2, \
150 FN(reg_name, f3), v3, \
151 FN(reg_name, f4), v4, \
152 FN(reg_name, f5), v5)
153
154/* macro to poll and wait for a register field to read back a given value */
155
156#define REG_WAIT(reg_name, field, val, delay, max_try) \
157 generic_reg_wait(CTX, \
158 REG(reg_name), FN(reg_name, field), val,\
159 delay, max_try, __func__)
160
161/* macro to update (read, modify, write) register fields
162 */
163#define REG_UPDATE_N(reg_name, n, ...) \
164 generic_reg_update_ex(CTX, \
165 REG(reg_name), \
166 REG_READ(reg_name), \
167 n, __VA_ARGS__)
168
169#define REG_UPDATE(reg_name, field, val) \
170 REG_UPDATE_N(reg_name, 1, \
171 FN(reg_name, field), val)
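/* Illustrative sketch (register and field names are hypothetical): assuming
 * the block also defines FD() so that FN() expands to a (shift, mask) pair,
 * a read-modify-write followed by a poll could look like:
 *
 *	uint32_t enabled;
 *
 *	REG_UPDATE(DC_ABC_CONTROL, ABC_ENABLE, 1);
 *	REG_GET(DC_ABC_STATUS, ABC_ENABLED, &enabled);
 *	REG_WAIT(DC_ABC_STATUS, ABC_ENABLED, 1, 10, 1000);
 */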
172
173#define REG_UPDATE_2(reg, f1, v1, f2, v2) \
174 REG_UPDATE_N(reg, 2,\
175 FN(reg, f1), v1,\
176 FN(reg, f2), v2)
177
178#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \
179 REG_UPDATE_N(reg, 3, \
180 FN(reg, f1), v1,\
181 FN(reg, f2), v2, \
182 FN(reg, f3), v3)
183
184#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \
185 REG_UPDATE_N(reg, 4, \
186 FN(reg, f1), v1,\
187 FN(reg, f2), v2, \
188 FN(reg, f3), v3, \
189 FN(reg, f4), v4)
190
191#define REG_UPDATE_5(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
192 REG_UPDATE_N(reg, 5, \
193 FN(reg, f1), v1,\
194 FN(reg, f2), v2, \
195 FN(reg, f3), v3, \
196 FN(reg, f4), v4, \
197 FN(reg, f5), v5)
198
199#define REG_UPDATE_6(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
200 REG_UPDATE_N(reg, 6, \
201 FN(reg, f1), v1,\
202 FN(reg, f2), v2, \
203 FN(reg, f3), v3, \
204 FN(reg, f4), v4, \
205 FN(reg, f5), v5, \
206 FN(reg, f6), v6)
207
208#define REG_UPDATE_7(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
209 REG_UPDATE_N(reg, 7, \
210 FN(reg, f1), v1,\
211 FN(reg, f2), v2, \
212 FN(reg, f3), v3, \
213 FN(reg, f4), v4, \
214 FN(reg, f5), v5, \
215 FN(reg, f6), v6, \
216 FN(reg, f7), v7)
217
218#define REG_UPDATE_8(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
219 REG_UPDATE_N(reg, 8, \
220 FN(reg, f1), v1,\
221 FN(reg, f2), v2, \
222 FN(reg, f3), v3, \
223 FN(reg, f4), v4, \
224 FN(reg, f5), v5, \
225 FN(reg, f6), v6, \
226 FN(reg, f7), v7, \
227 FN(reg, f8), v8)
228
229#define REG_UPDATE_9(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9) \
230 REG_UPDATE_N(reg, 9, \
231 FN(reg, f1), v1,\
232 FN(reg, f2), v2, \
233 FN(reg, f3), v3, \
234 FN(reg, f4), v4, \
235 FN(reg, f5), v5, \
236 FN(reg, f6), v6, \
237 FN(reg, f7), v7, \
238 FN(reg, f8), v8, \
239 FN(reg, f9), v9)
240
241#define REG_UPDATE_10(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10) \
242 REG_UPDATE_N(reg, 10, \
243 FN(reg, f1), v1,\
244 FN(reg, f2), v2, \
245 FN(reg, f3), v3, \
246 FN(reg, f4), v4, \
247 FN(reg, f5), v5, \
248 FN(reg, f6), v6, \
249 FN(reg, f7), v7, \
250 FN(reg, f8), v8, \
251 FN(reg, f9), v9, \
252 FN(reg, f10), v10)
253
254/* macro to update a register field to the specified values in the given sequence;
255 * useful when toggling bits
256 */
257#define REG_UPDATE_SEQ(reg, field, value1, value2) \
258{ uint32_t val = REG_UPDATE(reg, field, value1); \
259 REG_SET(reg, val, field, value2); }
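/* Illustrative sketch (hypothetical names): pulse a reset field high and
 * then low in one statement:
 *
 *	REG_UPDATE_SEQ(DC_ABC_CONTROL, ABC_SOFT_RESET, 1, 0);
 */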
260
261/* macro to update register fields one field at a time, in the given order */
262#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
263{ uint32_t val = REG_UPDATE(reg, f1, v1); \
264 REG_SET(reg, val, f2, v2); }
265
266#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
267{ uint32_t val = REG_UPDATE(reg, f1, v1); \
268 val = REG_SET(reg, val, f2, v2); \
269 REG_SET(reg, val, f3, v3); }
270
271uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
272 uint8_t shift, uint32_t mask, uint32_t *field_value);
273
274uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
275 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
276 uint8_t shift2, uint32_t mask2, uint32_t *field_value2);
277
278uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
279 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
280 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
281 uint8_t shift3, uint32_t mask3, uint32_t *field_value3);
282
283uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
284 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
285 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
286 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
287 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
288 uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
289
290#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
new file mode 100644
index 000000000000..8dd676de6b07
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -0,0 +1,164 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
26#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
27
28#include "core_types.h"
29#include "core_status.h"
30#include "core_dc.h"
31#include "dal_asic_id.h"
32
 33/* TODO: unhardcode, 4 for CZ */
34#define MEMORY_TYPE_MULTIPLIER 4
35
36enum dce_version resource_parse_asic_id(
37 struct hw_asic_id asic_id);
38
39struct resource_caps {
40 int num_timing_generator;
41 int num_video_plane;
42 int num_audio;
43 int num_stream_encoder;
44 int num_pll;
45};
46
47struct resource_straps {
48 uint32_t hdmi_disable;
49 uint32_t dc_pinstraps_audio;
50 uint32_t audio_stream_number;
51};
52
53struct resource_create_funcs {
54 void (*read_dce_straps)(
55 struct dc_context *ctx, struct resource_straps *straps);
56
57 struct audio *(*create_audio)(
58 struct dc_context *ctx, unsigned int inst);
59
60 struct stream_encoder *(*create_stream_encoder)(
61 enum engine_id eng_id, struct dc_context *ctx);
62
63 struct dce_hwseq *(*create_hwseq)(
64 struct dc_context *ctx);
65};
66
67bool resource_construct(
68 unsigned int num_virtual_links,
69 struct core_dc *dc,
70 struct resource_pool *pool,
71 const struct resource_create_funcs *create_funcs);
72
73struct resource_pool *dc_create_resource_pool(
74 struct core_dc *dc,
75 int num_virtual_links,
76 enum dce_version dc_version,
77 struct hw_asic_id asic_id);
78
79void dc_destroy_resource_pool(struct core_dc *dc);
80
81enum dc_status resource_map_pool_resources(
82 const struct core_dc *dc,
83 struct validate_context *context);
84
85bool resource_build_scaling_params(
86 const struct dc_surface *surface,
87 struct pipe_ctx *pipe_ctx);
88
89enum dc_status resource_build_scaling_params_for_context(
90 const struct core_dc *dc,
91 struct validate_context *context);
92
93void resource_build_info_frame(struct pipe_ctx *pipe_ctx);
94
95void resource_unreference_clock_source(
96 struct resource_context *res_ctx,
97 struct clock_source *clock_source);
98
99void resource_reference_clock_source(
100 struct resource_context *res_ctx,
101 struct clock_source *clock_source);
102
103bool resource_are_streams_timing_synchronizable(
104 const struct core_stream *stream1,
105 const struct core_stream *stream2);
106
107struct clock_source *resource_find_used_clk_src_for_sharing(
108 struct resource_context *res_ctx,
109 struct pipe_ctx *pipe_ctx);
110
111struct clock_source *dc_resource_find_first_free_pll(
112 struct resource_context *res_ctx);
113
114struct pipe_ctx *resource_get_head_pipe_for_stream(
115 struct resource_context *res_ctx,
116 const struct core_stream *stream);
117
118bool resource_attach_surfaces_to_context(
119 const struct dc_surface *const *surfaces,
120 int surface_count,
121 const struct dc_target *dc_target,
122 struct validate_context *context);
123
124struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx);
125
126bool resource_is_stream_unchanged(
127 const struct validate_context *old_context, struct core_stream *stream);
128
129bool is_target_unchanged(
130 const struct core_target *old_target, const struct core_target *target);
131bool resource_validate_attach_surfaces(
132 const struct dc_validation_set set[],
133 int set_count,
134 const struct validate_context *old_context,
135 struct validate_context *context);
136
137void validate_guaranteed_copy_target(
138 struct validate_context *context,
139 int max_targets);
140
141void resource_validate_ctx_update_pointer_after_copy(
142 const struct validate_context *src_ctx,
143 struct validate_context *dst_ctx);
144
145void resource_validate_ctx_copy_construct(
146 const struct validate_context *src_ctx,
147 struct validate_context *dst_ctx);
148
149void resource_validate_ctx_destruct(struct validate_context *context);
150
151enum dc_status resource_map_clock_resources(
152 const struct core_dc *dc,
153 struct validate_context *context);
154
155enum dc_status resource_map_phy_clock_resources(
156 const struct core_dc *dc,
157 struct validate_context *context);
158
159bool pipe_need_reprogram(
160 struct pipe_ctx *pipe_ctx_old,
161 struct pipe_ctx *pipe_ctx);
162
163
164#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
new file mode 100644
index 000000000000..02710333ce0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -0,0 +1,28 @@
1#
2# Makefile for the 'irq' sub-component of DAL.
3# It provides the interrupt (IRQ) service that maps, enables and
4# acknowledges the display interrupt sources of the ASIC.
5
6IRQ = irq_service.o
7
8AMD_DAL_IRQ = $(addprefix $(AMDDALPATH)/dc/irq/,$(IRQ))
9
10AMD_DISPLAY_FILES += $(AMD_DAL_IRQ)
11
12###############################################################################
13# DCE 8x
14###############################################################################
15IRQ_DCE80 = irq_service_dce80.o
16
17AMD_DAL_IRQ_DCE80 = $(addprefix $(AMDDALPATH)/dc/irq/dce80/,$(IRQ_DCE80))
18
19AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE80)
20
21###############################################################################
22# DCE 11x
23###############################################################################
24IRQ_DCE11 = irq_service_dce110.o
25
26AMD_DAL_IRQ_DCE11 = $(addprefix $(AMDDALPATH)/dc/irq/dce110/,$(IRQ_DCE11))
27
28AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE11)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
new file mode 100644
index 000000000000..f3eda1b4eebf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -0,0 +1,367 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/logger_interface.h"
29
30#include "irq_service_dce110.h"
31
32#include "dce/dce_11_0_d.h"
33#include "dce/dce_11_0_sh_mask.h"
34#include "ivsrcid/ivsrcid_vislands30.h"
35
36static bool hpd_ack(
37 struct irq_service *irq_service,
38 const struct irq_source_info *info)
39{
40 uint32_t addr = info->status_reg;
41 uint32_t value = dm_read_reg(irq_service->ctx, addr);
42 uint32_t current_status =
43 get_reg_field_value(
44 value,
45 DC_HPD_INT_STATUS,
46 DC_HPD_SENSE_DELAYED);
47
48 dal_irq_service_ack_generic(irq_service, info);
49
50 value = dm_read_reg(irq_service->ctx, info->enable_reg);
51
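	/* flip the interrupt polarity so the opposite HPD transition
	 * (connect vs. disconnect) raises the next interrupt */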
52 set_reg_field_value(
53 value,
54 current_status ? 0 : 1,
55 DC_HPD_INT_CONTROL,
56 DC_HPD_INT_POLARITY);
57
58 dm_write_reg(irq_service->ctx, info->enable_reg, value);
59
60 return true;
61}
62
63static const struct irq_source_info_funcs hpd_irq_info_funcs = {
64 .set = NULL,
65 .ack = hpd_ack
66};
67
68static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
69 .set = NULL,
70 .ack = NULL
71};
72
73static const struct irq_source_info_funcs pflip_irq_info_funcs = {
74 .set = NULL,
75 .ack = NULL
76};
77
78static const struct irq_source_info_funcs vblank_irq_info_funcs = {
79 .set = NULL,
80 .ack = NULL
81};
82
83#define hpd_int_entry(reg_num)\
84 [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
85 .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
86 .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
87 .enable_value = {\
88 DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
89 ~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\
90 },\
91 .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
92 .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
93 .ack_value = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
94 .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
95 .funcs = &hpd_irq_info_funcs\
96 }
97
98#define hpd_rx_int_entry(reg_num)\
99 [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
100 .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
101 .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
102 .enable_value = {\
103 DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
104 ~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\
105 .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
106 .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
107 .ack_value = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
108 .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
109 .funcs = &hpd_rx_irq_info_funcs\
110 }
111#define pflip_int_entry(reg_num)\
112 [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
113 .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
114 .enable_mask =\
115 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
116 .enable_value = {\
117 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
118 ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
119 .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
120 .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
121 .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
 122		.status_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
123 .funcs = &pflip_irq_info_funcs\
124 }
125
126#define vupdate_int_entry(reg_num)\
127 [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
128 .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
129 .enable_mask =\
130 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
131 .enable_value = {\
132 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
133 ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
134 .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
135 .ack_mask =\
136 CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
137 .ack_value =\
138 CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
139 .funcs = &vblank_irq_info_funcs\
140 }
141
142#define dummy_irq_entry() \
143 {\
144 .funcs = &dummy_irq_info_funcs\
145 }
146
147#define i2c_int_entry(reg_num) \
148 [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
149
150#define dp_sink_int_entry(reg_num) \
151 [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
152
153#define gpio_pad_int_entry(reg_num) \
154 [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
155
156#define dc_underflow_int_entry(reg_num) \
157 [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
158
159bool dal_irq_service_dummy_set(
160 struct irq_service *irq_service,
161 const struct irq_source_info *info,
162 bool enable)
163{
164 dm_logger_write(
165 irq_service->ctx->logger, LOG_ERROR,
166 "%s: called for non-implemented irq source\n",
167 __func__);
168 return false;
169}
170
171bool dal_irq_service_dummy_ack(
172 struct irq_service *irq_service,
173 const struct irq_source_info *info)
174{
175 dm_logger_write(
176 irq_service->ctx->logger, LOG_ERROR,
177 "%s: called for non-implemented irq source\n",
178 __func__);
179 return false;
180}
181
182static const struct irq_source_info_funcs dummy_irq_info_funcs = {
183 .set = dal_irq_service_dummy_set,
184 .ack = dal_irq_service_dummy_ack
185};
186
187static const struct irq_source_info
188irq_source_info_dce110[DAL_IRQ_SOURCES_NUMBER] = {
189 [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
190 hpd_int_entry(0),
191 hpd_int_entry(1),
192 hpd_int_entry(2),
193 hpd_int_entry(3),
194 hpd_int_entry(4),
195 hpd_int_entry(5),
196 hpd_rx_int_entry(0),
197 hpd_rx_int_entry(1),
198 hpd_rx_int_entry(2),
199 hpd_rx_int_entry(3),
200 hpd_rx_int_entry(4),
201 hpd_rx_int_entry(5),
202 i2c_int_entry(1),
203 i2c_int_entry(2),
204 i2c_int_entry(3),
205 i2c_int_entry(4),
206 i2c_int_entry(5),
207 i2c_int_entry(6),
208 dp_sink_int_entry(1),
209 dp_sink_int_entry(2),
210 dp_sink_int_entry(3),
211 dp_sink_int_entry(4),
212 dp_sink_int_entry(5),
213 dp_sink_int_entry(6),
214 [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
215 pflip_int_entry(0),
216 pflip_int_entry(1),
217 pflip_int_entry(2),
218 pflip_int_entry(3),
219 pflip_int_entry(4),
220 pflip_int_entry(5),
221 [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
222 gpio_pad_int_entry(0),
223 gpio_pad_int_entry(1),
224 gpio_pad_int_entry(2),
225 gpio_pad_int_entry(3),
226 gpio_pad_int_entry(4),
227 gpio_pad_int_entry(5),
228 gpio_pad_int_entry(6),
229 gpio_pad_int_entry(7),
230 gpio_pad_int_entry(8),
231 gpio_pad_int_entry(9),
232 gpio_pad_int_entry(10),
233 gpio_pad_int_entry(11),
234 gpio_pad_int_entry(12),
235 gpio_pad_int_entry(13),
236 gpio_pad_int_entry(14),
237 gpio_pad_int_entry(15),
238 gpio_pad_int_entry(16),
239 gpio_pad_int_entry(17),
240 gpio_pad_int_entry(18),
241 gpio_pad_int_entry(19),
242 gpio_pad_int_entry(20),
243 gpio_pad_int_entry(21),
244 gpio_pad_int_entry(22),
245 gpio_pad_int_entry(23),
246 gpio_pad_int_entry(24),
247 gpio_pad_int_entry(25),
248 gpio_pad_int_entry(26),
249 gpio_pad_int_entry(27),
250 gpio_pad_int_entry(28),
251 gpio_pad_int_entry(29),
252 gpio_pad_int_entry(30),
253 dc_underflow_int_entry(1),
254 dc_underflow_int_entry(2),
255 dc_underflow_int_entry(3),
256 dc_underflow_int_entry(4),
257 dc_underflow_int_entry(5),
258 dc_underflow_int_entry(6),
259 [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
260 [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
261 vupdate_int_entry(0),
262 vupdate_int_entry(1),
263 vupdate_int_entry(2),
264 vupdate_int_entry(3),
265 vupdate_int_entry(4),
266 vupdate_int_entry(5),
267};
268
269enum dc_irq_source to_dal_irq_source_dce110(
270 struct irq_service *irq_service,
271 uint32_t src_id,
272 uint32_t ext_id)
273{
274 switch (src_id) {
275 case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT:
276 return DC_IRQ_SOURCE_VUPDATE1;
277 case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT:
278 return DC_IRQ_SOURCE_VUPDATE2;
279 case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT:
280 return DC_IRQ_SOURCE_VUPDATE3;
281 case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT:
282 return DC_IRQ_SOURCE_VUPDATE4;
283 case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT:
284 return DC_IRQ_SOURCE_VUPDATE5;
285 case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT:
286 return DC_IRQ_SOURCE_VUPDATE6;
287 case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP:
288 return DC_IRQ_SOURCE_PFLIP1;
289 case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP:
290 return DC_IRQ_SOURCE_PFLIP2;
291 case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP:
292 return DC_IRQ_SOURCE_PFLIP3;
293 case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP:
294 return DC_IRQ_SOURCE_PFLIP4;
295 case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP:
296 return DC_IRQ_SOURCE_PFLIP5;
297 case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP:
298 return DC_IRQ_SOURCE_PFLIP6;
299
300 case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A:
301 /* generic src_id for all HPD and HPDRX interrupts */
302 switch (ext_id) {
303 case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A:
304 return DC_IRQ_SOURCE_HPD1;
305 case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B:
306 return DC_IRQ_SOURCE_HPD2;
307 case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C:
308 return DC_IRQ_SOURCE_HPD3;
309 case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D:
310 return DC_IRQ_SOURCE_HPD4;
311 case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E:
312 return DC_IRQ_SOURCE_HPD5;
313 case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F:
314 return DC_IRQ_SOURCE_HPD6;
315 case VISLANDS30_IV_EXTID_HPD_RX_A:
316 return DC_IRQ_SOURCE_HPD1RX;
317 case VISLANDS30_IV_EXTID_HPD_RX_B:
318 return DC_IRQ_SOURCE_HPD2RX;
319 case VISLANDS30_IV_EXTID_HPD_RX_C:
320 return DC_IRQ_SOURCE_HPD3RX;
321 case VISLANDS30_IV_EXTID_HPD_RX_D:
322 return DC_IRQ_SOURCE_HPD4RX;
323 case VISLANDS30_IV_EXTID_HPD_RX_E:
324 return DC_IRQ_SOURCE_HPD5RX;
325 case VISLANDS30_IV_EXTID_HPD_RX_F:
326 return DC_IRQ_SOURCE_HPD6RX;
327 default:
328 return DC_IRQ_SOURCE_INVALID;
329 }
330 break;
331
332 default:
333 return DC_IRQ_SOURCE_INVALID;
334 }
335}
336
337static const struct irq_service_funcs irq_service_funcs_dce110 = {
338 .to_dal_irq_source = to_dal_irq_source_dce110
339};
340
341bool construct(
342 struct irq_service *irq_service,
343 struct irq_service_init_data *init_data)
344{
345 if (!dal_irq_service_construct(irq_service, init_data))
346 return false;
347
348 irq_service->info = irq_source_info_dce110;
349 irq_service->funcs = &irq_service_funcs_dce110;
350
351 return true;
352}
353
354struct irq_service *dal_irq_service_dce110_create(
355 struct irq_service_init_data *init_data)
356{
357 struct irq_service *irq_service = dm_alloc(sizeof(*irq_service));
358
359 if (!irq_service)
360 return NULL;
361
362 if (construct(irq_service, init_data))
363 return irq_service;
364
365 dm_free(irq_service);
366 return NULL;
367}
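
The DCE 11.0 file above follows the create/construct split used throughout dc: dal_irq_service_dce110_create() allocates with dm_alloc(), construct() wires in the info table and funcs, and every failure path frees and returns NULL. A minimal caller-side sketch of that lifetime, assuming only a valid struct dc_context pointer; the example_* function names are hypothetical, while the called helpers are the ones declared in this patch:

/* Sketch: bring up and tear down a DCE 11.0 irq_service. */
static struct irq_service *example_irq_service_bring_up(struct dc_context *ctx)
{
	struct irq_service_init_data init_data = { .ctx = ctx };
	struct irq_service *irqs = dal_irq_service_dce110_create(&init_data);

	if (!irqs)
		return NULL; /* allocation or construct failed */

	/* Enable HPD1 via the table-driven helper; the entry's
	 * enable_reg/enable_mask pair does the register work. */
	dal_irq_service_set(irqs, DC_IRQ_SOURCE_HPD1, true);

	return irqs;
}

static void example_irq_service_tear_down(struct irq_service **irqs)
{
	dal_irq_service_destroy(irqs); /* frees and NULLs the handle */
}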
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
new file mode 100644
index 000000000000..a84f360c6515
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_IRQ_SERVICE_DCE110_H__
27#define __DAL_IRQ_SERVICE_DCE110_H__
28
29#include "../irq_service.h"
30
31struct irq_service *dal_irq_service_dce110_create(
32 struct irq_service_init_data *init_data);
33
34enum dc_irq_source to_dal_irq_source_dce110(
35 struct irq_service *irq_service,
36 uint32_t src_id,
37 uint32_t ext_id);
38
39bool dal_irq_service_dummy_set(
40 struct irq_service *irq_service,
41 const struct irq_source_info *info,
42 bool enable);
43
44bool dal_irq_service_dummy_ack(
45 struct irq_service *irq_service,
46 const struct irq_source_info *info);
47
48#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
new file mode 100644
index 000000000000..8b4f45389783
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -0,0 +1,283 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/logger_interface.h"
29
30#include "irq_service_dce80.h"
31#include "../dce110/irq_service_dce110.h"
32
33#include "dce/dce_8_0_d.h"
34#include "dce/dce_8_0_sh_mask.h"
35
36#include "ivsrcid/ivsrcid_vislands30.h"
37
38static bool hpd_ack(
39 struct irq_service *irq_service,
40 const struct irq_source_info *info)
41{
42 uint32_t addr = info->status_reg;
43 uint32_t value = dm_read_reg(irq_service->ctx, addr);
44 uint32_t current_status =
45 get_reg_field_value(
46 value,
47 DC_HPD1_INT_STATUS,
48 DC_HPD1_SENSE_DELAYED);
49
50 dal_irq_service_ack_generic(irq_service, info);
51
52 value = dm_read_reg(irq_service->ctx, info->enable_reg);
53
54 set_reg_field_value(
55 value,
56 current_status ? 0 : 1,
57 DC_HPD1_INT_CONTROL,
58 DC_HPD1_INT_POLARITY);
59
60 dm_write_reg(irq_service->ctx, info->enable_reg, value);
61
62 return true;
63}
64
65static const struct irq_source_info_funcs hpd_irq_info_funcs = {
66 .set = NULL,
67 .ack = hpd_ack
68};
69
70static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
71 .set = NULL,
72 .ack = NULL
73};
74
75static const struct irq_source_info_funcs pflip_irq_info_funcs = {
76 .set = NULL,
77 .ack = NULL
78};
79
80static const struct irq_source_info_funcs vblank_irq_info_funcs = {
81 .set = NULL,
82 .ack = NULL
83};
84
85
86#define hpd_int_entry(reg_num)\
87 [DC_IRQ_SOURCE_INVALID + reg_num] = {\
88 .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
89 .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
90 .enable_value = {\
91 DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
92 ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
93 },\
94 .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
95 .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
96 .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
97 .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
98 .funcs = &hpd_irq_info_funcs\
99 }
100
101#define hpd_rx_int_entry(reg_num)\
102 [DC_IRQ_SOURCE_HPD6 + reg_num] = {\
103 .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
104 .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
105 .enable_value = {\
106 DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
107 ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
108 .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
109 .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
110 .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
111 .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
112 .funcs = &hpd_rx_irq_info_funcs\
113 }
114
115#define pflip_int_entry(reg_num)\
116 [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
117 .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
118 .enable_mask =\
119 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
120 .enable_value = {\
121 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
122 ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
123 .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
124 .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
125 .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
126 .status_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
127 .funcs = &pflip_irq_info_funcs\
128 }
129
130#define vupdate_int_entry(reg_num)\
131 [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
132 .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
133 .enable_mask =\
134 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
135 .enable_value = {\
136 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
137 ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
138 .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
139 .ack_mask =\
140 CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
141 .ack_value =\
142 CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
143 .funcs = &vblank_irq_info_funcs\
144 }
145
146#define dummy_irq_entry() \
147 {\
148 .funcs = &dummy_irq_info_funcs\
149 }
150
151#define i2c_int_entry(reg_num) \
152 [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
153
154#define dp_sink_int_entry(reg_num) \
155 [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
156
157#define gpio_pad_int_entry(reg_num) \
158 [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
159
160#define dc_underflow_int_entry(reg_num) \
161 [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
162
163
164static const struct irq_source_info_funcs dummy_irq_info_funcs = {
165 .set = dal_irq_service_dummy_set,
166 .ack = dal_irq_service_dummy_ack
167};
168
169static const struct irq_source_info
170irq_source_info_dce80[DAL_IRQ_SOURCES_NUMBER] = {
171 [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
172 hpd_int_entry(1),
173 hpd_int_entry(2),
174 hpd_int_entry(3),
175 hpd_int_entry(4),
176 hpd_int_entry(5),
177 hpd_int_entry(6),
178 hpd_rx_int_entry(1),
179 hpd_rx_int_entry(2),
180 hpd_rx_int_entry(3),
181 hpd_rx_int_entry(4),
182 hpd_rx_int_entry(5),
183 hpd_rx_int_entry(6),
184 i2c_int_entry(1),
185 i2c_int_entry(2),
186 i2c_int_entry(3),
187 i2c_int_entry(4),
188 i2c_int_entry(5),
189 i2c_int_entry(6),
190 dp_sink_int_entry(1),
191 dp_sink_int_entry(2),
192 dp_sink_int_entry(3),
193 dp_sink_int_entry(4),
194 dp_sink_int_entry(5),
195 dp_sink_int_entry(6),
196 [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
197 pflip_int_entry(0),
198 pflip_int_entry(1),
199 pflip_int_entry(2),
200 pflip_int_entry(3),
201 pflip_int_entry(4),
202 pflip_int_entry(5),
203 [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
204 gpio_pad_int_entry(0),
205 gpio_pad_int_entry(1),
206 gpio_pad_int_entry(2),
207 gpio_pad_int_entry(3),
208 gpio_pad_int_entry(4),
209 gpio_pad_int_entry(5),
210 gpio_pad_int_entry(6),
211 gpio_pad_int_entry(7),
212 gpio_pad_int_entry(8),
213 gpio_pad_int_entry(9),
214 gpio_pad_int_entry(10),
215 gpio_pad_int_entry(11),
216 gpio_pad_int_entry(12),
217 gpio_pad_int_entry(13),
218 gpio_pad_int_entry(14),
219 gpio_pad_int_entry(15),
220 gpio_pad_int_entry(16),
221 gpio_pad_int_entry(17),
222 gpio_pad_int_entry(18),
223 gpio_pad_int_entry(19),
224 gpio_pad_int_entry(20),
225 gpio_pad_int_entry(21),
226 gpio_pad_int_entry(22),
227 gpio_pad_int_entry(23),
228 gpio_pad_int_entry(24),
229 gpio_pad_int_entry(25),
230 gpio_pad_int_entry(26),
231 gpio_pad_int_entry(27),
232 gpio_pad_int_entry(28),
233 gpio_pad_int_entry(29),
234 gpio_pad_int_entry(30),
235 dc_underflow_int_entry(1),
236 dc_underflow_int_entry(2),
237 dc_underflow_int_entry(3),
238 dc_underflow_int_entry(4),
239 dc_underflow_int_entry(5),
240 dc_underflow_int_entry(6),
241 [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
242 [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
243 vupdate_int_entry(0),
244 vupdate_int_entry(1),
245 vupdate_int_entry(2),
246 vupdate_int_entry(3),
247 vupdate_int_entry(4),
248 vupdate_int_entry(5),
249};
250
251static const struct irq_service_funcs irq_service_funcs_dce80 = {
252 .to_dal_irq_source = to_dal_irq_source_dce110
253};
254
255static bool construct(
256 struct irq_service *irq_service,
257 struct irq_service_init_data *init_data)
258{
259 if (!dal_irq_service_construct(irq_service, init_data))
260 return false;
261
262 irq_service->info = irq_source_info_dce80;
263 irq_service->funcs = &irq_service_funcs_dce80;
264
265 return true;
266}
267
268struct irq_service *dal_irq_service_dce80_create(
269 struct irq_service_init_data *init_data)
270{
271 struct irq_service *irq_service = dm_alloc(sizeof(*irq_service));
272
273 if (!irq_service)
274 return NULL;
275
276 if (construct(irq_service, init_data))
277 return irq_service;
278
279 dm_free(irq_service);
280 return NULL;
281}
282
283
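
Both this DCE 8.0 table and the DCE 11.0 one depend on the enum layout in irq_types.h: hpd_int_entry(n) is a designated initializer at index DC_IRQ_SOURCE_INVALID + n, which only lands on DC_IRQ_SOURCE_HPDn because the six HPD sources immediately follow DC_IRQ_SOURCE_INVALID; hpd_rx_int_entry(), pflip_int_entry() and vupdate_int_entry() anchor on DC_IRQ_SOURCE_HPD6, DC_IRQ_SOURCE_PFLIP1 and DC_IRQ_SOURCE_VUPDATE1 the same way. A standalone sketch of that offset pattern with made-up example_* names, just to make the contiguity requirement explicit:

/* Sketch only: the enum-offset designated-initializer pattern. */
enum example_src {
	EXAMPLE_SRC_INVALID = 0,
	EXAMPLE_SRC_HPD1, /* must stay contiguous ...      */
	EXAMPLE_SRC_HPD2, /* ... for the offset math below */
	EXAMPLE_SRC_NUMBER
};

struct example_info {
	unsigned int enable_reg;
};

#define example_hpd_entry(n) \
	[EXAMPLE_SRC_INVALID + (n)] = { .enable_reg = 0x1000 + (n) }

static const struct example_info example_table[EXAMPLE_SRC_NUMBER] = {
	example_hpd_entry(1), /* lands on EXAMPLE_SRC_HPD1 */
	example_hpd_entry(2), /* lands on EXAMPLE_SRC_HPD2 */
};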
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
new file mode 100644
index 000000000000..3dd1013576ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_IRQ_SERVICE_DCE80_H__
27#define __DAL_IRQ_SERVICE_DCE80_H__
28
29#include "../irq_service.h"
30
31struct irq_service *dal_irq_service_dce80_create(
32 struct irq_service_init_data *init_data);
33
34#endif
35
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
new file mode 100644
index 000000000000..fbaa2fc00ddb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/irq_service_interface.h"
29#include "include/logger_interface.h"
30
31#include "dce110/irq_service_dce110.h"
32
33
34#include "dce80/irq_service_dce80.h"
35
36
37#include "reg_helper.h"
38#include "irq_service.h"
39
40
41
42#define CTX \
43 irq_service->ctx
44
45bool dal_irq_service_construct(
46 struct irq_service *irq_service,
47 struct irq_service_init_data *init_data)
48{
49 if (!init_data || !init_data->ctx)
50 return false;
51
52 irq_service->ctx = init_data->ctx;
53 return true;
54}
55
56void dal_irq_service_destroy(struct irq_service **irq_service)
57{
58 if (!irq_service || !*irq_service) {
59 BREAK_TO_DEBUGGER();
60 return;
61 }
62
63 dm_free(*irq_service);
64
65 *irq_service = NULL;
66}
67
68const struct irq_source_info *find_irq_source_info(
69 struct irq_service *irq_service,
70 enum dc_irq_source source)
71{
72 if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
73 return NULL;
74
75 return &irq_service->info[source];
76}
77
78void dal_irq_service_set_generic(
79 struct irq_service *irq_service,
80 const struct irq_source_info *info,
81 bool enable)
82{
83 uint32_t addr = info->enable_reg;
84 uint32_t value = dm_read_reg(irq_service->ctx, addr);
85
86 value = (value & ~info->enable_mask) |
87 (info->enable_value[enable ? 0 : 1] & info->enable_mask);
88 dm_write_reg(irq_service->ctx, addr, value);
89}
90
91bool dal_irq_service_set(
92 struct irq_service *irq_service,
93 enum dc_irq_source source,
94 bool enable)
95{
96 const struct irq_source_info *info =
97 find_irq_source_info(irq_service, source);
98
99 if (!info) {
100 dm_logger_write(
101 irq_service->ctx->logger, LOG_ERROR,
102 "%s: cannot find irq info table entry for %d\n",
103 __func__,
104 source);
105 return false;
106 }
107
108 dal_irq_service_ack(irq_service, source);
109
110 if (info->funcs->set)
111 return info->funcs->set(irq_service, info, enable);
112
113 dal_irq_service_set_generic(irq_service, info, enable);
114
115 return true;
116}
117
118void dal_irq_service_ack_generic(
119 struct irq_service *irq_service,
120 const struct irq_source_info *info)
121{
122 uint32_t addr = info->ack_reg;
123 uint32_t value = dm_read_reg(irq_service->ctx, addr);
124
125 value = (value & ~info->ack_mask) |
126 (info->ack_value & info->ack_mask);
127 dm_write_reg(irq_service->ctx, addr, value);
128}
129
130bool dal_irq_service_ack(
131 struct irq_service *irq_service,
132 enum dc_irq_source source)
133{
134 const struct irq_source_info *info =
135 find_irq_source_info(irq_service, source);
136
137 if (!info) {
138 dm_logger_write(
139 irq_service->ctx->logger, LOG_ERROR,
140 "%s: cannot find irq info table entry for %d\n",
141 __func__,
142 source);
143 return false;
144 }
145
146 if (info->funcs->ack)
147 return info->funcs->ack(irq_service, info);
148
149 dal_irq_service_ack_generic(irq_service, info);
150
151 return true;
152}
153
154enum dc_irq_source dal_irq_service_to_irq_source(
155 struct irq_service *irq_service,
156 uint32_t src_id,
157 uint32_t ext_id)
158{
159 return irq_service->funcs->to_dal_irq_source(
160 irq_service,
161 src_id,
162 ext_id);
163}
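
dal_irq_service_set_generic() and dal_irq_service_ack_generic() are plain read-modify-write helpers: clear the entry's mask in the current register value, then OR in the pre-masked enable_value[0] (enable), enable_value[1] (disable) or ack_value. For the single-bit HPD entries, where enable_value is { MASK, ~MASK }, that reduces to setting or clearing one bit. A minimal sketch of the same computation on plain integers, with made-up register contents and no MMIO access:

#include <stdint.h>

/* Sketch: what the generic set path computes for one entry. */
static uint32_t example_rmw(uint32_t reg, uint32_t mask,
		const uint32_t value[2], int enable)
{
	return (reg & ~mask) | (value[enable ? 0 : 1] & mask);
}

/*
 * With mask = 0x00000001 and value = { 0x00000001, ~0x00000001u }:
 *   example_rmw(0x0000F000, mask, value, 1) == 0x0000F001  (enable)
 *   example_rmw(0x0000F001, mask, value, 0) == 0x0000F000  (disable)
 */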
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
new file mode 100644
index 000000000000..a2a2d6965c2f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_IRQ_SERVICE_H__
27#define __DAL_IRQ_SERVICE_H__
28
29#include "include/irq_service_interface.h"
30
31#include "irq_types.h"
32
33struct irq_service;
34struct irq_source_info;
35
36struct irq_source_info_funcs {
37 bool (*set)(
38 struct irq_service *irq_service,
39 const struct irq_source_info *info,
40 bool enable);
41 bool (*ack)(
42 struct irq_service *irq_service,
43 const struct irq_source_info *info);
44};
45
46struct irq_source_info {
47 uint32_t src_id;
48 uint32_t ext_id;
49 uint32_t enable_reg;
50 uint32_t enable_mask;
51 uint32_t enable_value[2];
52 uint32_t ack_reg;
53 uint32_t ack_mask;
54 uint32_t ack_value;
55 uint32_t status_reg;
56 const struct irq_source_info_funcs *funcs;
57};
58
59struct irq_service_funcs {
60 enum dc_irq_source (*to_dal_irq_source)(
61 struct irq_service *irq_service,
62 uint32_t src_id,
63 uint32_t ext_id);
64};
65
66struct irq_service {
67 struct dc_context *ctx;
68 const struct irq_source_info *info;
69 const struct irq_service_funcs *funcs;
70};
71
72bool dal_irq_service_construct(
73 struct irq_service *irq_service,
74 struct irq_service_init_data *init_data);
75
76void dal_irq_service_ack_generic(
77 struct irq_service *irq_service,
78 const struct irq_source_info *info);
79
80void dal_irq_service_set_generic(
81 struct irq_service *irq_service,
82 const struct irq_source_info *info,
83 bool enable);
84
85#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
new file mode 100644
index 000000000000..e4b4b99a86fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_IRQ_TYPES_H__
27#define __DAL_IRQ_TYPES_H__
28
29struct dc_context;
30
31typedef void (*interrupt_handler)(void *);
32
33typedef void *irq_handler_idx;
34#define DAL_INVALID_IRQ_HANDLER_IDX NULL
35
36/* The order of the IRQ sources is important and MUST match that of
37the base driver */
38enum dc_irq_source {
39 /* Use as mask to specify invalid irq source */
40 DC_IRQ_SOURCE_INVALID = 0,
41
42 DC_IRQ_SOURCE_HPD1,
43 DC_IRQ_SOURCE_HPD2,
44 DC_IRQ_SOURCE_HPD3,
45 DC_IRQ_SOURCE_HPD4,
46 DC_IRQ_SOURCE_HPD5,
47 DC_IRQ_SOURCE_HPD6,
48
49 DC_IRQ_SOURCE_HPD1RX,
50 DC_IRQ_SOURCE_HPD2RX,
51 DC_IRQ_SOURCE_HPD3RX,
52 DC_IRQ_SOURCE_HPD4RX,
53 DC_IRQ_SOURCE_HPD5RX,
54 DC_IRQ_SOURCE_HPD6RX,
55
56 DC_IRQ_SOURCE_I2C_DDC1,
57 DC_IRQ_SOURCE_I2C_DDC2,
58 DC_IRQ_SOURCE_I2C_DDC3,
59 DC_IRQ_SOURCE_I2C_DDC4,
60 DC_IRQ_SOURCE_I2C_DDC5,
61 DC_IRQ_SOURCE_I2C_DDC6,
62
63 DC_IRQ_SOURCE_DPSINK1,
64 DC_IRQ_SOURCE_DPSINK2,
65 DC_IRQ_SOURCE_DPSINK3,
66 DC_IRQ_SOURCE_DPSINK4,
67 DC_IRQ_SOURCE_DPSINK5,
68 DC_IRQ_SOURCE_DPSINK6,
69
70 DC_IRQ_SOURCE_TIMER,
71
72 DC_IRQ_SOURCE_PFLIP_FIRST,
73 DC_IRQ_SOURCE_PFLIP1 = DC_IRQ_SOURCE_PFLIP_FIRST,
74 DC_IRQ_SOURCE_PFLIP2,
75 DC_IRQ_SOURCE_PFLIP3,
76 DC_IRQ_SOURCE_PFLIP4,
77 DC_IRQ_SOURCE_PFLIP5,
78 DC_IRQ_SOURCE_PFLIP6,
79 DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
80 DC_IRQ_SOURCE_PFLIP_LAST = DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
81
82 DC_IRQ_SOURCE_GPIOPAD0,
83 DC_IRQ_SOURCE_GPIOPAD1,
84 DC_IRQ_SOURCE_GPIOPAD2,
85 DC_IRQ_SOURCE_GPIOPAD3,
86 DC_IRQ_SOURCE_GPIOPAD4,
87 DC_IRQ_SOURCE_GPIOPAD5,
88 DC_IRQ_SOURCE_GPIOPAD6,
89 DC_IRQ_SOURCE_GPIOPAD7,
90 DC_IRQ_SOURCE_GPIOPAD8,
91 DC_IRQ_SOURCE_GPIOPAD9,
92 DC_IRQ_SOURCE_GPIOPAD10,
93 DC_IRQ_SOURCE_GPIOPAD11,
94 DC_IRQ_SOURCE_GPIOPAD12,
95 DC_IRQ_SOURCE_GPIOPAD13,
96 DC_IRQ_SOURCE_GPIOPAD14,
97 DC_IRQ_SOURCE_GPIOPAD15,
98 DC_IRQ_SOURCE_GPIOPAD16,
99 DC_IRQ_SOURCE_GPIOPAD17,
100 DC_IRQ_SOURCE_GPIOPAD18,
101 DC_IRQ_SOURCE_GPIOPAD19,
102 DC_IRQ_SOURCE_GPIOPAD20,
103 DC_IRQ_SOURCE_GPIOPAD21,
104 DC_IRQ_SOURCE_GPIOPAD22,
105 DC_IRQ_SOURCE_GPIOPAD23,
106 DC_IRQ_SOURCE_GPIOPAD24,
107 DC_IRQ_SOURCE_GPIOPAD25,
108 DC_IRQ_SOURCE_GPIOPAD26,
109 DC_IRQ_SOURCE_GPIOPAD27,
110 DC_IRQ_SOURCE_GPIOPAD28,
111 DC_IRQ_SOURCE_GPIOPAD29,
112 DC_IRQ_SOURCE_GPIOPAD30,
113
114 DC_IRQ_SOURCE_DC1UNDERFLOW,
115 DC_IRQ_SOURCE_DC2UNDERFLOW,
116 DC_IRQ_SOURCE_DC3UNDERFLOW,
117 DC_IRQ_SOURCE_DC4UNDERFLOW,
118 DC_IRQ_SOURCE_DC5UNDERFLOW,
119 DC_IRQ_SOURCE_DC6UNDERFLOW,
120
121 DC_IRQ_SOURCE_DMCU_SCP,
122 DC_IRQ_SOURCE_VBIOS_SW,
123
124 DC_IRQ_SOURCE_VUPDATE1,
125 DC_IRQ_SOURCE_VUPDATE2,
126 DC_IRQ_SOURCE_VUPDATE3,
127 DC_IRQ_SOURCE_VUPDATE4,
128 DC_IRQ_SOURCE_VUPDATE5,
129 DC_IRQ_SOURCE_VUPDATE6,
130
131 DAL_IRQ_SOURCES_NUMBER
132};
133
134enum irq_type
135{
136 IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
137 IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
138};
139
140#define DAL_VALID_IRQ_SRC_NUM(src) \
141 ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
142
143/* Number of Page Flip IRQ Sources. */
144#define DAL_PFLIP_IRQ_SRC_NUM \
145 (DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1)
146
147/* the number of contexts may be expanded in the future based on needs */
148enum dc_interrupt_context {
149 INTERRUPT_LOW_IRQ_CONTEXT = 0,
150 INTERRUPT_HIGH_IRQ_CONTEXT,
151 INTERRUPT_CONTEXT_NUMBER
152};
153
154enum dc_interrupt_porlarity {
155 INTERRUPT_POLARITY_DEFAULT = 0,
156 INTERRUPT_POLARITY_LOW = INTERRUPT_POLARITY_DEFAULT,
157 INTERRUPT_POLARITY_HIGH,
158 INTERRUPT_POLARITY_BOTH
159};
160
161#define DC_DECODE_INTERRUPT_POLARITY(int_polarity) \
162 (int_polarity == INTERRUPT_POLARITY_LOW) ? "Low" : \
163 (int_polarity == INTERRUPT_POLARITY_HIGH) ? "High" : \
164 (int_polarity == INTERRUPT_POLARITY_BOTH) ? "Both" : "Invalid"
165
166struct dc_timer_interrupt_params {
167 uint32_t micro_sec_interval;
168 enum dc_interrupt_context int_context;
169};
170
171struct dc_interrupt_params {
172 /* The polarity *change* which will trigger an interrupt.
173 * If 'requested_polarity == INTERRUPT_POLARITY_BOTH', then
174 * 'current_polarity' must be initialised. */
175 enum dc_interrupt_porlarity requested_polarity;
176 /* If 'requested_polarity == INTERRUPT_POLARITY_BOTH',
177 * 'current_polarity' should contain the current state, which means
178 * the interrupt will be triggered when the state changes from the
179 * one held in 'current_polarity'. */
180 enum dc_interrupt_porlarity current_polarity;
181 enum dc_irq_source irq_source;
182 enum dc_interrupt_context int_context;
183};
184
185#endif
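
The comments on struct dc_interrupt_params spell out the edge-selection contract: requested_polarity picks which transition fires, and only when it is INTERRUPT_POLARITY_BOTH does current_polarity need to carry the present line state so the opposite edge can be armed. A hedged sketch of filling the structure for an HPD source; the helper name and the low-context choice are illustrative, not part of this interface:

/* Sketch: arm HPD1 so that either edge raises an interrupt. */
static void example_arm_hpd_both_edges(
	enum dc_interrupt_porlarity line_state,
	struct dc_interrupt_params *params)
{
	params->irq_source = DC_IRQ_SOURCE_HPD1;
	params->int_context = INTERRUPT_LOW_IRQ_CONTEXT;
	params->requested_polarity = INTERRUPT_POLARITY_BOTH;
	/* Required because requested_polarity == INTERRUPT_POLARITY_BOTH:
	 * the interrupt fires when the line leaves this state. */
	params->current_polarity = line_state;
}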
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
new file mode 100644
index 000000000000..459a2741eccb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef _OS_TYPES_H_
27#define _OS_TYPES_H_
28
29#if defined __KERNEL__
30
31#include <asm/byteorder.h>
32#include <linux/types.h>
33#include <drm/drmP.h>
34
35#include "cgs_linux.h"
36
37#if defined(__BIG_ENDIAN) && !defined(BIGENDIAN_CPU)
38#define BIGENDIAN_CPU
39#elif defined(__LITTLE_ENDIAN) && !defined(LITTLEENDIAN_CPU)
40#define LITTLEENDIAN_CPU
41#endif
42
43#undef READ
44#undef WRITE
45#undef FRAME_SIZE
46
47#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
48
49#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
50
51#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
52
53#define dm_vlog(fmt, args) vprintk(fmt, args)
54
55#define dm_min(x, y) min(x, y)
56#define dm_max(x, y) max(x, y)
57
58#endif
59
60
61#endif /* _OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
new file mode 100644
index 000000000000..fc0b7318d9cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the virtual sub-component of DAL.
3# It provides the virtual link and stream encoder implementations.
4
5VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o
6
7AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL))
8
9AMD_DISPLAY_FILES += $(AMD_DAL_VIRTUAL)
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
new file mode 100644
index 000000000000..bb4433ff3b6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dm_services_types.h"
28
29#include "virtual_link_encoder.h"
30
31#define VIRTUAL_MAX_PIXEL_CLK_IN_KHZ 600000
32
33static bool virtual_link_encoder_validate_output_with_stream(
34 struct link_encoder *enc,
35 struct pipe_ctx *pipe_ctx) { return true; }
36
37static void virtual_link_encoder_hw_init(struct link_encoder *enc) {}
38
39static void virtual_link_encoder_setup(
40 struct link_encoder *enc,
41 enum signal_type signal) {}
42
43static void virtual_link_encoder_enable_tmds_output(
44 struct link_encoder *enc,
45 enum clock_source_id clock_source,
46 enum dc_color_depth color_depth,
47 bool hdmi,
48 bool dual_link,
49 uint32_t pixel_clock) {}
50
51static void virtual_link_encoder_enable_dp_output(
52 struct link_encoder *enc,
53 const struct dc_link_settings *link_settings,
54 enum clock_source_id clock_source) {}
55
56static void virtual_link_encoder_enable_dp_mst_output(
57 struct link_encoder *enc,
58 const struct dc_link_settings *link_settings,
59 enum clock_source_id clock_source) {}
60
61static void virtual_link_encoder_disable_output(
62 struct link_encoder *link_enc,
63 enum signal_type signal) {}
64
65static void virtual_link_encoder_dp_set_lane_settings(
66 struct link_encoder *enc,
67 const struct link_training_settings *link_settings) {}
68
69static void virtual_link_encoder_dp_set_phy_pattern(
70 struct link_encoder *enc,
71 const struct encoder_set_dp_phy_pattern_param *param) {}
72
73static void virtual_link_encoder_update_mst_stream_allocation_table(
74 struct link_encoder *enc,
75 const struct link_mst_stream_allocation_table *table) {}
76
77static void virtual_link_encoder_set_lcd_backlight_level(
78 struct link_encoder *enc,
79 uint32_t level) {}
80
81static void virtual_link_encoder_set_dmcu_backlight_level(
82 struct link_encoder *enc,
83 uint32_t level,
84 uint32_t frame_ramp,
85 uint32_t controller_id) {}
86
87static void virtual_link_encoder_edp_backlight_control(
88 struct link_encoder *enc,
89 bool enable) {}
90
91static void virtual_link_encoder_edp_power_control(
92 struct link_encoder *enc,
93 bool power_up) {}
94
95static void virtual_link_encoder_connect_dig_be_to_fe(
96 struct link_encoder *enc,
97 enum engine_id engine,
98 bool connect) {}
99
100static void virtual_link_encoder_destroy(struct link_encoder **enc)
101{
102 dm_free(*enc);
103 *enc = NULL;
104}
105
106
107static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
108 .validate_output_with_stream =
109 virtual_link_encoder_validate_output_with_stream,
110 .hw_init = virtual_link_encoder_hw_init,
111 .setup = virtual_link_encoder_setup,
112 .enable_tmds_output = virtual_link_encoder_enable_tmds_output,
113 .enable_dp_output = virtual_link_encoder_enable_dp_output,
114 .enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
115 .disable_output = virtual_link_encoder_disable_output,
116 .dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
117 .dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
118 .update_mst_stream_allocation_table =
119 virtual_link_encoder_update_mst_stream_allocation_table,
120 .set_lcd_backlight_level = virtual_link_encoder_set_lcd_backlight_level,
121 .set_dmcu_backlight_level =
122 virtual_link_encoder_set_dmcu_backlight_level,
123 .backlight_control = virtual_link_encoder_edp_backlight_control,
124 .power_control = virtual_link_encoder_edp_power_control,
125 .connect_dig_be_to_fe = virtual_link_encoder_connect_dig_be_to_fe,
126 .destroy = virtual_link_encoder_destroy
127};
128
129bool virtual_link_encoder_construct(
130 struct link_encoder *enc, const struct encoder_init_data *init_data)
131{
132 enc->funcs = &virtual_lnk_enc_funcs;
133 enc->ctx = init_data->ctx;
134 enc->id = init_data->encoder;
135
136 enc->hpd_source = init_data->hpd_source;
137 enc->connector = init_data->connector;
138
139 enc->transmitter = init_data->transmitter;
140
141 enc->features.max_pixel_clock = VIRTUAL_MAX_PIXEL_CLK_IN_KHZ;
142
143 enc->output_signals = SIGNAL_TYPE_VIRTUAL;
144
145 enc->preferred_engine = ENGINE_ID_VIRTUAL;
146
147 return true;
148}
149
150
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
new file mode 100644
index 000000000000..eb1a94fb8a9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_VIRTUAL_LINK_ENCODER_H__
27#define __DC_VIRTUAL_LINK_ENCODER_H__
28
29#include "link_encoder.h"
30
31bool virtual_link_encoder_construct(
32 struct link_encoder *enc, const struct encoder_init_data *init_data);
33
34#endif /* __DC_VIRTUAL_LINK_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
new file mode 100644
index 000000000000..8de21d9a8079
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -0,0 +1,132 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "virtual_stream_encoder.h"
28
29static void virtual_stream_encoder_dp_set_stream_attribute(
30 struct stream_encoder *enc,
31 struct dc_crtc_timing *crtc_timing,
32 enum dc_color_space output_color_space) {}
33
34static void virtual_stream_encoder_hdmi_set_stream_attribute(
35 struct stream_encoder *enc,
36 struct dc_crtc_timing *crtc_timing,
37 int actual_pix_clk_khz,
38 bool enable_audio) {}
39
40static void virtual_stream_encoder_dvi_set_stream_attribute(
41 struct stream_encoder *enc,
42 struct dc_crtc_timing *crtc_timing,
43 bool is_dual_link) {}
44
45static void virtual_stream_encoder_set_mst_bandwidth(
46 struct stream_encoder *enc,
47 struct fixed31_32 avg_time_slots_per_mtp) {}
48
49static void virtual_stream_encoder_update_hdmi_info_packets(
50 struct stream_encoder *enc,
51 const struct encoder_info_frame *info_frame) {}
52
53static void virtual_stream_encoder_stop_hdmi_info_packets(
54 struct stream_encoder *enc) {}
55
56static void virtual_stream_encoder_update_dp_info_packets(
57 struct stream_encoder *enc,
58 const struct encoder_info_frame *info_frame) {}
59
60static void virtual_stream_encoder_stop_dp_info_packets(
61 struct stream_encoder *enc) {}
62
63static void virtual_stream_encoder_dp_blank(
64 struct stream_encoder *enc) {}
65
66static void virtual_stream_encoder_dp_unblank(
67 struct stream_encoder *enc,
68 const struct encoder_unblank_param *param) {}
69
70static void virtual_audio_mute_control(
71 struct stream_encoder *enc,
72 bool mute) {}
73
74static const struct stream_encoder_funcs virtual_str_enc_funcs = {
75 .dp_set_stream_attribute =
76 virtual_stream_encoder_dp_set_stream_attribute,
77 .hdmi_set_stream_attribute =
78 virtual_stream_encoder_hdmi_set_stream_attribute,
79 .dvi_set_stream_attribute =
80 virtual_stream_encoder_dvi_set_stream_attribute,
81 .set_mst_bandwidth =
82 virtual_stream_encoder_set_mst_bandwidth,
83 .update_hdmi_info_packets =
84 virtual_stream_encoder_update_hdmi_info_packets,
85 .stop_hdmi_info_packets =
86 virtual_stream_encoder_stop_hdmi_info_packets,
87 .update_dp_info_packets =
88 virtual_stream_encoder_update_dp_info_packets,
89 .stop_dp_info_packets =
90 virtual_stream_encoder_stop_dp_info_packets,
91 .dp_blank =
92 virtual_stream_encoder_dp_blank,
93 .dp_unblank =
94 virtual_stream_encoder_dp_unblank,
95
96 .audio_mute_control = virtual_audio_mute_control,
97};
98
99bool virtual_stream_encoder_construct(
100 struct stream_encoder *enc,
101 struct dc_context *ctx,
102 struct dc_bios *bp)
103{
104 if (!enc)
105 return false;
106 if (!bp)
107 return false;
108
109 enc->funcs = &virtual_str_enc_funcs;
110 enc->ctx = ctx;
111 enc->id = ENGINE_ID_VIRTUAL;
112 enc->bp = bp;
113
114 return true;
115}
116
117struct stream_encoder *virtual_stream_encoder_create(
118 struct dc_context *ctx, struct dc_bios *bp)
119{
120 struct stream_encoder *enc = dm_alloc(sizeof(*enc));
121
122 if (!enc)
123 return NULL;
124
125 if (virtual_stream_encoder_construct(enc, ctx, bp))
126 return enc;
127
128 BREAK_TO_DEBUGGER();
129 dm_free(enc);
130 return NULL;
131}
132
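
Every hook in the virtual encoders is an empty stub, so code paths that drive a SIGNAL_TYPE_VIRTUAL stream can call through the usual stream_encoder_funcs / link_encoder_funcs tables without special-casing headless outputs. A minimal sketch of obtaining and poking a virtual stream encoder, assuming valid ctx and bios pointers from the display core; the example_* wrapper is made up:

/* Sketch: stream encoder for a virtual (no physical output) path. */
static struct stream_encoder *example_get_virtual_stream_enc(
	struct dc_context *ctx,
	struct dc_bios *bios)
{
	struct stream_encoder *enc = virtual_stream_encoder_create(ctx, bios);

	if (!enc)
		return NULL;

	/* All hooks in virtual_str_enc_funcs are no-ops, so e.g. blanking
	 * the (non-existent) DP output is safe to call unconditionally. */
	enc->funcs->dp_blank(enc);

	return enc;
}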
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
new file mode 100644
index 000000000000..bf3422c66976
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_VIRTUAL_STREAM_ENCODER_H__
27#define __DC_VIRTUAL_STREAM_ENCODER_H__
28
29#include "stream_encoder.h"
30
31struct stream_encoder *virtual_stream_encoder_create(
32 struct dc_context *ctx, struct dc_bios *bp);
33
34bool virtual_stream_encoder_construct(
35 struct stream_encoder *enc,
36 struct dc_context *ctx,
37 struct dc_bios *bp);
38
39#endif /* __DC_VIRTUAL_STREAM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/asic_capability_interface.h b/drivers/gpu/drm/amd/display/include/asic_capability_interface.h
new file mode 100644
index 000000000000..57cc72fdc560
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/asic_capability_interface.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_ASIC_CAPABILITY_INTERFACE_H__
27#define __DAL_ASIC_CAPABILITY_INTERFACE_H__
28
29/* Include */
30#include "include/asic_capability_types.h"
31
32/* Forward declaration */
33struct hw_asic_id;
34
35/* ASIC capability */
36struct asic_capability {
37 struct dc_context *ctx;
38 struct asic_caps caps;
39 struct asic_stereo_3d_caps stereo_3d_caps;
40 struct asic_bugs bugs;
41 uint32_t data[ASIC_DATA_MAX_NUMBER];
42};
43
44/**
45 * Interfaces
46 */
47
48/* Create and initialize ASIC capability */
49struct asic_capability *dal_asic_capability_create(struct hw_asic_id *init,
50 struct dc_context *ctx);
51
52/* Destroy ASIC capability and free memory space */
53void dal_asic_capability_destroy(struct asic_capability **cap);
54
55#endif /* __DAL_ASIC_CAPABILITY_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/asic_capability_types.h b/drivers/gpu/drm/amd/display/include/asic_capability_types.h
new file mode 100644
index 000000000000..c44dae043599
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/asic_capability_types.h
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DAL_ASIC_CAPABILITY_TYPES_H__
26#define __DAL_ASIC_CAPABILITY_TYPES_H__
27
28/*
29 * ASIC Capabilities
30 */
31struct asic_caps {
32 bool CONSUMER_SINGLE_SELECTED_TIMING:1;
33 bool UNDERSCAN_ADJUST:1;
34 bool DELTA_SIGMA_SUPPORT:1;
35 bool PANEL_SELF_REFRESH_SUPPORTED:1;
36 bool IS_FUSION:1;
37 bool DP_MST_SUPPORTED:1;
38 bool UNDERSCAN_FOR_HDMI_ONLY:1;
39 bool DVI_CLOCK_SHARE_CAPABILITY:1;
40 bool SUPPORT_CEA861E_FINAL:1;
41 bool MIRABILIS_SUPPORTED:1;
42 bool MIRABILIS_ENABLED_BY_DEFAULT:1;
43 bool DEVICE_TAG_REMAP_SUPPORTED:1;
44 bool HEADLESS_NO_OPM_SUPPORTED:1;
45 bool WIRELESS_LIMIT_TO_720P:1;
46 bool WIRELESS_FULL_TIMING_ADJUSTMENT:1;
47 bool WIRELESS_TIMING_ADJUSTMENT:1;
48 bool WIRELESS_COMPRESSED_AUDIO:1;
49 bool VCE_SUPPORTED:1;
50 bool HPD_CHECK_FOR_EDID:1;
51 bool NEED_MC_TUNING:1;
52 bool SKIP_PSR_WAIT_FOR_PLL_LOCK_BIT:1;
53 bool DFSBYPASS_DYNAMIC_SUPPORT:1;
54 bool SUPPORT_8BPP:1;
55};
56
57/*
58 * ASIC Stereo 3D Caps
59 */
60struct asic_stereo_3d_caps {
61 bool SUPPORTED:1;
62 bool DISPLAY_BASED_ON_WS:1;
63 bool HDMI_FRAME_PACK:1;
64 bool INTERLACE_FRAME_PACK:1;
65 bool DISPLAYPORT_FRAME_PACK:1;
66 bool DISPLAYPORT_FRAME_ALT:1;
67 bool INTERLEAVE:1;
68};
69
70/*
71 * ASIC Bugs
72 */
73struct asic_bugs {
74 bool MST_SYMBOL_MISALIGNMENT:1;
75 bool PSR_2X_LANE_GANGING:1;
76 bool LB_WA_IS_SUPPORTED:1;
77 bool ROM_REGISTER_ACCESS:1;
78 bool PSR_WA_OVERSCAN_CRC_ERROR:1;
79};
80
81/*
82 * ASIC Data
83 */
84enum asic_data {
85 ASIC_DATA_FIRST = 0,
86 ASIC_DATA_DCE_VERSION = ASIC_DATA_FIRST,
87 ASIC_DATA_DCE_VERSION_MINOR,
88 ASIC_DATA_LINEBUFFER_SIZE,
89 ASIC_DATA_DRAM_BANDWIDTH_EFFICIENCY,
90 ASIC_DATA_MC_LATENCY,
91 ASIC_DATA_MC_LATENCY_SLOW,
92 ASIC_DATA_MEMORYTYPE_MULTIPLIER,
93 ASIC_DATA_PATH_NUM_PER_DPMST_CONNECTOR,
94 ASIC_DATA_MAX_UNDERSCAN_PERCENTAGE,
95 ASIC_DATA_VIEWPORT_PIXEL_GRANULARITY,
96 ASIC_DATA_MIN_DISPCLK_FOR_UNDERSCAN,
97 ASIC_DATA_DOWNSCALE_LIMIT,
98 ASIC_DATA_MAX_NUMBER /* end of enum */
99};
100
101/*
102 * ASIC Feature Flags
103 */
104struct asic_feature_flags {
105 union {
106 uint32_t raw;
107 struct {
108 uint32_t LEGACY_CLIENT:1;
109 uint32_t PACKED_PIXEL_FORMAT:1;
110 uint32_t WORKSTATION_STEREO:1;
111 uint32_t WORKSTATION:1;
112 } bits;
113 };
114};
115
116#endif /* __DAL_ASIC_CAPABILITY_TYPES_H__ */
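
asic_feature_flags wraps its bitfields in an anonymous union with a raw word, so callers can test one named flag or snapshot the whole set as a single uint32_t. A small sketch of both accesses; the example_* helpers are hypothetical:

/* Sketch: named-bit vs. raw access to the feature flags. */
static int example_is_workstation(const struct asic_feature_flags *flags)
{
	return flags->bits.WORKSTATION;
}

static uint32_t example_snapshot_flags(const struct asic_feature_flags *flags)
{
	return flags->raw; /* all feature bits as one word */
}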
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
new file mode 100644
index 000000000000..6364fbc24cfe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __AUDIO_TYPES_H__
27#define __AUDIO_TYPES_H__
28
29#include "signal_types.h"
30
31#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
32#define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
33#define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
34
35
36struct audio_crtc_info {
37 uint32_t h_total;
38 uint32_t h_active;
39 uint32_t v_active;
40 uint32_t pixel_repetition;
41 uint32_t requested_pixel_clock; /* in KHz */
42 uint32_t calculated_pixel_clock; /* in KHz */
43 uint32_t refresh_rate;
44 enum dc_color_depth color_depth;
45 bool interlaced;
46};
47struct azalia_clock_info {
48 uint32_t pixel_clock_in_10khz;
49 uint32_t audio_dto_phase;
50 uint32_t audio_dto_module;
51 uint32_t audio_dto_wall_clock_ratio;
52};
53
54enum audio_dto_source {
55 DTO_SOURCE_UNKNOWN = 0,
56 DTO_SOURCE_ID0,
57 DTO_SOURCE_ID1,
58 DTO_SOURCE_ID2,
59 DTO_SOURCE_ID3,
60 DTO_SOURCE_ID4,
61 DTO_SOURCE_ID5
62};
63
64/* PLL information required for AZALIA DTO calculation */
65
66struct audio_pll_info {
67 uint32_t dp_dto_source_clock_in_khz;
68 uint32_t feed_back_divider;
69 enum audio_dto_source dto_source;
70 bool ss_enabled;
71 uint32_t ss_percentage;
72 uint32_t ss_percentage_divider;
73};
74
75struct audio_channel_associate_info {
76 union {
77 struct {
78 uint32_t ALL_CHANNEL_FL:4;
79 uint32_t ALL_CHANNEL_FR:4;
80 uint32_t ALL_CHANNEL_FC:4;
81 uint32_t ALL_CHANNEL_Sub:4;
82 uint32_t ALL_CHANNEL_SL:4;
83 uint32_t ALL_CHANNEL_SR:4;
84 uint32_t ALL_CHANNEL_BL:4;
85 uint32_t ALL_CHANNEL_BR:4;
86 } bits;
87 uint32_t u32all;
88 };
89};
90
91struct audio_output {
92 /* Front DIG id. */
93 enum engine_id engine_id;
94 /* encoder output signal */
95 enum signal_type signal;
96 /* video timing */
97 struct audio_crtc_info crtc_info;
98 /* PLL for audio */
99 struct audio_pll_info pll_info;
100};
101
102enum audio_payload {
103 CHANNEL_SPLIT_MAPPINGCHANG = 0x9,
104};
105
106#endif /* __AUDIO_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_interface.h b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
new file mode 100644
index 000000000000..d51101c5c6b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_BIOS_PARSER_INTERFACE_H__
27#define __DAL_BIOS_PARSER_INTERFACE_H__
28
29#include "dc_bios_types.h"
30
31struct bios_parser;
32
33struct bp_init_data {
34 struct dc_context *ctx;
35 uint8_t *bios;
36};
37
38struct dc_bios *dal_bios_parser_create(
39 struct bp_init_data *init,
40 enum dce_version dce_version);
41
42void dal_bios_parser_destroy(struct dc_bios **dcb);
43
44#endif /* __DAL_BIOS_PARSER_INTERFACE_H__ */
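
The BIOS parser interface has the same create/destroy shape as the irq service: fill a bp_init_data with the dc_context and a pointer to the VBIOS image, pass the DCE version, and hand the returned dc_bios back to dal_bios_parser_destroy() when done. A hedged sketch; where the image pointer and version come from is left to the caller, and the example_* wrappers are not part of this interface:

/* Sketch: bring up and release the VBIOS parser. */
static struct dc_bios *example_create_vbios_parser(
	struct dc_context *ctx,
	uint8_t *vbios_image,
	enum dce_version version)
{
	struct bp_init_data init = {
		.ctx = ctx,
		.bios = vbios_image,
	};

	return dal_bios_parser_create(&init, version);
}

static void example_release_vbios_parser(struct dc_bios **dcb)
{
	dal_bios_parser_destroy(dcb); /* expected to free and NULL *dcb */
}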
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
new file mode 100644
index 000000000000..9ab9065735f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -0,0 +1,338 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_BIOS_PARSER_TYPES_H__
27
28#define __DAL_BIOS_PARSER_TYPES_H__
29
30#include "dm_services.h"
31#include "include/signal_types.h"
32#include "include/grph_object_ctrl_defs.h"
33#include "include/gpio_types.h"
34#include "include/link_service_types.h"
35
36/* TODO: include signal_types.h and remove this enum */
37enum as_signal_type {
38 AS_SIGNAL_TYPE_NONE = 0L, /* no signal */
39 AS_SIGNAL_TYPE_DVI,
40 AS_SIGNAL_TYPE_HDMI,
41 AS_SIGNAL_TYPE_LVDS,
42 AS_SIGNAL_TYPE_DISPLAY_PORT,
43 AS_SIGNAL_TYPE_GPU_PLL,
44 AS_SIGNAL_TYPE_UNKNOWN
45};
46
47enum bp_result {
48 BP_RESULT_OK = 0, /* There was no error */
49 BP_RESULT_BADINPUT, /* Bad input parameter */
50 BP_RESULT_BADBIOSTABLE, /* Bad BIOS table */
51 BP_RESULT_UNSUPPORTED, /* BIOS Table is not supported */
52 BP_RESULT_NORECORD, /* Record can't be found */
53 BP_RESULT_FAILURE
54};
55
56enum bp_encoder_control_action {
57 /* direct VBIOS translation! Values kept as-is to simplify the mapping. */
58 ENCODER_CONTROL_DISABLE = 0,
59 ENCODER_CONTROL_ENABLE,
60 ENCODER_CONTROL_SETUP,
61 ENCODER_CONTROL_INIT
62};
63
64enum bp_transmitter_control_action {
65 /* direct VBIOS translation! Values kept as-is to simplify the mapping. */
66 TRANSMITTER_CONTROL_DISABLE = 0,
67 TRANSMITTER_CONTROL_ENABLE,
68 TRANSMITTER_CONTROL_BACKLIGHT_OFF,
69 TRANSMITTER_CONTROL_BACKLIGHT_ON,
70 TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS,
71 TRANSMITTER_CONTROL_LCD_SETF_TEST_START,
72 TRANSMITTER_CONTROL_LCD_SELF_TEST_STOP,
73 TRANSMITTER_CONTROL_INIT,
74 TRANSMITTER_CONTROL_DEACTIVATE,
75 TRANSMITTER_CONTROL_ACTIAVATE,
76 TRANSMITTER_CONTROL_SETUP,
77 TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS,
78 /* ATOM_TRANSMITTER_ACTION_POWER_ON. This action is for eDP only
79 * (power up the panel)
80 */
81 TRANSMITTER_CONTROL_POWER_ON,
82 /* ATOM_TRANSMITTER_ACTION_POWER_OFF. This action is for eDP only
83 * (power down the panel)
84 */
85 TRANSMITTER_CONTROL_POWER_OFF
86};
87
88enum bp_external_encoder_control_action {
89 EXTERNAL_ENCODER_CONTROL_DISABLE = 0,
90 EXTERNAL_ENCODER_CONTROL_ENABLE = 1,
91 EXTERNAL_ENCODER_CONTROL_INIT = 0x7,
92 EXTERNAL_ENCODER_CONTROL_SETUP = 0xf,
93 EXTERNAL_ENCODER_CONTROL_UNBLANK = 0x10,
94 EXTERNAL_ENCODER_CONTROL_BLANK = 0x11,
95};
96
97enum bp_pipe_control_action {
98 ASIC_PIPE_DISABLE = 0,
99 ASIC_PIPE_ENABLE,
100 ASIC_PIPE_INIT
101};
102
103struct bp_encoder_control {
104 enum bp_encoder_control_action action;
105 enum engine_id engine_id;
106 enum transmitter transmitter;
107 enum signal_type signal;
108 enum dc_lane_count lanes_number;
109 enum dc_color_depth color_depth;
110 bool enable_dp_audio;
111 uint32_t pixel_clock; /* khz */
112};
113
114struct bp_external_encoder_control {
115 enum bp_external_encoder_control_action action;
116 enum engine_id engine_id;
117 enum dc_link_rate link_rate;
118 enum dc_lane_count lanes_number;
119 enum signal_type signal;
120 enum dc_color_depth color_depth;
121 bool coherent;
122 struct graphics_object_id encoder_id;
123 struct graphics_object_id connector_obj_id;
124 uint32_t pixel_clock; /* in KHz */
125};
126
127struct bp_crtc_source_select {
128 enum engine_id engine_id;
129 enum controller_id controller_id;
130 /* from GPU Tx aka asic_signal */
131 enum signal_type signal;
132 /* sink_signal may differ from asic_signal when a translator encoder is used */
133 enum signal_type sink_signal;
134 enum display_output_bit_depth display_output_bit_depth;
135 bool enable_dp_audio;
136};
137
138struct bp_transmitter_control {
139 enum bp_transmitter_control_action action;
140 enum engine_id engine_id;
141 enum transmitter transmitter; /* PhyId */
142 enum dc_lane_count lanes_number;
143 enum clock_source_id pll_id; /* needed for DCE 4.0 */
144 enum signal_type signal;
145 enum dc_color_depth color_depth; /* not used for DCE6.0 */
146 enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCE 6.0 */
147 struct graphics_object_id connector_obj_id;
148 /* symclock (VBIOS parameter, in 10 kHz units): the pixel clock; in HDMI
149 * deep color mode it should be pixel clock * deep_color_ratio (in kHz)
150 */
151 uint32_t pixel_clock;
152 uint32_t lane_select;
153 uint32_t lane_settings;
154 bool coherent;
155 bool multi_path;
156 bool single_pll_mode;
157};
158
159struct bp_blank_crtc_parameters {
160 enum controller_id controller_id;
161 uint32_t black_color_rcr;
162 uint32_t black_color_gy;
163 uint32_t black_color_bcb;
164};
165
166struct bp_hw_crtc_timing_parameters {
167 enum controller_id controller_id;
168 /* horizontal part */
169 uint32_t h_total;
170 uint32_t h_addressable;
171 uint32_t h_overscan_left;
172 uint32_t h_overscan_right;
173 uint32_t h_sync_start;
174 uint32_t h_sync_width;
175
176 /* vertical part */
177 uint32_t v_total;
178 uint32_t v_addressable;
179 uint32_t v_overscan_top;
180 uint32_t v_overscan_bottom;
181 uint32_t v_sync_start;
182 uint32_t v_sync_width;
183
184 struct timing_flags {
185 uint32_t INTERLACE:1;
186 uint32_t PIXEL_REPETITION:4;
187 uint32_t HSYNC_POSITIVE_POLARITY:1;
188 uint32_t VSYNC_POSITIVE_POLARITY:1;
189 uint32_t HORZ_COUNT_BY_TWO:1;
190 } flags;
191};
192
193struct bp_hw_crtc_overscan_parameters {
194 enum controller_id controller_id;
195 uint32_t h_overscan_left;
196 uint32_t h_overscan_right;
197 uint32_t v_overscan_top;
198 uint32_t v_overscan_bottom;
199};
200
201struct bp_adjust_pixel_clock_parameters {
202 /* Input: Signal Type - to be converted to Encoder mode */
203 enum signal_type signal_type;
204 /* Input: Encoder object id */
205 struct graphics_object_id encoder_object_id;
206 /* Input: Pixel Clock (requested Pixel clock based on Video timing
207 * standard used) in KHz
208 */
209 uint32_t pixel_clock;
210 /* Output: Adjusted Pixel Clock (after VBIOS exec table) in KHz */
211 uint32_t adjusted_pixel_clock;
212 /* Output: If non-zero, this refDiv value should be used to calculate
213 * other ppll params */
214 uint32_t reference_divider;
215 /* Output: If non-zero, this postDiv value should be used to calculate
216 * other ppll params */
217 uint32_t pixel_clock_post_divider;
218 /* Input: Enable spread spectrum */
219 bool ss_enable;
220};
221
222struct bp_pixel_clock_parameters {
223 enum controller_id controller_id; /* (Which CRTC uses this PLL) */
224 enum clock_source_id pll_id; /* Clock Source Id */
225 /* signal_type -> Encoder Mode - needed by VBIOS Exec table */
226 enum signal_type signal_type;
227 /* Adjusted Pixel Clock (after VBIOS exec table)
228 * that becomes Target Pixel Clock (KHz) */
229 uint32_t target_pixel_clock;
230 /* Calculated Reference divider of Display PLL */
231 uint32_t reference_divider;
232 /* Calculated Feedback divider of Display PLL */
233 uint32_t feedback_divider;
234 /* Calculated Fractional Feedback divider of Display PLL */
235 uint32_t fractional_feedback_divider;
236 /* Calculated Pixel Clock Post divider of Display PLL */
237 uint32_t pixel_clock_post_divider;
238 struct graphics_object_id encoder_object_id; /* Encoder object id */
239 /* VBIOS returns a fixed display clock when DFS-bypass feature
240 * is enabled (KHz) */
241 uint32_t dfs_bypass_display_clock;
242 /* color depth to support HDMI deep color */
243 enum transmitter_color_depth color_depth;
244
245 struct program_pixel_clock_flags {
246 uint32_t FORCE_PROGRAMMING_OF_PLL:1;
247 /* Use Engine Clock as source for Display Clock when
248 * programming PLL */
249 uint32_t USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK:1;
250 /* Use external reference clock (refDivSrc for PLL) */
251 uint32_t SET_EXTERNAL_REF_DIV_SRC:1;
252 /* Force program PHY PLL only */
253 uint32_t PROGRAM_PHY_PLL_ONLY:1;
254 /* Support for YUV420 */
255 uint32_t SUPPORT_YUV_420:1;
256 /* Use XTALIN reference clock source */
257 uint32_t SET_XTALIN_REF_SRC:1;
258 /* Use GENLK reference clock source */
259 uint32_t SET_GENLOCK_REF_DIV_SRC:1;
260 } flags;
261};
262
263struct bp_display_clock_parameters {
264 uint32_t target_display_clock; /* KHz */
265 /* Actual Display Clock set due to clock divider granularity KHz */
266 uint32_t actual_display_clock;
267 /* Actual Post Divider ID used to generate the actual clock */
268 uint32_t actual_post_divider_id;
269};
270
271enum bp_dce_clock_type {
272 DCECLOCK_TYPE_DISPLAY_CLOCK = 0,
273 DCECLOCK_TYPE_DPREFCLK = 1
274};
275
276/* DCE Clock Parameters structure for SetDceClock Exec command table */
277struct bp_set_dce_clock_parameters {
278 enum clock_source_id pll_id; /* Clock Source Id */
279 /* Display clock or DPREFCLK value */
280 uint32_t target_clock_frequency;
281 /* Clock to set: 0 = DISPCLK, 1 = DPREFCLK, 2 = PIXCLK */
282 enum bp_dce_clock_type clock_type;
283
284 struct set_dce_clock_flags {
285 uint32_t USE_GENERICA_AS_SOURCE_FOR_DPREFCLK:1;
286 /* Use XTALIN reference clock source */
287 uint32_t USE_XTALIN_AS_SOURCE_FOR_DPREFCLK:1;
288 /* Use PCIE reference clock source */
289 uint32_t USE_PCIE_AS_SOURCE_FOR_DPREFCLK:1;
290 /* Use GENLK reference clock source */
291 uint32_t USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK:1;
292 } flags;
293};
294
295struct spread_spectrum_flags {
296 /* 1 = Center Spread; 0 = down spread */
297 uint32_t CENTER_SPREAD:1;
298 /* 1 = external; 0 = internal */
299 uint32_t EXTERNAL_SS:1;
300 /* 1 = delta-sigma type parameter; 0 = ver1 */
301 uint32_t DS_TYPE:1;
302};
303
304struct bp_spread_spectrum_parameters {
305 enum clock_source_id pll_id;
306 uint32_t percentage;
307 uint32_t ds_frac_amount;
308
309 union {
310 struct {
311 uint32_t step;
312 uint32_t delay;
313 uint32_t range; /* In Hz unit */
314 } ver1;
315 struct {
316 uint32_t feedback_amount;
317 uint32_t nfrac_amount;
318 uint32_t ds_frac_size;
319 } ds;
320 };
321
322 struct spread_spectrum_flags flags;
323};
324
325struct bp_encoder_cap_info {
326 uint32_t DP_HBR2_CAP:1;
327 uint32_t DP_HBR2_EN:1;
328 uint32_t DP_HBR3_EN:1;
329 uint32_t HDMI_6GB_EN:1;
330 uint32_t RESERVED:30;
331};
332
333struct bp_gpio_cntl_info {
334 uint32_t id;
335 enum gpio_pin_output_state state;
336};
337
338#endif /*__DAL_BIOS_PARSER_TYPES_H__ */
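As a rough illustration of how these request structures are filled by a caller before invoking the corresponding VBIOS exec-table wrapper; the enum values used below come from the dc/grph_object headers elsewhere in this patch and are assumptions for the sake of the example:

	static void example_fill_encoder_control(struct bp_encoder_control *ctrl)
	{
		memset(ctrl, 0, sizeof(*ctrl));

		ctrl->action = ENCODER_CONTROL_ENABLE;
		ctrl->engine_id = ENGINE_ID_DIGA;          /* assumed engine id */
		ctrl->transmitter = TRANSMITTER_UNIPHY_A;  /* assumed PHY */
		ctrl->signal = SIGNAL_TYPE_HDMI_TYPE_A;    /* assumed signal type */
		ctrl->lanes_number = LANE_COUNT_FOUR;      /* assumed lane count */
		ctrl->color_depth = COLOR_DEPTH_888;       /* 8 bpc */
		ctrl->enable_dp_audio = false;
		ctrl->pixel_clock = 148500;                /* 1080p60, in kHz */
	}

The filled structure is then handed to the bios parser's encoder-control entry point, which maps it onto the corresponding ATOM command table call.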
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
new file mode 100644
index 000000000000..119297e3bdc0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -0,0 +1,125 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_ASIC_ID_H__
27#define __DAL_ASIC_ID_H__
28
29/*
30 * ASIC internal revision ID
31 */
32
33/* DCE80 (based on ci_id.h in Perforce) */
34#define CI_BONAIRE_M_A0 0x14
35#define CI_BONAIRE_M_A1 0x15
36#define CI_HAWAII_P_A0 0x28
37
38#define CI_UNKNOWN 0xFF
39
40#define ASIC_REV_IS_BONAIRE_M(rev) \
41 ((rev >= CI_BONAIRE_M_A0) && (rev < CI_HAWAII_P_A0))
42
43#define ASIC_REV_IS_HAWAII_P(rev) \
44 (rev >= CI_HAWAII_P_A0)
45
46/* KV1 with Spectre GFX core, 8-8-1-2 (CU-Pix-Primitive-RB) */
47#define KV_SPECTRE_A0 0x01
48
49/* KV2 with Spooky GFX core, including parts downgraded from the Spectre
50 * core, 3-4-1-1 (CU-Pix-Primitive-RB) */
51#define KV_SPOOKY_A0 0x41
52
53/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
54#define KB_KALINDI_A0 0x81
55
56/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
57#define KB_KALINDI_A1 0x82
58
59/* BV with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
60#define BV_KALINDI_A2 0x85
61
62/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
63#define ML_GODAVARI_A0 0xA1
64
65/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
66#define ML_GODAVARI_A1 0xA2
67
68#define KV_UNKNOWN 0xFF
69
70#define ASIC_REV_IS_KALINDI(rev) \
71 ((rev >= KB_KALINDI_A0) && (rev < KV_UNKNOWN))
72
73#define ASIC_REV_IS_BHAVANI(rev) \
74 ((rev >= BV_KALINDI_A2) && (rev < ML_GODAVARI_A0))
75
76#define ASIC_REV_IS_GODAVARI(rev) \
77 ((rev >= ML_GODAVARI_A0) && (rev < KV_UNKNOWN))
78
79/* VI Family */
80/* DCE10 */
81#define VI_TONGA_P_A0 20
82#define VI_TONGA_P_A1 21
83#define VI_FIJI_P_A0 60
84
85/* DCE112 */
86#define VI_POLARIS10_P_A0 80
87#define VI_POLARIS11_M_A0 90
88
89#define VI_UNKNOWN 0xFF
90
91#define ASIC_REV_IS_TONGA_P(eChipRev) ((eChipRev >= VI_TONGA_P_A0) && \
92 (eChipRev < 40))
93#define ASIC_REV_IS_FIJI_P(eChipRev) ((eChipRev >= VI_FIJI_P_A0) && \
94 (eChipRev < 80))
95
96#define ASIC_REV_IS_POLARIS10_P(eChipRev) ((eChipRev >= VI_POLARIS10_P_A0) && \
97 (eChipRev < VI_POLARIS11_M_A0))
98#define ASIC_REV_IS_POLARIS11_M(eChipRev) (eChipRev >= VI_POLARIS11_M_A0)
99
100/* DCE11 */
101#define CZ_CARRIZO_A0 0x01
102
103#define STONEY_A0 0x61
104#define CZ_UNKNOWN 0xFF
105
106#define ASIC_REV_IS_STONEY(rev) \
107 ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
108
109/*
110 * ASIC chip ID
111 */
112/* DCE80 */
113#define DEVICE_ID_KALINDI_9834 0x9834
114#define DEVICE_ID_TEMASH_9839 0x9839
115#define DEVICE_ID_TEMASH_983D 0x983D
116
117/* Asic Family IDs for different asic family. */
118#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
119#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
120#define FAMILY_VI 130 /* Volcanic Islands: Iceland (V), Tonga (M) */
121#define FAMILY_CZ 135 /* Carrizo */
122
123#define FAMILY_UNKNOWN 0xFF
124
125#endif /* __DAL_ASIC_ID_H__ */
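A sketch of how these family IDs and revision macros can feed the enum dce_version defined in dal_types.h below; the mapping follows the DCE versions listed in the commit message, but the exact resolution logic used by the driver may differ:

	static enum dce_version example_resolve_dce(uint32_t family, uint32_t rev)
	{
		switch (family) {
		case FAMILY_CI:
		case FAMILY_KV:
			return DCE_VERSION_8_0;          /* Sea Islands, Kaveri/Kabini */
		case FAMILY_VI:
			if (ASIC_REV_IS_POLARIS10_P(rev) ||
			    ASIC_REV_IS_POLARIS11_M(rev))
				return DCE_VERSION_11_2; /* Polaris */
			return DCE_VERSION_10_0;         /* Tonga, Fiji */
		case FAMILY_CZ:
			return DCE_VERSION_11_0;         /* Carrizo, Stoney */
		default:
			return DCE_VERSION_UNKNOWN;
		}
	}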
diff --git a/drivers/gpu/drm/amd/display/include/dal_register_logger.h b/drivers/gpu/drm/amd/display/include/dal_register_logger.h
new file mode 100644
index 000000000000..00dfcd70cc37
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_register_logger.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_REGISTER_LOGGER__
27#define __DAL_REGISTER_LOGGER__
28
29/****************
30 * API functions
31 ***************/
32
33/* dal_reg_logger_push - begin Register Logging */
34void dal_reg_logger_push(const char *caller_func);
35/* dal_reg_logger_pop - stop Register Logging */
36void dal_reg_logger_pop(void);
37
38/* for internal use of the Logger only */
39void dal_reg_logger_rw_count_increment(void);
40bool dal_reg_logger_should_dump_register(void);
41
42#endif /* __DAL_REGISTER_LOGGER__ */
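The intended bracketing pattern is sketched below (the surrounding function is illustrative); register accesses performed between push and pop are attributed to the named caller, and the register access macros are assumed to call the internal helpers while logging is active:

	static void example_program_registers(void)
	{
		dal_reg_logger_push(__func__); /* begin register logging for this caller */

		/* ... register reads/writes issued here are counted
		 * (dal_reg_logger_rw_count_increment) and dumped while
		 * logging is active ... */

		dal_reg_logger_pop();          /* stop register logging */
	}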
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
new file mode 100644
index 000000000000..ada5b19e85eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_TYPES_H__
27#define __DAL_TYPES_H__
28
29#include "signal_types.h"
30#include "dc_types.h"
31
32struct dal_logger;
33struct dc_bios;
34
35enum dce_version {
36 DCE_VERSION_UNKNOWN = (-1),
37 DCE_VERSION_8_0,
38 DCE_VERSION_10_0,
39 DCE_VERSION_11_0,
40 DCE_VERSION_11_2,
41 DCE_VERSION_MAX,
42};
43
44#endif /* __DAL_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
new file mode 100644
index 000000000000..0a6ba91e3eba
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -0,0 +1,189 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef __DAL_DDC_SERVICE_TYPES_H__
26#define __DAL_DDC_SERVICE_TYPES_H__
27
28#define DP_BRANCH_DEVICE_ID_1 0x0010FA
29#define DP_BRANCH_DEVICE_ID_2 0x0022B9
30#define DP_SINK_DEVICE_ID_1 0x4CE000
31#define DP_BRANCH_DEVICE_ID_3 0x00001A
32#define DP_BRANCH_DEVICE_ID_4 0x0080e1
33#define DP_BRANCH_DEVICE_ID_5 0x006037
34#define DP_SINK_DEVICE_ID_2 0x001CF8
35
36
37enum ddc_result {
38 DDC_RESULT_UNKNOWN = 0,
39 DDC_RESULT_SUCESSFULL,
40 DDC_RESULT_FAILED_CHANNEL_BUSY,
41 DDC_RESULT_FAILED_TIMEOUT,
42 DDC_RESULT_FAILED_PROTOCOL_ERROR,
43 DDC_RESULT_FAILED_NACK,
44 DDC_RESULT_FAILED_INCOMPLETE,
45 DDC_RESULT_FAILED_OPERATION,
46 DDC_RESULT_FAILED_INVALID_OPERATION,
47 DDC_RESULT_FAILED_BUFFER_OVERFLOW
48};
49
50enum ddc_service_type {
51 DDC_SERVICE_TYPE_CONNECTOR,
52 DDC_SERVICE_TYPE_DISPLAY_PORT_MST,
53};
54
55enum dcs_dpcd_revision {
56 DCS_DPCD_REV_10 = 0x10,
57 DCS_DPCD_REV_11 = 0x11,
58 DCS_DPCD_REV_12 = 0x12
59};
60
61/**
62 * display sink capability
63 */
64struct display_sink_capability {
65 /* dongle type (DP converter, CV smart dongle) */
66 enum display_dongle_type dongle_type;
67
68 /**********************************************************
69 capabilities going INTO SINK DEVICE (stream capabilities)
70 **********************************************************/
71 /* Dongle's downstream count. */
72 uint32_t downstrm_sink_count;
73 /* Is dongle's downstream count info field (downstrm_sink_count)
74 * valid. */
75 bool downstrm_sink_count_valid;
76
77 /* Maximum additional audio delay in microsecond (us) */
78 uint32_t additional_audio_delay;
79 /* Audio latency value in microsecond (us) */
80 uint32_t audio_latency;
81 /* Interlace video latency value in microsecond (us) */
82 uint32_t video_latency_interlace;
83 /* Progressive video latency value in microsecond (us) */
84 uint32_t video_latency_progressive;
85 /* Dongle caps: Maximum pixel clock supported over dongle for HDMI */
86 uint32_t max_hdmi_pixel_clock;
87 /* Dongle caps: Maximum deep color supported over dongle for HDMI */
88 enum dc_color_depth max_hdmi_deep_color;
89
90 /************************************************************
91 capabilities going OUT OF SOURCE DEVICE (link capabilities)
92 ************************************************************/
93 /* support for Spread Spectrum(SS) */
94 bool ss_supported;
95 /* DP link settings (laneCount, linkRate, Spread) */
96 uint32_t dp_link_lane_count;
97 uint32_t dp_link_rate;
98 uint32_t dp_link_spead;
99
100 enum dcs_dpcd_revision dpcd_revision;
101 /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
102 indicates 'Frame Sequential to Frame Pack' conversion capability. */
103 bool is_dp_hdmi_s3d_converter;
104 /* to check if we have queried the display capability
105 * for eDP panel already. */
106 bool is_edp_sink_cap_valid;
107
108 enum ddc_transaction_type transaction_type;
109 enum signal_type signal;
110};
111
112struct av_sync_data {
113 uint8_t av_granularity;/* DPCD 00023h */
114 uint8_t aud_dec_lat1;/* DPCD 00024h */
115 uint8_t aud_dec_lat2;/* DPCD 00025h */
116 uint8_t aud_pp_lat1;/* DPCD 00026h */
117 uint8_t aud_pp_lat2;/* DPCD 00027h */
118 uint8_t vid_inter_lat;/* DPCD 00028h */
119 uint8_t vid_prog_lat;/* DPCD 00029h */
120 uint8_t aud_del_ins1;/* DPCD 0002Bh */
121 uint8_t aud_del_ins2;/* DPCD 0002Ch */
122 uint8_t aud_del_ins3;/* DPCD 0002Dh */
123};
124
125/** EDID retrieval related constants, also used by MstMgr **/
126
127#define DDC_EDID_SEGMENT_SIZE 256
128#define DDC_EDID_BLOCK_SIZE 128
129#define DDC_EDID_BLOCKS_PER_SEGMENT \
130 (DDC_EDID_SEGMENT_SIZE / DDC_EDID_BLOCK_SIZE)
131
132#define DDC_EDID_EXT_COUNT_OFFSET 0x7E
133
134#define DDC_EDID_ADDRESS_START 0x50
135#define DDC_EDID_ADDRESS_END 0x52
136#define DDC_EDID_SEGMENT_ADDRESS 0x30
137
138/* signatures for Edid 1x */
139#define DDC_EDID1X_VENDORID_SIGNATURE_OFFSET 8
140#define DDC_EDID1X_VENDORID_SIGNATURE_LEN 4
141#define DDC_EDID1X_EXT_CNT_AND_CHECKSUM_OFFSET 126
142#define DDC_EDID1X_EXT_CNT_AND_CHECKSUM_LEN 2
143#define DDC_EDID1X_CHECKSUM_OFFSET 127
144/* signatures for Edid 20*/
145#define DDC_EDID_20_SIGNATURE_OFFSET 0
146#define DDC_EDID_20_SIGNATURE 0x20
147
148#define DDC_EDID20_VENDORID_SIGNATURE_OFFSET 1
149#define DDC_EDID20_VENDORID_SIGNATURE_LEN 4
150#define DDC_EDID20_CHECKSUM_OFFSET 255
151#define DDC_EDID20_CHECKSUM_LEN 1
152
153/*DP to VGA converter*/
154static const uint8_t DP_VGA_CONVERTER_ID_1[] = "mVGAa";
155/*DP to Dual link DVI converter*/
156static const uint8_t DP_DVI_CONVERTER_ID_1[] = "m2DVIa";
157/*Travis*/
158static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
159/*Nutmeg*/
160static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
161/*DP to VGA converter*/
162static const uint8_t DP_VGA_CONVERTER_ID_4[] = "DpVga";
163/*DP to Dual link DVI converter*/
164static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
165/*DP to Dual link DVI converter 2*/
166static const uint8_t DP_DVI_CONVERTER_ID_42[] = "v2DVIa";
167
168static const uint8_t DP_SINK_DEV_STRING_ID2_REV0[] = "\0\0\0\0\0\0";
169
170/* Identifies second generation PSR TCON from Parade: Device ID string:
171 * yy-xx-**-**-**-**
172 */
173/* xx - Hw ID high byte */
174static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_HIGH_BYTE =
175 0x06;
176
177/* yy - HW ID low byte, the same silicon has several package/feature flavors */
178static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE1 =
179 0x61;
180static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE2 =
181 0x62;
182static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE3 =
183 0x63;
184static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE4 =
185 0x72;
186static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE5 =
187 0x73;
188
189#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
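A small sketch of how the EDID constants above fit together when sizing an EDID read; the buffer argument is assumed to already hold the 128-byte base block:

	static uint32_t example_edid_total_size(const uint8_t *edid_block0)
	{
		uint32_t ext_count = edid_block0[DDC_EDID_EXT_COUNT_OFFSET];
		uint32_t total = (ext_count + 1) * DDC_EDID_BLOCK_SIZE;

		/* One extension block -> 256 bytes, i.e. exactly one
		 * DDC_EDID_SEGMENT_SIZE segment read at DDC_EDID_ADDRESS_START;
		 * further segments are selected through the segment pointer at
		 * DDC_EDID_SEGMENT_ADDRESS. */
		return total;
	}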
diff --git a/drivers/gpu/drm/amd/display/include/display_clock_interface.h b/drivers/gpu/drm/amd/display/include/display_clock_interface.h
new file mode 100644
index 000000000000..2006fa21f54c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/display_clock_interface.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DISPLAY_CLOCK_INTERFACE_H__
27#define __DISPLAY_CLOCK_INTERFACE_H__
28
29#include "hw_sequencer_types.h"
30#include "grph_object_defs.h"
31#include "signal_types.h"
32
33/* Timing related information*/
34struct dc_timing_params {
35 uint32_t INTERLACED:1;
36 uint32_t HCOUNT_BY_TWO:1;
37 uint32_t PIXEL_REPETITION:4; /*< values 1 to 10 supported*/
38 uint32_t PREFETCH:1;
39
40 uint32_t h_total;
41 uint32_t h_addressable;
42 uint32_t h_sync_width;
43};
44
45/* Scaling related information*/
46struct dc_scaling_params {
47 uint32_t h_overscan_right;
48 uint32_t h_overscan_left;
49 uint32_t h_taps;
50 uint32_t v_taps;
51};
52
53/* VScalerEfficiency */
54enum v_scaler_efficiency {
55 V_SCALER_EFFICIENCY_LB36BPP = 0,
56 V_SCALER_EFFICIENCY_LB30BPP = 1,
57 V_SCALER_EFFICIENCY_LB24BPP = 2,
58 V_SCALER_EFFICIENCY_LB18BPP = 3
59};
60
61/* Parameters required for minimum Engine
62 * and minimum Display clock calculations*/
63struct min_clock_params {
64 uint32_t id;
65 uint32_t requested_pixel_clock; /* in KHz */
66 uint32_t actual_pixel_clock; /* in KHz */
67 struct view source_view;
68 struct view dest_view;
69 struct dc_timing_params timing_info;
70 struct dc_scaling_params scaling_info;
71 enum signal_type signal_type;
72 enum dc_color_depth deep_color_depth;
73 enum v_scaler_efficiency scaler_efficiency;
74 bool line_buffer_prefetch_enabled;
75};
76
77/* Result of Minimum System and Display clock calculations.
78 * Minimum System clock and Display clock, source and path to be used
79 * for Display clock*/
80struct minimum_clocks_calculation_result {
81 uint32_t min_sclk_khz;
82 uint32_t min_dclk_khz;
83 uint32_t min_mclk_khz;
84 uint32_t min_deep_sleep_sclk;
85};
86
87/* Enumeration of all clocks states */
88enum clocks_state {
89 CLOCKS_STATE_INVALID = 0,
90 CLOCKS_STATE_ULTRA_LOW,
91 CLOCKS_STATE_LOW,
92 CLOCKS_STATE_NOMINAL,
93 CLOCKS_STATE_PERFORMANCE,
94 /* Starting from DCE11, Max 8 level DPM state supported */
95 CLOCKS_DPM_STATE_LEVEL_INVALID = CLOCKS_STATE_INVALID,
96 CLOCKS_DPM_STATE_LEVEL_0 = CLOCKS_STATE_ULTRA_LOW,
97 CLOCKS_DPM_STATE_LEVEL_1 = CLOCKS_STATE_LOW,
98 CLOCKS_DPM_STATE_LEVEL_2 = CLOCKS_STATE_NOMINAL,
99 CLOCKS_DPM_STATE_LEVEL_3 = CLOCKS_STATE_PERFORMANCE,
100 CLOCKS_DPM_STATE_LEVEL_4 = CLOCKS_DPM_STATE_LEVEL_3 + 1,
101 CLOCKS_DPM_STATE_LEVEL_5 = CLOCKS_DPM_STATE_LEVEL_4 + 1,
102 CLOCKS_DPM_STATE_LEVEL_6 = CLOCKS_DPM_STATE_LEVEL_5 + 1,
103 CLOCKS_DPM_STATE_LEVEL_7 = CLOCKS_DPM_STATE_LEVEL_6 + 1,
104};
105
106/* Structure containing all state-dependent clocks
107 * (dependent on "enum clocks_state") */
108struct state_dependent_clocks {
109 uint32_t display_clk_khz;
110 uint32_t pixel_clk_khz;
111};
112
113struct display_clock_state {
114 uint32_t DFS_BYPASS_ACTIVE:1;
115};
116
117struct display_clock;
118
119struct display_clock *dal_display_clock_dce112_create(
120 struct dc_context *ctx);
121
122struct display_clock *dal_display_clock_dce110_create(
123 struct dc_context *ctx);
124
125struct display_clock *dal_display_clock_dce80_create(
126 struct dc_context *ctx);
127
128void dal_display_clock_destroy(struct display_clock **to_destroy);
129bool dal_display_clock_validate(
130 struct display_clock *disp_clk,
131 struct min_clock_params *params);
132uint32_t dal_display_clock_calculate_min_clock(
133 struct display_clock *disp_clk,
134 uint32_t path_num,
135 struct min_clock_params *params);
136uint32_t dal_display_clock_get_validation_clock(struct display_clock *disp_clk);
137void dal_display_clock_set_clock(
138 struct display_clock *disp_clk,
139 uint32_t requested_clock_khz);
140uint32_t dal_display_clock_get_clock(struct display_clock *disp_clk);
141bool dal_display_clock_get_min_clocks_state(
142 struct display_clock *disp_clk,
143 enum clocks_state *clocks_state);
144bool dal_display_clock_get_required_clocks_state(
145 struct display_clock *disp_clk,
146 struct state_dependent_clocks *req_clocks,
147 enum clocks_state *clocks_state);
148bool dal_display_clock_set_min_clocks_state(
149 struct display_clock *disp_clk,
150 enum clocks_state clocks_state);
151uint32_t dal_display_clock_get_dp_ref_clk_frequency(
152 struct display_clock *disp_clk);
153/* the second parameter of dal_display_clock_switch_reference_clock is
154 * a dummy argument for all pre-DCE 6.0 versions */
155void dal_display_clock_switch_reference_clock(
156 struct display_clock *disp_clk,
157 bool use_external_ref_clk,
158 uint32_t requested_clock_khz);
159void dal_display_clock_set_dp_ref_clock_source(
160 struct display_clock *disp_clk,
161 enum clock_source_id clk_src);
162void dal_display_clock_store_max_clocks_state(
163 struct display_clock *disp_clk,
164 enum clocks_state max_clocks_state);
165void dal_display_clock_set_clock_state(
166 struct display_clock *disp_clk,
167 struct display_clock_state clk_state);
168struct display_clock_state dal_display_clock_get_clock_state(
169 struct display_clock *disp_clk);
170uint32_t dal_display_clock_get_dfs_bypass_threshold(
171 struct display_clock *disp_clk);
172void dal_display_clock_invalid_clock_state(
173 struct display_clock *disp_clk);
174
175#endif /* __DISPLAY_CLOCK_INTERFACE_H__ */
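A usage sketch of the display clock object for DCE 11.0; dc_ctx and the clock numbers are placeholders, the signal enum value is an assumption, and only a few of the min_clock_params fields are shown:

	static void example_set_display_clock(struct dc_context *dc_ctx)
	{
		struct display_clock *disp_clk = dal_display_clock_dce110_create(dc_ctx);
		struct min_clock_params params = { 0 };
		uint32_t min_clk_khz;

		if (!disp_clk)
			return;

		params.requested_pixel_clock = 148500;        /* kHz */
		params.actual_pixel_clock = 148500;           /* kHz */
		params.signal_type = SIGNAL_TYPE_HDMI_TYPE_A; /* assumed enum value */
		/* ... source/dest views, timing_info and scaling_info go here ... */

		min_clk_khz = dal_display_clock_calculate_min_clock(disp_clk, 1, &params);
		dal_display_clock_set_clock(disp_clk, min_clk_khz);

		dal_display_clock_destroy(&disp_clk);
	}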
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
new file mode 100644
index 000000000000..fbb2729148df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -0,0 +1,742 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_DPCD_DEFS_H__
27#define __DAL_DPCD_DEFS_H__
28
29enum dpcd_address {
30/* addresses marked with 1.2 are only defined since DP 1.2 spec */
31
32 /* Receiver Capability Field */
33 DPCD_ADDRESS_DPCD_REV = 0x00000,
34 DPCD_ADDRESS_MAX_LINK_RATE = 0x00001,
35 DPCD_ADDRESS_MAX_LANE_COUNT = 0x00002,
36 DPCD_ADDRESS_MAX_DOWNSPREAD = 0x00003,
37 DPCD_ADDRESS_NORP = 0x00004,
38 DPCD_ADDRESS_DOWNSTREAM_PORT_PRESENT = 0x00005,
39 DPCD_ADDRESS_MAIN_LINK_CHANNEL_CODING = 0x00006,
40 DPCD_ADDRESS_DOWNSTREAM_PORT_COUNT = 0x00007,
41 DPCD_ADDRESS_RECEIVE_PORT0_CAP0 = 0x00008,
42 DPCD_ADDRESS_RECEIVE_PORT0_CAP1 = 0x00009,
43 DPCD_ADDRESS_RECEIVE_PORT1_CAP0 = 0x0000A,
44 DPCD_ADDRESS_RECEIVE_PORT1_CAP1 = 0x0000B,
45
46 DPCD_ADDRESS_I2C_SPEED_CNTL_CAP = 0x0000C,/*1.2*/
47 DPCD_ADDRESS_EDP_CONFIG_CAP = 0x0000D,/*1.2*/
48 DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL = 0x000E,/*1.2*/
49
50 DPCD_ADDRESS_MSTM_CAP = 0x00021,/*1.2*/
51
52 /* Audio Video Sync Data Field */
53 DPCD_ADDRESS_AV_GRANULARITY = 0x0023,
54 DPCD_ADDRESS_AUDIO_DECODE_LATENCY1 = 0x0024,
55 DPCD_ADDRESS_AUDIO_DECODE_LATENCY2 = 0x0025,
56 DPCD_ADDRESS_AUDIO_POSTPROCESSING_LATENCY1 = 0x0026,
57 DPCD_ADDRESS_AUDIO_POSTPROCESSING_LATENCY2 = 0x0027,
58 DPCD_ADDRESS_VIDEO_INTERLACED_LATENCY = 0x0028,
59 DPCD_ADDRESS_VIDEO_PROGRESSIVE_LATENCY = 0x0029,
60 DPCD_ADDRESS_AUDIO_DELAY_INSERT1 = 0x0002B,
61 DPCD_ADDRESS_AUDIO_DELAY_INSERT2 = 0x0002C,
62 DPCD_ADDRESS_AUDIO_DELAY_INSERT3 = 0x0002D,
63
64 /* Audio capability */
65 DPCD_ADDRESS_NUM_OF_AUDIO_ENDPOINTS = 0x00022,
66
67 DPCD_ADDRESS_GUID_START = 0x00030,/*1.2*/
68 DPCD_ADDRESS_GUID_END = 0x0003f,/*1.2*/
69
70 DPCD_ADDRESS_PSR_SUPPORT_VER = 0x00070,
71 DPCD_ADDRESS_PSR_CAPABILITY = 0x00071,
72
73 DPCD_ADDRESS_DWN_STRM_PORT0_CAPS = 0x00080,/*1.2a*/
74
75 /* Link Configuration Field */
76 DPCD_ADDRESS_LINK_BW_SET = 0x00100,
77 DPCD_ADDRESS_LANE_COUNT_SET = 0x00101,
78 DPCD_ADDRESS_TRAINING_PATTERN_SET = 0x00102,
79 DPCD_ADDRESS_LANE0_SET = 0x00103,
80 DPCD_ADDRESS_LANE1_SET = 0x00104,
81 DPCD_ADDRESS_LANE2_SET = 0x00105,
82 DPCD_ADDRESS_LANE3_SET = 0x00106,
83 DPCD_ADDRESS_DOWNSPREAD_CNTL = 0x00107,
84 DPCD_ADDRESS_I2C_SPEED_CNTL = 0x00109,/*1.2*/
85
86 DPCD_ADDRESS_EDP_CONFIG_SET = 0x0010A,
87 DPCD_ADDRESS_LINK_QUAL_LANE0_SET = 0x0010B,
88 DPCD_ADDRESS_LINK_QUAL_LANE1_SET = 0x0010C,
89 DPCD_ADDRESS_LINK_QUAL_LANE2_SET = 0x0010D,
90 DPCD_ADDRESS_LINK_QUAL_LANE3_SET = 0x0010E,
91
92 DPCD_ADDRESS_LANE0_SET2 = 0x0010F,/*1.2*/
93 DPCD_ADDRESS_LANE2_SET2 = 0x00110,/*1.2*/
94
95 DPCD_ADDRESS_MSTM_CNTL = 0x00111,/*1.2*/
96
97 DPCD_ADDRESS_PSR_ENABLE_CFG = 0x0170,
98
99 /* Payload Table Configuration Field 1.2 */
100 DPCD_ADDRESS_PAYLOAD_ALLOCATE_SET = 0x001C0,
101 DPCD_ADDRESS_PAYLOAD_ALLOCATE_START_TIMESLOT = 0x001C1,
102 DPCD_ADDRESS_PAYLOAD_ALLOCATE_TIMESLOT_COUNT = 0x001C2,
103
104 DPCD_ADDRESS_SINK_COUNT = 0x0200,
105 DPCD_ADDRESS_DEVICE_SERVICE_IRQ_VECTOR = 0x0201,
106
107 /* Link / Sink Status Field */
108 DPCD_ADDRESS_LANE_01_STATUS = 0x00202,
109 DPCD_ADDRESS_LANE_23_STATUS = 0x00203,
110 DPCD_ADDRESS_LANE_ALIGN_STATUS_UPDATED = 0x0204,
111 DPCD_ADDRESS_SINK_STATUS = 0x0205,
112
113 /* Adjust Request Field */
114 DPCD_ADDRESS_ADJUST_REQUEST_LANE0_1 = 0x0206,
115 DPCD_ADDRESS_ADJUST_REQUEST_LANE2_3 = 0x0207,
116 DPCD_ADDRESS_ADJUST_REQUEST_POST_CURSOR2 = 0x020C,
117
118 /* Test Request Field */
119 DPCD_ADDRESS_TEST_REQUEST = 0x0218,
120 DPCD_ADDRESS_TEST_LINK_RATE = 0x0219,
121 DPCD_ADDRESS_TEST_LANE_COUNT = 0x0220,
122 DPCD_ADDRESS_TEST_PATTERN = 0x0221,
123 DPCD_ADDRESS_TEST_MISC1 = 0x0232,
124
125 /* Phy Test Pattern Field */
126 DPCD_ADDRESS_TEST_PHY_PATTERN = 0x0248,
127 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_7_0 = 0x0250,
128 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_15_8 = 0x0251,
129 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_23_16 = 0x0252,
130 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_31_24 = 0x0253,
131 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_39_32 = 0x0254,
132 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_47_40 = 0x0255,
133 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_55_48 = 0x0256,
134 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_63_56 = 0x0257,
135 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_71_64 = 0x0258,
136 DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_79_72 = 0x0259,
137
138 /* Test Response Field*/
139 DPCD_ADDRESS_TEST_RESPONSE = 0x0260,
140
141 /* Audio Test Pattern Field 1.2*/
142 DPCD_ADDRESS_TEST_AUDIO_MODE = 0x0271,
143 DPCD_ADDRESS_TEST_AUDIO_PATTERN_TYPE = 0x0272,
144 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_1 = 0x0273,
145 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_2 = 0x0274,
146 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_3 = 0x0275,
147 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_4 = 0x0276,
148 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_5 = 0x0277,
149 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_6 = 0x0278,
150 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_7 = 0x0279,
151 DPCD_ADDRESS_TEST_AUDIO_PERIOD_CH_8 = 0x027A,
152
153 /* Payload Table Status Field */
154 DPCD_ADDRESS_PAYLOAD_TABLE_UPDATE_STATUS = 0x002C0,/*1.2*/
155 DPCD_ADDRESS_VC_PAYLOAD_ID_SLOT1 = 0x002C1,/*1.2*/
156 DPCD_ADDRESS_VC_PAYLOAD_ID_SLOT63 = 0x002FF,/*1.2*/
157
158 /* Source Device Specific Field */
159 DPCD_ADDRESS_SOURCE_DEVICE_ID_START = 0x0300,
160 DPCD_ADDRESS_SOURCE_DEVICE_ID_END = 0x0301,
161 DPCD_ADDRESS_AMD_INTERNAL_DEBUG_START = 0x030C,
162 DPCD_ADDRESS_AMD_INTERNAL_DEBUG_END = 0x030F,
163 DPCD_ADDRESS_SOURCE_SPECIFIC_TABLE_START = 0x0310,
164 DPCD_ADDRESS_SOURCE_SPECIFIC_TABLE_END = 0x037F,
165 DPCD_ADDRESS_SOURCE_RESERVED_START = 0x0380,
166 DPCD_ADDRESS_SOURCE_RESERVED_END = 0x03FF,
167
168 /* Sink Device Specific Field */
169 DPCD_ADDRESS_SINK_DEVICE_ID_START = 0x0400,
170 DPCD_ADDRESS_SINK_DEVICE_ID_END = 0x0402,
171 DPCD_ADDRESS_SINK_DEVICE_STR_START = 0x0403,
172 DPCD_ADDRESS_SINK_DEVICE_STR_END = 0x0408,
173 DPCD_ADDRESS_SINK_REVISION_START = 0x409,
174 DPCD_ADDRESS_SINK_REVISION_END = 0x40B,
175
176 /* Branch Device Specific Field */
177 DPCD_ADDRESS_BRANCH_DEVICE_ID_START = 0x0500,
178 DPCD_ADDRESS_BRANCH_DEVICE_ID_END = 0x0502,
179 DPCD_ADDRESS_BRANCH_DEVICE_STR_START = 0x0503,
180 DPCD_ADDRESS_BRANCH_DEVICE_STR_END = 0x0508,
181 DPCD_ADDRESS_BRANCH_REVISION_START = 0x0509,
182 DPCD_ADDRESS_BRANCH_REVISION_END = 0x050B,
183
184 DPCD_ADDRESS_POWER_STATE = 0x0600,
185
186 /* EDP related */
187 DPCD_ADDRESS_EDP_REV = 0x0700,
188 DPCD_ADDRESS_EDP_CAPABILITY = 0x0701,
189 DPCD_ADDRESS_EDP_BACKLIGHT_ADJUST_CAP = 0x0702,
190 DPCD_ADDRESS_EDP_GENERAL_CAP2 = 0x0703,
191
192 DPCD_ADDRESS_EDP_DISPLAY_CONTROL = 0x0720,
193 DPCD_ADDRESS_SUPPORTED_LINK_RATES = 0x00010, /* edp 1.4 */
194 DPCD_ADDRESS_EDP_BACKLIGHT_SET = 0x0721,
195 DPCD_ADDRESS_EDP_BACKLIGHT_BRIGHTNESS_MSB = 0x0722,
196 DPCD_ADDRESS_EDP_BACKLIGHT_BRIGHTNESS_LSB = 0x0723,
197 DPCD_ADDRESS_EDP_PWMGEN_BIT_COUNT = 0x0724,
198 DPCD_ADDRESS_EDP_PWMGEN_BIT_COUNT_CAP_MIN = 0x0725,
199 DPCD_ADDRESS_EDP_PWMGEN_BIT_COUNT_CAP_MAX = 0x0726,
200 DPCD_ADDRESS_EDP_BACKLIGHT_CONTROL_STATUS = 0x0727,
201 DPCD_ADDRESS_EDP_BACKLIGHT_FREQ_SET = 0x0728,
202 DPCD_ADDRESS_EDP_REVERVED = 0x0729,
203 DPCD_ADDRESS_EDP_BACKLIGNT_FREQ_CAP_MIN_MSB = 0x072A,
204 DPCD_ADDRESS_EDP_BACKLIGNT_FREQ_CAP_MIN_MID = 0x072B,
205 DPCD_ADDRESS_EDP_BACKLIGNT_FREQ_CAP_MIN_LSB = 0x072C,
206 DPCD_ADDRESS_EDP_BACKLIGNT_FREQ_CAP_MAX_MSB = 0x072D,
207 DPCD_ADDRESS_EDP_BACKLIGNT_FREQ_CAP_MAX_MID = 0x072E,
208 DPCD_ADDRESS_EDP_BACKLIGNT_FREQ_CAP_MAX_LSB = 0x072F,
209
210 DPCD_ADDRESS_EDP_DBC_MINIMUM_BRIGHTNESS_SET = 0x0732,
211 DPCD_ADDRESS_EDP_DBC_MAXIMUM_BRIGHTNESS_SET = 0x0733,
212
213 /* Sideband MSG Buffers 1.2 */
214 DPCD_ADDRESS_DOWN_REQ_START = 0x01000,
215 DPCD_ADDRESS_DOWN_REQ_END = 0x011ff,
216
217 DPCD_ADDRESS_UP_REP_START = 0x01200,
218 DPCD_ADDRESS_UP_REP_END = 0x013ff,
219
220 DPCD_ADDRESS_DOWN_REP_START = 0x01400,
221 DPCD_ADDRESS_DOWN_REP_END = 0x015ff,
222
223 DPCD_ADDRESS_UP_REQ_START = 0x01600,
224 DPCD_ADDRESS_UP_REQ_END = 0x017ff,
225
226 /* ESI (Event Status Indicator) Field 1.2 */
227 DPCD_ADDRESS_SINK_COUNT_ESI = 0x02002,
228 DPCD_ADDRESS_DEVICE_IRQ_ESI0 = 0x02003,
229 DPCD_ADDRESS_DEVICE_IRQ_ESI1 = 0x02004,
230 /*@todo move dpcd_address_Lane01Status back here*/
231
232 DPCD_ADDRESS_PSR_ERROR_STATUS = 0x2006,
233 DPCD_ADDRESS_PSR_EVENT_STATUS = 0x2007,
234 DPCD_ADDRESS_PSR_SINK_STATUS = 0x2008,
235 DPCD_ADDRESS_PSR_DBG_REGISTER0 = 0x2009,
236 DPCD_ADDRESS_PSR_DBG_REGISTER1 = 0x200A,
237
238 DPCD_ADDRESS_DP13_DPCD_REV = 0x2200,
239 DPCD_ADDRESS_DP13_MAX_LINK_RATE = 0x2201,
240
241 /* Travis specific addresses */
242 DPCD_ADDRESS_TRAVIS_SINK_DEV_SEL = 0x5f0,
243 DPCD_ADDRESS_TRAVIS_SINK_ACCESS_OFFSET = 0x5f1,
244 DPCD_ADDRESS_TRAVIS_SINK_ACCESS_REG = 0x5f2,
245};
246
247enum dpcd_revision {
248 DPCD_REV_10 = 0x10,
249 DPCD_REV_11 = 0x11,
250 DPCD_REV_12 = 0x12,
251 DPCD_REV_13 = 0x13,
252 DPCD_REV_14 = 0x14
253};
254
255enum dp_pwr_state {
256 DP_PWR_STATE_D0 = 1,/* direct HW translation! */
257 DP_PWR_STATE_D3
258};
259
260/* these are the types stored at DOWNSTREAMPORT_PRESENT */
261enum dpcd_downstream_port_type {
262 DOWNSTREAM_DP = 0,
263 DOWNSTREAM_VGA,
264 DOWNSTREAM_DVI_HDMI,
265 DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
266};
267
268enum dpcd_link_test_patterns {
269 LINK_TEST_PATTERN_NONE = 0,
270 LINK_TEST_PATTERN_COLOR_RAMP,
271 LINK_TEST_PATTERN_VERTICAL_BARS,
272 LINK_TEST_PATTERN_COLOR_SQUARES
273};
274
275enum dpcd_test_color_format {
276 TEST_COLOR_FORMAT_RGB = 0,
277 TEST_COLOR_FORMAT_YCBCR422,
278 TEST_COLOR_FORMAT_YCBCR444
279};
280
281enum dpcd_test_bit_depth {
282 TEST_BIT_DEPTH_6 = 0,
283 TEST_BIT_DEPTH_8,
284 TEST_BIT_DEPTH_10,
285 TEST_BIT_DEPTH_12,
286 TEST_BIT_DEPTH_16
287};
288
289/* PHY (encoder) test patterns.
290 * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248).
291 */
292enum dpcd_phy_test_patterns {
293 PHY_TEST_PATTERN_NONE = 0,
294 PHY_TEST_PATTERN_D10_2,
295 PHY_TEST_PATTERN_SYMBOL_ERROR,
296 PHY_TEST_PATTERN_PRBS7,
297 PHY_TEST_PATTERN_80BIT_CUSTOM,/* For DP1.2 only */
298 PHY_TEST_PATTERN_HBR2_COMPLIANCE_EYE/* For DP1.2 only */
299};
300
301enum dpcd_test_dyn_range {
302 TEST_DYN_RANGE_VESA = 0,
303 TEST_DYN_RANGE_CEA
304};
305
306enum dpcd_audio_test_pattern {
307 AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0,/* direct HW translation */
308 AUDIO_TEST_PATTERN_SAWTOOTH
309};
310
311enum dpcd_audio_sampling_rate {
312 AUDIO_SAMPLING_RATE_32KHZ = 0,/* direct HW translation */
313 AUDIO_SAMPLING_RATE_44_1KHZ,
314 AUDIO_SAMPLING_RATE_48KHZ,
315 AUDIO_SAMPLING_RATE_88_2KHZ,
316 AUDIO_SAMPLING_RATE_96KHZ,
317 AUDIO_SAMPLING_RATE_176_4KHZ,
318 AUDIO_SAMPLING_RATE_192KHZ
319};
320
321enum dpcd_audio_channels {
322 AUDIO_CHANNELS_1 = 0,/* direct HW translation */
323 AUDIO_CHANNELS_2,
324 AUDIO_CHANNELS_3,
325 AUDIO_CHANNELS_4,
326 AUDIO_CHANNELS_5,
327 AUDIO_CHANNELS_6,
328 AUDIO_CHANNELS_7,
329 AUDIO_CHANNELS_8,
330
331 AUDIO_CHANNELS_COUNT
332};
333
334enum dpcd_audio_test_pattern_periods {
335 DPCD_AUDIO_TEST_PATTERN_PERIOD_NOTUSED = 0,/* direct HW translation */
336 DPCD_AUDIO_TEST_PATTERN_PERIOD_3,
337 DPCD_AUDIO_TEST_PATTERN_PERIOD_6,
338 DPCD_AUDIO_TEST_PATTERN_PERIOD_12,
339 DPCD_AUDIO_TEST_PATTERN_PERIOD_24,
340 DPCD_AUDIO_TEST_PATTERN_PERIOD_48,
341 DPCD_AUDIO_TEST_PATTERN_PERIOD_96,
342 DPCD_AUDIO_TEST_PATTERN_PERIOD_192,
343 DPCD_AUDIO_TEST_PATTERN_PERIOD_384,
344 DPCD_AUDIO_TEST_PATTERN_PERIOD_768,
345 DPCD_AUDIO_TEST_PATTERN_PERIOD_1536
346};
347
348/* This enum is for programming DPCD TRAINING_PATTERN_SET */
349enum dpcd_training_patterns {
350 DPCD_TRAINING_PATTERN_VIDEOIDLE = 0,/* direct HW translation! */
351 DPCD_TRAINING_PATTERN_1,
352 DPCD_TRAINING_PATTERN_2,
353 DPCD_TRAINING_PATTERN_3,
354 DPCD_TRAINING_PATTERN_4 = 7
355};
356
357/* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus
358It defines the possible PSR states. */
359enum dpcd_psr_sink_states {
360 PSR_SINK_STATE_INACTIVE = 0,
361 PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SOURCE_TIMING = 1,
362 PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB = 2,
363 PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SINK_TIMING = 3,
364 PSR_SINK_STATE_ACTIVE_CAPTURE_TIMING_RESYNC = 4,
365 PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
366};
367
368/* This enum defines the Panel's eDP revision at DPCD 700h
369 * 00h = eDP v1.1 or lower
370 * 01h = eDP v1.2
371 * 02h = eDP v1.3 (PSR support starts here)
372 * 03h = eDP v1.4
373 * If unknown revision, treat as eDP v1.1, meaning least functionality set.
374 * This enum has values matched to eDP spec, thus values should not change.
375 */
376enum dpcd_edp_revision {
377 DPCD_EDP_REVISION_EDP_V1_1 = 0,
378 DPCD_EDP_REVISION_EDP_V1_2 = 1,
379 DPCD_EDP_REVISION_EDP_V1_3 = 2,
380 DPCD_EDP_REVISION_EDP_V1_4 = 3,
381 DPCD_EDP_REVISION_EDP_UNKNOWN = DPCD_EDP_REVISION_EDP_V1_1,
382};
383
384union dpcd_rev {
385 struct {
386 uint8_t MINOR:4;
387 uint8_t MAJOR:4;
388 } bits;
389 uint8_t raw;
390};
391
392union max_lane_count {
393 struct {
394 uint8_t MAX_LANE_COUNT:5;
395 uint8_t POST_LT_ADJ_REQ_SUPPORTED:1;
396 uint8_t TPS3_SUPPORTED:1;
397 uint8_t ENHANCED_FRAME_CAP:1;
398 } bits;
399 uint8_t raw;
400};
401
402union max_down_spread {
403 struct {
404 uint8_t MAX_DOWN_SPREAD:1;
405 uint8_t RESERVED:5;
406 uint8_t NO_AUX_HANDSHAKE_LINK_TRAINING:1;
407 uint8_t TPS4_SUPPORTED:1;
408 } bits;
409 uint8_t raw;
410};
411
412union mstm_cap {
413 struct {
414 uint8_t MST_CAP:1;
415 uint8_t RESERVED:7;
416 } bits;
417 uint8_t raw;
418};
419
420union lane_count_set {
421 struct {
422 uint8_t LANE_COUNT_SET:5;
423 uint8_t POST_LT_ADJ_REQ_GRANTED:1;
424 uint8_t RESERVED:1;
425 uint8_t ENHANCED_FRAMING:1;
426 } bits;
427 uint8_t raw;
428};
429
430union lane_status {
431 struct {
432 uint8_t CR_DONE_0:1;
433 uint8_t CHANNEL_EQ_DONE_0:1;
434 uint8_t SYMBOL_LOCKED_0:1;
435 uint8_t RESERVED0:1;
436 uint8_t CR_DONE_1:1;
437 uint8_t CHANNEL_EQ_DONE_1:1;
438 uint8_t SYMBOL_LOCKED_1:1;
439 uint8_t RESERVED_1:1;
440 } bits;
441 uint8_t raw;
442};
443
444union device_service_irq {
445 struct {
446 uint8_t REMOTE_CONTROL_CMD_PENDING:1;
447 uint8_t AUTOMATED_TEST:1;
448 uint8_t CP_IRQ:1;
449 uint8_t MCCS_IRQ:1;
450 uint8_t DOWN_REP_MSG_RDY:1;
451 uint8_t UP_REQ_MSG_RDY:1;
452 uint8_t SINK_SPECIFIC:1;
453 uint8_t reserved:1;
454 } bits;
455 uint8_t raw;
456};
457
458union sink_count {
459 struct {
460 uint8_t SINK_COUNT:6;
461 uint8_t CPREADY:1;
462 uint8_t RESERVED:1;
463 } bits;
464 uint8_t raw;
465};
466
467union lane_align_status_updated {
468 struct {
469 uint8_t INTERLANE_ALIGN_DONE:1;
470 uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
471 uint8_t RESERVED:4;
472 uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
473 uint8_t LINK_STATUS_UPDATED:1;
474 } bits;
475 uint8_t raw;
476};
477
478union lane_adjust {
479 struct {
480 uint8_t VOLTAGE_SWING_LANE:2;
481 uint8_t PRE_EMPHASIS_LANE:2;
482 uint8_t RESERVED:4;
483 } bits;
484 uint8_t raw;
485};
486
487union dpcd_training_pattern {
488 struct {
489 uint8_t TRAINING_PATTERN_SET:4;
490 uint8_t RECOVERED_CLOCK_OUT_EN:1;
491 uint8_t SCRAMBLING_DISABLE:1;
492 uint8_t SYMBOL_ERROR_COUNT_SEL:2;
493 } v1_4;
494 struct {
495 uint8_t TRAINING_PATTERN_SET:2;
496 uint8_t LINK_QUAL_PATTERN_SET:2;
497 uint8_t RESERVED:4;
498 } v1_3;
499 uint8_t raw;
500};
501
502/* Training Lane is used to configure downstream DP device's voltage swing
503and pre-emphasis levels*/
504/* The DPCD addresses are from 0x103 to 0x106*/
505union dpcd_training_lane {
506 struct {
507 uint8_t VOLTAGE_SWING_SET:2;
508 uint8_t MAX_SWING_REACHED:1;
509 uint8_t PRE_EMPHASIS_SET:2;
510 uint8_t MAX_PRE_EMPHASIS_REACHED:1;
511 uint8_t RESERVED:2;
512 } bits;
513 uint8_t raw;
514};
515
516/* TMDS-converter related */
517union dwnstream_port_caps_byte0 {
518 struct {
519 uint8_t DWN_STRM_PORTX_TYPE:3;
520 uint8_t DWN_STRM_PORTX_HPD:1;
521 uint8_t RESERVERD:4;
522 } bits;
523 uint8_t raw;
524};
525
526/* these are the detailed types stored at DWN_STRM_PORTX_CAP (00080h)*/
527enum dpcd_downstream_port_detailed_type {
528 DOWN_STREAM_DETAILED_DP = 0,
529 DOWN_STREAM_DETAILED_VGA,
530 DOWN_STREAM_DETAILED_DVI,
531 DOWN_STREAM_DETAILED_HDMI,
532 DOWN_STREAM_DETAILED_NONDDC,/* has no EDID (TV,CV)*/
533 DOWN_STREAM_DETAILED_DP_PLUS_PLUS
534};
535
536union dwnstream_port_caps_byte2 {
537 struct {
538 uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
539 uint8_t RESERVED:6;
540 } bits;
541 uint8_t raw;
542};
543
544union dp_downstream_port_present {
545 uint8_t byte;
546 struct {
547 uint8_t PORT_PRESENT:1;
548 uint8_t PORT_TYPE:2;
549 uint8_t FMT_CONVERSION:1;
550 uint8_t DETAILED_CAPS:1;
551 uint8_t RESERVED:3;
552 } fields;
553};
554
555union dwnstream_port_caps_byte3_dvi {
556 struct {
557 uint8_t RESERVED1:1;
558 uint8_t DUAL_LINK:1;
559 uint8_t HIGH_COLOR_DEPTH:1;
560 uint8_t RESERVED2:5;
561 } bits;
562 uint8_t raw;
563};
564
565union dwnstream_port_caps_byte3_hdmi {
566 struct {
567 uint8_t FRAME_SEQ_TO_FRAME_PACK:1;
568 uint8_t RESERVED:7;
569 } bits;
570 uint8_t raw;
571};
572
573/*4-byte structure for detailed capabilities of a down-stream port
574(DP-to-TMDS converter).*/
575
576union sink_status {
577 struct {
578 uint8_t RX_PORT0_STATUS:1;
579 uint8_t RX_PORT1_STATUS:1;
580 uint8_t RESERVED:6;
581 } bits;
582 uint8_t raw;
583};
584
585/*6-byte structure corresponding to 6 registers (200h-205h)
586read during handling of HPD-IRQ*/
587union hpd_irq_data {
588 struct {
589 union sink_count sink_cnt;/* 200h */
590 union device_service_irq device_service_irq;/* 201h */
591 union lane_status lane01_status;/* 202h */
592 union lane_status lane23_status;/* 203h */
593 union lane_align_status_updated lane_status_updated;/* 204h */
594 union sink_status sink_status;
595 } bytes;
596 uint8_t raw[6];
597};
598
599union down_stream_port_count {
600 struct {
601 uint8_t DOWN_STR_PORT_COUNT:4;
602 uint8_t RESERVED:2; /*Bits 5:4 = RESERVED. Read all 0s.*/
603 /*Bit 6 = MSA_TIMING_PAR_IGNORED
604 0 = Sink device requires the MSA timing parameters
605 1 = Sink device is capable of rendering incoming video
606 stream without MSA timing parameters*/
607 uint8_t IGNORE_MSA_TIMING_PARAM:1;
608 /*Bit 7 = OUI Support
609 0 = OUI not supported
610 1 = OUI supported
611 (OUI and Device Identification mandatory for DP 1.2)*/
612 uint8_t OUI_SUPPORT:1;
613 } bits;
614 uint8_t raw;
615};
616
617union down_spread_ctrl {
618 struct {
619 uint8_t RESERVED1:4;/* Bit 3:0 = RESERVED. Read all 0s*/
620 /* Bits 4 = SPREAD_AMP. Spreading amplitude
621 0 = Main link signal is not downspread
622 1 = Main link signal is downspread <= 0.5%
623 with frequency in the range of 30kHz ~ 33kHz*/
624 uint8_t SPREAD_AMP:1;
625 uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/
626 /*Bit 7 = MSA_TIMING_PAR_IGNORE_EN
627 0 = Source device will send valid data for the MSA Timing Params
628 1 = Source device may send invalid data for these MSA Timing Params*/
629 uint8_t IGNORE_MSA_TIMING_PARAM:1;
630 } bits;
631 uint8_t raw;
632};
633
634union dpcd_edp_config {
635 struct {
636 uint8_t PANEL_MODE_EDP:1;
637 uint8_t FRAMING_CHANGE_ENABLE:1;
638 uint8_t RESERVED:5;
639 uint8_t PANEL_SELF_TEST_ENABLE:1;
640 } bits;
641 uint8_t raw;
642};
643
644struct dp_device_vendor_id {
645 uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
646 uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
647};
648
649struct dp_sink_hw_fw_revision {
650 uint8_t ieee_hw_rev;
651 uint8_t ieee_fw_rev[2];
652};
653
654/*DPCD register of DP receiver capability field bits-*/
655union edp_configuration_cap {
656 struct {
657 uint8_t ALT_SCRAMBLER_RESET:1;
658 uint8_t FRAMING_CHANGE:1;
659 uint8_t RESERVED:1;
660 uint8_t DPCD_DISPLAY_CONTROL_CAPABLE:1;
661 uint8_t RESERVED2:4;
662 } bits;
663 uint8_t raw;
664};
665
666union training_aux_rd_interval {
667 struct {
668 uint8_t TRAINIG_AUX_RD_INTERVAL:7;
669 uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
670 } bits;
671 uint8_t raw;
672};
673
674/* Automated test structures */
675union test_request {
676 struct {
677 uint8_t LINK_TRAINING :1;
678 uint8_t LINK_TEST_PATTRN :1;
679 uint8_t EDID_REAT :1;
680 uint8_t PHY_TEST_PATTERN :1;
681 uint8_t AUDIO_TEST_PATTERN :1;
682 uint8_t RESERVED :1;
683 uint8_t TEST_STEREO_3D :1;
684 } bits;
685 uint8_t raw;
686};
687
688union test_response {
689 struct {
690 uint8_t ACK :1;
691 uint8_t NO_ACK :1;
692 uint8_t RESERVED :6;
693 } bits;
694 uint8_t raw;
695};
696
697union phy_test_pattern {
698 struct {
699 /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
700 * and 3 bits for DP1.2.
701 */
702 uint8_t PATTERN :3;
703 /* By spec, bits 7:2 are 0 for DP1.1. */
704 uint8_t RESERVED :5;
705 } bits;
706 uint8_t raw;
707};
708
709/* States of Compliance Test Specification (CTS DP1.2). */
710union compliance_test_state {
711 struct {
712 unsigned char STEREO_3D_RUNNING : 1;
713 unsigned char SET_TEST_PATTERN_PENDING : 1;
714 unsigned char RESERVED : 6;
715 } bits;
716 unsigned char raw;
717};
718
719union link_test_pattern {
720 struct {
721 /* dpcd_link_test_patterns */
722 unsigned char PATTERN :2;
723 unsigned char RESERVED:6;
724 } bits;
725 unsigned char raw;
726};
727
728union test_misc {
729 struct dpcd_test_misc_bits {
730 unsigned char SYNC_CLOCK :1;
731 /* dpcd_test_color_format */
732 unsigned char CLR_FORMAT :2;
733 /* dpcd_test_dyn_range */
734 unsigned char DYN_RANGE :1;
735 unsigned char YCBCR :1;
736 /* dpcd_test_bit_depth */
737 unsigned char BPC :3;
738 } bits;
739 unsigned char raw;
740};
741
742#endif /* __DAL_DPCD_DEFS_H__ */
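The unions above all follow the same raw/bits pattern: the byte read over AUX is stored in .raw and decoded through the named bitfields, relying on the little-endian bitfield layout the driver assumes. A decoding sketch with hypothetical register values:

	static void example_decode_caps(uint8_t rev_byte, uint8_t lane_byte)
	{
		union dpcd_rev rev;
		union max_lane_count mlc;

		rev.raw = rev_byte;  /* e.g. 0x12 read from DPCD_ADDRESS_DPCD_REV */
		/* rev.bits.MAJOR == 1, rev.bits.MINOR == 2 -> a DP 1.2 receiver */

		mlc.raw = lane_byte; /* e.g. 0x84 read from DPCD_ADDRESS_MAX_LANE_COUNT */
		/* 0x84 -> MAX_LANE_COUNT == 4 and ENHANCED_FRAME_CAP == 1 */
	}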
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
new file mode 100644
index 000000000000..c28de167250f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -0,0 +1,390 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_FIXED31_32_H__
27#define __DAL_FIXED31_32_H__
28
29#include "os_types.h"
30
31/*
32 * @brief
33 * Arithmetic operations on real numbers
34 * represented as fixed-point numbers.
35 * There are: 1 bit for sign,
36 * 31 bit for integer part,
37 * 32 bits for fractional part.
38 *
39 * @note
40 * Currently, overflows and underflows are asserted;
41 * no special result returned.
42 */
43
44struct fixed31_32 {
45 int64_t value;
46};
47
48/*
49 * @brief
50 * Useful constants
51 */
52
53static const struct fixed31_32 dal_fixed31_32_zero = { 0 };
54static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL };
55static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL };
56static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL };
57
58static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL };
59static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL };
60static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL };
61static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL };
62static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
63
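These constants follow the Q31.32 encoding described above: the stored int64 value is the real number scaled by 2^32, so 1.0 is 0x100000000 (2^32), 0.5 is 0x80000000 (2^31), and pi is round(3.14159265... * 2^32) = 13493037705. A quick sketch using the constructor declared just below:

	struct fixed31_32 three_halves = dal_fixed31_32_from_fraction(3, 2);
	/* three_halves.value == 0x180000000LL, i.e. 1.5 * 2^32 */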
64/*
65 * @brief
66 * Initialization routines
67 */
68
69/*
70 * @brief
71 * result = numerator / denominator
72 */
73struct fixed31_32 dal_fixed31_32_from_fraction(
74 int64_t numerator,
75 int64_t denominator);
76
77/*
78 * @brief
79 * result = arg
80 */
81struct fixed31_32 dal_fixed31_32_from_int(
82 int64_t arg);
83
84/*
85 * @brief
86 * Unary operators
87 */
88
89/*
90 * @brief
91 * result = -arg
92 */
93struct fixed31_32 dal_fixed31_32_neg(
94 struct fixed31_32 arg);
95
96/*
97 * @brief
98 * result = abs(arg) := (arg >= 0) ? arg : -arg
99 */
100struct fixed31_32 dal_fixed31_32_abs(
101 struct fixed31_32 arg);
102
103/*
104 * @brief
105 * Binary relational operators
106 */
107
108/*
109 * @brief
110 * result = arg1 < arg2
111 */
112bool dal_fixed31_32_lt(
113 struct fixed31_32 arg1,
114 struct fixed31_32 arg2);
115
116/*
117 * @brief
118 * result = arg1 <= arg2
119 */
120bool dal_fixed31_32_le(
121 struct fixed31_32 arg1,
122 struct fixed31_32 arg2);
123
124/*
125 * @brief
126 * result = arg1 == arg2
127 */
128bool dal_fixed31_32_eq(
129 struct fixed31_32 arg1,
130 struct fixed31_32 arg2);
131
132/*
133 * @brief
134 * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
135 */
136struct fixed31_32 dal_fixed31_32_min(
137 struct fixed31_32 arg1,
138 struct fixed31_32 arg2);
139
140/*
141 * @brief
142 * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
143 */
144struct fixed31_32 dal_fixed31_32_max(
145 struct fixed31_32 arg1,
146 struct fixed31_32 arg2);
147
148/*
149 * @brief
150 * | min_value, when arg <= min_value
151 * result = | arg, when min_value < arg < max_value
152 * | max_value, when arg >= max_value
153 */
154struct fixed31_32 dal_fixed31_32_clamp(
155 struct fixed31_32 arg,
156 struct fixed31_32 min_value,
157 struct fixed31_32 max_value);
158
159/*
160 * @brief
161 * Binary shift operators
162 */
163
164/*
165 * @brief
166 * result = arg << shift
167 */
168struct fixed31_32 dal_fixed31_32_shl(
169 struct fixed31_32 arg,
170 uint8_t shift);
171
172/*
173 * @brief
174 * result = arg >> shift
175 */
176struct fixed31_32 dal_fixed31_32_shr(
177 struct fixed31_32 arg,
178 uint8_t shift);
179
180/*
181 * @brief
182 * Binary additive operators
183 */
184
185/*
186 * @brief
187 * result = arg1 + arg2
188 */
189struct fixed31_32 dal_fixed31_32_add(
190 struct fixed31_32 arg1,
191 struct fixed31_32 arg2);
192
193/*
194 * @brief
195 * result = arg1 - arg2
196 */
197struct fixed31_32 dal_fixed31_32_sub_int(
198 struct fixed31_32 arg1,
199 int32_t arg2);
200
201/*
202 * @brief
203 * result = arg1 - arg2
204 */
205struct fixed31_32 dal_fixed31_32_sub(
206 struct fixed31_32 arg1,
207 struct fixed31_32 arg2);
208
209/*
210 * @brief
211 * Binary multiplicative operators
212 */
213
214/*
215 * @brief
216 * result = arg1 * arg2
217 */
218struct fixed31_32 dal_fixed31_32_mul_int(
219 struct fixed31_32 arg1,
220 int32_t arg2);
221
222/*
223 * @brief
224 * result = arg1 * arg2
225 */
226struct fixed31_32 dal_fixed31_32_mul(
227 struct fixed31_32 arg1,
228 struct fixed31_32 arg2);
229
230/*
231 * @brief
232 * result = square(arg) := arg * arg
233 */
234struct fixed31_32 dal_fixed31_32_sqr(
235 struct fixed31_32 arg);
236
237/*
238 * @brief
239 * result = arg1 / arg2
240 */
241struct fixed31_32 dal_fixed31_32_div_int(
242 struct fixed31_32 arg1,
243 int64_t arg2);
244
245/*
246 * @brief
247 * result = arg1 / arg2
248 */
249struct fixed31_32 dal_fixed31_32_div(
250 struct fixed31_32 arg1,
251 struct fixed31_32 arg2);
252
253/*
254 * @brief
255 * Reciprocal function
256 */
257
258/*
259 * @brief
260 * result = reciprocal(arg) := 1 / arg
261 *
262 * @note
263 * No special action is taken if the argument is zero.
264 */
265struct fixed31_32 dal_fixed31_32_recip(
266 struct fixed31_32 arg);
267
268/*
269 * @brief
270 * Trigonometric functions
271 */
272
273/*
274 * @brief
275 * result = sinc(arg) := sin(arg) / arg
276 *
277 * @note
278 * The argument is specified in radians;
279 * internally it is normalized to the [-2pi...2pi] range.
280 */
281struct fixed31_32 dal_fixed31_32_sinc(
282 struct fixed31_32 arg);
283
284/*
285 * @brief
286 * result = sin(arg)
287 *
288 * @note
289 * The argument is specified in radians;
290 * internally it is normalized to the [-2pi...2pi] range.
291 */
292struct fixed31_32 dal_fixed31_32_sin(
293 struct fixed31_32 arg);
294
295/*
296 * @brief
297 * result = cos(arg)
298 *
299 * @note
300 * The argument is specified in radians
301 * and must be in the [-2pi...2pi] range;
302 * passing arguments outside that range
303 * will produce an incorrect result!
304 */
305struct fixed31_32 dal_fixed31_32_cos(
306 struct fixed31_32 arg);
307
308/*
309 * @brief
310 * Transcendent functions
311 */
312
313/*
314 * @brief
315 * result = exp(arg)
316 *
317 * @note
318 * Currently, the function is verified for abs(arg) <= 1.
319 */
320struct fixed31_32 dal_fixed31_32_exp(
321 struct fixed31_32 arg);
322
323/*
324 * @brief
325 * result = log(arg)
326 *
327 * @note
328 * Currently, abs(arg) should be less than 1;
329 * no normalization is done.
330 * No special action is taken
331 * for invalid arguments. Take care!
332 */
333struct fixed31_32 dal_fixed31_32_log(
334 struct fixed31_32 arg);
335
336/*
337 * @brief
338 * Power function
339 */
340
341/*
342 * @brief
343 * result = pow(arg1, arg2)
344 *
345 * @note
346 * Currently, abs(arg1) should be less than 1. Take care!
347 */
348struct fixed31_32 dal_fixed31_32_pow(
349 struct fixed31_32 arg1,
350 struct fixed31_32 arg2);
351
352/*
353 * @brief
354 * Rounding functions
355 */
356
357/*
358 * @brief
359 * result = floor(arg) := greatest integer less than or equal to arg
360 */
361int32_t dal_fixed31_32_floor(
362 struct fixed31_32 arg);
363
364/*
365 * @brief
366 * result = round(arg) := integer nearest to arg
367 */
368int32_t dal_fixed31_32_round(
369 struct fixed31_32 arg);
370
371/*
372 * @brief
373 * result = ceil(arg) := lowest integer greater than or equal to arg
374 */
375int32_t dal_fixed31_32_ceil(
376 struct fixed31_32 arg);
377
378/* The following two functions are used in scaler HW programming to convert a
379 * fixed-point value to the u2d19 format: 2 bits of integer part and 19 bits of
380 * fractional part. The same applies to u0d19: 0 bits of integer part and
381 * 19 bits of fractional part.
382 */
383
384uint32_t dal_fixed31_32_u2d19(
385 struct fixed31_32 arg);
386
387uint32_t dal_fixed31_32_u0d19(
388 struct fixed31_32 arg);
389
390#endif
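For illustration only (not part of this patch): one plausible reading of the u2d19/u0d19 comment above is that the 32-bit fraction is narrowed to 19 bits by dropping the low 13 bits. The helpers below are a hypothetical sketch under that assumption of simple truncation; the in-tree implementation may round instead.

/* Editorial sketch: converting a 31.32 value to unsigned 2.19 / 0.19 formats.
 * Assumption: simple truncation of the low 32 - 19 = 13 fraction bits. */
#include <stdint.h>

static uint32_t sketch_u2d19(int64_t fixed31_32_value)
{
	/* keep 2 integer bits + 19 fraction bits = 21 bits total */
	return (uint32_t)(fixed31_32_value >> (32 - 19)) & ((1 << 21) - 1);
}

static uint32_t sketch_u0d19(int64_t fixed31_32_value)
{
	/* keep only the 19 most significant fraction bits */
	return (uint32_t)(fixed31_32_value >> (32 - 19)) & ((1 << 19) - 1);
}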
diff --git a/drivers/gpu/drm/amd/display/include/fixed32_32.h b/drivers/gpu/drm/amd/display/include/fixed32_32.h
new file mode 100644
index 000000000000..c7ddd0e435eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed32_32.h
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26
27#ifndef __DAL_FIXED32_32_H__
28#define __DAL_FIXED32_32_H__
29
30#include "os_types.h"
31
32struct fixed32_32 {
33 uint64_t value;
34};
35
36static const struct fixed32_32 dal_fixed32_32_zero = { 0 };
37static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL };
38static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL };
39
40struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d);
41struct fixed32_32 dal_fixed32_32_from_int(uint32_t value);
42struct fixed32_32 dal_fixed32_32_add(
43 struct fixed32_32 lhs,
44 struct fixed32_32 rhs);
45struct fixed32_32 dal_fixed32_32_add_int(
46 struct fixed32_32 lhs,
47 uint32_t rhs);
48struct fixed32_32 dal_fixed32_32_sub(
49 struct fixed32_32 lhs,
50 struct fixed32_32 rhs);
51struct fixed32_32 dal_fixed32_32_sub_int(
52 struct fixed32_32 lhs,
53 uint32_t rhs);
54struct fixed32_32 dal_fixed32_32_mul(
55 struct fixed32_32 lhs,
56 struct fixed32_32 rhs);
57struct fixed32_32 dal_fixed32_32_mul_int(
58 struct fixed32_32 lhs,
59 uint32_t rhs);
60struct fixed32_32 dal_fixed32_32_div(
61 struct fixed32_32 lhs,
62 struct fixed32_32 rhs);
63struct fixed32_32 dal_fixed32_32_div_int(
64 struct fixed32_32 lhs,
65 uint32_t rhs);
66struct fixed32_32 dal_fixed32_32_min(
67 struct fixed32_32 lhs,
68 struct fixed32_32 rhs);
69struct fixed32_32 dal_fixed32_32_max(
70 struct fixed32_32 lhs,
71 struct fixed32_32 rhs);
72bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs);
73bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs);
74bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs);
75bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs);
76bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs);
77bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs);
78bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs);
79uint32_t dal_fixed32_32_ceil(struct fixed32_32 value);
80uint32_t dal_fixed32_32_floor(struct fixed32_32 value);
81uint32_t dal_fixed32_32_round(struct fixed32_32 value);
82
83#endif
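For illustration only (not part of this patch): fixed32_32 is the unsigned counterpart of fixed31_32 (32 integer bits, 32 fraction bits) and its helpers are meant to be chained. A hypothetical usage sketch against the declarations above; the function name and parameters are invented for the example, and it links against the fixpt32_32 implementation in dc/basics.

/* Editorial sketch: rounding a fractional requirement up to a whole unit. */
static uint32_t sketch_min_clock_khz(uint32_t pixels, uint32_t pixels_per_clk)
{
	struct fixed32_32 clocks =
		dal_fixed32_32_from_fraction(pixels, pixels_per_clk);

	/* round up so the requirement is never underestimated */
	return dal_fixed32_32_ceil(clocks);
}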
diff --git a/drivers/gpu/drm/amd/display/include/gpio_interface.h b/drivers/gpu/drm/amd/display/include/gpio_interface.h
new file mode 100644
index 000000000000..e4fd31024b92
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_interface.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GPIO_INTERFACE_H__
27#define __DAL_GPIO_INTERFACE_H__
28
29#include "gpio_types.h"
30#include "grph_object_defs.h"
31
32struct gpio;
33
34/* Open the handle for future use */
35enum gpio_result dal_gpio_open(
36 struct gpio *gpio,
37 enum gpio_mode mode);
38
39enum gpio_result dal_gpio_open_ex(
40 struct gpio *gpio,
41 enum gpio_mode mode);
42
43/* Get high or low from the pin */
44enum gpio_result dal_gpio_get_value(
45 const struct gpio *gpio,
46 uint32_t *value);
47
48/* Set pin high or low */
49enum gpio_result dal_gpio_set_value(
50 const struct gpio *gpio,
51 uint32_t value);
52
53/* Get current mode */
54enum gpio_mode dal_gpio_get_mode(
55 const struct gpio *gpio);
56
57/* Change mode of the handle */
58enum gpio_result dal_gpio_change_mode(
59 struct gpio *gpio,
60 enum gpio_mode mode);
61
62/* Get the GPIO id */
63enum gpio_id dal_gpio_get_id(
64 const struct gpio *gpio);
65
66/* Get the GPIO enum */
67uint32_t dal_gpio_get_enum(
68 const struct gpio *gpio);
69
70/* Set the GPIO pin configuration */
71enum gpio_result dal_gpio_set_config(
72 struct gpio *gpio,
73 const struct gpio_config_data *config_data);
74
75/* Obtain GPIO pin info */
76enum gpio_result dal_gpio_get_pin_info(
77 const struct gpio *gpio,
78 struct gpio_pin_info *pin_info);
79
80/* Obtain GPIO sync source */
81enum sync_source dal_gpio_get_sync_source(
82 const struct gpio *gpio);
83
84/* Obtain GPIO pin output state (active low or active high) */
85enum gpio_pin_output_state dal_gpio_get_output_state(
86 const struct gpio *gpio);
87
88/* Close the handle */
89void dal_gpio_close(
90 struct gpio *gpio);
91
92#endif
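For illustration only (not part of this patch): the intended call sequence for this interface is open the pin in a mode, access it, then close it. A hedged sketch assuming the gpio handle was obtained via dal_gpio_create() (see gpio_service_interface.h); error handling is trimmed for brevity.

/* Editorial sketch: reading an HPD pin level through the dal_gpio interface. */
static bool sketch_read_hpd_level(struct gpio *hpd_pin)
{
	uint32_t level = 0;

	if (dal_gpio_open(hpd_pin, GPIO_MODE_INPUT) != GPIO_RESULT_OK)
		return false;

	/* on GPIO_RESULT_OK, 'level' holds the pin state (0 or 1) */
	if (dal_gpio_get_value(hpd_pin, &level) != GPIO_RESULT_OK)
		level = 0;

	dal_gpio_close(hpd_pin);

	return level != 0;
}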
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
new file mode 100644
index 000000000000..f40259bade40
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GPIO_SERVICE_INTERFACE_H__
27#define __DAL_GPIO_SERVICE_INTERFACE_H__
28
29#include "gpio_types.h"
30#include "gpio_interface.h"
31#include "hw/gpio.h"
32
33struct gpio_service;
34
35struct gpio *dal_gpio_create(
36 struct gpio_service *service,
37 enum gpio_id id,
38 uint32_t en,
39 enum gpio_pin_output_state output_state);
40
41void dal_gpio_destroy(
42 struct gpio **ptr);
43
44struct gpio_service *dal_gpio_service_create(
45 enum dce_version dce_version_major,
46 enum dce_version dce_version_minor,
47 struct dc_context *ctx);
48
49struct gpio *dal_gpio_service_create_irq(
50 struct gpio_service *service,
51 uint32_t offset,
52 uint32_t mask);
53
54struct ddc *dal_gpio_create_ddc(
55 struct gpio_service *service,
56 uint32_t offset,
57 uint32_t mask,
58 struct gpio_ddc_hw_info *info);
59
60
61void dal_gpio_destroy_ddc(
62 struct ddc **ddc);
63
64void dal_gpio_service_destroy(
65 struct gpio_service **ptr);
66
67enum dc_irq_source dal_irq_get_source(
68 const struct gpio *irq);
69
70enum dc_irq_source dal_irq_get_rx_source(
71 const struct gpio *irq);
72
73enum gpio_result dal_irq_setup_hpd_filter(
74 struct gpio *irq,
75 struct gpio_hpd_config *config);
76
77struct gpio *dal_gpio_create_irq(
78 struct gpio_service *service,
79 enum gpio_id id,
80 uint32_t en);
81
82void dal_gpio_destroy_irq(
83 struct gpio **ptr);
84
85
86enum gpio_result dal_ddc_open(
87 struct ddc *ddc,
88 enum gpio_mode mode,
89 enum gpio_ddc_config_type config_type);
90
91enum gpio_result dal_ddc_change_mode(
92 struct ddc *ddc,
93 enum gpio_mode mode);
94
95enum gpio_ddc_line dal_ddc_get_line(
96 const struct ddc *ddc);
97
98enum gpio_result dal_ddc_set_config(
99 struct ddc *ddc,
100 enum gpio_ddc_config_type config_type);
101
102void dal_ddc_close(
103 struct ddc *ddc);
104
105#endif
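For illustration only (not part of this patch): the DDC helpers above follow the same open/use/close pattern, with the mode and config type selecting plain I2C versus AUX operation. A hypothetical sketch; the ddc handle is assumed to come from dal_gpio_create_ddc(), and the mode choice is just one example.

/* Editorial sketch: preparing a DDC line for native AUX transactions. */
static bool sketch_prepare_ddc_for_aux(struct ddc *ddc)
{
	/* hardware mode + AUX configuration, as used for DP links */
	if (dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
			 GPIO_DDC_CONFIG_TYPE_MODE_AUX) != GPIO_RESULT_OK)
		return false;

	/* ... perform AUX traffic, e.g. via the i2caux interface ... */

	dal_ddc_close(ddc);
	return true;
}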
diff --git a/drivers/gpu/drm/amd/display/include/gpio_types.h b/drivers/gpu/drm/amd/display/include/gpio_types.h
new file mode 100644
index 000000000000..8dd46ed799e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_types.h
@@ -0,0 +1,332 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GPIO_TYPES_H__
27#define __DAL_GPIO_TYPES_H__
28
29#define BUNDLE_A_MASK 0x00FFF000L
30#define BUNDLE_B_MASK 0x00000FFFL
31
32/*
33 * gpio_result
34 *
35 * @brief
36 * The possible return codes that the GPIO object can return.
37 * These return codes can be generated
38 * directly by the GPIO object or from the GPIOPin object.
39 */
40enum gpio_result {
41 GPIO_RESULT_OK,
42 GPIO_RESULT_NULL_HANDLE,
43 GPIO_RESULT_INVALID_DATA,
44 GPIO_RESULT_DEVICE_BUSY,
45 GPIO_RESULT_OPEN_FAILED,
46 GPIO_RESULT_ALREADY_OPENED,
47 GPIO_RESULT_NON_SPECIFIC_ERROR
48};
49
50/*
51 * @brief
52 * Used to identify the specific GPIO device
53 *
54 * @notes
55 * These constants are used as indices in a vector.
56 * Thus they should start from zero and be contiguous.
57 */
58enum gpio_id {
59 GPIO_ID_UNKNOWN = (-1),
60 GPIO_ID_DDC_DATA,
61 GPIO_ID_DDC_CLOCK,
62 GPIO_ID_GENERIC,
63 GPIO_ID_HPD,
64 GPIO_ID_GPIO_PAD,
65 GPIO_ID_VIP_PAD,
66 GPIO_ID_SYNC,
67 GPIO_ID_GSL, /* global swap lock */
68 GPIO_ID_COUNT,
69 GPIO_ID_MIN = GPIO_ID_DDC_DATA,
70 GPIO_ID_MAX = GPIO_ID_GSL
71};
72
73#define GPIO_ENUM_UNKNOWN \
74 32
75
76struct gpio_pin_info {
77 uint32_t offset;
78 uint32_t offset_y;
79 uint32_t offset_en;
80 uint32_t offset_mask;
81
82 uint32_t mask;
83 uint32_t mask_y;
84 uint32_t mask_en;
85 uint32_t mask_mask;
86};
87
88enum gpio_pin_output_state {
89 GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW,
90 GPIO_PIN_OUTPUT_STATE_ACTIVE_HIGH,
91 GPIO_PIN_OUTPUT_STATE_DEFAULT = GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW
92};
93
94enum gpio_generic {
95 GPIO_GENERIC_UNKNOWN = (-1),
96 GPIO_GENERIC_A,
97 GPIO_GENERIC_B,
98 GPIO_GENERIC_C,
99 GPIO_GENERIC_D,
100 GPIO_GENERIC_E,
101 GPIO_GENERIC_F,
102 GPIO_GENERIC_G,
103 GPIO_GENERIC_COUNT,
104 GPIO_GENERIC_MIN = GPIO_GENERIC_A,
105 GPIO_GENERIC_MAX = GPIO_GENERIC_B
106};
107
108enum gpio_hpd {
109 GPIO_HPD_UNKNOWN = (-1),
110 GPIO_HPD_1,
111 GPIO_HPD_2,
112 GPIO_HPD_3,
113 GPIO_HPD_4,
114 GPIO_HPD_5,
115 GPIO_HPD_6,
116 GPIO_HPD_COUNT,
117 GPIO_HPD_MIN = GPIO_HPD_1,
118 GPIO_HPD_MAX = GPIO_HPD_6
119};
120
121enum gpio_gpio_pad {
122 GPIO_GPIO_PAD_UNKNOWN = (-1),
123 GPIO_GPIO_PAD_0,
124 GPIO_GPIO_PAD_1,
125 GPIO_GPIO_PAD_2,
126 GPIO_GPIO_PAD_3,
127 GPIO_GPIO_PAD_4,
128 GPIO_GPIO_PAD_5,
129 GPIO_GPIO_PAD_6,
130 GPIO_GPIO_PAD_7,
131 GPIO_GPIO_PAD_8,
132 GPIO_GPIO_PAD_9,
133 GPIO_GPIO_PAD_10,
134 GPIO_GPIO_PAD_11,
135 GPIO_GPIO_PAD_12,
136 GPIO_GPIO_PAD_13,
137 GPIO_GPIO_PAD_14,
138 GPIO_GPIO_PAD_15,
139 GPIO_GPIO_PAD_16,
140 GPIO_GPIO_PAD_17,
141 GPIO_GPIO_PAD_18,
142 GPIO_GPIO_PAD_19,
143 GPIO_GPIO_PAD_20,
144 GPIO_GPIO_PAD_21,
145 GPIO_GPIO_PAD_22,
146 GPIO_GPIO_PAD_23,
147 GPIO_GPIO_PAD_24,
148 GPIO_GPIO_PAD_25,
149 GPIO_GPIO_PAD_26,
150 GPIO_GPIO_PAD_27,
151 GPIO_GPIO_PAD_28,
152 GPIO_GPIO_PAD_29,
153 GPIO_GPIO_PAD_30,
154 GPIO_GPIO_PAD_COUNT,
155 GPIO_GPIO_PAD_MIN = GPIO_GPIO_PAD_0,
156 GPIO_GPIO_PAD_MAX = GPIO_GPIO_PAD_30
157};
158
159enum gpio_vip_pad {
160 GPIO_VIP_PAD_UNKNOWN = (-1),
161 /* following never used -
162 * GPIO_ID_DDC_CLOCK::GPIO_DDC_LINE_VIP_PAD defined instead */
163 GPIO_VIP_PAD_SCL,
164 /* following never used -
165 * GPIO_ID_DDC_DATA::GPIO_DDC_LINE_VIP_PAD defined instead */
166 GPIO_VIP_PAD_SDA,
167 GPIO_VIP_PAD_VHAD,
168 GPIO_VIP_PAD_VPHCTL,
169 GPIO_VIP_PAD_VIPCLK,
170 GPIO_VIP_PAD_VID,
171 GPIO_VIP_PAD_VPCLK0,
172 GPIO_VIP_PAD_DVALID,
173 GPIO_VIP_PAD_PSYNC,
174 GPIO_VIP_PAD_COUNT,
175 GPIO_VIP_PAD_MIN = GPIO_VIP_PAD_SCL,
176 GPIO_VIP_PAD_MAX = GPIO_VIP_PAD_PSYNC
177};
178
179enum gpio_sync {
180 GPIO_SYNC_UNKNOWN = (-1),
181 GPIO_SYNC_HSYNC_A,
182 GPIO_SYNC_VSYNC_A,
183 GPIO_SYNC_HSYNC_B,
184 GPIO_SYNC_VSYNC_B,
185 GPIO_SYNC_COUNT,
186 GPIO_SYNC_MIN = GPIO_SYNC_HSYNC_A,
187 GPIO_SYNC_MAX = GPIO_SYNC_VSYNC_B
188};
189
190enum gpio_gsl {
191 GPIO_GSL_UNKNOWN = (-1),
192 GPIO_GSL_GENLOCK_CLOCK,
193 GPIO_GSL_GENLOCK_VSYNC,
194 GPIO_GSL_SWAPLOCK_A,
195 GPIO_GSL_SWAPLOCK_B,
196 GPIO_GSL_COUNT,
197 GPIO_GSL_MIN = GPIO_GSL_GENLOCK_CLOCK,
198 GPIO_GSL_MAX = GPIO_GSL_SWAPLOCK_B
199};
200
201/*
202 * @brief
203 * Unique Id for DDC handle.
204 * Values are meaningful (used as indices into an array)
205 */
206enum gpio_ddc_line {
207 GPIO_DDC_LINE_UNKNOWN = (-1),
208 GPIO_DDC_LINE_DDC1,
209 GPIO_DDC_LINE_DDC2,
210 GPIO_DDC_LINE_DDC3,
211 GPIO_DDC_LINE_DDC4,
212 GPIO_DDC_LINE_DDC5,
213 GPIO_DDC_LINE_DDC6,
214 GPIO_DDC_LINE_DDC_VGA,
215 GPIO_DDC_LINE_VIP_PAD,
216 GPIO_DDC_LINE_I2C_PAD = GPIO_DDC_LINE_VIP_PAD,
217 GPIO_DDC_LINE_COUNT,
218 GPIO_DDC_LINE_MIN = GPIO_DDC_LINE_DDC1,
219 GPIO_DDC_LINE_MAX = GPIO_DDC_LINE_I2C_PAD
220};
221
222/*
223 * @brief
224 * Identifies the mode of operation to open a GPIO device.
225 * A GPIO device (pin) can be programmed in only one of these modes at a time.
226 */
227enum gpio_mode {
228 GPIO_MODE_UNKNOWN = (-1),
229 GPIO_MODE_INPUT,
230 GPIO_MODE_OUTPUT,
231 GPIO_MODE_FAST_OUTPUT,
232 GPIO_MODE_HARDWARE,
233 GPIO_MODE_INTERRUPT
234};
235
236/*
237 * @brief
238 * Identifies the source of the signal when GPIO is in HW mode.
239 * get_signal_source() will return GPIO_SIGNAL_SOURCE_UNKNOWN
240 * when one of the following holds:
241 * 1. the GPIO is an input GPIO
242 * 2. the GPIO is not opened in HW mode
243 * 3. the GPIO does not have a fixed signal source
244 * (e.g. DC_GenericA has a mux instead of a fixed source)
245 */
246enum gpio_signal_source {
247 GPIO_SIGNAL_SOURCE_UNKNOWN = (-1),
248 GPIO_SIGNAL_SOURCE_DACA_STEREO_SYNC,
249 GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC,
250 GPIO_SIGNAL_SOURCE_DACB_STEREO_SYNC,
251 GPIO_SIGNAL_SOURCE_DACA_HSYNC,
252 GPIO_SIGNAL_SOURCE_DACB_HSYNC,
253 GPIO_SIGNAL_SOURCE_DACA_VSYNC,
254 GPIO_SIGNAL_SOURCE_DACB_VSYNC,
255};
256
257enum gpio_stereo_source {
258 GPIO_STEREO_SOURCE_UNKNOWN = (-1),
259 GPIO_STEREO_SOURCE_D1,
260 GPIO_STEREO_SOURCE_D2,
261 GPIO_STEREO_SOURCE_D3,
262 GPIO_STEREO_SOURCE_D4,
263 GPIO_STEREO_SOURCE_D5,
264 GPIO_STEREO_SOURCE_D6
265};
266
267/*
268 * GPIO config
269 */
270
271enum gpio_config_type {
272 GPIO_CONFIG_TYPE_NONE,
273 GPIO_CONFIG_TYPE_DDC,
274 GPIO_CONFIG_TYPE_HPD,
275 GPIO_CONFIG_TYPE_GENERIC_MUX,
276 GPIO_CONFIG_TYPE_GSL_MUX,
277 GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE
278};
279
280/* DDC configuration */
281
282enum gpio_ddc_config_type {
283 GPIO_DDC_CONFIG_TYPE_MODE_AUX,
284 GPIO_DDC_CONFIG_TYPE_MODE_I2C,
285 GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT,
286 GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT,
287 GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING
288};
289
290struct gpio_ddc_config {
291 enum gpio_ddc_config_type type;
292 bool data_en_bit_present;
293 bool clock_en_bit_present;
294};
295
296/* HPD configuration */
297
298struct gpio_hpd_config {
299 uint32_t delay_on_connect; /* milliseconds */
300 uint32_t delay_on_disconnect; /* milliseconds */
301};
302
303struct gpio_generic_mux_config {
304 bool enable_output_from_mux;
305 enum gpio_signal_source mux_select;
306 enum gpio_stereo_source stereo_select;
307};
308
309enum gpio_gsl_mux_config_type {
310 GPIO_GSL_MUX_CONFIG_TYPE_DISABLE,
311 GPIO_GSL_MUX_CONFIG_TYPE_TIMING_SYNC,
312 GPIO_GSL_MUX_CONFIG_TYPE_FLIP_SYNC
313};
314
315struct gpio_gsl_mux_config {
316 enum gpio_gsl_mux_config_type type;
317 /* actually of type sync_source,
318 * but we want to avoid inter-component includes here */
319 uint32_t gsl_group;
320};
321
322struct gpio_config_data {
323 enum gpio_config_type type;
324 union {
325 struct gpio_ddc_config ddc;
326 struct gpio_hpd_config hpd;
327 struct gpio_generic_mux_config generic_mux;
328 struct gpio_gsl_mux_config gsl_mux;
329 } config;
330};
331
332#endif
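For illustration only (not part of this patch): gpio_config_data is what dal_gpio_set_config() in gpio_interface.h consumes, with the type field selecting which union member is valid. A hedged sketch; the 500 ms debounce values and the helper name are invented for the example.

/* Editorial sketch: building an HPD debounce configuration. */
static enum gpio_result sketch_set_hpd_filter(struct gpio *hpd_pin)
{
	struct gpio_config_data cfg = {
		.type = GPIO_CONFIG_TYPE_HPD,
		.config.hpd = {
			.delay_on_connect = 500,	/* milliseconds */
			.delay_on_disconnect = 500,	/* milliseconds */
		},
	};

	return dal_gpio_set_config(hpd_pin, &cfg);
}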
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
new file mode 100644
index 000000000000..9c0bf6521dd9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -0,0 +1,407 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GRPH_OBJECT_CTRL_DEFS_H__
27#define __DAL_GRPH_OBJECT_CTRL_DEFS_H__
28
29#include "grph_object_defs.h"
30
31/*
32 * #####################################################
33 * #####################################################
34 *
35 * These defines shared between asic_control/bios_parser and other
36 * DAL components
37 *
38 * #####################################################
39 * #####################################################
40 */
41
42enum display_output_bit_depth {
43 PANEL_UNDEFINE = 0,
44 PANEL_6BIT_COLOR = 1,
45 PANEL_8BIT_COLOR = 2,
46 PANEL_10BIT_COLOR = 3,
47 PANEL_12BIT_COLOR = 4,
48 PANEL_16BIT_COLOR = 5,
49};
50
51
52/* Device type as abstracted by ATOM BIOS */
53enum dal_device_type {
54 DEVICE_TYPE_UNKNOWN = 0,
55 DEVICE_TYPE_LCD,
56 DEVICE_TYPE_CRT,
57 DEVICE_TYPE_DFP,
58 DEVICE_TYPE_CV,
59 DEVICE_TYPE_TV,
60 DEVICE_TYPE_CF,
61 DEVICE_TYPE_WIRELESS
62};
63
64/* Device ID as abstracted by ATOM BIOS */
65struct device_id {
66 enum dal_device_type device_type:16;
67 uint32_t enum_id:16; /* 1 based enum */
68};
69
70struct graphics_object_i2c_info {
71 struct gpio_info {
72 uint32_t clk_mask_register_index;
73 uint32_t clk_en_register_index;
74 uint32_t clk_y_register_index;
75 uint32_t clk_a_register_index;
76 uint32_t data_mask_register_index;
77 uint32_t data_en_register_index;
78 uint32_t data_y_register_index;
79 uint32_t data_a_register_index;
80
81 uint32_t clk_mask_shift;
82 uint32_t clk_en_shift;
83 uint32_t clk_y_shift;
84 uint32_t clk_a_shift;
85 uint32_t data_mask_shift;
86 uint32_t data_en_shift;
87 uint32_t data_y_shift;
88 uint32_t data_a_shift;
89 } gpio_info;
90
91 bool i2c_hw_assist;
92 uint32_t i2c_line;
93 uint32_t i2c_engine_id;
94 uint32_t i2c_slave_address;
95};
96
97struct graphics_object_hpd_info {
98 uint8_t hpd_int_gpio_uid;
99 uint8_t hpd_active;
100};
101
102struct connector_device_tag_info {
103 uint32_t acpi_device;
104 struct device_id dev_id;
105};
106
107struct device_timing {
108 struct misc_info {
109 uint32_t HORIZONTAL_CUT_OFF:1;
110 /* 0=Active High, 1=Active Low */
111 uint32_t H_SYNC_POLARITY:1;
112 /* 0=Active High, 1=Active Low */
113 uint32_t V_SYNC_POLARITY:1;
114 uint32_t VERTICAL_CUT_OFF:1;
115 uint32_t H_REPLICATION_BY2:1;
116 uint32_t V_REPLICATION_BY2:1;
117 uint32_t COMPOSITE_SYNC:1;
118 uint32_t INTERLACE:1;
119 uint32_t DOUBLE_CLOCK:1;
120 uint32_t RGB888:1;
121 uint32_t GREY_LEVEL:2;
122 uint32_t SPATIAL:1;
123 uint32_t TEMPORAL:1;
124 uint32_t API_ENABLED:1;
125 } misc_info;
126
127 uint32_t pixel_clk; /* in KHz */
128 uint32_t horizontal_addressable;
129 uint32_t horizontal_blanking_time;
130 uint32_t vertical_addressable;
131 uint32_t vertical_blanking_time;
132 uint32_t horizontal_sync_offset;
133 uint32_t horizontal_sync_width;
134 uint32_t vertical_sync_offset;
135 uint32_t vertical_sync_width;
136 uint32_t horizontal_border;
137 uint32_t vertical_border;
138};
139
140struct supported_refresh_rate {
141 uint32_t REFRESH_RATE_30HZ:1;
142 uint32_t REFRESH_RATE_40HZ:1;
143 uint32_t REFRESH_RATE_48HZ:1;
144 uint32_t REFRESH_RATE_50HZ:1;
145 uint32_t REFRESH_RATE_60HZ:1;
146};
147
148struct embedded_panel_info {
149 struct device_timing lcd_timing;
150 uint32_t ss_id;
151 struct supported_refresh_rate supported_rr;
152 uint32_t drr_enabled;
153 uint32_t min_drr_refresh_rate;
154 bool realtek_eDPToLVDS;
155};
156
157struct firmware_info {
158 struct pll_info {
159 uint32_t crystal_frequency; /* in KHz */
160 uint32_t min_input_pxl_clk_pll_frequency; /* in KHz */
161 uint32_t max_input_pxl_clk_pll_frequency; /* in KHz */
162 uint32_t min_output_pxl_clk_pll_frequency; /* in KHz */
163 uint32_t max_output_pxl_clk_pll_frequency; /* in KHz */
164 } pll_info;
165
166 struct firmware_feature {
167 uint32_t memory_clk_ss_percentage;
168 uint32_t engine_clk_ss_percentage;
169 } feature;
170
171 uint32_t default_display_engine_pll_frequency; /* in KHz */
172 uint32_t external_clock_source_frequency_for_dp; /* in KHz */
173 uint32_t smu_gpu_pll_output_freq; /* in KHz */
174 uint8_t min_allowed_bl_level;
175 uint8_t remote_display_config;
176 uint32_t default_memory_clk; /* in KHz */
177 uint32_t default_engine_clk; /* in KHz */
178 uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */
179 uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */
180
181
182};
183
184struct step_and_delay_info {
185 uint32_t step;
186 uint32_t delay;
187 uint32_t recommended_ref_div;
188};
189
190struct spread_spectrum_info {
191 struct spread_spectrum_type {
192 bool CENTER_MODE:1;
193 bool EXTERNAL:1;
194 bool STEP_AND_DELAY_INFO:1;
195 } type;
196
197 /* in units of 0.01% (spread_percentage_divider = 100),
198 otherwise in units of 0.001% (spread_percentage_divider = 1000) */
199 uint32_t spread_spectrum_percentage;
200 uint32_t spread_percentage_divider; /* 100 or 1000 */
201 uint32_t spread_spectrum_range; /* modulation freq (HZ)*/
202
203 union {
204 struct step_and_delay_info step_and_delay_info;
205 /* For mem/engine/uvd, this is the clock output (VCO) frequency,
206 in units of kHz. For TMDS/HDMI/LVDS, it is the pixel clock;
207 for DP, it is the link clock (270000 or 162000) */
208 uint32_t target_clock_range; /* in KHz */
209 };
210
211};
212
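For illustration only (not part of this patch): the two-field percentage encoding above is easy to misread. The actual percentage is spread_spectrum_percentage divided by spread_percentage_divider, so a value of 36 with a divider of 1000 means 0.036%. A small hypothetical helper that decodes it into an integer-only unit:

/* Editorial sketch: decoding the spread spectrum fields into parts-per-million.
 * Assumes the intermediate product fits in 32 bits. */
static uint32_t sketch_ss_ppm(const struct spread_spectrum_info *info)
{
	/* percent = percentage / divider; 1 percent = 10000 ppm */
	return info->spread_spectrum_percentage * 10000 /
		info->spread_percentage_divider;
}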
213struct graphics_object_encoder_cap_info {
214 uint32_t dp_hbr2_cap:1;
215 uint32_t dp_hbr2_validated:1;
216 /*
217 * TODO: add MST and HDMI 6G capable flags
218 */
219 uint32_t reserved:15;
220};
221
222struct din_connector_info {
223 uint32_t gpio_id;
224 bool gpio_tv_active_state;
225};
226
227/* Invalid channel mapping */
228enum { INVALID_DDI_CHANNEL_MAPPING = 0x0 };
229
230/**
231 * DDI PHY channel mapping reflecting XBAR setting
232 */
233union ddi_channel_mapping {
234 struct mapping {
235 uint8_t lane0:2; /* Mapping for lane 0 */
236 uint8_t lane1:2; /* Mapping for lane 1 */
237 uint8_t lane2:2; /* Mapping for lane 2 */
238 uint8_t lane3:2; /* Mapping for lane 3 */
239 } mapping;
240 uint8_t raw;
241};
242
243/**
244 * Transmitter output configuration description
245 */
246struct transmitter_configuration_info {
247 /* DDI PHY ID for the transmitter */
248 enum transmitter transmitter_phy_id;
249 /* DDI PHY channel mapping reflecting crossbar setting */
250 union ddi_channel_mapping output_channel_mapping;
251};
252
253struct transmitter_configuration {
254 /* Configuration for the primary transmitter */
255 struct transmitter_configuration_info primary_transmitter_config;
256 /* Secondary transmitter configuration for Dual-link DVI */
257 struct transmitter_configuration_info secondary_transmitter_config;
258};
259
260/* These sizes should be sufficient to store info coming from the BIOS */
261#define NUMBER_OF_UCHAR_FOR_GUID 16
262#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
263#define NUMBER_OF_CSR_M3_ARB 10
264#define NUMBER_OF_DISP_CLK_VOLTAGE 4
265#define NUMBER_OF_AVAILABLE_SCLK 5
266
267/* V6 */
268struct integrated_info {
269 struct clock_voltage_caps {
270 /* The Voltage Index indicated by FUSE, same voltage index
271 shared with SCLK DPM fuse table */
272 uint32_t voltage_index;
273 /* Maximum clock supported with specified voltage index */
274 uint32_t max_supported_clk; /* in KHz */
275 } disp_clk_voltage[NUMBER_OF_DISP_CLK_VOLTAGE];
276
277 struct display_connection_info {
278 struct external_display_path {
279 /* A bit vector to show what devices are supported */
280 uint32_t device_tag;
281 /* 16bit device ACPI id. */
282 uint32_t device_acpi_enum;
283 /* A physical connector for displays to plug in,
284 using object connector definitions */
285 struct graphics_object_id device_connector_id;
286 /* An index into external AUX/DDC channel LUT */
287 uint8_t ext_aux_ddc_lut_index;
288 /* An index into external HPD pin LUT */
289 uint8_t ext_hpd_pin_lut_index;
290 /* external encoder object id */
291 struct graphics_object_id ext_encoder_obj_id;
292 /* XBAR mapping of the PHY channels */
293 union ddi_channel_mapping channel_mapping;
294 } path[MAX_NUMBER_OF_EXT_DISPLAY_PATH];
295
296 uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID];
297 uint8_t checksum;
298 } ext_disp_conn_info; /* exiting long long time */
299
300 struct available_s_clk_list {
301 /* Maximum clock supported with specified voltage index */
302 uint32_t supported_s_clk; /* in KHz */
303 /* The Voltage Index indicated by FUSE for specified SCLK */
304 uint32_t voltage_index;
305 /* The Voltage ID indicated by FUSE for specified SCLK */
306 uint32_t voltage_id;
307 } avail_s_clk[NUMBER_OF_AVAILABLE_SCLK];
308
309 uint8_t memory_type;
310 uint8_t ma_channel_number;
311 uint32_t boot_up_engine_clock; /* in KHz */
312 uint32_t dentist_vco_freq; /* in KHz */
313 uint32_t boot_up_uma_clock; /* in KHz */
314 uint32_t boot_up_req_display_vector;
315 uint32_t other_display_misc;
316 uint32_t gpu_cap_info;
317 uint32_t sb_mmio_base_addr;
318 uint32_t system_config;
319 uint32_t cpu_cap_info;
320 uint32_t max_nb_voltage;
321 uint32_t min_nb_voltage;
322 uint32_t boot_up_nb_voltage;
323 uint32_t ext_disp_conn_info_offset;
324 uint32_t csr_m3_arb_cntl_default[NUMBER_OF_CSR_M3_ARB];
325 uint32_t csr_m3_arb_cntl_uvd[NUMBER_OF_CSR_M3_ARB];
326 uint32_t csr_m3_arb_cntl_fs3d[NUMBER_OF_CSR_M3_ARB];
327 uint32_t gmc_restore_reset_time;
328 uint32_t minimum_n_clk;
329 uint32_t idle_n_clk;
330 uint32_t ddr_dll_power_up_time;
331 uint32_t ddr_pll_power_up_time;
332 /* start for V6 */
333 uint32_t pcie_clk_ss_type;
334 uint32_t lvds_ss_percentage;
335 uint32_t lvds_sspread_rate_in_10hz;
336 uint32_t hdmi_ss_percentage;
337 uint32_t hdmi_sspread_rate_in_10hz;
338 uint32_t dvi_ss_percentage;
339 uint32_t dvi_sspread_rate_in_10_hz;
340 uint32_t sclk_dpm_boost_margin;
341 uint32_t sclk_dpm_throttle_margin;
342 uint32_t sclk_dpm_tdp_limit_pg;
343 uint32_t sclk_dpm_tdp_limit_boost;
344 uint32_t boost_engine_clock;
345 uint32_t boost_vid_2bit;
346 uint32_t enable_boost;
347 uint32_t gnb_tdp_limit;
348 /* Start from V7 */
349 uint32_t max_lvds_pclk_freq_in_single_link;
350 uint32_t lvds_misc;
351 uint32_t lvds_pwr_on_seq_dig_on_to_de_in_4ms;
352 uint32_t lvds_pwr_on_seq_de_to_vary_bl_in_4ms;
353 uint32_t lvds_pwr_off_seq_vary_bl_to_de_in4ms;
354 uint32_t lvds_pwr_off_seq_de_to_dig_on_in4ms;
355 uint32_t lvds_off_to_on_delay_in_4ms;
356 uint32_t lvds_pwr_on_seq_vary_bl_to_blon_in_4ms;
357 uint32_t lvds_pwr_off_seq_blon_to_vary_bl_in_4ms;
358 uint32_t lvds_reserved1;
359 uint32_t lvds_bit_depth_control_val;
360};
361
362/**
363 * Power source ids.
364 */
365enum power_source {
366 POWER_SOURCE_AC = 0,
367 POWER_SOURCE_DC,
368 POWER_SOURCE_LIMITED_POWER,
369 POWER_SOURCE_LIMITED_POWER_2,
370 POWER_SOURCE_MAX
371};
372
373struct bios_event_info {
374 uint32_t thermal_state;
375 uint32_t backlight_level;
376 enum power_source powerSource;
377 bool has_thermal_state_changed;
378 bool has_power_source_changed;
379 bool has_forced_mode_changed;
380 bool forced_mode;
381 bool backlight_changed;
382};
383
384enum {
385 HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
386 TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
387};
388
389/*
390 * DFS-bypass flag
391 */
392/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
393enum {
394 DFS_BYPASS_ENABLE = 0x10
395};
396
397enum {
398 INVALID_BACKLIGHT = -1
399};
400
401struct panel_backlight_boundaries {
402 uint32_t min_signal_level;
403 uint32_t max_signal_level;
404};
405
406
407#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
new file mode 100644
index 000000000000..2941b882b0b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GRPH_OBJECT_DEFS_H__
27#define __DAL_GRPH_OBJECT_DEFS_H__
28
29#include "grph_object_id.h"
30
31/* ********************************************************************
32 * ********************************************************************
33 *
34 * These defines shared between All Graphics Objects
35 *
36 * ********************************************************************
37 * ********************************************************************
38 */
39
40/* HPD unit id - HW direct translation */
41enum hpd_source_id {
42 HPD_SOURCEID1 = 0,
43 HPD_SOURCEID2,
44 HPD_SOURCEID3,
45 HPD_SOURCEID4,
46 HPD_SOURCEID5,
47 HPD_SOURCEID6,
48
49 HPD_SOURCEID_COUNT,
50 HPD_SOURCEID_UNKNOWN
51};
52
53/* DDC unit id - HW direct translation */
54enum channel_id {
55 CHANNEL_ID_UNKNOWN = 0,
56 CHANNEL_ID_DDC1,
57 CHANNEL_ID_DDC2,
58 CHANNEL_ID_DDC3,
59 CHANNEL_ID_DDC4,
60 CHANNEL_ID_DDC5,
61 CHANNEL_ID_DDC6,
62 CHANNEL_ID_DDC_VGA,
63 CHANNEL_ID_I2C_PAD,
64 CHANNEL_ID_COUNT
65};
66
67#define DECODE_CHANNEL_ID(ch_id) \
68 (ch_id) == CHANNEL_ID_DDC1 ? "CHANNEL_ID_DDC1" : \
69 (ch_id) == CHANNEL_ID_DDC2 ? "CHANNEL_ID_DDC2" : \
70 (ch_id) == CHANNEL_ID_DDC3 ? "CHANNEL_ID_DDC3" : \
71 (ch_id) == CHANNEL_ID_DDC4 ? "CHANNEL_ID_DDC4" : \
72 (ch_id) == CHANNEL_ID_DDC5 ? "CHANNEL_ID_DDC5" : \
73 (ch_id) == CHANNEL_ID_DDC6 ? "CHANNEL_ID_DDC6" : \
74 (ch_id) == CHANNEL_ID_DDC_VGA ? "CHANNEL_ID_DDC_VGA" : \
75 (ch_id) == CHANNEL_ID_I2C_PAD ? "CHANNEL_ID_I2C_PAD" : "Invalid"
76
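For illustration only (not part of this patch): DECODE_CHANNEL_ID above expands to a chained conditional that yields a string literal, so it is intended for logging. A hypothetical one-liner, assuming the usual DRM debug macros are in scope:

/* Editorial sketch: DECODE_CHANNEL_ID yields a printable name for a channel. */
static void sketch_log_ddc_channel(enum channel_id ch)
{
	DRM_DEBUG_KMS("DDC channel: %s\n", DECODE_CHANNEL_ID(ch));
}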
77enum transmitter {
78 TRANSMITTER_UNKNOWN = (-1L),
79 TRANSMITTER_UNIPHY_A,
80 TRANSMITTER_UNIPHY_B,
81 TRANSMITTER_UNIPHY_C,
82 TRANSMITTER_UNIPHY_D,
83 TRANSMITTER_UNIPHY_E,
84 TRANSMITTER_UNIPHY_F,
85 TRANSMITTER_NUTMEG_CRT,
86 TRANSMITTER_TRAVIS_CRT,
87 TRANSMITTER_TRAVIS_LCD,
88 TRANSMITTER_UNIPHY_G,
89 TRANSMITTER_COUNT
90};
91
92/* Generic source of the synchronisation input/output signal */
93/* Can be used for flow control, stereo sync, timing sync, frame sync, etc */
94enum sync_source {
95 SYNC_SOURCE_NONE = 0,
96
97 /* Source based on controllers */
98 SYNC_SOURCE_CONTROLLER0,
99 SYNC_SOURCE_CONTROLLER1,
100 SYNC_SOURCE_CONTROLLER2,
101 SYNC_SOURCE_CONTROLLER3,
102 SYNC_SOURCE_CONTROLLER4,
103 SYNC_SOURCE_CONTROLLER5,
104
105 /* Source based on GSL group */
106 SYNC_SOURCE_GSL_GROUP0,
107 SYNC_SOURCE_GSL_GROUP1,
108 SYNC_SOURCE_GSL_GROUP2,
109
110 /* Source based on GSL IOs */
111 /* These IOs normally used as GSL input/output */
112 SYNC_SOURCE_GSL_IO_FIRST,
113 SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK = SYNC_SOURCE_GSL_IO_FIRST,
114 SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC,
115 SYNC_SOURCE_GSL_IO_SWAPLOCK_A,
116 SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
117 SYNC_SOURCE_GSL_IO_LAST = SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
118
119 /* Source based on regular IOs */
120 SYNC_SOURCE_IO_FIRST,
121 SYNC_SOURCE_IO_GENERIC_A = SYNC_SOURCE_IO_FIRST,
122 SYNC_SOURCE_IO_GENERIC_B,
123 SYNC_SOURCE_IO_GENERIC_C,
124 SYNC_SOURCE_IO_GENERIC_D,
125 SYNC_SOURCE_IO_GENERIC_E,
126 SYNC_SOURCE_IO_GENERIC_F,
127 SYNC_SOURCE_IO_HPD1,
128 SYNC_SOURCE_IO_HPD2,
129 SYNC_SOURCE_IO_HSYNC_A,
130 SYNC_SOURCE_IO_VSYNC_A,
131 SYNC_SOURCE_IO_HSYNC_B,
132 SYNC_SOURCE_IO_VSYNC_B,
133 SYNC_SOURCE_IO_LAST = SYNC_SOURCE_IO_VSYNC_B,
134
135 /* Misc. flow control sources */
136 SYNC_SOURCE_DUAL_GPU_PIN
137};
138
139
140#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
new file mode 100644
index 000000000000..e4aa4ddf9d2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -0,0 +1,256 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_GRPH_OBJECT_ID_H__
27#define __DAL_GRPH_OBJECT_ID_H__
28
29/* Types of graphics objects */
30enum object_type {
31 OBJECT_TYPE_UNKNOWN = 0,
32
33 /* Direct ATOM BIOS translation */
34 OBJECT_TYPE_GPU,
35 OBJECT_TYPE_ENCODER,
36 OBJECT_TYPE_CONNECTOR,
37 OBJECT_TYPE_ROUTER,
38 OBJECT_TYPE_GENERIC,
39
40 /* Driver specific */
41 OBJECT_TYPE_AUDIO,
42 OBJECT_TYPE_CONTROLLER,
43 OBJECT_TYPE_CLOCK_SOURCE,
44 OBJECT_TYPE_ENGINE,
45
46 OBJECT_TYPE_COUNT
47};
48
49/* Enumeration inside one type of graphics objects */
50enum object_enum_id {
51 ENUM_ID_UNKNOWN = 0,
52 ENUM_ID_1,
53 ENUM_ID_2,
54 ENUM_ID_3,
55 ENUM_ID_4,
56 ENUM_ID_5,
57 ENUM_ID_6,
58 ENUM_ID_7,
59
60 ENUM_ID_COUNT
61};
62
63/* Generic object ids */
64enum generic_id {
65 GENERIC_ID_UNKNOWN = 0,
66 GENERIC_ID_MXM_OPM,
67 GENERIC_ID_GLSYNC,
68 GENERIC_ID_STEREO,
69
70 GENERIC_ID_COUNT
71};
72
73/* Controller object ids */
74enum controller_id {
75 CONTROLLER_ID_UNDEFINED = 0,
76 CONTROLLER_ID_D0,
77 CONTROLLER_ID_D1,
78 CONTROLLER_ID_D2,
79 CONTROLLER_ID_D3,
80 CONTROLLER_ID_D4,
81 CONTROLLER_ID_D5,
82 CONTROLLER_ID_UNDERLAY0,
83 CONTROLLER_ID_MAX = CONTROLLER_ID_UNDERLAY0
84};
85
86#define IS_UNDERLAY_CONTROLLER(ctrlr_id) (ctrlr_id >= CONTROLLER_ID_UNDERLAY0)
87
88/*
89 * ClockSource object ids.
90 * We maintain the order matching (more or less) ATOM BIOS
91 * to simplify optimized clock-source acquisition
92 */
93enum clock_source_id {
94 CLOCK_SOURCE_ID_UNDEFINED = 0,
95 CLOCK_SOURCE_ID_PLL0,
96 CLOCK_SOURCE_ID_PLL1,
97 CLOCK_SOURCE_ID_PLL2,
98 CLOCK_SOURCE_ID_EXTERNAL, /* ID (Phy) ref. clk. for DP */
99 CLOCK_SOURCE_ID_DCPLL,
100 CLOCK_SOURCE_ID_DFS, /* DENTIST */
101 CLOCK_SOURCE_ID_VCE, /* VCE does not need a real PLL */
102 /* Used to distinguish between programming pixel clock and ID (Phy) clock */
103 CLOCK_SOURCE_ID_DP_DTO,
104
105 CLOCK_SOURCE_COMBO_PHY_PLL0, /*combo PHY PLL defines (DC 11.2 and up)*/
106 CLOCK_SOURCE_COMBO_PHY_PLL1,
107 CLOCK_SOURCE_COMBO_PHY_PLL2,
108 CLOCK_SOURCE_COMBO_PHY_PLL3,
109 CLOCK_SOURCE_COMBO_PHY_PLL4,
110 CLOCK_SOURCE_COMBO_PHY_PLL5,
111 CLOCK_SOURCE_COMBO_DISPLAY_PLL0
112};
113
114/* Encoder object ids */
115enum encoder_id {
116 ENCODER_ID_UNKNOWN = 0,
117
118 /* Radeon Class Display Hardware */
119 ENCODER_ID_INTERNAL_LVDS,
120 ENCODER_ID_INTERNAL_TMDS1,
121 ENCODER_ID_INTERNAL_TMDS2,
122 ENCODER_ID_INTERNAL_DAC1,
123 ENCODER_ID_INTERNAL_DAC2, /* TV/CV DAC */
124
125 /* External Third Party Encoders */
126 ENCODER_ID_INTERNAL_LVTM1, /* not used for Radeon */
127 ENCODER_ID_INTERNAL_HDMI,
128
129 /* Kaleidoscope (KLDSCP) Class Display Hardware */
130 ENCODER_ID_INTERNAL_KLDSCP_TMDS1,
131 ENCODER_ID_INTERNAL_KLDSCP_DAC1,
132 ENCODER_ID_INTERNAL_KLDSCP_DAC2, /* Shared with CV/TV and CRT */
133 /* External TMDS (dual link) */
134 ENCODER_ID_EXTERNAL_MVPU_FPGA, /* MVPU FPGA chip */
135 ENCODER_ID_INTERNAL_DDI,
136 ENCODER_ID_INTERNAL_UNIPHY,
137 ENCODER_ID_INTERNAL_KLDSCP_LVTMA,
138 ENCODER_ID_INTERNAL_UNIPHY1,
139 ENCODER_ID_INTERNAL_UNIPHY2,
140 ENCODER_ID_EXTERNAL_NUTMEG,
141 ENCODER_ID_EXTERNAL_TRAVIS,
142
143 ENCODER_ID_INTERNAL_WIRELESS, /* Internal wireless display encoder */
144 ENCODER_ID_INTERNAL_UNIPHY3,
145 ENCODER_ID_INTERNAL_VIRTUAL,
146};
147
148/* Connector object ids */
149enum connector_id {
150 CONNECTOR_ID_UNKNOWN = 0,
151 CONNECTOR_ID_SINGLE_LINK_DVII = 1,
152 CONNECTOR_ID_DUAL_LINK_DVII = 2,
153 CONNECTOR_ID_SINGLE_LINK_DVID = 3,
154 CONNECTOR_ID_DUAL_LINK_DVID = 4,
155 CONNECTOR_ID_VGA = 5,
156 CONNECTOR_ID_HDMI_TYPE_A = 12,
157 CONNECTOR_ID_LVDS = 14,
158 CONNECTOR_ID_PCIE = 16,
159 CONNECTOR_ID_HARDCODE_DVI = 18,
160 CONNECTOR_ID_DISPLAY_PORT = 19,
161 CONNECTOR_ID_EDP = 20,
162 CONNECTOR_ID_MXM = 21,
163 CONNECTOR_ID_WIRELESS = 22,
164 CONNECTOR_ID_MIRACAST = 23,
165
166 CONNECTOR_ID_VIRTUAL = 100
167};
168
169/* Audio object ids */
170enum audio_id {
171 AUDIO_ID_UNKNOWN = 0,
172 AUDIO_ID_INTERNAL_AZALIA
173};
174
175/* Engine object ids */
176enum engine_id {
177 ENGINE_ID_DIGA,
178 ENGINE_ID_DIGB,
179 ENGINE_ID_DIGC,
180 ENGINE_ID_DIGD,
181 ENGINE_ID_DIGE,
182 ENGINE_ID_DIGF,
183 ENGINE_ID_DIGG,
184 ENGINE_ID_DACA,
185 ENGINE_ID_DACB,
186 ENGINE_ID_VCE, /* wireless display pseudo-encoder */
187 ENGINE_ID_VIRTUAL,
188
189 ENGINE_ID_COUNT,
190 ENGINE_ID_UNKNOWN = (-1L)
191};
192
193enum transmitter_color_depth {
194 TRANSMITTER_COLOR_DEPTH_24 = 0, /* 8 bits */
195 TRANSMITTER_COLOR_DEPTH_30, /* 10 bits */
196 TRANSMITTER_COLOR_DEPTH_36, /* 12 bits */
197 TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
198};
199
200/*
201 *****************************************************************************
202 * graphics_object_id struct
203 *
204 * graphics_object_id is a very simple struct wrapping a 32-bit Graphics
205 * Object identification
206 *
207 * This struct should stay very simple
208 * No dependencies at all (no includes)
209 * No debug messages or asserts
210 * No #ifndef and preprocessor directives
211 * No growth in size (no additional data members)
212 *****************************************************************************
213 */
214
215struct graphics_object_id {
216 uint32_t id:8;
217 uint32_t enum_id:4;
218 uint32_t type:4;
219 uint32_t reserved:16; /* for padding. total size should be u32 */
220};
221
222/* some simple functions for convenient graphics_object_id handling */
223
224static inline struct graphics_object_id dal_graphics_object_id_init(
225 uint32_t id,
226 enum object_enum_id enum_id,
227 enum object_type type)
228{
229 struct graphics_object_id result = {
230 id, enum_id, type, 0
231 };
232
233 return result;
234}
235
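For illustration only (not part of this patch): a hypothetical example of the helper above, packing a connector identity into the 16 meaningful bits of graphics_object_id using enums defined earlier in this header.

/* Editorial sketch: building the id for the first HDMI connector. */
static inline struct graphics_object_id sketch_hdmi_connector_id(void)
{
	return dal_graphics_object_id_init(CONNECTOR_ID_HDMI_TYPE_A,
					   ENUM_ID_1,
					   OBJECT_TYPE_CONNECTOR);
}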
236bool dal_graphics_object_id_is_valid(
237 struct graphics_object_id id);
238bool dal_graphics_object_id_is_equal(
239 struct graphics_object_id id1,
240 struct graphics_object_id id2);
241uint32_t dal_graphics_object_id_to_uint(
242 struct graphics_object_id id);
243
244enum controller_id dal_graphics_object_id_get_controller_id(
245 struct graphics_object_id id);
246enum clock_source_id dal_graphics_object_id_get_clock_source_id(
247 struct graphics_object_id id);
248enum encoder_id dal_graphics_object_id_get_encoder_id(
249 struct graphics_object_id id);
250enum connector_id dal_graphics_object_id_get_connector_id(
251 struct graphics_object_id id);
252enum audio_id dal_graphics_object_id_get_audio_id(
253 struct graphics_object_id id);
254enum engine_id dal_graphics_object_id_get_engine_id(
255 struct graphics_object_id id);
256#endif
diff --git a/drivers/gpu/drm/amd/display/include/hw_sequencer_types.h b/drivers/gpu/drm/amd/display/include/hw_sequencer_types.h
new file mode 100644
index 000000000000..9a78097e70f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/hw_sequencer_types.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_HW_SEQUENCER_TYPES_H__
27#define __DAL_HW_SEQUENCER_TYPES_H__
28
29#include "signal_types.h"
30#include "grph_object_defs.h"
31#include "link_service_types.h"
32
33/* define the structure of Dynamic Refresh Mode */
34struct drr_params {
35 /* defines the minimum possible vertical dimension of display timing
36 * for CRTC as supported by the panel */
37 uint32_t vertical_total_min;
38 /* defines the maximum possible vertical dimension of display timing
39 * for CRTC as supported by the panel */
40 uint32_t vertical_total_max;
41};
42
43/* CRTC timing structure */
44struct hw_crtc_timing {
45 uint32_t h_total;
46 uint32_t h_addressable;
47 uint32_t h_overscan_left;
48 uint32_t h_overscan_right;
49 uint32_t h_sync_start;
50 uint32_t h_sync_width;
51
52 uint32_t v_total;
53 uint32_t v_addressable;
54 uint32_t v_overscan_top;
55 uint32_t v_overscan_bottom;
56 uint32_t v_sync_start;
57 uint32_t v_sync_width;
58
59 /* in KHz */
60 uint32_t pixel_clock;
61
62 struct {
63 uint32_t INTERLACED:1;
64 uint32_t DOUBLESCAN:1;
65 uint32_t PIXEL_REPETITION:4; /* 1...10 */
66 uint32_t HSYNC_POSITIVE_POLARITY:1;
67 uint32_t VSYNC_POSITIVE_POLARITY:1;
68 /* frame should be packed for 3D
69 * (currently this refers to the HDMI 1.4a FramePacking format) */
70 uint32_t HORZ_COUNT_BY_TWO:1;
71 uint32_t PACK_3D_FRAME:1;
72 /* 0 - left eye polarity, 1 - right eye polarity */
73 uint32_t RIGHT_EYE_3D_POLARITY:1;
74 /* DVI-DL High-Color mode */
75 uint32_t HIGH_COLOR_DL_MODE:1;
76 uint32_t Y_ONLY:1;
77 /* HDMI 2.0 - Support scrambling for TMDS character
78 * rates less than or equal to 340Mcsc */
79 uint32_t LTE_340MCSC_SCRAMBLE:1;
80 } flags;
81};
82
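For a sense of scale (not part of this patch): a partially filled hw_crtc_timing for the CEA 1920x1080@60 mode is sketched below. Totals and pixel clock are the standard CEA-861 values; the sync placement fields are deliberately left out because this header does not state their reference point, so filling them would be guesswork.

/* Editorial sketch: selected fields for 1080p60 (148.5 MHz pixel clock). */
static const struct hw_crtc_timing sketch_timing_1080p60 = {
	.h_total = 2200,
	.h_addressable = 1920,
	.v_total = 1125,
	.v_addressable = 1080,
	.pixel_clock = 148500,	/* in KHz, as documented above */
	.flags = {
		.HSYNC_POSITIVE_POLARITY = 1,
		.VSYNC_POSITIVE_POLARITY = 1,
	},
};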
83/* TODO: hw_info_frame and hw_info_packet structures are the same as in the
84 * encoder - merge them */
85struct hw_info_packet {
86 bool valid;
87 uint8_t hb0;
88 uint8_t hb1;
89 uint8_t hb2;
90 uint8_t hb3;
91 uint8_t sb[28];
92};
93
94struct hw_info_frame {
95 /* Auxiliary Video Information */
96 struct hw_info_packet avi_info_packet;
97 struct hw_info_packet gamut_packet;
98 struct hw_info_packet vendor_info_packet;
99 /* Source Product Description */
100 struct hw_info_packet spd_packet;
101 /* Video Stream Configuration */
102 struct hw_info_packet vsc_packet;
103};
104
105#endif
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
new file mode 100644
index 000000000000..d2ec04d1c592
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2CAUX_INTERFACE_H__
27#define __DAL_I2CAUX_INTERFACE_H__
28
29#include "gpio_service_interface.h"
30
31
32#define DEFAULT_AUX_MAX_DATA_SIZE 16
33#define AUX_MAX_DEFER_WRITE_RETRY 20
34
35struct aux_payload {
36 /* set the following flag to read/write I2C data,
37 * clear it to read/write DPCD data */
38 bool i2c_over_aux;
39 /* set the following flag to write data,
40 * clear it to read data */
41 bool write;
42 uint32_t address;
43 uint8_t length;
44 uint8_t *data;
45};
46
47struct aux_command {
48 struct aux_payload *payloads;
49 uint8_t number_of_payloads;
50
51 /* expressed in milliseconds
52 * zero means "use default value" */
53 uint32_t defer_delay;
54
55 /* zero means "use default value" */
56 uint32_t max_defer_write_retry;
57};
58
59union aux_config {
60 struct {
61 uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
62 } bits;
63 uint32_t raw;
64};
65
66struct i2caux;
67
68struct i2caux *dal_i2caux_create(
69 struct dc_context *ctx);
70
71bool dal_i2caux_submit_i2c_command(
72 struct i2caux *i2caux,
73 struct ddc *ddc,
74 struct i2c_command *cmd);
75
76bool dal_i2caux_submit_aux_command(
77 struct i2caux *i2caux,
78 struct ddc *ddc,
79 struct aux_command *cmd);
80
81void dal_i2caux_configure_aux(
82 struct i2caux *i2caux,
83 struct ddc *ddc,
84 union aux_config cfg);
85
86void dal_i2caux_destroy(
87 struct i2caux **ptr);
88
89#endif
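For illustration only (not part of this patch): a native AUX (DPCD) read is built from the structures above with a single payload, i2c_over_aux and write both cleared, and zero defer fields to use the defaults. The i2caux and ddc handles in this hypothetical sketch are assumed to exist already.

/* Editorial sketch: reading one byte of DPCD at address 0x0 (DPCD_REV). */
static bool sketch_read_dpcd_rev(struct i2caux *i2caux, struct ddc *ddc,
				 uint8_t *rev)
{
	struct aux_payload payload = {
		.i2c_over_aux = false,	/* native AUX -> DPCD space */
		.write = false,		/* read transaction */
		.address = 0x0,		/* DPCD_REV register */
		.length = 1,
		.data = rev,
	};
	struct aux_command cmd = {
		.payloads = &payload,
		.number_of_payloads = 1,
		.defer_delay = 0,		/* use default */
		.max_defer_write_retry = 0,	/* use default */
	};

	return dal_i2caux_submit_aux_command(i2caux, ddc, &cmd);
}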
diff --git a/drivers/gpu/drm/amd/display/include/irq_interface.h b/drivers/gpu/drm/amd/display/include/irq_interface.h
new file mode 100644
index 000000000000..077ded3fbedd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/irq_interface.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_IRQ_INTERFACE_H__
27#define __DAL_IRQ_INTERFACE_H__
28
29#include "gpio_types.h"
30
31#endif
diff --git a/drivers/gpu/drm/amd/display/include/irq_service_interface.h b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
new file mode 100644
index 000000000000..d6ebed524daf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_IRQ_SERVICE_INTERFACE_H__
27#define __DAL_IRQ_SERVICE_INTERFACE_H__
28
29struct irq_service_init_data {
30 struct dc_context *ctx;
31};
32
33struct irq_service;
34
35void dal_irq_service_destroy(struct irq_service **irq_service);
36
37bool dal_irq_service_set(
38 struct irq_service *irq_service,
39 enum dc_irq_source source,
40 bool enable);
41
42bool dal_irq_service_ack(
43 struct irq_service *irq_service,
44 enum dc_irq_source source);
45
46enum dc_irq_source dal_irq_service_to_irq_source(
47 struct irq_service *irq_service,
48 uint32_t src_id,
49 uint32_t ext_id);
50
51#endif
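The service exposes a map/ack/set triple: dal_irq_service_to_irq_source() translates the raw source and extended IDs delivered with an interrupt into a dc_irq_source, which can then be acknowledged and enabled or disabled. A hedged sketch of that flow; the handler name and the origin of src_id/ext_id are assumptions:

/* Sketch only: map and acknowledge one display interrupt. */
static void example_handle_display_irq(struct irq_service *irq_service,
		uint32_t src_id, uint32_t ext_id)
{
	enum dc_irq_source source =
		dal_irq_service_to_irq_source(irq_service, src_id, ext_id);

	/* Acknowledge the source at the display block. */
	dal_irq_service_ack(irq_service, source);

	/* Leave delivery of this source enabled. */
	dal_irq_service_set(irq_service, source, true);
}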
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
new file mode 100644
index 000000000000..06e68426d430
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -0,0 +1,232 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_LINK_SERVICE_TYPES_H__
27#define __DAL_LINK_SERVICE_TYPES_H__
28
29#include "grph_object_id.h"
30#include "dpcd_defs.h"
31#include "dal_types.h"
32#include "irq_types.h"
33
34/*struct mst_mgr_callback_object;*/
35struct ddc;
36struct irq_manager;
37
38enum {
39 MAX_CONTROLLER_NUM = 6
40};
41
42enum link_service_type {
43 LINK_SERVICE_TYPE_LEGACY = 0,
44 LINK_SERVICE_TYPE_DP_SST,
45 LINK_SERVICE_TYPE_DP_MST,
46 LINK_SERVICE_TYPE_MAX
47};
48
49enum dpcd_value_mask {
50 DPCD_VALUE_MASK_MAX_LANE_COUNT_LANE_COUNT = 0x1F,
51 DPCD_VALUE_MASK_MAX_LANE_COUNT_TPS3_SUPPORTED = 0x40,
52 DPCD_VALUE_MASK_MAX_LANE_COUNT_ENHANCED_FRAME_EN = 0x80,
53 DPCD_VALUE_MASK_MAX_DOWNSPREAD = 0x01,
54 DPCD_VALUE_MASK_LANE_ALIGN_STATUS_INTERLANE_ALIGN_DONE = 0x01
55};
56
57enum dp_power_state {
58 DP_POWER_STATE_D0 = 1,
59 DP_POWER_STATE_D3
60};
61
62enum dpcd_downstream_port_types {
63 DPCD_DOWNSTREAM_DP,
64 DPCD_DOWNSTREAM_VGA,
65 DPCD_DOWNSTREAM_DVI_HDMI,
66 /* has no EDID (TV, CV) */
67 DPCD_DOWNSTREAM_NON_DDC
68};
69
70enum edp_revision {
71 /* eDP version 1.1 or lower */
72 EDP_REVISION_11 = 0x00,
73 /* eDP version 1.2 */
74 EDP_REVISION_12 = 0x01,
75 /* eDP version 1.3 */
76 EDP_REVISION_13 = 0x02
77};
78
79enum {
80 LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/
81};
82
83struct link_training_settings {
84 struct dc_link_settings link_settings;
85 struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
86 bool allow_invalid_msa_timing_param;
87};
88
89enum hw_dp_training_pattern {
90 HW_DP_TRAINING_PATTERN_1 = 0,
91 HW_DP_TRAINING_PATTERN_2,
92 HW_DP_TRAINING_PATTERN_3,
93 HW_DP_TRAINING_PATTERN_4
94};
95
96/*TODO: Move this enum to a test harness*/
97/* Test patterns*/
98enum dp_test_pattern {
99	/* Input data is passed through the scrambler
100	 * and 8b10b encoder straight to the output */
101 DP_TEST_PATTERN_VIDEO_MODE = 0,
102 /* phy test patterns*/
103 DP_TEST_PATTERN_D102,
104 DP_TEST_PATTERN_SYMBOL_ERROR,
105 DP_TEST_PATTERN_PRBS7,
106
107 DP_TEST_PATTERN_80BIT_CUSTOM,
108 DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE,
109
110 /* Link Training Patterns */
111 DP_TEST_PATTERN_TRAINING_PATTERN1,
112 DP_TEST_PATTERN_TRAINING_PATTERN2,
113 DP_TEST_PATTERN_TRAINING_PATTERN3,
114 DP_TEST_PATTERN_TRAINING_PATTERN4,
115
116 /* link test patterns*/
117 DP_TEST_PATTERN_COLOR_SQUARES,
118 DP_TEST_PATTERN_COLOR_SQUARES_CEA,
119 DP_TEST_PATTERN_VERTICAL_BARS,
120 DP_TEST_PATTERN_HORIZONTAL_BARS,
121 DP_TEST_PATTERN_COLOR_RAMP,
122
123 /* audio test patterns*/
124 DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED,
125 DP_TEST_PATTERN_AUDIO_SAWTOOTH,
126
127 DP_TEST_PATTERN_UNSUPPORTED
128};
129
130enum dp_panel_mode {
131 /* not required */
132 DP_PANEL_MODE_DEFAULT,
133 /* standard mode for eDP */
134 DP_PANEL_MODE_EDP,
135 /* external chips specific settings */
136 DP_PANEL_MODE_SPECIAL
137};
138
139/**
140 * @brief LinkService initialization option bits (SW workarounds)
141 */
142struct link_service_init_options {
143 uint32_t APPLY_MISALIGNMENT_BUG_WORKAROUND:1;
144};
145
146/**
147 * @brief data required to initialize LinkService
148 */
149struct link_service_init_data {
150	/* number of display indices which the MST Mgr would manage */
151 uint32_t num_of_displays;
152 enum link_service_type link_type;
153 /*struct mst_mgr_callback_object*topology_change_callback;*/
154 /* native aux access */
155 struct ddc_service *dpcd_access_srv;
156 /* for calling HWSS to program HW */
157 struct hw_sequencer *hwss;
158 /* the source which to register IRQ on */
159 enum dc_irq_source irq_src_hpd_rx;
160 enum dc_irq_source irq_src_dp_sink;
161 /* other init options such as SW Workarounds */
162 struct link_service_init_options init_options;
163 uint32_t connector_enum_id;
164 struct graphics_object_id connector_id;
165 struct dc_context *ctx;
166 struct topology_mgr *tm;
167};
168
169/* DPCD_ADDR_TRAINING_LANEx_SET registers value */
170union dpcd_training_lane_set {
171 struct {
172#if defined(LITTLEENDIAN_CPU)
173 uint8_t VOLTAGE_SWING_SET:2;
174 uint8_t MAX_SWING_REACHED:1;
175 uint8_t PRE_EMPHASIS_SET:2;
176 uint8_t MAX_PRE_EMPHASIS_REACHED:1;
177 /* following is reserved in DP 1.1 */
178 uint8_t POST_CURSOR2_SET:2;
179#elif defined(BIGENDIAN_CPU)
180 uint8_t POST_CURSOR2_SET:2;
181 uint8_t MAX_PRE_EMPHASIS_REACHED:1;
182 uint8_t PRE_EMPHASIS_SET:2;
183 uint8_t MAX_SWING_REACHED:1;
184 uint8_t VOLTAGE_SWING_SET:2;
185#else
186 #error ARCH not defined!
187#endif
188 } bits;
189
190 uint8_t raw;
191};
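/* Illustration (little-endian bit layout as above): a lane value with
 * VOLTAGE_SWING_SET = 2 and PRE_EMPHASIS_SET = 1, with the MAX_* and
 * POST_CURSOR2 bits clear, packs as raw = (2 << 0) | (1 << 3) = 0x0a,
 * which is the byte written to the corresponding
 * DPCD_ADDR_TRAINING_LANEx_SET register. */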
192
193/**
194 * @brief represents the 16-byte
195 * globally unique identifier (GUID)
196 */
197struct mst_guid {
198 uint8_t ids[16];
199};
200
201/**
202 * @brief represents the relative address used
203 * to identify a node in the MST topology network
204 */
205struct mst_rad {
206	/* number of links; rad[0] up to
207	 * rad[rad_link_count - 1] are valid. */
208 uint32_t rad_link_count;
209 /* relative address. rad[0] is the
210 * first device connected to the source. */
211 uint8_t rad[15];
212	/* extra 10 bytes for underscores, e.g. "2_1_8" */
213 int8_t rad_str[25];
214};
215
216/* DP MST stream allocation (payload bandwidth number) */
217struct dp_mst_stream_allocation {
218 uint8_t vcp_id;
219 /* number of slots required for the DP stream in
220 * transport packet */
221 uint8_t slot_count;
222};
223
224/* DP MST stream allocation table */
225struct dp_mst_stream_allocation_table {
226 /* number of DP video streams */
227 int stream_count;
228 /* array of stream allocations */
229 struct dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
230};
231
232#endif /*__DAL_LINK_SERVICE_TYPES_H__*/
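The allocation table caps the number of streams at MAX_CONTROLLER_NUM and stores one virtual channel payload (VCP) ID and slot count per stream. A small illustrative initialization; the IDs and slot counts are made up:

/* Illustration only: a two-stream MST payload table. */
static const struct dp_mst_stream_allocation_table example_table = {
	.stream_count = 2,
	.stream_allocations = {
		{ .vcp_id = 1, .slot_count = 16 },
		{ .vcp_id = 2, .slot_count = 8 },
	},
};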
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
new file mode 100644
index 000000000000..b58d30de8293
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_LOGGER_INTERFACE_H__
27#define __DAL_LOGGER_INTERFACE_H__
28
29#include "logger_types.h"
30
31struct dc_context;
32struct dc_link;
33struct dc_surface_update;
34
35/*
36 *
37 * DAL logger functionality
38 *
39 */
40
41struct dal_logger *dal_logger_create(struct dc_context *ctx);
42
43uint32_t dal_logger_destroy(struct dal_logger **logger);
44
45void dm_logger_write(
46 struct dal_logger *logger,
47 enum dc_log_type log_type,
48 const char *msg,
49 ...);
50
51void dm_logger_append(
52 struct log_entry *entry,
53 const char *msg,
54 ...);
55
56void dm_logger_open(
57 struct dal_logger *logger,
58 struct log_entry *entry,
59 enum dc_log_type log_type);
60
61void dm_logger_close(struct log_entry *entry);
62
63void dc_conn_log(struct dc_context *ctx,
64 const struct dc_link *link,
65 uint8_t *hex_data,
66 int hex_data_count,
67 enum dc_log_type event,
68 const char *msg,
69 ...);
70
71void logger_write(struct dal_logger *logger,
72 enum dc_log_type log_type,
73 const char *msg,
74 void *paralist);
75
76void pre_surface_trace(
77 const struct dc *dc,
78 const struct dc_surface *const *surfaces,
79 int surface_count);
80
81void update_surface_trace(
82 const struct dc *dc,
83 const struct dc_surface_update *updates,
84 int surface_count);
85
86void post_surface_trace(const struct dc *dc);
87
88
89/* Any function which is empty or have incomplete implementation should be
90 * marked by this macro.
91 * Note that the message will be printed exactly once for every function
92 * it is used in, to avoid repeating the same message. */
93#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
94{ \
95 static bool print_not_impl = true; \
96\
97 if (print_not_impl == true) { \
98 print_not_impl = false; \
99 dm_logger_write(ctx->logger, LOG_WARNING, \
100 "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
101 } \
102}
103
104/******************************************************************************
105 * Convenience macros to save on typing.
106 *****************************************************************************/
107
108#define DC_ERROR(...) \
109 dm_logger_write(dc_ctx->logger, LOG_ERROR, \
110 __VA_ARGS__);
111
112#define DC_SYNC_INFO(...) \
113 dm_logger_write(dc_ctx->logger, LOG_SYNC, \
114 __VA_ARGS__);
115
116
117/* Connectivity log format:
118 * [time stamp] [drm] [Major_minor] [connector name] message.....
119 * eg:
120 * [ 26.590965] [drm] [Conn_LKTN] [DP-1] HBRx4 pass VS=0, PE=0^
121 * [ 26.881060] [drm] [Conn_Mode] [DP-1] {2560x1080, 2784x1111@185580Khz}^
122 */
123
124#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
125 dc_conn_log(link->ctx, &link->public, hex_data, hex_len, \
126 LOG_EVENT_DETECTION, ##__VA_ARGS__)
127
128#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
129 dc_conn_log(link->ctx, &link->public, hex_data, hex_len, \
130 LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
131
132#define CONN_MSG_LT(link, ...) \
133 dc_conn_log(link->ctx, &link->public, NULL, 0, \
134 LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
135
136#define CONN_MSG_MODE(link, ...) \
137 dc_conn_log(link->ctx, &link->public, NULL, 0, \
138 LOG_EVENT_MODE_SET, ##__VA_ARGS__)
139
140#endif /* __DAL_LOGGER_INTERFACE_H__ */
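Besides the one-shot dm_logger_write(), a single entry can be built up incrementally with dm_logger_open(), dm_logger_append() and dm_logger_close(). A minimal sketch of that pattern, assuming a valid dal_logger pointer; the function name and message text are illustrative:

/* Sketch only: accumulate one LOG_HW_SET_MODE entry from several appends. */
static void example_log_mode_set(struct dal_logger *logger,
		int width, int height)
{
	struct log_entry entry;

	dm_logger_open(logger, &entry, LOG_HW_SET_MODE);
	dm_logger_append(&entry, "programming timing %dx%d", width, height);
	dm_logger_append(&entry, " ... done\n");
	dm_logger_close(&entry);
}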
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
new file mode 100644
index 000000000000..babd6523b105
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_LOGGER_TYPES_H__
27#define __DAL_LOGGER_TYPES_H__
28
29#include "os_types.h"
30
31#define MAX_NAME_LEN 32
32
33struct dal_logger;
34
35enum dc_log_type {
36 LOG_ERROR = 0,
37 LOG_WARNING,
38 LOG_DC,
39 LOG_SURFACE,
40 LOG_HW_HOTPLUG,
41 LOG_HW_LINK_TRAINING,
42 LOG_HW_SET_MODE,
43 LOG_HW_RESUME_S3,
44 LOG_HW_AUDIO,
45 LOG_HW_HPD_IRQ,
46 LOG_MST,
47 LOG_SCALER,
48 LOG_BIOS,
49 LOG_BANDWIDTH_CALCS,
50 LOG_BANDWIDTH_VALIDATION,
51 LOG_I2C_AUX,
52 LOG_SYNC,
53 LOG_BACKLIGHT,
54 LOG_FEATURE_OVERRIDE,
55 LOG_DETECTION_EDID_PARSER,
56 LOG_DETECTION_DP_CAPS,
57 LOG_RESOURCE,
58 LOG_DML,
59 LOG_EVENT_MODE_SET,
60 LOG_EVENT_DETECTION,
61 LOG_EVENT_LINK_TRAINING,
62 LOG_EVENT_LINK_LOSS,
63 LOG_EVENT_UNDERFLOW,
64 LOG_IF_TRACE,
65
66 LOG_SECTION_TOTAL_COUNT
67};
68
69union logger_flags {
70 struct {
71 uint32_t ENABLE_CONSOLE:1; /* Print to console */
72 uint32_t ENABLE_BUFFER:1; /* Print to buffer */
73 uint32_t RESERVED:30;
74 } bits;
75 uint32_t value;
76};
77
78struct log_entry {
79 struct dal_logger *logger;
80 enum dc_log_type type;
81
82 char *buf;
83 uint32_t buf_offset;
84 uint32_t max_buf_bytes;
85};
86
87/**
88* Structure for enumerating log types
89*/
90struct dc_log_type_info {
91 enum dc_log_type type;
92 char name[MAX_NAME_LEN];
93};
94
95#endif /* __DAL_LOGGER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/set_mode_types.h b/drivers/gpu/drm/amd/display/include/set_mode_types.h
new file mode 100644
index 000000000000..d18210ff5b7a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/set_mode_types.h
@@ -0,0 +1,127 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_SET_MODE_TYPES_H__
27#define __DAL_SET_MODE_TYPES_H__
28
29#include "dc_types.h"
30
31/* Info frame packet status */
32enum info_frame_flag {
33 INFO_PACKET_PACKET_INVALID = 0,
34 INFO_PACKET_PACKET_VALID = 1,
35 INFO_PACKET_PACKET_RESET = 2,
36 INFO_PACKET_PACKET_UPDATE_SCAN_TYPE = 8
37};
38
39/* Info frame types */
40enum info_frame_type {
41 INFO_FRAME_GAMUT = 0x0A,
42 INFO_FRAME_VENDOR_INFO = 0x81,
43 INFO_FRAME_AVI = 0x82
44};
45
46/* Info frame versions */
47enum info_frame_version {
48 INFO_FRAME_VERSION_1 = 1,
49 INFO_FRAME_VERSION_2 = 2,
50 INFO_FRAME_VERSION_3 = 3
51};
52
53/* Info frame size */
54enum info_frame_size {
55 INFO_FRAME_SIZE_AVI = 13,
56 INFO_FRAME_SIZE_VENDOR = 25,
57 INFO_FRAME_SIZE_AUDIO = 10
58};
59
60struct hdmi_info_frame_header {
61 uint8_t info_frame_type;
62 uint8_t version;
63 uint8_t length;
64};
65
66#pragma pack(push)
67#pragma pack(1)
68
69struct info_packet_raw_data {
70 uint8_t hb0;
71 uint8_t hb1;
72 uint8_t hb2;
73 uint8_t sb[28]; /* sb0~sb27 */
74};
75
76union hdmi_info_packet {
77 struct avi_info_frame {
78 struct hdmi_info_frame_header header;
79
80 uint8_t CHECK_SUM:8;
81
82 uint8_t S0_S1:2;
83 uint8_t B0_B1:2;
84 uint8_t A0:1;
85 uint8_t Y0_Y1_Y2:3;
86
87 uint8_t R0_R3:4;
88 uint8_t M0_M1:2;
89 uint8_t C0_C1:2;
90
91 uint8_t SC0_SC1:2;
92 uint8_t Q0_Q1:2;
93 uint8_t EC0_EC2:3;
94 uint8_t ITC:1;
95
96 uint8_t VIC0_VIC7:8;
97
98 uint8_t PR0_PR3:4;
99 uint8_t CN0_CN1:2;
100 uint8_t YQ0_YQ1:2;
101
102 uint16_t bar_top;
103 uint16_t bar_bottom;
104 uint16_t bar_left;
105 uint16_t bar_right;
106
107 uint8_t reserved[14];
108 } bits;
109
110 struct info_packet_raw_data packet_raw_data;
111};
112
113struct info_packet {
114 enum info_frame_flag flags;
115 union hdmi_info_packet info_packet_hdmi;
116};
117
118struct info_frame {
119 struct info_packet avi_info_packet;
120 struct info_packet gamut_packet;
121 struct info_packet vendor_info_packet;
122 struct info_packet spd_info_packet;
123};
124
125#pragma pack(pop)
126
127#endif /* __DAL_SET_MODE_TYPES_H__ */
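The packed union lets the same 31 bytes (3 header bytes plus 28 payload bytes) be viewed either as decoded AVI fields or as raw bytes for the hardware. A short illustration of filling the header from the enums in this file; the function name is illustrative:

/* Illustration only: mark an AVI InfoFrame valid and fill its header. */
static void example_init_avi_header(struct info_packet *pkt)
{
	pkt->flags = INFO_PACKET_PACKET_VALID;
	pkt->info_packet_hdmi.bits.header.info_frame_type = INFO_FRAME_AVI;
	pkt->info_packet_hdmi.bits.header.version = INFO_FRAME_VERSION_2;
	pkt->info_packet_hdmi.bits.header.length = INFO_FRAME_SIZE_AVI;
}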
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
new file mode 100644
index 000000000000..a50f7ed74a33
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_SIGNAL_TYPES_H__
27#define __DC_SIGNAL_TYPES_H__
28
29enum signal_type {
30 SIGNAL_TYPE_NONE = 0L, /* no signal */
31 SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
32 SIGNAL_TYPE_DVI_DUAL_LINK = (1 << 1),
33 SIGNAL_TYPE_HDMI_TYPE_A = (1 << 2),
34 SIGNAL_TYPE_LVDS = (1 << 3),
35 SIGNAL_TYPE_RGB = (1 << 4),
36 SIGNAL_TYPE_DISPLAY_PORT = (1 << 5),
37 SIGNAL_TYPE_DISPLAY_PORT_MST = (1 << 6),
38 SIGNAL_TYPE_EDP = (1 << 7),
39 SIGNAL_TYPE_WIRELESS = (1 << 8), /* Wireless Display */
40 SIGNAL_TYPE_VIRTUAL = (1 << 9), /* Virtual Display */
41
42 SIGNAL_TYPE_COUNT = 10,
43 SIGNAL_TYPE_ALL = (1 << SIGNAL_TYPE_COUNT) - 1
44};
45
46/* helper functions for signal type manipulation */
47bool dc_is_hdmi_signal(enum signal_type signal);
48bool dc_is_dp_sst_signal(enum signal_type signal);
49bool dc_is_dp_signal(enum signal_type signal);
50bool dc_is_dp_external_signal(enum signal_type signal);
51bool dc_is_analog_signal(enum signal_type signal);
52bool dc_is_embedded_signal(enum signal_type signal);
53bool dc_is_dvi_signal(enum signal_type signal);
54bool dc_is_dvi_single_link_signal(enum signal_type signal);
55bool dc_is_dual_link_signal(enum signal_type signal);
56bool dc_is_audio_capable_signal(enum signal_type signal);
57bool dc_is_digital_encoder_compatible_signal(enum signal_type signal);
58
59#endif
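The predicates above are implemented elsewhere in this patch; since signal_type enumerates single bits, each helper reduces to a membership test. A plausible sketch of one of them, not the actual implementation:

/* Sketch only; the real helpers are defined out of line. */
static inline bool example_is_dp_signal(enum signal_type signal)
{
	return signal == SIGNAL_TYPE_DISPLAY_PORT ||
	       signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
	       signal == SIGNAL_TYPE_EDP;
}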
diff --git a/drivers/gpu/drm/amd/display/include/vector.h b/drivers/gpu/drm/amd/display/include/vector.h
new file mode 100644
index 000000000000..8233b7c22a07
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/vector.h
@@ -0,0 +1,150 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_VECTOR_H__
27#define __DAL_VECTOR_H__
28
29struct vector {
30 uint8_t *container;
31 uint32_t struct_size;
32 uint32_t count;
33 uint32_t capacity;
34 struct dc_context *ctx;
35};
36
37bool dal_vector_construct(
38 struct vector *vector,
39 struct dc_context *ctx,
40 uint32_t capacity,
41 uint32_t struct_size);
42
43struct vector *dal_vector_create(
44 struct dc_context *ctx,
45 uint32_t capacity,
46 uint32_t struct_size);
47
48/* 'initial_value' is optional. If initial_value is not supplied,
49 * each "structure" in the vector will contain zeros by default. */
50struct vector *dal_vector_presized_create(
51 struct dc_context *ctx,
52 uint32_t size,
53 void *initial_value,
54 uint32_t struct_size);
55
56void dal_vector_destruct(
57 struct vector *vector);
58
59void dal_vector_destroy(
60 struct vector **vector);
61
62uint32_t dal_vector_get_count(
63 const struct vector *vector);
64
65/* dal_vector_insert_at
66 * reallocates the container if necessary,
67 * then shifts items to the right and inserts the new one.
68 * Returns whether the container was modified.
69 * Does not check that the index belongs to the container,
70 * since the function is private and the index is calculated
71 * either by this function or as get_count + 1. */
72bool dal_vector_insert_at(
73 struct vector *vector,
74 const void *what,
75 uint32_t position);
76
77bool dal_vector_append(
78 struct vector *vector,
79 const void *item);
80
81/* operator[] */
82void *dal_vector_at_index(
83 const struct vector *vector,
84 uint32_t index);
85
86void dal_vector_set_at_index(
87 const struct vector *vector,
88 const void *what,
89 uint32_t index);
90
91/* create a clone (copy) of a vector */
92struct vector *dal_vector_clone(
93 const struct vector *vector_other);
94
95/* dal_vector_remove_at_index
96 * Shifts the elements to the right of the removal position one slot
97 * to the left, removing the element at that position by overwriting it. */
98bool dal_vector_remove_at_index(
99 struct vector *vector,
100 uint32_t index);
101
102uint32_t dal_vector_capacity(const struct vector *vector);
103
104bool dal_vector_reserve(struct vector *vector, uint32_t capacity);
105
106void dal_vector_clear(struct vector *vector);
107
108/***************************************************************************
109 * Macro definitions of TYPE-SAFE versions of vector set/get functions.
110 ***************************************************************************/
111
112#define DAL_VECTOR_INSERT_AT(vector_type, type_t) \
113 static bool vector_type##_vector_insert_at( \
114 struct vector *vector, \
115 type_t what, \
116 uint32_t position) \
117{ \
118 return dal_vector_insert_at(vector, what, position); \
119}
120
121#define DAL_VECTOR_APPEND(vector_type, type_t) \
122 static bool vector_type##_vector_append( \
123 struct vector *vector, \
124 type_t item) \
125{ \
126 return dal_vector_append(vector, item); \
127}
128
129/* Note: "type_t" is the ONLY token accepted by "checkpatch.pl" and by
130 * "checkcommit" as *return type*.
131 * For uniformity reasons "type_t" is used for all type-safe macro
132 * definitions here. */
133#define DAL_VECTOR_AT_INDEX(vector_type, type_t) \
134 static type_t vector_type##_vector_at_index( \
135 const struct vector *vector, \
136 uint32_t index) \
137{ \
138 return dal_vector_at_index(vector, index); \
139}
140
141#define DAL_VECTOR_SET_AT_INDEX(vector_type, type_t) \
142 static void vector_type##_vector_set_at_index( \
143 const struct vector *vector, \
144 type_t what, \
145 uint32_t index) \
146{ \
147 dal_vector_set_at_index(vector, what, index); \
148}
149
150#endif /* __DAL_VECTOR_H__ */
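Each macro expands to a thin static wrapper that forwards to the generic dal_vector_* call while pinning down a concrete element type. An illustrative instantiation; the element struct and the resulting wrapper names are hypothetical, not part of this interface:

/* Illustration only: type-safe wrappers for a hypothetical element type. */
struct example_item {
	uint32_t id;
};

DAL_VECTOR_APPEND(example_item, struct example_item *)
DAL_VECTOR_AT_INDEX(example_item, struct example_item *)
/* yields example_item_vector_append() and example_item_vector_at_index(),
 * each a type-safe front end to the generic call */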
diff --git a/drivers/gpu/drm/amd/display/modules/color/color.c b/drivers/gpu/drm/amd/display/modules/color/color.c
new file mode 100644
index 000000000000..cf030b18f6a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/color.c
@@ -0,0 +1,2094 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dc.h"
28#include "mod_color.h"
29#include "core_types.h"
30#include "fixed31_32.h"
31#include "core_dc.h"
32
33#define MOD_COLOR_MAX_CONCURRENT_SINKS 32
34#define DIVIDER 10000
35/* clamp ranges used below: S2D13 in [-3.00, 3.00], S0D13 in [-1.00, 1.00] */
36#define S2D13_MIN (-3 * DIVIDER)
37#define S2D13_MAX (3 * DIVIDER)
38#define S0D13_MIN (-1 * DIVIDER)
39#define S0D13_MAX (1 * DIVIDER)
40
41struct sink_caps {
42 const struct dc_sink *sink;
43};
44
45struct gamut_calculation_matrix {
46 struct fixed31_32 MTransposed[9];
47 struct fixed31_32 XYZtoRGB_Custom[9];
48 struct fixed31_32 XYZtoRGB_Ref[9];
49 struct fixed31_32 RGBtoXYZ_Final[9];
50
51 struct fixed31_32 MResult[9];
52 struct fixed31_32 fXYZofWhiteRef[9];
53 struct fixed31_32 fXYZofRGBRef[9];
54};
55
56struct gamut_src_dst_matrix {
57 struct fixed31_32 rgbCoeffDst[9];
58 struct fixed31_32 whiteCoeffDst[3];
59 struct fixed31_32 rgbCoeffSrc[9];
60 struct fixed31_32 whiteCoeffSrc[3];
61};
62
63struct color_state {
64 bool user_enable_color_temperature;
65 int custom_color_temperature;
66 struct color_space_coordinates source_gamut;
67 struct color_space_coordinates destination_gamut;
68 struct color_range contrast;
69 struct color_range saturation;
70 struct color_range brightness;
71 struct color_range hue;
72 enum dc_quantization_range preferred_quantization_range;
73};
74
75struct core_color {
76 struct mod_color public;
77 struct dc *dc;
78 int num_sinks;
79 struct sink_caps *caps;
80 struct color_state *state;
81};
82
83#define MOD_COLOR_TO_CORE(mod_color)\
84 container_of(mod_color, struct core_color, public)
85
86#define COLOR_REGISTRY_NAME "color_v1"
87
88/*Matrix Calculation Functions*/
89/**
90 *****************************************************************************
91 * Function: transposeMatrix
92 *
93 * @brief
94 * transposes the matrix:
95 * rows become columns and columns become rows
96 * @param [ in ] M - source matrix
97 * @param [ in ] Rows - num of Rows of the original matrix
98 * @param [ in ] Cols - num of Cols of the original matrix
99 * @param [ out] MTransposed - result matrix
100 * @return void
101 *
102 *****************************************************************************
103 */
104static void transpose_matrix(const struct fixed31_32 *M, unsigned int Rows,
105 unsigned int Cols, struct fixed31_32 *MTransposed)
106{
107 unsigned int i, j;
108
109 for (i = 0; i < Rows; i++) {
110 for (j = 0; j < Cols; j++)
111 MTransposed[(j*Rows)+i] = M[(i*Cols)+j];
112 }
113}
114
115/**
116 *****************************************************************************
117 * Function: multiplyMatrices
118 *
119 * @brief
120 * computes the product of two matrices: M = M1[Rows1 x Cols1] *
121 * M2[Cols1 x Cols2].
122 *
123 * @param [ in ] M1 - first Matrix.
124 * @param [ in ] M2 - second Matrix.
125 * @param [ in ] Rows1 - num of Rows of the first Matrix
126 * @param [ in ] Cols1 - num of Cols of the first Matrix/Num of Rows
127 * of the second Matrix
128 * @param [ in ] Cols2 - num of Cols of the second Matrix
129 * @param [out ] mResult - resulting matrix.
130 * @return void
131 *
132 *****************************************************************************
133 */
134static void multiply_matrices(struct fixed31_32 *mResult,
135 const struct fixed31_32 *M1,
136 const struct fixed31_32 *M2, unsigned int Rows1,
137 unsigned int Cols1, unsigned int Cols2)
138{
139 unsigned int i, j, k;
140
141 for (i = 0; i < Rows1; i++) {
142 for (j = 0; j < Cols2; j++) {
143 mResult[(i * Cols2) + j] = dal_fixed31_32_zero;
144 for (k = 0; k < Cols1; k++)
145 mResult[(i * Cols2) + j] =
146 dal_fixed31_32_add
147 (mResult[(i * Cols2) + j],
148 dal_fixed31_32_mul(M1[(i * Cols1) + k],
149 M2[(k * Cols2) + j]));
150 }
151 }
152}
153
154/**
155 *****************************************************************************
156 * Function: cFind3X3Det
157 *
158 * @brief
159 * finds determinant of given 3x3 matrix
160 *
161 * @param [ in ] m - matrix
162 * @return determinant; the caller must check it for zero before inverting
163 *
164 *****************************************************************************
165 */
166static struct fixed31_32 find_3X3_det(const struct fixed31_32 *m)
167{
168 struct fixed31_32 det, A1, A2, A3;
169
170 A1 = dal_fixed31_32_mul(m[0],
171 dal_fixed31_32_sub(dal_fixed31_32_mul(m[4], m[8]),
172 dal_fixed31_32_mul(m[5], m[7])));
173 A2 = dal_fixed31_32_mul(m[1],
174 dal_fixed31_32_sub(dal_fixed31_32_mul(m[3], m[8]),
175 dal_fixed31_32_mul(m[5], m[6])));
176 A3 = dal_fixed31_32_mul(m[2],
177 dal_fixed31_32_sub(dal_fixed31_32_mul(m[3], m[7]),
178 dal_fixed31_32_mul(m[4], m[6])));
179 det = dal_fixed31_32_add(dal_fixed31_32_sub(A1, A2), A3);
180 return det;
181}
182
183
184/**
185 *****************************************************************************
186 * Function: computeInverseMatrix_3x3
187 *
188 * @brief
189 * builds inverse matrix
190 *
191 * @param [ in ] m - matrix
192 * @param [ out ] im - result matrix
193 * @return true if success
194 *
195 *****************************************************************************
196 */
197static bool compute_inverse_matrix_3x3(const struct fixed31_32 *m,
198 struct fixed31_32 *im)
199{
200 struct fixed31_32 determinant = find_3X3_det(m);
201
202 if (dal_fixed31_32_eq(determinant, dal_fixed31_32_zero) == false) {
203 im[0] = dal_fixed31_32_div(dal_fixed31_32_sub
204 (dal_fixed31_32_mul(m[4], m[8]),
205 dal_fixed31_32_mul(m[5], m[7])), determinant);
206 im[1] = dal_fixed31_32_neg(dal_fixed31_32_div(dal_fixed31_32_sub
207 (dal_fixed31_32_mul(m[1], m[8]),
208 dal_fixed31_32_mul(m[2], m[7])), determinant));
209 im[2] = dal_fixed31_32_div(dal_fixed31_32_sub
210 (dal_fixed31_32_mul(m[1], m[5]),
211 dal_fixed31_32_mul(m[2], m[4])), determinant);
212 im[3] = dal_fixed31_32_neg(dal_fixed31_32_div(dal_fixed31_32_sub
213 (dal_fixed31_32_mul(m[3], m[8]),
214 dal_fixed31_32_mul(m[5], m[6])), determinant));
215 im[4] = dal_fixed31_32_div(dal_fixed31_32_sub
216 (dal_fixed31_32_mul(m[0], m[8]),
217 dal_fixed31_32_mul(m[2], m[6])), determinant);
218 im[5] = dal_fixed31_32_neg(dal_fixed31_32_div(dal_fixed31_32_sub
219 (dal_fixed31_32_mul(m[0], m[5]),
220 dal_fixed31_32_mul(m[2], m[3])), determinant));
221 im[6] = dal_fixed31_32_div(dal_fixed31_32_sub
222 (dal_fixed31_32_mul(m[3], m[7]),
223 dal_fixed31_32_mul(m[4], m[6])), determinant);
224 im[7] = dal_fixed31_32_neg(dal_fixed31_32_div(dal_fixed31_32_sub
225 (dal_fixed31_32_mul(m[0], m[7]),
226 dal_fixed31_32_mul(m[1], m[6])), determinant));
227 im[8] = dal_fixed31_32_div(dal_fixed31_32_sub
228 (dal_fixed31_32_mul(m[0], m[4]),
229 dal_fixed31_32_mul(m[1], m[3])), determinant);
230 return true;
231 }
232 return false;
233}
234
235/**
236 *****************************************************************************
237 * Function: calculateXYZtoRGB_M3x3
238 *
239 * @brief
240 * Calculates the transformation matrix from XYZ coordinates to RGB
241 *
242 * @param [ in ] XYZofRGB - primaries XYZ
243 * @param [ in ] XYZofWhite - white point.
244 * @param [ out ] XYZtoRGB - resulting transformation matrix
245 * @return true if success
246 *
247 *****************************************************************************
248 */
249static bool calculate_XYZ_to_RGB_3x3(const struct fixed31_32 *XYZofRGB,
250 const struct fixed31_32 *XYZofWhite,
251 struct fixed31_32 *XYZtoRGB)
252{
253
254 struct fixed31_32 MInversed[9];
255 struct fixed31_32 SVector[3];
256
257 /*1. Find Inverse matrix 3x3 of MTransposed*/
258 if (!compute_inverse_matrix_3x3(XYZofRGB, MInversed))
259 return false;
260
261 /*2. Calculate vector: |Sr Sg Sb| = [MInversed] * |Wx Wy Wz|*/
262 multiply_matrices(SVector, MInversed, XYZofWhite, 3, 3, 1);
263
264 /*3. Calculate matrix XYZtoRGB 3x3*/
265 XYZtoRGB[0] = dal_fixed31_32_mul(XYZofRGB[0], SVector[0]);
266 XYZtoRGB[1] = dal_fixed31_32_mul(XYZofRGB[1], SVector[1]);
267 XYZtoRGB[2] = dal_fixed31_32_mul(XYZofRGB[2], SVector[2]);
268
269 XYZtoRGB[3] = dal_fixed31_32_mul(XYZofRGB[3], SVector[0]);
270 XYZtoRGB[4] = dal_fixed31_32_mul(XYZofRGB[4], SVector[1]);
271 XYZtoRGB[5] = dal_fixed31_32_mul(XYZofRGB[5], SVector[2]);
272
273 XYZtoRGB[6] = dal_fixed31_32_mul(XYZofRGB[6], SVector[0]);
274 XYZtoRGB[7] = dal_fixed31_32_mul(XYZofRGB[7], SVector[1]);
275 XYZtoRGB[8] = dal_fixed31_32_mul(XYZofRGB[8], SVector[2]);
276
277 return true;
278}
279
280static bool gamut_to_color_matrix(
281 const struct fixed31_32 *pXYZofRGB,/*destination gamut*/
282 const struct fixed31_32 *pXYZofWhite,/*destination of white point*/
283 const struct fixed31_32 *pRefXYZofRGB,/*source gamut*/
284 const struct fixed31_32 *pRefXYZofWhite,/*source of white point*/
285 bool invert,
286 struct fixed31_32 *tempMatrix3X3)
287{
288 int i = 0;
289 struct gamut_calculation_matrix *matrix =
290 dm_alloc(sizeof(struct gamut_calculation_matrix));
291
292 struct fixed31_32 *pXYZtoRGB_Temp;
293 struct fixed31_32 *pXYZtoRGB_Final;
294
295 matrix->fXYZofWhiteRef[0] = pRefXYZofWhite[0];
296 matrix->fXYZofWhiteRef[1] = pRefXYZofWhite[1];
297 matrix->fXYZofWhiteRef[2] = pRefXYZofWhite[2];
298
299
300 matrix->fXYZofRGBRef[0] = pRefXYZofRGB[0];
301 matrix->fXYZofRGBRef[1] = pRefXYZofRGB[1];
302 matrix->fXYZofRGBRef[2] = pRefXYZofRGB[2];
303
304 matrix->fXYZofRGBRef[3] = pRefXYZofRGB[3];
305 matrix->fXYZofRGBRef[4] = pRefXYZofRGB[4];
306 matrix->fXYZofRGBRef[5] = pRefXYZofRGB[5];
307
308 matrix->fXYZofRGBRef[6] = pRefXYZofRGB[6];
309 matrix->fXYZofRGBRef[7] = pRefXYZofRGB[7];
310 matrix->fXYZofRGBRef[8] = pRefXYZofRGB[8];
311
312 /*default values - unity matrix*/
313 while (i < 9) {
314 if (i == 0 || i == 4 || i == 8)
315 tempMatrix3X3[i] = dal_fixed31_32_one;
316 else
317 tempMatrix3X3[i] = dal_fixed31_32_zero;
318 i++;
319 }
320
321 /*1. Decide about the order of calculation.
322 * bInvert == FALSE --> RGBtoXYZ_Ref * XYZtoRGB_Custom
323 * bInvert == TRUE --> RGBtoXYZ_Custom * XYZtoRGB_Ref */
324 if (invert) {
325 pXYZtoRGB_Temp = matrix->XYZtoRGB_Custom;
326 pXYZtoRGB_Final = matrix->XYZtoRGB_Ref;
327 } else {
328 pXYZtoRGB_Temp = matrix->XYZtoRGB_Ref;
329 pXYZtoRGB_Final = matrix->XYZtoRGB_Custom;
330 }
331
332 /*2. Calculate XYZtoRGB_Ref*/
333 transpose_matrix(matrix->fXYZofRGBRef, 3, 3, matrix->MTransposed);
334
335 if (!calculate_XYZ_to_RGB_3x3(
336 matrix->MTransposed,
337 matrix->fXYZofWhiteRef,
338 matrix->XYZtoRGB_Ref))
339 goto function_fail;
340
341 /*3. Calculate XYZtoRGB_Custom*/
342 transpose_matrix(pXYZofRGB, 3, 3, matrix->MTransposed);
343
344 if (!calculate_XYZ_to_RGB_3x3(
345 matrix->MTransposed,
346 pXYZofWhite,
347 matrix->XYZtoRGB_Custom))
348 goto function_fail;
349
350 /*4. Calculate RGBtoXYZ -
351 * inverse matrix 3x3 of XYZtoRGB_Ref or XYZtoRGB_Custom*/
352 if (!compute_inverse_matrix_3x3(pXYZtoRGB_Temp, matrix->RGBtoXYZ_Final))
353 goto function_fail;
354
355 /*5. Calculate M(3x3) = RGBtoXYZ * XYZtoRGB*/
356 multiply_matrices(matrix->MResult, matrix->RGBtoXYZ_Final,
357 pXYZtoRGB_Final, 3, 3, 3);
358
359 for (i = 0; i < 9; i++)
360 tempMatrix3X3[i] = matrix->MResult[i];
361
362 dm_free(matrix);
363
364 return true;
365
366function_fail:
367 dm_free(matrix);
368 return false;
369}
370
371static bool build_gamut_remap_matrix
372 (struct color_space_coordinates gamut_description,
373 struct fixed31_32 *rgb_matrix,
374 struct fixed31_32 *white_point_matrix)
375{
376 struct fixed31_32 fixed_blueX = dal_fixed31_32_from_fraction
377 (gamut_description.blueX, DIVIDER);
378 struct fixed31_32 fixed_blueY = dal_fixed31_32_from_fraction
379 (gamut_description.blueY, DIVIDER);
380 struct fixed31_32 fixed_greenX = dal_fixed31_32_from_fraction
381 (gamut_description.greenX, DIVIDER);
382 struct fixed31_32 fixed_greenY = dal_fixed31_32_from_fraction
383 (gamut_description.greenY, DIVIDER);
384 struct fixed31_32 fixed_redX = dal_fixed31_32_from_fraction
385 (gamut_description.redX, DIVIDER);
386 struct fixed31_32 fixed_redY = dal_fixed31_32_from_fraction
387 (gamut_description.redY, DIVIDER);
388 struct fixed31_32 fixed_whiteX = dal_fixed31_32_from_fraction
389 (gamut_description.whiteX, DIVIDER);
390 struct fixed31_32 fixed_whiteY = dal_fixed31_32_from_fraction
391 (gamut_description.whiteY, DIVIDER);
392
393 rgb_matrix[0] = dal_fixed31_32_div(fixed_redX, fixed_redY);
394 rgb_matrix[1] = dal_fixed31_32_one;
395 rgb_matrix[2] = dal_fixed31_32_div(dal_fixed31_32_sub
396 (dal_fixed31_32_sub(dal_fixed31_32_one, fixed_redX),
397 fixed_redY), fixed_redY);
398
399 rgb_matrix[3] = dal_fixed31_32_div(fixed_greenX, fixed_greenY);
400 rgb_matrix[4] = dal_fixed31_32_one;
401 rgb_matrix[5] = dal_fixed31_32_div(dal_fixed31_32_sub
402 (dal_fixed31_32_sub(dal_fixed31_32_one, fixed_greenX),
403 fixed_greenY), fixed_greenY);
404
405 rgb_matrix[6] = dal_fixed31_32_div(fixed_blueX, fixed_blueY);
406 rgb_matrix[7] = dal_fixed31_32_one;
407 rgb_matrix[8] = dal_fixed31_32_div(dal_fixed31_32_sub
408 (dal_fixed31_32_sub(dal_fixed31_32_one, fixed_blueX),
409 fixed_blueY), fixed_blueY);
410
411 white_point_matrix[0] = dal_fixed31_32_div(fixed_whiteX, fixed_whiteY);
412 white_point_matrix[1] = dal_fixed31_32_one;
413 white_point_matrix[2] = dal_fixed31_32_div(dal_fixed31_32_sub
414 (dal_fixed31_32_sub(dal_fixed31_32_one, fixed_whiteX),
415 fixed_whiteY), fixed_whiteY);
416
417 return true;
418}
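/* Worked example for the conversion above, assuming the D65 white point
 * (x = 0.3127, y = 0.3290, i.e. whiteX = 3127 and whiteY = 3290 against
 * DIVIDER = 10000): white_point_matrix becomes
 *	{ x/y, 1, (1 - x - y)/y } ~= { 0.9505, 1.0, 1.0891 },
 * the white point's XYZ normalized so that Y = 1. The three rows of
 * rgb_matrix are built the same way from the red, green and blue
 * chromaticities. */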
419
420static bool check_dc_support(const struct dc *dc)
421{
422 if (dc->stream_funcs.set_gamut_remap == NULL)
423 return false;
424
425 return true;
426}
427
428static uint16_t fixed_point_to_int_frac(
429 struct fixed31_32 arg,
430 uint8_t integer_bits,
431 uint8_t fractional_bits)
432{
433 int32_t numerator;
434 int32_t divisor = 1 << fractional_bits;
435
436 uint16_t result;
437
438 uint16_t d = (uint16_t)dal_fixed31_32_floor(
439 dal_fixed31_32_abs(
440 arg));
441
442 if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
443 numerator = (uint16_t)dal_fixed31_32_floor(
444 dal_fixed31_32_mul_int(
445 arg,
446 divisor));
447 else {
448 numerator = dal_fixed31_32_floor(
449 dal_fixed31_32_sub(
450 dal_fixed31_32_from_int(
451 1LL << integer_bits),
452 dal_fixed31_32_recip(
453 dal_fixed31_32_from_int(
454 divisor))));
455 }
456
457 if (numerator >= 0)
458 result = (uint16_t)numerator;
459 else
460 result = (uint16_t)(
461 (1 << (integer_bits + fractional_bits + 1)) + numerator);
462
463 if ((result != 0) && dal_fixed31_32_lt(
464 arg, dal_fixed31_32_zero))
465 result |= 1 << (integer_bits + fractional_bits);
466
467 return result;
468}
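/* Worked example for the encoder above with integer_bits = 2 and
 * fractional_bits = 13 (the S2D13 format used below):
 *	+1.0 -> floor(1.0 * 2^13) = 0x2000
 *	+0.5 -> 0x1000
 *	-1.0 -> sign/two's-complement form 0xe000
 * i.e. 13 fractional bits give a resolution of 1/8192. */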
469
470/**
471* convert_float_matrix_legacy
472* This converts a fixed31_32 matrix into the HW register spec defined
473* format S2D13.
474* @return None
475*/
476
477static void convert_float_matrix_legacy(
478 uint16_t *matrix,
479 struct fixed31_32 *flt,
480 uint32_t buffer_size)
481{
482 const struct fixed31_32 min_2_13 =
483 dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
484 const struct fixed31_32 max_2_13 =
485 dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
486 uint32_t i;
487
488 for (i = 0; i < buffer_size; ++i) {
489 uint32_t reg_value =
490 fixed_point_to_int_frac(
491 dal_fixed31_32_clamp(
492 flt[i],
493 min_2_13,
494 max_2_13),
495 2,
496 13);
497
498 matrix[i] = (uint16_t)reg_value;
499 }
500}
501
502static void convert_float_matrix(
503 uint16_t *matrix,
504 struct fixed31_32 *flt,
505 uint32_t buffer_size)
506{
507 const struct fixed31_32 min_0_13 =
508 dal_fixed31_32_from_fraction(S0D13_MIN, DIVIDER);
509 const struct fixed31_32 max_0_13 =
510 dal_fixed31_32_from_fraction(S0D13_MAX, DIVIDER);
511 const struct fixed31_32 min_2_13 =
512 dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
513 const struct fixed31_32 max_2_13 =
514 dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
515 uint32_t i;
516 uint16_t temp_matrix[12];
517
518 for (i = 0; i < buffer_size; ++i) {
519 if (i == 3 || i == 7 || i == 11) {
520 uint32_t reg_value =
521 fixed_point_to_int_frac(
522 dal_fixed31_32_clamp(
523 flt[i],
524 min_0_13,
525 max_0_13),
526 2,
527 13);
528
529 temp_matrix[i] = (uint16_t)reg_value;
530 } else {
531 uint32_t reg_value =
532 fixed_point_to_int_frac(
533 dal_fixed31_32_clamp(
534 flt[i],
535 min_2_13,
536 max_2_13),
537 2,
538 13);
539
540 temp_matrix[i] = (uint16_t)reg_value;
541 }
542 }
543
544 matrix[4] = temp_matrix[0];
545 matrix[5] = temp_matrix[1];
546 matrix[6] = temp_matrix[2];
547 matrix[7] = temp_matrix[3];
548
549 matrix[8] = temp_matrix[4];
550 matrix[9] = temp_matrix[5];
551 matrix[10] = temp_matrix[6];
552 matrix[11] = temp_matrix[7];
553
554 matrix[0] = temp_matrix[8];
555 matrix[1] = temp_matrix[9];
556 matrix[2] = temp_matrix[10];
557 matrix[3] = temp_matrix[11];
558}
559
560static int get_hw_value_from_sw_value(int swVal, int swMin,
561 int swMax, int hwMin, int hwMax)
562{
563 int dSW = swMax - swMin; /*software adjustment range size*/
564 int dHW = hwMax - hwMin; /*hardware adjustment range size*/
565 int hwVal; /*HW adjustment value*/
566
567	/* Error case: preserve the behavior of the predecessor,
568	 * getHwStepFromSwHwMinMaxValue (removed in Feb 2013),
569	 * which was the FP version that only computed SCLF (i.e. dHW/dSW).
570	 * It would return 0 in this case, so the mapping below yields
571	 * hwVal = hwMin.
572	 */
573 if (dSW == 0)
574 return hwMin;
575
576	/* It is quite common for the ranges to match;
577	 * e.g. for overlay colors currently (Feb 2013)
578	 * only brightness has a different
579	 * HW range, and in that case no multiplication or division is needed;
580	 * if the minimums also match, no calculation at all.
581	 */
582 if (dSW != dHW) {
583 hwVal = (swVal - swMin)*dHW/dSW + hwMin;
584 } else {
585 hwVal = swVal;
586 if (swMin != hwMin)
587 hwVal += (hwMin - swMin);
588 }
589
590 return hwVal;
591}
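/* Worked example for the mapping above: a software range of [0, 100]
 * mapped onto the hardware contrast range [50, 150] used below gives
 *	hwVal = (25 - 0) * 100 / 100 + 50 = 75
 * for swVal = 25; when the two range sizes match, only the offset
 * (hwMin - swMin) is applied. */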
592
593static void initialize_fix_point_color_values(
594 struct core_color *core_color,
595 unsigned int sink_index,
596 struct fixed31_32 *grph_cont,
597 struct fixed31_32 *grph_sat,
598 struct fixed31_32 *grph_bright,
599 struct fixed31_32 *sin_grph_hue,
600 struct fixed31_32 *cos_grph_hue)
601{
602 /* Hue adjustment could be negative. -45 ~ +45 */
603 struct fixed31_32 hue =
604 dal_fixed31_32_mul(
605 dal_fixed31_32_from_fraction
606 (get_hw_value_from_sw_value
607 (core_color->state[sink_index].hue.current,
608 core_color->state[sink_index].hue.min,
609 core_color->state[sink_index].hue.max,
610 -30, 30), 180),
611 dal_fixed31_32_pi);
612
613 *sin_grph_hue = dal_fixed31_32_sin(hue);
614 *cos_grph_hue = dal_fixed31_32_cos(hue);
615
616 *grph_cont =
617 dal_fixed31_32_from_fraction(get_hw_value_from_sw_value
618 (core_color->state[sink_index].contrast.current,
619 core_color->state[sink_index].contrast.min,
620 core_color->state[sink_index].contrast.max,
621 50, 150), 100);
622 *grph_sat =
623 dal_fixed31_32_from_fraction(get_hw_value_from_sw_value
624 (core_color->state[sink_index].saturation.current,
625 core_color->state[sink_index].saturation.min,
626 core_color->state[sink_index].saturation.max,
627 0, 200), 100);
628 *grph_bright =
629 dal_fixed31_32_from_fraction(get_hw_value_from_sw_value
630 (core_color->state[sink_index].brightness.current,
631 core_color->state[sink_index].brightness.min,
632 core_color->state[sink_index].brightness.max,
633 -25, 25), 100);
634}
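/* Worked example for the hue term above: the slider value is mapped to
 * the HW range [-30, 30] degrees and then to radians, so at the +30 end
 *	hue = (30 / 180) * pi ~= 0.5236 rad,
 * giving sin(hue) ~= 0.5 and cos(hue) ~= 0.866 for the matrix
 * calculations below. */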
635
636
637/* Given a specific dc_sink* this function finds its equivalent
638 * in the dc_sink array and returns the corresponding index
639 */
640static unsigned int sink_index_from_sink(struct core_color *core_color,
641 const struct dc_sink *sink)
642{
643 unsigned int index = 0;
644
645 for (index = 0; index < core_color->num_sinks; index++)
646 if (core_color->caps[index].sink == sink)
647 return index;
648
649 /* Could not find sink requested */
650 ASSERT(false);
651 return index;
652}
653
654static void calculate_rgb_matrix_legacy(struct core_color *core_color,
655 unsigned int sink_index,
656 struct fixed31_32 *rgb_matrix)
657{
658 const struct fixed31_32 k1 =
659 dal_fixed31_32_from_fraction(701000, 1000000);
660 const struct fixed31_32 k2 =
661 dal_fixed31_32_from_fraction(236568, 1000000);
662 const struct fixed31_32 k3 =
663 dal_fixed31_32_from_fraction(-587000, 1000000);
664 const struct fixed31_32 k4 =
665 dal_fixed31_32_from_fraction(464432, 1000000);
666 const struct fixed31_32 k5 =
667 dal_fixed31_32_from_fraction(-114000, 1000000);
668 const struct fixed31_32 k6 =
669 dal_fixed31_32_from_fraction(-701000, 1000000);
670 const struct fixed31_32 k7 =
671 dal_fixed31_32_from_fraction(-299000, 1000000);
672 const struct fixed31_32 k8 =
673 dal_fixed31_32_from_fraction(-292569, 1000000);
674 const struct fixed31_32 k9 =
675 dal_fixed31_32_from_fraction(413000, 1000000);
676 const struct fixed31_32 k10 =
677 dal_fixed31_32_from_fraction(-92482, 1000000);
678 const struct fixed31_32 k11 =
679 dal_fixed31_32_from_fraction(-114000, 1000000);
680 const struct fixed31_32 k12 =
681 dal_fixed31_32_from_fraction(385051, 1000000);
682 const struct fixed31_32 k13 =
683 dal_fixed31_32_from_fraction(-299000, 1000000);
684 const struct fixed31_32 k14 =
685 dal_fixed31_32_from_fraction(886000, 1000000);
686 const struct fixed31_32 k15 =
687 dal_fixed31_32_from_fraction(-587000, 1000000);
688 const struct fixed31_32 k16 =
689 dal_fixed31_32_from_fraction(-741914, 1000000);
690 const struct fixed31_32 k17 =
691 dal_fixed31_32_from_fraction(886000, 1000000);
692 const struct fixed31_32 k18 =
693 dal_fixed31_32_from_fraction(-144086, 1000000);
694
695 const struct fixed31_32 luma_r =
696 dal_fixed31_32_from_fraction(299, 1000);
697 const struct fixed31_32 luma_g =
698 dal_fixed31_32_from_fraction(587, 1000);
699 const struct fixed31_32 luma_b =
700 dal_fixed31_32_from_fraction(114, 1000);
701
702 struct fixed31_32 grph_cont;
703 struct fixed31_32 grph_sat;
704 struct fixed31_32 grph_bright;
705 struct fixed31_32 sin_grph_hue;
706 struct fixed31_32 cos_grph_hue;
707
708 initialize_fix_point_color_values(
709 core_color, sink_index, &grph_cont, &grph_sat,
710 &grph_bright, &sin_grph_hue, &cos_grph_hue);
711
712 /* COEF_1_1 = GrphCont * (LumaR + GrphSat * (Cos(GrphHue) * K1 +*/
713 /* Sin(GrphHue) * K2))*/
714 /* (Cos(GrphHue) * K1 + Sin(GrphHue) * K2)*/
715 rgb_matrix[0] =
716 dal_fixed31_32_add(
717 dal_fixed31_32_mul(cos_grph_hue, k1),
718 dal_fixed31_32_mul(sin_grph_hue, k2));
719 /* GrphSat * (Cos(GrphHue) * K1 + Sin(GrphHue) * K2 */
720 rgb_matrix[0] = dal_fixed31_32_mul(grph_sat, rgb_matrix[0]);
721 /* (LumaR + GrphSat * (Cos(GrphHue) * K1 + Sin(GrphHue) * K2))*/
722 rgb_matrix[0] = dal_fixed31_32_add(luma_r, rgb_matrix[0]);
723 /* GrphCont * (LumaR + GrphSat * (Cos(GrphHue) * K1 + Sin(GrphHue)**/
724 /* K2))*/
725 rgb_matrix[0] = dal_fixed31_32_mul(grph_cont, rgb_matrix[0]);
726
727 /* COEF_1_2 = GrphCont * (LumaG + GrphSat * (Cos(GrphHue) * K3 +*/
728 /* Sin(GrphHue) * K4))*/
729 /* (Cos(GrphHue) * K3 + Sin(GrphHue) * K4)*/
730 rgb_matrix[1] =
731 dal_fixed31_32_add(
732 dal_fixed31_32_mul(cos_grph_hue, k3),
733 dal_fixed31_32_mul(sin_grph_hue, k4));
734 /* GrphSat * (Cos(GrphHue) * K3 + Sin(GrphHue) * K4)*/
735 rgb_matrix[1] = dal_fixed31_32_mul(grph_sat, rgb_matrix[1]);
736 /* (LumaG + GrphSat * (Cos(GrphHue) * K3 + Sin(GrphHue) * K4))*/
737 rgb_matrix[1] = dal_fixed31_32_add(luma_g, rgb_matrix[1]);
738 /* GrphCont * (LumaG + GrphSat * (Cos(GrphHue) * K3 + Sin(GrphHue)**/
739 /* K4))*/
740 rgb_matrix[1] = dal_fixed31_32_mul(grph_cont, rgb_matrix[1]);
741
742 /* COEF_1_3 = GrphCont * (LumaB + GrphSat * (Cos(GrphHue) * K5 +*/
743 /* Sin(GrphHue) * K6))*/
744 /* (Cos(GrphHue) * K5 + Sin(GrphHue) * K6)*/
745 rgb_matrix[2] =
746 dal_fixed31_32_add(
747 dal_fixed31_32_mul(cos_grph_hue, k5),
748 dal_fixed31_32_mul(sin_grph_hue, k6));
749 /* GrphSat * (Cos(GrphHue) * K5 + Sin(GrphHue) * K6)*/
750 rgb_matrix[2] = dal_fixed31_32_mul(grph_sat, rgb_matrix[2]);
751 /* LumaB + GrphSat * (Cos(GrphHue) * K5 + Sin(GrphHue) * K6)*/
752 rgb_matrix[2] = dal_fixed31_32_add(luma_b, rgb_matrix[2]);
753 /* GrphCont * (LumaB + GrphSat * (Cos(GrphHue) * K5 + Sin(GrphHue)**/
754 /* K6))*/
755 rgb_matrix[2] = dal_fixed31_32_mul(grph_cont, rgb_matrix[2]);
756
757 /* COEF_1_4 = GrphBright*/
758 rgb_matrix[3] = grph_bright;
759
760 /* COEF_2_1 = GrphCont * (LumaR + GrphSat * (Cos(GrphHue) * K7 +*/
761 /* Sin(GrphHue) * K8))*/
762 /* (Cos(GrphHue) * K7 + Sin(GrphHue) * K8)*/
763 rgb_matrix[4] =
764 dal_fixed31_32_add(
765 dal_fixed31_32_mul(cos_grph_hue, k7),
766 dal_fixed31_32_mul(sin_grph_hue, k8));
767 /* GrphSat * (Cos(GrphHue) * K7 + Sin(GrphHue) * K8)*/
768 rgb_matrix[4] = dal_fixed31_32_mul(grph_sat, rgb_matrix[4]);
769 /* (LumaR + GrphSat * (Cos(GrphHue) * K7 + Sin(GrphHue) * K8))*/
770 rgb_matrix[4] = dal_fixed31_32_add(luma_r, rgb_matrix[4]);
771 /* GrphCont * (LumaR + GrphSat * (Cos(GrphHue) * K7 + Sin(GrphHue)**/
772 /* K8))*/
773 rgb_matrix[4] = dal_fixed31_32_mul(grph_cont, rgb_matrix[4]);
774
775 /* COEF_2_2 = GrphCont * (LumaG + GrphSat * (Cos(GrphHue) * K9 +*/
776 /* Sin(GrphHue) * K10))*/
777 /* (Cos(GrphHue) * K9 + Sin(GrphHue) * K10))*/
778 rgb_matrix[5] =
779 dal_fixed31_32_add(
780 dal_fixed31_32_mul(cos_grph_hue, k9),
781 dal_fixed31_32_mul(sin_grph_hue, k10));
782 /* GrphSat * (Cos(GrphHue) * K9 + Sin(GrphHue) * K10))*/
783 rgb_matrix[5] = dal_fixed31_32_mul(grph_sat, rgb_matrix[5]);
784 /* (LumaG + GrphSat * (Cos(GrphHue) * K9 + Sin(GrphHue) * K10))*/
785 rgb_matrix[5] = dal_fixed31_32_add(luma_g, rgb_matrix[5]);
786 /* GrphCont * (LumaG + GrphSat * (Cos(GrphHue) * K9 + Sin(GrphHue)**/
787 /* K10))*/
788 rgb_matrix[5] = dal_fixed31_32_mul(grph_cont, rgb_matrix[5]);
789
790 /* COEF_2_3 = GrphCont * (LumaB + GrphSat * (Cos(GrphHue) * K11 +*/
791 /* Sin(GrphHue) * K12))*/
792 /* (Cos(GrphHue) * K11 + Sin(GrphHue) * K12))*/
793 rgb_matrix[6] =
794 dal_fixed31_32_add(
795 dal_fixed31_32_mul(cos_grph_hue, k11),
796 dal_fixed31_32_mul(sin_grph_hue, k12));
797 /* GrphSat * (Cos(GrphHue) * K11 + Sin(GrphHue) * K12))*/
798 rgb_matrix[6] = dal_fixed31_32_mul(grph_sat, rgb_matrix[6]);
799 /* (LumaB + GrphSat * (Cos(GrphHue) * K11 + Sin(GrphHue) * K12))*/
800 rgb_matrix[6] = dal_fixed31_32_add(luma_b, rgb_matrix[6]);
801 /* GrphCont * (LumaB + GrphSat * (Cos(GrphHue) * K11 + Sin(GrphHue)**/
802 /* K12))*/
803 rgb_matrix[6] = dal_fixed31_32_mul(grph_cont, rgb_matrix[6]);
804
805 /* COEF_2_4 = GrphBright*/
806 rgb_matrix[7] = grph_bright;
807
808 /* COEF_3_1 = GrphCont * (LumaR + GrphSat * (Cos(GrphHue) * K13 +*/
809 /* Sin(GrphHue) * K14))*/
810 /* (Cos(GrphHue) * K13 + Sin(GrphHue) * K14)) */
811 rgb_matrix[8] =
812 dal_fixed31_32_add(
813 dal_fixed31_32_mul(cos_grph_hue, k13),
814 dal_fixed31_32_mul(sin_grph_hue, k14));
815 /* GrphSat * (Cos(GrphHue) * K13 + Sin(GrphHue) * K14)) */
816 rgb_matrix[8] = dal_fixed31_32_mul(grph_sat, rgb_matrix[8]);
817 /* (LumaR + GrphSat * (Cos(GrphHue) * K13 + Sin(GrphHue) * K14)) */
818 rgb_matrix[8] = dal_fixed31_32_add(luma_r, rgb_matrix[8]);
819 /* GrphCont * (LumaR + GrphSat * (Cos(GrphHue) * K13 + Sin(GrphHue)**/
820 /* K14)) */
821 rgb_matrix[8] = dal_fixed31_32_mul(grph_cont, rgb_matrix[8]);
822
823 /* COEF_3_2 = GrphCont * (LumaG + GrphSat * (Cos(GrphHue) * K15 +*/
824 /* Sin(GrphHue) * K16)) */
825 /* (Cos(GrphHue) * K15 + Sin(GrphHue) * K16) */
826 rgb_matrix[9] =
827 dal_fixed31_32_add(
828 dal_fixed31_32_mul(cos_grph_hue, k15),
829 dal_fixed31_32_mul(sin_grph_hue, k16));
830 /* GrphSat * (Cos(GrphHue) * K15 + Sin(GrphHue) * K16) */
831 rgb_matrix[9] = dal_fixed31_32_mul(grph_sat, rgb_matrix[9]);
832 /* (LumaG + GrphSat * (Cos(GrphHue) * K15 + Sin(GrphHue) * K16)) */
833 rgb_matrix[9] = dal_fixed31_32_add(luma_g, rgb_matrix[9]);
834 /* GrphCont * (LumaG + GrphSat * (Cos(GrphHue) * K15 + Sin(GrphHue)**/
835 /* K16)) */
836 rgb_matrix[9] = dal_fixed31_32_mul(grph_cont, rgb_matrix[9]);
837
838 /* COEF_3_3 = GrphCont * (LumaB + GrphSat * (Cos(GrphHue) * K17 +*/
839 /* Sin(GrphHue) * K18)) */
840 /* (Cos(GrphHue) * K17 + Sin(GrphHue) * K18)) */
841 rgb_matrix[10] =
842 dal_fixed31_32_add(
843 dal_fixed31_32_mul(cos_grph_hue, k17),
844 dal_fixed31_32_mul(sin_grph_hue, k18));
845 /* GrphSat * (Cos(GrphHue) * K17 + Sin(GrphHue) * K18)) */
846 rgb_matrix[10] = dal_fixed31_32_mul(grph_sat, rgb_matrix[10]);
847 /* (LumaB + GrphSat * (Cos(GrphHue) * K17 + Sin(GrphHue) * K18)) */
848 rgb_matrix[10] = dal_fixed31_32_add(luma_b, rgb_matrix[10]);
849 /* GrphCont * (LumaB + GrphSat * (Cos(GrphHue) * K17 + Sin(GrphHue)**/
850 /* K18)) */
851 rgb_matrix[10] = dal_fixed31_32_mul(grph_cont, rgb_matrix[10]);
852
853 /* COEF_3_4 = GrphBright */
854 rgb_matrix[11] = grph_bright;
855}
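
Every one of the nine hue/saturation coefficients above is built the same way: a (Ka, Kb) rotation term is scaled by saturation, offset by the luma weight, and finally scaled by contrast. As a sketch only (the helper name csc_coef is not part of this patch; it simply restates the repeated pattern using the dal_fixed31_32 calls already used above):

static struct fixed31_32 csc_coef(struct fixed31_32 grph_cont,
	struct fixed31_32 grph_sat, struct fixed31_32 luma,
	struct fixed31_32 cos_grph_hue, struct fixed31_32 sin_grph_hue,
	struct fixed31_32 ka, struct fixed31_32 kb)
{
	/* Cos(GrphHue) * Ka + Sin(GrphHue) * Kb */
	struct fixed31_32 coef = dal_fixed31_32_add(
		dal_fixed31_32_mul(cos_grph_hue, ka),
		dal_fixed31_32_mul(sin_grph_hue, kb));

	/* Luma + GrphSat * (Cos(GrphHue) * Ka + Sin(GrphHue) * Kb) */
	coef = dal_fixed31_32_add(luma, dal_fixed31_32_mul(grph_sat, coef));

	/* GrphCont * (Luma + GrphSat * (...)) */
	return dal_fixed31_32_mul(grph_cont, coef);
}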
856
857static void calculate_rgb_limited_range_matrix(struct core_color *core_color,
858 unsigned int sink_index, struct fixed31_32 *rgb_matrix)
859{
860 struct fixed31_32 ideal[12];
861
862 static const int32_t matrix_[] = {
863 85546875, 0, 0, 6250000,
864 0, 85546875, 0, 6250000,
865 0, 0, 85546875, 6250000
866 };
867
868 uint32_t i = 0;
869
870 do {
871 ideal[i] = dal_fixed31_32_from_fraction(
872 matrix_[i],
873 100000000);
874 ++i;
875 } while (i != ARRAY_SIZE(matrix_));
876
877
878 struct fixed31_32 grph_cont;
879 struct fixed31_32 grph_sat;
880 struct fixed31_32 grph_bright;
881 struct fixed31_32 sin_grph_hue;
882 struct fixed31_32 cos_grph_hue;
883
884 initialize_fix_point_color_values(
885 core_color, sink_index, &grph_cont, &grph_sat,
886 &grph_bright, &sin_grph_hue, &cos_grph_hue);
887
888 const struct fixed31_32 multiplier =
889 dal_fixed31_32_mul(grph_cont, grph_sat);
890
891 rgb_matrix[8] = dal_fixed31_32_mul(ideal[0], grph_cont);
892
893 rgb_matrix[9] = dal_fixed31_32_mul(ideal[1], grph_cont);
894
895 rgb_matrix[10] = dal_fixed31_32_mul(ideal[2], grph_cont);
896
897 rgb_matrix[11] = dal_fixed31_32_add(
898 ideal[3],
899 dal_fixed31_32_mul(
900 grph_bright,
901 dal_fixed31_32_from_fraction(86, 100)));
902
903 rgb_matrix[0] = dal_fixed31_32_mul(
904 multiplier,
905 dal_fixed31_32_add(
906 dal_fixed31_32_mul(
907 ideal[8],
908 sin_grph_hue),
909 dal_fixed31_32_mul(
910 ideal[4],
911 cos_grph_hue)));
912
913 rgb_matrix[1] = dal_fixed31_32_mul(
914 multiplier,
915 dal_fixed31_32_add(
916 dal_fixed31_32_mul(
917 ideal[9],
918 sin_grph_hue),
919 dal_fixed31_32_mul(
920 ideal[5],
921 cos_grph_hue)));
922
923 rgb_matrix[2] = dal_fixed31_32_mul(
924 multiplier,
925 dal_fixed31_32_add(
926 dal_fixed31_32_mul(
927 ideal[10],
928 sin_grph_hue),
929 dal_fixed31_32_mul(
930 ideal[6],
931 cos_grph_hue)));
932
933 rgb_matrix[3] = ideal[7];
934
935 rgb_matrix[4] = dal_fixed31_32_mul(
936 multiplier,
937 dal_fixed31_32_sub(
938 dal_fixed31_32_mul(
939 ideal[8],
940 cos_grph_hue),
941 dal_fixed31_32_mul(
942 ideal[4],
943 sin_grph_hue)));
944
945 rgb_matrix[5] = dal_fixed31_32_mul(
946 multiplier,
947 dal_fixed31_32_sub(
948 dal_fixed31_32_mul(
949 ideal[9],
950 cos_grph_hue),
951 dal_fixed31_32_mul(
952 ideal[5],
953 sin_grph_hue)));
954
955 rgb_matrix[6] = dal_fixed31_32_mul(
956 multiplier,
957 dal_fixed31_32_sub(
958 dal_fixed31_32_mul(
959 ideal[10],
960 cos_grph_hue),
961 dal_fixed31_32_mul(
962 ideal[6],
963 sin_grph_hue)));
964
965 rgb_matrix[7] = ideal[11];
966}
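
For reference, the "ideal" constants above decode to the usual RGB limited-range scale and offset once the division by 10^8 is applied:

    85546875 / 100000000 = 0.85546875 = 219 / 256   (16-235 code range scale)
     6250000 / 100000000 = 0.0625     =  16 / 256   (black-level offset)

The brightness term is additionally scaled by 86/100 = 0.86, which looks like the same limited-range factor applied to the user offset; that reading is an interpretation of the constants, not something the patch states.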
967
968static void calculate_yuv_matrix(struct core_color *core_color,
969 unsigned int sink_index,
970 enum dc_color_space color_space,
971 struct fixed31_32 *yuv_matrix)
972{
973 struct fixed31_32 ideal[12];
974 uint32_t i = 0;
975
976 if ((color_space == COLOR_SPACE_YPBPR601) ||
977 (color_space == COLOR_SPACE_YCBCR601) ||
978 (color_space == COLOR_SPACE_YCBCR601_LIMITED)) {
979 static const int32_t matrix_[] = {
980 25578516, 50216016, 9752344, 6250000,
981 -14764391, -28985609, 43750000, 50000000,
982 43750000, -36635164, -7114836, 50000000
983 };
984 do {
985 ideal[i] = dal_fixed31_32_from_fraction(
986 matrix_[i],
987 100000000);
988 ++i;
989 } while (i != ARRAY_SIZE(matrix_));
990 } else {
991 static const int32_t matrix_[] = {
992 18187266, 61183125, 6176484, 6250000,
993 -10025059, -33724941, 43750000, 50000000,
994 43750000, -39738379, -4011621, 50000000
995 };
996 do {
997 ideal[i] = dal_fixed31_32_from_fraction(
998 matrix_[i],
999 100000000);
1000 ++i;
1001 } while (i != ARRAY_SIZE(matrix_));
1002 }
1003
1004 struct fixed31_32 grph_cont;
1005 struct fixed31_32 grph_sat;
1006 struct fixed31_32 grph_bright;
1007 struct fixed31_32 sin_grph_hue;
1008 struct fixed31_32 cos_grph_hue;
1009
1010 initialize_fix_point_color_values(
1011 core_color, sink_index, &grph_cont, &grph_sat,
1012 &grph_bright, &sin_grph_hue, &cos_grph_hue);
1013
1014 const struct fixed31_32 multiplier =
1015 dal_fixed31_32_mul(grph_cont, grph_sat);
1016
1017 yuv_matrix[0] = dal_fixed31_32_mul(ideal[0], grph_cont);
1018
1019 yuv_matrix[1] = dal_fixed31_32_mul(ideal[1], grph_cont);
1020
1021 yuv_matrix[2] = dal_fixed31_32_mul(ideal[2], grph_cont);
1022
1023 yuv_matrix[4] = dal_fixed31_32_mul(
1024 multiplier,
1025 dal_fixed31_32_add(
1026 dal_fixed31_32_mul(
1027 ideal[4],
1028 cos_grph_hue),
1029 dal_fixed31_32_mul(
1030 ideal[8],
1031 sin_grph_hue)));
1032
1033 yuv_matrix[5] = dal_fixed31_32_mul(
1034 multiplier,
1035 dal_fixed31_32_add(
1036 dal_fixed31_32_mul(
1037 ideal[5],
1038 cos_grph_hue),
1039 dal_fixed31_32_mul(
1040 ideal[9],
1041 sin_grph_hue)));
1042
1043 yuv_matrix[6] = dal_fixed31_32_mul(
1044 multiplier,
1045 dal_fixed31_32_add(
1046 dal_fixed31_32_mul(
1047 ideal[6],
1048 cos_grph_hue),
1049 dal_fixed31_32_mul(
1050 ideal[10],
1051 sin_grph_hue)));
1052
1053 yuv_matrix[7] = ideal[7];
1054
1055 yuv_matrix[8] = dal_fixed31_32_mul(
1056 multiplier,
1057 dal_fixed31_32_sub(
1058 dal_fixed31_32_mul(
1059 ideal[8],
1060 cos_grph_hue),
1061 dal_fixed31_32_mul(
1062 ideal[4],
1063 sin_grph_hue)));
1064
1065 yuv_matrix[9] = dal_fixed31_32_mul(
1066 multiplier,
1067 dal_fixed31_32_sub(
1068 dal_fixed31_32_mul(
1069 ideal[9],
1070 cos_grph_hue),
1071 dal_fixed31_32_mul(
1072 ideal[5],
1073 sin_grph_hue)));
1074
1075 yuv_matrix[10] = dal_fixed31_32_mul(
1076 multiplier,
1077 dal_fixed31_32_sub(
1078 dal_fixed31_32_mul(
1079 ideal[10],
1080 cos_grph_hue),
1081 dal_fixed31_32_mul(
1082 ideal[6],
1083 sin_grph_hue)));
1084
1085 yuv_matrix[11] = ideal[11];
1086
1087 if ((color_space == COLOR_SPACE_YCBCR601_LIMITED) ||
1088 (color_space == COLOR_SPACE_YCBCR709_LIMITED)) {
1089 yuv_matrix[3] = dal_fixed31_32_add(ideal[3], grph_bright);
1090 } else {
1091 yuv_matrix[3] = dal_fixed31_32_add(
1092 ideal[3],
1093 dal_fixed31_32_mul(
1094 grph_bright,
1095 dal_fixed31_32_from_fraction(86, 100)));
1096 }
1097}
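
The two coefficient tables above decode to the standard luma weights pre-scaled for limited range (again as fractions of 10^8). Dividing the first row of each by 219/256 = 0.85546875 recovers the familiar values, so the first table is the BT.601 set used for the 601 color spaces and the second is the BT.709 set:

    0.25578516 / 0.85546875 ≈ 0.299,  0.50216016 / 0.85546875 ≈ 0.587,  0.09752344 / 0.85546875 ≈ 0.114   (BT.601)
    0.18187266 / 0.85546875 ≈ 0.2126, 0.61183125 / 0.85546875 ≈ 0.7152, 0.06176484 / 0.85546875 ≈ 0.0722  (BT.709)

The chroma rows are the corresponding ±0.5 difference coefficients scaled into the ±0.4375 (112/256) range around the 0.5 offset.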
1098
1099static void calculate_csc_matrix(struct core_color *core_color,
1100 unsigned int sink_index,
1101 enum dc_color_space color_space,
1102 uint16_t *csc_matrix)
1103{
1104 struct fixed31_32 fixed_csc_matrix[12];
1105 switch (color_space) {
1106 case COLOR_SPACE_SRGB:
1107 calculate_rgb_matrix_legacy
1108 (core_color, sink_index, fixed_csc_matrix);
1109 convert_float_matrix_legacy
1110 (csc_matrix, fixed_csc_matrix, 12);
1111 break;
1112 case COLOR_SPACE_SRGB_LIMITED:
1113 calculate_rgb_limited_range_matrix(core_color, sink_index,
1114 fixed_csc_matrix);
1115 convert_float_matrix(csc_matrix, fixed_csc_matrix, 12);
1116 break;
1117 case COLOR_SPACE_YCBCR601:
1118 case COLOR_SPACE_YCBCR709:
1119 case COLOR_SPACE_YCBCR601_LIMITED:
1120 case COLOR_SPACE_YCBCR709_LIMITED:
1121 case COLOR_SPACE_YPBPR601:
1122 case COLOR_SPACE_YPBPR709:
1123 calculate_yuv_matrix(core_color, sink_index, color_space,
1124 fixed_csc_matrix);
1125 convert_float_matrix(csc_matrix, fixed_csc_matrix, 12);
1126 break;
1127 default:
1128 calculate_rgb_matrix_legacy
1129 (core_color, sink_index, fixed_csc_matrix);
1130 convert_float_matrix_legacy
1131 (csc_matrix, fixed_csc_matrix, 12);
1132 break;
1133 }
1134}
1135
1136struct mod_color *mod_color_create(struct dc *dc)
1137{
1138 int i = 0;
1139 struct core_color *core_color =
1140 dm_alloc(sizeof(struct core_color));
1141 struct core_dc *core_dc = DC_TO_CORE(dc);
1142 struct persistent_data_flag flag;
1143
1144 if (core_color == NULL)
1145 goto fail_alloc_context;
1146
1147 core_color->caps = dm_alloc(sizeof(struct sink_caps) *
1148 MOD_COLOR_MAX_CONCURRENT_SINKS);
1149
1150 if (core_color->caps == NULL)
1151 goto fail_alloc_caps;
1152
1153 for (i = 0; i < MOD_COLOR_MAX_CONCURRENT_SINKS; i++)
1154 core_color->caps[i].sink = NULL;
1155
1156 core_color->state = dm_alloc(sizeof(struct color_state) *
1157 MOD_COLOR_MAX_CONCURRENT_SINKS);
1158
	if (core_color->state == NULL)
		goto fail_alloc_state;

1159 /* hardcoded to sRGB with a 6500 K color temperature */
1160 for (i = 0; i < MOD_COLOR_MAX_CONCURRENT_SINKS; i++) {
1161 core_color->state[i].source_gamut.blueX = 1500;
1162 core_color->state[i].source_gamut.blueY = 600;
1163 core_color->state[i].source_gamut.greenX = 3000;
1164 core_color->state[i].source_gamut.greenY = 6000;
1165 core_color->state[i].source_gamut.redX = 6400;
1166 core_color->state[i].source_gamut.redY = 3300;
1167 core_color->state[i].source_gamut.whiteX = 3127;
1168 core_color->state[i].source_gamut.whiteY = 3290;
1169
1170 core_color->state[i].destination_gamut.blueX = 1500;
1171 core_color->state[i].destination_gamut.blueY = 600;
1172 core_color->state[i].destination_gamut.greenX = 3000;
1173 core_color->state[i].destination_gamut.greenY = 6000;
1174 core_color->state[i].destination_gamut.redX = 6400;
1175 core_color->state[i].destination_gamut.redY = 3300;
1176 core_color->state[i].destination_gamut.whiteX = 3127;
1177 core_color->state[i].destination_gamut.whiteY = 3290;
1178
1179 core_color->state[i].custom_color_temperature = 6500;
1180
1181 core_color->state[i].contrast.current = 100;
1182 core_color->state[i].contrast.min = 0;
1183 core_color->state[i].contrast.max = 200;
1184
1185 core_color->state[i].saturation.current = 100;
1186 core_color->state[i].saturation.min = 0;
1187 core_color->state[i].saturation.max = 200;
1188
1189 core_color->state[i].brightness.current = 0;
1190 core_color->state[i].brightness.min = -100;
1191 core_color->state[i].brightness.max = 100;
1192
1193 core_color->state[i].hue.current = 0;
1194 core_color->state[i].hue.min = -30;
1195 core_color->state[i].hue.max = 30;
1196 }
1200
1201 core_color->num_sinks = 0;
1202
1203 if (dc == NULL)
1204 goto fail_construct;
1205
1206 core_color->dc = dc;
1207
1208 if (!check_dc_support(dc))
1209 goto fail_construct;
1210
1211 /* Create initial module folder in registry for color adjustment */
1212 flag.save_per_edid = true;
1213 flag.save_per_link = false;
1214
1215 dm_write_persistent_data(core_dc->ctx, NULL, COLOR_REGISTRY_NAME, NULL,
1216 NULL, 0, &flag);
1217
1218 return &core_color->public;
1219
1220fail_construct:
1221 dm_free(core_color->state);
1222
1223fail_alloc_state:
1224 dm_free(core_color->caps);
1225
1226fail_alloc_caps:
1227 dm_free(core_color);
1228
1229fail_alloc_context:
1230 return NULL;
1231}
1232
1233void mod_color_destroy(struct mod_color *mod_color)
1234{
1235 if (mod_color != NULL) {
1236 int i;
1237 struct core_color *core_color =
1238 MOD_COLOR_TO_CORE(mod_color);
1239
1240 dm_free(core_color->state);
1241
1242 for (i = 0; i < core_color->num_sinks; i++)
1243 dc_sink_release(core_color->caps[i].sink);
1244
1245 dm_free(core_color->caps);
1246
1247 dm_free(core_color);
1248 }
1249}
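
A minimal usage sketch of the module lifecycle, assuming a valid struct dc *dc and struct dc_sink *sink already exist (both variable names here are placeholders, not part of the patch):

	struct mod_color *color = mod_color_create(dc);

	if (color) {
		if (mod_color_add_sink(color, sink)) {
			/* per-stream adjustments go here, see the setters below */
			mod_color_remove_sink(color, sink);
		}
		mod_color_destroy(color);
	}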
1250
1251bool mod_color_add_sink(struct mod_color *mod_color, const struct dc_sink *sink)
1252{
1253 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1254 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
1255 bool persistent_color_temp_enable;
1256 int persistent_custom_color_temp = 0;
1257 struct color_space_coordinates persistent_source_gamut;
1258 struct color_space_coordinates persistent_destination_gamut;
1259 int persistent_brightness;
1260 int persistent_contrast;
1261 int persistent_hue;
1262 int persistent_saturation;
1263 enum dc_quantization_range persistent_quantization_range;
1264 struct persistent_data_flag flag;
1265
1266 if (core_color->num_sinks < MOD_COLOR_MAX_CONCURRENT_SINKS) {
1267 dc_sink_retain(sink);
1268 core_color->caps[core_color->num_sinks].sink = sink;
1269 core_color->state[core_color->num_sinks].
1270 user_enable_color_temperature = true;
1271
1272 /* get persistent data from registry */
1273 flag.save_per_edid = true;
1274 flag.save_per_link = false;
1275
1276
1277 if (dm_read_persistent_data(core_dc->ctx, sink,
1278 COLOR_REGISTRY_NAME,
1279 "enablecolortempadj",
1280 &persistent_color_temp_enable,
1281 sizeof(bool), &flag))
1282 core_color->state[core_color->num_sinks].
1283 user_enable_color_temperature =
1284 persistent_color_temp_enable;
1285 else
1286 core_color->state[core_color->num_sinks].
1287 user_enable_color_temperature = true;
1288
1289 if (dm_read_persistent_data(core_dc->ctx, sink,
1290 COLOR_REGISTRY_NAME,
1291 "customcolortemp",
1292 &persistent_custom_color_temp,
1293 sizeof(int), &flag))
1294 core_color->state[core_color->num_sinks].
1295 custom_color_temperature
1296 = persistent_custom_color_temp;
1297 else
1298 core_color->state[core_color->num_sinks].
1299 custom_color_temperature = 6500;
1300
1301 if (dm_read_persistent_data(core_dc->ctx, sink,
1302 COLOR_REGISTRY_NAME,
1303 "sourcegamut",
1304 &persistent_source_gamut,
1305 sizeof(struct color_space_coordinates),
1306 &flag)) {
1307 memcpy(&core_color->state[core_color->num_sinks].
1308 source_gamut, &persistent_source_gamut,
1309 sizeof(struct color_space_coordinates));
1310 } else {
1311 core_color->state[core_color->num_sinks].
1312 source_gamut.blueX = 1500;
1313 core_color->state[core_color->num_sinks].
1314 source_gamut.blueY = 600;
1315 core_color->state[core_color->num_sinks].
1316 source_gamut.greenX = 3000;
1317 core_color->state[core_color->num_sinks].
1318 source_gamut.greenY = 6000;
1319 core_color->state[core_color->num_sinks].
1320 source_gamut.redX = 6400;
1321 core_color->state[core_color->num_sinks].
1322 source_gamut.redY = 3300;
1323 core_color->state[core_color->num_sinks].
1324 source_gamut.whiteX = 3127;
1325 core_color->state[core_color->num_sinks].
1326 source_gamut.whiteY = 3290;
1327 }
1328
1329 if (dm_read_persistent_data(core_dc->ctx, sink, COLOR_REGISTRY_NAME,
1330 "destgamut",
1331 &persistent_destination_gamut,
1332 sizeof(struct color_space_coordinates),
1333 &flag)) {
1334 memcpy(&core_color->state[core_color->num_sinks].
1335 destination_gamut,
1336 &persistent_destination_gamut,
1337 sizeof(struct color_space_coordinates));
1338 } else {
1339 core_color->state[core_color->num_sinks].
1340 destination_gamut.blueX = 1500;
1341 core_color->state[core_color->num_sinks].
1342 destination_gamut.blueY = 600;
1343 core_color->state[core_color->num_sinks].
1344 destination_gamut.greenX = 3000;
1345 core_color->state[core_color->num_sinks].
1346 destination_gamut.greenY = 6000;
1347 core_color->state[core_color->num_sinks].
1348 destination_gamut.redX = 6400;
1349 core_color->state[core_color->num_sinks].
1350 destination_gamut.redY = 3300;
1351 core_color->state[core_color->num_sinks].
1352 destination_gamut.whiteX = 3127;
1353 core_color->state[core_color->num_sinks].
1354 destination_gamut.whiteY = 3290;
1355 }
1356
1357 if (dm_read_persistent_data(core_dc->ctx, sink, COLOR_REGISTRY_NAME,
1358 "brightness",
1359 &persistent_brightness,
1360 sizeof(int), &flag))
1361 core_color->state[core_color->num_sinks].
1362 brightness.current = persistent_brightness;
1363 else
1364 core_color->state[core_color->num_sinks].
1365 brightness.current = 0;
1366
1367 if (dm_read_persistent_data(core_dc->ctx, sink, COLOR_REGISTRY_NAME,
1368 "contrast",
1369 &persistent_contrast,
1370 sizeof(int), &flag))
1371 core_color->state[core_color->num_sinks].
1372 contrast.current = persistent_contrast;
1373 else
1374 core_color->state[core_color->num_sinks].
1375 contrast.current = 100;
1376
1377 if (dm_read_persistent_data(core_dc->ctx, sink, COLOR_REGISTRY_NAME,
1378 "hue",
1379 &persistent_hue,
1380 sizeof(int), &flag))
1381 core_color->state[core_color->num_sinks].
1382 hue.current = persistent_hue;
1383 else
1384 core_color->state[core_color->num_sinks].
1385 hue.current = 0;
1386
1387 if (dm_read_persistent_data(core_dc->ctx, sink, COLOR_REGISTRY_NAME,
1388 "saturation",
1389 &persistent_saturation,
1390 sizeof(int), &flag))
1391 core_color->state[core_color->num_sinks].
1392 saturation.current = persistent_saturation;
1393 else
1394 core_color->state[core_color->num_sinks].
1395 saturation.current = 100;
1396
1397 if (dm_read_persistent_data(core_dc->ctx, sink,
1398 COLOR_REGISTRY_NAME,
1399 "preferred_quantization_range",
1400 &persistent_quantization_range,
1401 sizeof(int), &flag))
1402 core_color->state[core_color->num_sinks].
1403 preferred_quantization_range =
1404 persistent_quantization_range;
1405 else
1406 core_color->state[core_color->num_sinks].
1407 preferred_quantization_range = QUANTIZATION_RANGE_FULL;
1408
1409 core_color->num_sinks++;
1410 return true;
1411 }
1412 return false;
1413}
1414
1415bool mod_color_remove_sink(struct mod_color *mod_color,
1416 const struct dc_sink *sink)
1417{
1418 int i = 0, j = 0;
1419 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1420
1421 for (i = 0; i < core_color->num_sinks; i++) {
1422 if (core_color->caps[i].sink == sink) {
1423 /* To remove this sink, shift everything after down */
1424 for (j = i; j < core_color->num_sinks - 1; j++) {
1425 core_color->caps[j].sink =
1426 core_color->caps[j + 1].sink;
1427
1428 memcpy(&core_color->state[j],
1429 &core_color->state[j + 1],
1430 sizeof(struct color_state));
1431 }
1432
1433 core_color->num_sinks--;
1434
1435 dc_sink_release(sink);
1436
1437 return true;
1438 }
1439 }
1440
1441 return false;
1442}
1443
1444bool mod_color_update_gamut_to_stream(struct mod_color *mod_color,
1445 const struct dc_stream **streams, int num_streams)
1446{
1447 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1448 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
1449 struct persistent_data_flag flag;
1450 struct gamut_src_dst_matrix *matrix =
1451 dm_alloc(sizeof(struct gamut_src_dst_matrix));
1452
1453 unsigned int stream_index, sink_index, j;
1454
1455 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1456 sink_index = sink_index_from_sink(core_color,
1457 streams[stream_index]->sink);
1458
1459 /* Write persistent data in registry*/
1460 flag.save_per_edid = true;
1461 flag.save_per_link = false;
1462
1463 dm_write_persistent_data(core_dc->ctx,
1464 streams[stream_index]->sink,
1465 COLOR_REGISTRY_NAME,
1466 "sourcegamut",
1467 &core_color->state[sink_index].
1468 source_gamut,
1469 sizeof(struct color_space_coordinates),
1470 &flag);
1471
1472 dm_write_persistent_data(core_dc->ctx,
1473 streams[stream_index]->sink,
1474 COLOR_REGISTRY_NAME,
1475 "destgamut",
1476 &core_color->state[sink_index].
1477 destination_gamut,
1478 sizeof(struct color_space_coordinates),
1479 &flag);
1480
1481 if (!build_gamut_remap_matrix
1482 (core_color->state[sink_index].source_gamut,
1483 matrix->rgbCoeffSrc,
1484 matrix->whiteCoeffSrc))
1485 goto function_fail;
1486
1487 if (!build_gamut_remap_matrix
1488 (core_color->state[sink_index].
1489 destination_gamut,
1490 matrix->rgbCoeffDst, matrix->whiteCoeffDst))
1491 goto function_fail;
1492
1493 struct fixed31_32 gamut_result[12];
1494 struct fixed31_32 temp_matrix[9];
1495
1496 if (!gamut_to_color_matrix(
1497 matrix->rgbCoeffDst,
1498 matrix->whiteCoeffDst,
1499 matrix->rgbCoeffSrc,
1500 matrix->whiteCoeffSrc,
1501 true,
1502 temp_matrix))
1503 goto function_fail;
1504
1505 gamut_result[0] = temp_matrix[0];
1506 gamut_result[1] = temp_matrix[1];
1507 gamut_result[2] = temp_matrix[2];
1508 gamut_result[3] = matrix->whiteCoeffSrc[0];
1509 gamut_result[4] = temp_matrix[3];
1510 gamut_result[5] = temp_matrix[4];
1511 gamut_result[6] = temp_matrix[5];
1512 gamut_result[7] = matrix->whiteCoeffSrc[1];
1513 gamut_result[8] = temp_matrix[6];
1514 gamut_result[9] = temp_matrix[7];
1515 gamut_result[10] = temp_matrix[8];
1516 gamut_result[11] = matrix->whiteCoeffSrc[2];
1517
1518 struct core_stream *core_stream =
1519 DC_STREAM_TO_CORE
1520 (streams[stream_index]);
1521
1522 core_stream->public.gamut_remap_matrix.enable_remap = true;
1523
1524 for (j = 0; j < 12; j++)
1525 core_stream->public.
1526 gamut_remap_matrix.matrix[j] =
1527 gamut_result[j];
1528 }
1529
1530 dm_free(matrix);
1531 core_color->dc->stream_funcs.set_gamut_remap
1532 (core_color->dc, streams, num_streams);
1533
1534 return true;
1535
1536function_fail:
1537 dm_free(matrix);
1538 return false;
1539}
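
The twelve gamut_result entries above form a row-major 3x4 matrix: the 3x3 output of gamut_to_color_matrix() fills the first three columns and whiteCoeffSrc[0..2] supplies the fourth column of each row:

    | temp[0] temp[1] temp[2] | whiteCoeffSrc[0] |
    | temp[3] temp[4] temp[5] | whiteCoeffSrc[1] |
    | temp[6] temp[7] temp[8] | whiteCoeffSrc[2] |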
1540
1541bool mod_color_adjust_source_gamut(struct mod_color *mod_color,
1542 const struct dc_stream **streams, int num_streams,
1543 struct gamut_space_coordinates *input_gamut_coordinates,
1544 struct white_point_coodinates *input_white_point_coordinates)
1545{
1546 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1547
1548 unsigned int stream_index, sink_index;
1549
1550 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1551 sink_index = sink_index_from_sink(core_color,
1552 streams[stream_index]->sink);
1553
1554 core_color->state[sink_index].source_gamut.blueX =
1555 input_gamut_coordinates->blueX;
1556 core_color->state[sink_index].source_gamut.blueY =
1557 input_gamut_coordinates->blueY;
1558 core_color->state[sink_index].source_gamut.greenX =
1559 input_gamut_coordinates->greenX;
1560 core_color->state[sink_index].source_gamut.greenY =
1561 input_gamut_coordinates->greenY;
1562 core_color->state[sink_index].source_gamut.redX =
1563 input_gamut_coordinates->redX;
1564 core_color->state[sink_index].source_gamut.redY =
1565 input_gamut_coordinates->redY;
1566 core_color->state[sink_index].source_gamut.whiteX =
1567 input_white_point_coordinates->whiteX;
1568 core_color->state[sink_index].source_gamut.whiteY =
1569 input_white_point_coordinates->whiteY;
1570 }
1571
1572 if (!mod_color_update_gamut_to_stream(mod_color, streams, num_streams))
1573 return false;
1574
1575 return true;
1576}
1577
1578bool mod_color_adjust_destination_gamut(struct mod_color *mod_color,
1579 const struct dc_stream **streams, int num_streams,
1580 struct gamut_space_coordinates *input_gamut_coordinates,
1581 struct white_point_coodinates *input_white_point_coordinates)
1582{
1583 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1584
1585 unsigned int stream_index, sink_index;
1586
1587 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1588 sink_index = sink_index_from_sink(core_color,
1589 streams[stream_index]->sink);
1590
1591 core_color->state[sink_index].destination_gamut.blueX =
1592 input_gamut_coordinates->blueX;
1593 core_color->state[sink_index].destination_gamut.blueY =
1594 input_gamut_coordinates->blueY;
1595 core_color->state[sink_index].destination_gamut.greenX =
1596 input_gamut_coordinates->greenX;
1597 core_color->state[sink_index].destination_gamut.greenY =
1598 input_gamut_coordinates->greenY;
1599 core_color->state[sink_index].destination_gamut.redX =
1600 input_gamut_coordinates->redX;
1601 core_color->state[sink_index].destination_gamut.redY =
1602 input_gamut_coordinates->redY;
1603 core_color->state[sink_index].destination_gamut.whiteX =
1604 input_white_point_coordinates->whiteX;
1605 core_color->state[sink_index].destination_gamut.whiteY =
1606 input_white_point_coordinates->whiteY;
1607 }
1608
1609 if (!mod_color_update_gamut_to_stream(mod_color, streams, num_streams))
1610 return false;
1611
1612 return true;
1613}
1614
1615bool mod_color_set_white_point(struct mod_color *mod_color,
1616 const struct dc_stream **streams, int num_streams,
1617 struct white_point_coodinates *white_point)
1618{
1619 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1620
1621 unsigned int stream_index, sink_index;
1622
1623 for (stream_index = 0; stream_index < num_streams;
1624 stream_index++) {
1625 sink_index = sink_index_from_sink(core_color,
1626 streams[stream_index]->sink);
1627 core_color->state[sink_index].source_gamut.whiteX =
1628 white_point->whiteX;
1629 core_color->state[sink_index].source_gamut.whiteY =
1630 white_point->whiteY;
1631 }
1632
1633 if (!mod_color_update_gamut_to_stream(mod_color, streams, num_streams))
1634 return false;
1635
1636 return true;
1637}
1638
1639bool mod_color_set_user_enable(struct mod_color *mod_color,
1640 const struct dc_stream **streams, int num_streams,
1641 bool user_enable)
1642{
1643 struct core_color *core_color =
1644 MOD_COLOR_TO_CORE(mod_color);
1645 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
1646 struct persistent_data_flag flag;
1647 unsigned int stream_index, sink_index;
1648
1649 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1650 sink_index = sink_index_from_sink(core_color,
1651 streams[stream_index]->sink);
1652 core_color->state[sink_index].user_enable_color_temperature
1653 = user_enable;
1654
1655 /* Write persistent data in registry*/
1656 flag.save_per_edid = true;
1657 flag.save_per_link = false;
1658
1659 dm_write_persistent_data(core_dc->ctx,
1660 streams[stream_index]->sink,
1661 COLOR_REGISTRY_NAME,
1662 "enablecolortempadj",
1663 &user_enable,
1664 sizeof(bool),
1665 &flag);
1666 }
1667 return true;
1668}
1669
1670bool mod_color_get_user_enable(struct mod_color *mod_color,
1671 const struct dc_sink *sink,
1672 bool *user_enable)
1673{
1674 struct core_color *core_color =
1675 MOD_COLOR_TO_CORE(mod_color);
1676
1677 unsigned int sink_index = sink_index_from_sink(core_color, sink);
1678
1679 *user_enable = core_color->state[sink_index].
1680 user_enable_color_temperature;
1681
1682 return true;
1683}
1684
1685bool mod_color_get_custom_color_temperature(struct mod_color *mod_color,
1686 const struct dc_sink *sink,
1687 int *color_temperature)
1688{
1689 struct core_color *core_color =
1690 MOD_COLOR_TO_CORE(mod_color);
1691
1692 unsigned int sink_index = sink_index_from_sink(core_color, sink);
1693
1694 *color_temperature = core_color->state[sink_index].
1695 custom_color_temperature;
1696
1697 return true;
1698}
1699
1700bool mod_color_set_custom_color_temperature(struct mod_color *mod_color,
1701 const struct dc_stream **streams, int num_streams,
1702 int color_temperature)
1703{
1704 struct core_color *core_color =
1705 MOD_COLOR_TO_CORE(mod_color);
1706 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
1707 struct persistent_data_flag flag;
1708 unsigned int stream_index, sink_index;
1709
1710 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1711 sink_index = sink_index_from_sink(core_color,
1712 streams[stream_index]->sink);
1713 core_color->state[sink_index].custom_color_temperature
1714 = color_temperature;
1715
1716 /* Write persistent data in registry*/
1717 flag.save_per_edid = true;
1718 flag.save_per_link = false;
1719
1720 dm_write_persistent_data(core_dc->ctx,
1721 streams[stream_index]->sink,
1722 COLOR_REGISTRY_NAME,
1723 "customcolortemp",
1724 &color_temperature,
1725 sizeof(int),
1726 &flag);
1727 }
1728 return true;
1729}
1730
1731bool mod_color_get_color_saturation(struct mod_color *mod_color,
1732 const struct dc_sink *sink,
1733 struct color_range *color_saturation)
1734{
1735 struct core_color *core_color =
1736 MOD_COLOR_TO_CORE(mod_color);
1737
1738 unsigned int sink_index = sink_index_from_sink(core_color, sink);
1739
1740 *color_saturation = core_color->state[sink_index].saturation;
1741
1742 return true;
1743}
1744
1745bool mod_color_get_color_contrast(struct mod_color *mod_color,
1746 const struct dc_sink *sink,
1747 struct color_range *color_contrast)
1748{
1749 struct core_color *core_color =
1750 MOD_COLOR_TO_CORE(mod_color);
1751
1752 unsigned int sink_index = sink_index_from_sink(core_color, sink);
1753
1754 *color_contrast = core_color->state[sink_index].contrast;
1755
1756 return true;
1757}
1758
1759bool mod_color_get_color_brightness(struct mod_color *mod_color,
1760 const struct dc_sink *sink,
1761 struct color_range *color_brightness)
1762{
1763 struct core_color *core_color =
1764 MOD_COLOR_TO_CORE(mod_color);
1765
1766 unsigned int sink_index = sink_index_from_sink(core_color, sink);
1767
1768 *color_brightness = core_color->state[sink_index].brightness;
1769
1770 return true;
1771}
1772
1773bool mod_color_get_color_hue(struct mod_color *mod_color,
1774 const struct dc_sink *sink,
1775 struct color_range *color_hue)
1776{
1777 struct core_color *core_color =
1778 MOD_COLOR_TO_CORE(mod_color);
1779
1780 unsigned int sink_index = sink_index_from_sink(core_color, sink);
1781
1782 *color_hue = core_color->state[sink_index].hue;
1783
1784 return true;
1785}
1786
1787bool mod_color_get_source_gamut(struct mod_color *mod_color,
1788 const struct dc_sink *sink,
1789 struct color_space_coordinates *source_gamut)
1790{
1791 struct core_color *core_color =
1792 MOD_COLOR_TO_CORE(mod_color);
1793
1794 unsigned int sink_index = sink_index_from_sink(core_color, sink);
1795
1796 *source_gamut = core_color->state[sink_index].source_gamut;
1797
1798 return true;
1799}
1800
1801bool mod_color_notify_mode_change(struct mod_color *mod_color,
1802 const struct dc_stream **streams, int num_streams)
1803{
1804 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1805
1806 struct gamut_src_dst_matrix *matrix =
1807 dm_alloc(sizeof(struct gamut_src_dst_matrix));
1808
1809 unsigned int stream_index, sink_index, j;
1810
1811 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1812 sink_index = sink_index_from_sink(core_color,
1813 streams[stream_index]->sink);
1814
1815 if (!build_gamut_remap_matrix
1816 (core_color->state[sink_index].source_gamut,
1817 matrix->rgbCoeffSrc,
1818 matrix->whiteCoeffSrc))
1819 goto function_fail;
1820
1821 if (!build_gamut_remap_matrix
1822 (core_color->state[sink_index].
1823 destination_gamut,
1824 matrix->rgbCoeffDst, matrix->whiteCoeffDst))
1825 goto function_fail;
1826
1827 struct fixed31_32 gamut_result[12];
1828 struct fixed31_32 temp_matrix[9];
1829
1830 if (!gamut_to_color_matrix(
1831 matrix->rgbCoeffDst,
1832 matrix->whiteCoeffDst,
1833 matrix->rgbCoeffSrc,
1834 matrix->whiteCoeffSrc,
1835 true,
1836 temp_matrix))
1837 goto function_fail;
1838
1839 gamut_result[0] = temp_matrix[0];
1840 gamut_result[1] = temp_matrix[1];
1841 gamut_result[2] = temp_matrix[2];
1842 gamut_result[3] = matrix->whiteCoeffSrc[0];
1843 gamut_result[4] = temp_matrix[3];
1844 gamut_result[5] = temp_matrix[4];
1845 gamut_result[6] = temp_matrix[5];
1846 gamut_result[7] = matrix->whiteCoeffSrc[1];
1847 gamut_result[8] = temp_matrix[6];
1848 gamut_result[9] = temp_matrix[7];
1849 gamut_result[10] = temp_matrix[8];
1850 gamut_result[11] = matrix->whiteCoeffSrc[2];
1851
1852
1853 struct core_stream *core_stream =
1854 DC_STREAM_TO_CORE
1855 (streams[stream_index]);
1856
1857 core_stream->public.gamut_remap_matrix.enable_remap = true;
1858
1859 for (j = 0; j < 12; j++)
1860 core_stream->public.
1861 gamut_remap_matrix.matrix[j] =
1862 gamut_result[j];
1863
1864 calculate_csc_matrix(core_color, sink_index,
1865 core_stream->public.output_color_space,
1866 core_stream->public.csc_color_matrix.matrix);
1867
1868 core_stream->public.csc_color_matrix.enable_adjustment = true;
1869 }
1870
1871 dm_free(matrix);
1872
1873 return true;
1874
1875function_fail:
1876 dm_free(matrix);
1877 return false;
1878}
1879
1880bool mod_color_set_brightness(struct mod_color *mod_color,
1881 const struct dc_stream **streams, int num_streams,
1882 int brightness_value)
1883{
1884 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1885 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
1886 struct persistent_data_flag flag;
1887 unsigned int stream_index, sink_index;
1888
1889 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1890 sink_index = sink_index_from_sink(core_color,
1891 streams[stream_index]->sink);
1892
1893 struct core_stream *core_stream =
1894 DC_STREAM_TO_CORE
1895 (streams[stream_index]);
1896
1897 core_color->state[sink_index].brightness.current =
1898 brightness_value;
1899
1900 calculate_csc_matrix(core_color, sink_index,
1901 core_stream->public.output_color_space,
1902 core_stream->public.csc_color_matrix.matrix);
1903
1904 core_stream->public.csc_color_matrix.enable_adjustment = true;
1905
1906 /* Write persistent data in registry*/
1907 flag.save_per_edid = true;
1908 flag.save_per_link = false;
1909 dm_write_persistent_data(core_dc->ctx,
1910 streams[stream_index]->sink,
1911 COLOR_REGISTRY_NAME,
1912 "brightness",
1913 &brightness_value,
1914 sizeof(int),
1915 &flag);
1916 }
1917
1918 core_color->dc->stream_funcs.set_gamut_remap
1919 (core_color->dc, streams, num_streams);
1920
1921 return true;
1922}
1923
1924bool mod_color_set_contrast(struct mod_color *mod_color,
1925 const struct dc_stream **streams, int num_streams,
1926 int contrast_value)
1927{
1928 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1929 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
1930 struct persistent_data_flag flag;
1931 unsigned int stream_index, sink_index;
1932
1933 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1934 sink_index = sink_index_from_sink(core_color,
1935 streams[stream_index]->sink);
1936
1937 struct core_stream *core_stream =
1938 DC_STREAM_TO_CORE
1939 (streams[stream_index]);
1940
1941 core_color->state[sink_index].contrast.current =
1942 contrast_value;
1943
1944 calculate_csc_matrix(core_color, sink_index,
1945 core_stream->public.output_color_space,
1946 core_stream->public.csc_color_matrix.matrix);
1947
1948 core_stream->public.csc_color_matrix.enable_adjustment = true;
1949
1950 /* Write persistent data in registry*/
1951 flag.save_per_edid = true;
1952 flag.save_per_link = false;
1953 dm_write_persistent_data(core_dc->ctx,
1954 streams[stream_index]->sink,
1955 COLOR_REGISTRY_NAME,
1956 "contrast",
1957 &contrast_value,
1958 sizeof(int),
1959 &flag);
1960 }
1961
1962 core_color->dc->stream_funcs.set_gamut_remap
1963 (core_color->dc, streams, num_streams);
1964
1965 return true;
1966}
1967
1968bool mod_color_set_hue(struct mod_color *mod_color,
1969 const struct dc_stream **streams, int num_streams,
1970 int hue_value)
1971{
1972 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
1973 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
1974 struct persistent_data_flag flag;
1975 unsigned int stream_index, sink_index;
1976
1977 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1978 sink_index = sink_index_from_sink(core_color,
1979 streams[stream_index]->sink);
1980
1981 struct core_stream *core_stream =
1982 DC_STREAM_TO_CORE
1983 (streams[stream_index]);
1984
1985 core_color->state[sink_index].hue.current = hue_value;
1986
1987 calculate_csc_matrix(core_color, sink_index,
1988 core_stream->public.output_color_space,
1989 core_stream->public.csc_color_matrix.matrix);
1990
1991 core_stream->public.csc_color_matrix.enable_adjustment = true;
1992
1993 /* Write persistent data in registry*/
1994 flag.save_per_edid = true;
1995 flag.save_per_link = false;
1996 dm_write_persistent_data(core_dc->ctx,
1997 streams[stream_index]->sink,
1998 COLOR_REGISTRY_NAME,
1999 "hue",
2000 &hue_value,
2001 sizeof(int),
2002 &flag);
2003 }
2004
2005 core_color->dc->stream_funcs.set_gamut_remap
2006 (core_color->dc, streams, num_streams);
2007
2008 return true;
2009}
2010
2011bool mod_color_set_saturation(struct mod_color *mod_color,
2012 const struct dc_stream **streams, int num_streams,
2013 int saturation_value)
2014{
2015 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
2016 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
2017 struct persistent_data_flag flag;
2018 unsigned int stream_index, sink_index;
2019
2020 for (stream_index = 0; stream_index < num_streams; stream_index++) {
2021 sink_index = sink_index_from_sink(core_color,
2022 streams[stream_index]->sink);
2023
2024 struct core_stream *core_stream =
2025 DC_STREAM_TO_CORE
2026 (streams[stream_index]);
2027
2028 core_color->state[sink_index].saturation.current =
2029 saturation_value;
2030
2031 calculate_csc_matrix(core_color, sink_index,
2032 core_stream->public.output_color_space,
2033 core_stream->public.csc_color_matrix.matrix);
2034
2035 core_stream->public.csc_color_matrix.enable_adjustment = true;
2036
2037 /* Write persistent data in registry*/
2038 flag.save_per_edid = true;
2039 flag.save_per_link = false;
2040 dm_write_persistent_data(core_dc->ctx,
2041 streams[stream_index]->sink,
2042 COLOR_REGISTRY_NAME,
2043 "saturation",
2044 &saturation_value,
2045 sizeof(int),
2046 &flag);
2047 }
2048
2049 core_color->dc->stream_funcs.set_gamut_remap
2050 (core_color->dc, streams, num_streams);
2051
2052 return true;
2053}
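
A short sketch of how the per-stream adjustment setters above might be driven, assuming 'color' and 'stream' already exist (placeholder names); the values are arbitrary but stay inside the default ranges established in mod_color_create():

	const struct dc_stream *streams[] = { stream };

	mod_color_set_brightness(color, streams, 1, 10);   /* default range -100..100 */
	mod_color_set_contrast(color, streams, 1, 120);    /* default range 0..200 */
	mod_color_set_hue(color, streams, 1, -5);          /* default range -30..30 */
	mod_color_set_saturation(color, streams, 1, 150);  /* default range 0..200 */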
2054
2055bool mod_color_set_preferred_quantization_range(struct mod_color *mod_color,
2056 const struct dc_sink *sink,
2057 enum dc_quantization_range quantization_range)
2058{
2059 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
2060 struct core_dc *core_dc = DC_TO_CORE(core_color->dc);
2061 struct persistent_data_flag flag;
2062 unsigned int sink_index;
2063
2064 sink_index = sink_index_from_sink(core_color, sink);
2065 if (core_color->state[sink_index].
2066 preferred_quantization_range != quantization_range) {
2067 core_color->state[sink_index].preferred_quantization_range =
2068 quantization_range;
2069 flag.save_per_edid = true;
2070 flag.save_per_link = false;
2071 dm_write_persistent_data(core_dc->ctx,
2072 sink,
2073 COLOR_REGISTRY_NAME,
2074 "quantization_range",
2075 &quantization_range,
2076 sizeof(int),
2077 &flag);
2078 }
2079
2080 return true;
2081}
2082
2083bool mod_color_get_preferred_quantization_range(struct mod_color *mod_color,
2084 const struct dc_sink *sink,
2085 enum dc_quantization_range *quantization_range)
2086{
2087 struct core_color *core_color = MOD_COLOR_TO_CORE(mod_color);
2088 unsigned int sink_index;
2089
2090 sink_index = sink_index_from_sink(core_color, sink);
2091 *quantization_range = core_color->state[sink_index].
2092 preferred_quantization_range;
2093 return true;
2094}
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/Makefile b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
new file mode 100644
index 000000000000..db8e0ff6d7a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the 'freesync' sub-module of DAL.
3#
4
5FREESYNC = freesync.o
6
7AMD_DAL_FREESYNC = $(addprefix $(AMDDALPATH)/modules/freesync/,$(FREESYNC))
8#$(info ************ DAL-FREESYNC_MAKEFILE ************)
9
10AMD_DISPLAY_FILES += $(AMD_DAL_FREESYNC)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
new file mode 100644
index 000000000000..eb912baa0169
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -0,0 +1,1158 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dc.h"
28#include "mod_freesync.h"
29#include "core_types.h"
30#include "core_dc.h"
31
32#define MOD_FREESYNC_MAX_CONCURRENT_STREAMS 32
33
34/* Refresh rate ramp at a fixed rate of 65 Hz/second */
35#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
36/* Number of elements in the render times cache array */
37#define RENDER_TIMES_MAX_COUNT 20
38/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
39#define BTR_EXIT_MARGIN 2000
40
41#define FREESYNC_REGISTRY_NAME "freesync_v1"
42
43struct gradual_static_ramp {
44 bool ramp_is_active;
45 bool ramp_direction_is_up;
46 unsigned int ramp_current_frame_duration_in_ns;
47};
48
49struct time_cache {
50 /* video (48Hz feature) related */
51 unsigned int update_duration_in_ns;
52
53 /* BTR/fixed refresh related */
54 unsigned int prev_time_stamp_in_us;
55
56 unsigned int min_render_time_in_us;
57 unsigned int max_render_time_in_us;
58
59 unsigned int render_times_index;
60 unsigned int render_times[RENDER_TIMES_MAX_COUNT];
61};
62
63struct below_the_range {
64 bool btr_active;
65 bool program_btr;
66
67 unsigned int mid_point_in_us;
68
69 unsigned int inserted_frame_duration_in_us;
70 unsigned int frames_to_insert;
71 unsigned int frame_counter;
72};
73
74struct fixed_refresh {
75 bool fixed_refresh_active;
76 bool program_fixed_refresh;
77};
78
79struct freesync_state {
80 bool fullscreen;
81 bool static_screen;
82 bool video;
83
84 unsigned int nominal_refresh_rate_in_micro_hz;
85 bool windowed_fullscreen;
86
87 struct time_cache time;
88
89 struct gradual_static_ramp static_ramp;
90 struct below_the_range btr;
91 struct fixed_refresh fixed_refresh;
92};
93
94struct freesync_entity {
95 const struct dc_stream *stream;
96 struct mod_freesync_caps *caps;
97 struct freesync_state state;
98 struct mod_freesync_user_enable user_enable;
99};
100
101struct core_freesync {
102 struct mod_freesync public;
103 struct dc *dc;
104 struct freesync_entity *map;
105 int num_entities;
106};
107
108#define MOD_FREESYNC_TO_CORE(mod_freesync)\
109 container_of(mod_freesync, struct core_freesync, public)
110
111static bool check_dc_support(const struct dc *dc)
112{
113 if (dc->stream_funcs.adjust_vmin_vmax == NULL)
114 return false;
115
116 return true;
117}
118
119struct mod_freesync *mod_freesync_create(struct dc *dc)
120{
121 struct core_freesync *core_freesync =
122 dm_alloc(sizeof(struct core_freesync));
123
124 struct core_dc *core_dc = DC_TO_CORE(dc);
125
126 struct persistent_data_flag flag;
127
128 int i = 0;
129
130 if (core_freesync == NULL)
131 goto fail_alloc_context;
132
133 core_freesync->map = dm_alloc(sizeof(struct freesync_entity) *
134 MOD_FREESYNC_MAX_CONCURRENT_STREAMS);
135
136 if (core_freesync->map == NULL)
137 goto fail_alloc_map;
138
139 for (i = 0; i < MOD_FREESYNC_MAX_CONCURRENT_STREAMS; i++)
140 core_freesync->map[i].stream = NULL;
141
142 core_freesync->num_entities = 0;
143
144 if (dc == NULL)
145 goto fail_construct;
146
147 core_freesync->dc = dc;
148
149 if (!check_dc_support(dc))
150 goto fail_construct;
151
152 /* Create initial module folder in registry for freesync enable data */
153 flag.save_per_edid = true;
154 flag.save_per_link = false;
155 dm_write_persistent_data(core_dc->ctx, NULL, FREESYNC_REGISTRY_NAME, NULL, NULL,
156 0, &flag);
157
158 return &core_freesync->public;
159
160fail_construct:
161 dm_free(core_freesync->map);
162
163fail_alloc_map:
164 dm_free(core_freesync);
165
166fail_alloc_context:
167 return NULL;
168}
169
170void mod_freesync_destroy(struct mod_freesync *mod_freesync)
171{
172 if (mod_freesync != NULL) {
173 int i;
174 struct core_freesync *core_freesync =
175 MOD_FREESYNC_TO_CORE(mod_freesync);
176
177 for (i = 0; i < core_freesync->num_entities; i++)
178 if (core_freesync->map[i].stream)
179 dc_stream_release(core_freesync->map[i].stream);
180
181 dm_free(core_freesync->map);
182
183 dm_free(core_freesync);
184 }
185}
186
187/* Given a specific dc_stream* this function finds its equivalent
188 * in the core_freesync->map and returns the corresponding index
189 */
190static unsigned int map_index_from_stream(struct core_freesync *core_freesync,
191 const struct dc_stream *stream)
192{
193 unsigned int index = 0;
194
195 for (index = 0; index < core_freesync->num_entities; index++) {
196 if (core_freesync->map[index].stream == stream) {
197 return index;
198 }
199 }
200 /* Could not find stream requested */
201 ASSERT(false);
202 return index;
203}
204
205bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
206 const struct dc_stream *stream, struct mod_freesync_caps *caps)
207{
208 struct core_freesync *core_freesync =
209 MOD_FREESYNC_TO_CORE(mod_freesync);
210 struct core_stream *core_stream =
211 DC_STREAM_TO_CORE(stream);
212 struct core_dc *core_dc = DC_TO_CORE(core_freesync->dc);
213
214 int persistent_freesync_enable = 0;
215 struct persistent_data_flag flag;
216
217 flag.save_per_edid = true;
218 flag.save_per_link = false;
219
220 if (core_freesync->num_entities < MOD_FREESYNC_MAX_CONCURRENT_STREAMS) {
221
222 dc_stream_retain(stream);
223
224 core_freesync->map[core_freesync->num_entities].stream = stream;
225 core_freesync->map[core_freesync->num_entities].caps = caps;
226
227 core_freesync->map[core_freesync->num_entities].state.
228 fullscreen = false;
229 core_freesync->map[core_freesync->num_entities].state.
230 static_screen = false;
231 core_freesync->map[core_freesync->num_entities].state.
232 video = false;
233 core_freesync->map[core_freesync->num_entities].state.time.
234 update_duration_in_ns = 0;
235 core_freesync->map[core_freesync->num_entities].state.
236 static_ramp.ramp_is_active = false;
237
238 /* get persistent data from registry */
239 if (dm_read_persistent_data(core_dc->ctx, stream->sink,
240 FREESYNC_REGISTRY_NAME,
241 "userenable", &persistent_freesync_enable,
242 sizeof(int), &flag)) {
243 core_freesync->map[core_freesync->num_entities].user_enable.
244 enable_for_gaming =
245 (persistent_freesync_enable & 1) ? true : false;
246 core_freesync->map[core_freesync->num_entities].user_enable.
247 enable_for_static =
248 (persistent_freesync_enable & 2) ? true : false;
249 core_freesync->map[core_freesync->num_entities].user_enable.
250 enable_for_video =
251 (persistent_freesync_enable & 4) ? true : false;
252 } else {
253 core_freesync->map[core_freesync->num_entities].user_enable.
254 enable_for_gaming = false;
255 core_freesync->map[core_freesync->num_entities].user_enable.
256 enable_for_static = false;
257 core_freesync->map[core_freesync->num_entities].user_enable.
258 enable_for_video = false;
259 }
260
261 if (caps->supported)
262 core_stream->public.ignore_msa_timing_param = 1;
263
264 core_freesync->num_entities++;
265 return true;
266 }
267 return false;
268}
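
The persisted "userenable" value read above is a simple bit mask: bit 0 enables freesync for gaming, bit 1 for static-screen ramping and bit 2 for the 48 Hz video feature. For example, a stored value of 5 (binary 101) re-enables the gaming and video paths while leaving static-screen ramping off.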
269
270bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
271 const struct dc_stream *stream)
272{
273 struct core_freesync *core_freesync =
274 MOD_FREESYNC_TO_CORE(mod_freesync);
275
276 int i = 0;
277 unsigned int index = map_index_from_stream(core_freesync, stream);
278 dc_stream_release(core_freesync->map[index].stream);
279 core_freesync->map[index].stream = NULL;
280 /* To remove this entity, shift everything after down */
281 for (i = index; i < core_freesync->num_entities - 1; i++)
282 core_freesync->map[i] = core_freesync->map[i + 1];
283 core_freesync->num_entities--;
284 return true;
285}
286
287static void update_stream_freesync_context(struct core_freesync *core_freesync,
288 const struct dc_stream *stream)
289{
290 unsigned int index;
291 struct freesync_context *ctx;
292 struct core_stream *core_stream;
293
294 core_stream = DC_STREAM_TO_CORE(stream);
295 ctx = &core_stream->public.freesync_ctx;
296
297 index = map_index_from_stream(core_freesync, stream);
298
299 ctx->supported = core_freesync->map[index].caps->supported;
300 ctx->enabled = (core_freesync->map[index].user_enable.enable_for_gaming ||
301 core_freesync->map[index].user_enable.enable_for_video ||
302 core_freesync->map[index].user_enable.enable_for_static);
303 ctx->active = (core_freesync->map[index].state.fullscreen ||
304 core_freesync->map[index].state.video ||
305 core_freesync->map[index].state.static_ramp.ramp_is_active);
306 ctx->min_refresh_in_micro_hz =
307 core_freesync->map[index].caps->min_refresh_in_micro_hz;
308 ctx->nominal_refresh_in_micro_hz = core_freesync->
309 map[index].state.nominal_refresh_rate_in_micro_hz;
310
311}
312
313static void update_stream(struct core_freesync *core_freesync,
314 const struct dc_stream *stream)
315{
316 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
317
318 unsigned int index = map_index_from_stream(core_freesync, stream);
319 if (core_freesync->map[index].caps->supported) {
320 core_stream->public.ignore_msa_timing_param = 1;
321 update_stream_freesync_context(core_freesync, stream);
322 }
323}
324
325static void calc_vmin_vmax(struct core_freesync *core_freesync,
326 const struct dc_stream *stream, int *vmin, int *vmax)
327{
328 unsigned int min_frame_duration_in_ns = 0, max_frame_duration_in_ns = 0;
329 unsigned int index = map_index_from_stream(core_freesync, stream);
330
331 min_frame_duration_in_ns = ((unsigned int) (div64_u64(
332 (1000000000ULL * 1000000),
333 core_freesync->map[index].state.
334 nominal_refresh_rate_in_micro_hz)));
335 max_frame_duration_in_ns = ((unsigned int) (div64_u64(
336 (1000000000ULL * 1000000),
337 core_freesync->map[index].caps->min_refresh_in_micro_hz)));
338
339 *vmax = div64_u64(div64_u64(((unsigned long long)(
340 max_frame_duration_in_ns) * stream->timing.pix_clk_khz),
341 stream->timing.h_total), 1000000);
342 *vmin = div64_u64(div64_u64(((unsigned long long)(
343 min_frame_duration_in_ns) * stream->timing.pix_clk_khz),
344 stream->timing.h_total), 1000000);
345}
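
A worked example of the vmin/vmax math, using the common CEA 1080p60 timing (pix_clk_khz = 148500, h_total = 2200, nominal v_total = 1125), a 60 Hz nominal refresh rate and a 40 Hz minimum (all divisions truncate):

    min_frame_duration = 10^15 / 60,000,000 = 16,666,666 ns
    max_frame_duration = 10^15 / 40,000,000 = 25,000,000 ns
    vmax = 25,000,000 * 148,500 / 2,200 / 10^6 = 1687
    vmin = 16,666,666 * 148,500 / 2,200 / 10^6 = 1124

Note that the truncated frame duration makes vmin land one line below the nominal 1125; the range handed to adjust_vmin_vmax() therefore spans roughly 40-60 Hz.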
346
347static void calc_v_total_from_duration(const struct dc_stream *stream,
348 unsigned int duration_in_ns, int *v_total_nominal)
349{
350 *v_total_nominal = div64_u64(div64_u64(((unsigned long long)(
351 duration_in_ns) * stream->timing.pix_clk_khz),
352 stream->timing.h_total), 1000000);
353}
354
355static void calc_v_total_for_static_ramp(struct core_freesync *core_freesync,
356 const struct dc_stream *stream,
357 unsigned int index, int *v_total)
358{
359 unsigned int frame_duration = 0;
360
361 struct gradual_static_ramp *static_ramp_variables =
362 &core_freesync->map[index].state.static_ramp;
363
364 /* Calc the ratio between the new and current frame duration with 3-digit precision */
365 unsigned int frame_duration_ratio = div64_u64(1000000,
366 (1000 + div64_u64(((unsigned long long)(
367 STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) *
368 static_ramp_variables->ramp_current_frame_duration_in_ns),
369 1000000000)));
370
371 /* Calculate delta between new and current frame duration in ns */
372 unsigned int frame_duration_delta = div64_u64(((unsigned long long)(
373 static_ramp_variables->ramp_current_frame_duration_in_ns) *
374 (1000 - frame_duration_ratio)), 1000);
375
376 /* Adjust frame duration delta based on ratio between current and
377 * standard frame duration (frame duration at 60 Hz refresh rate).
378 */
379 unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)(
380 frame_duration_delta) * static_ramp_variables->
381 ramp_current_frame_duration_in_ns), 16666666);
382
383 /* Going to a higher refresh rate (lower frame duration) */
384 if (static_ramp_variables->ramp_direction_is_up) {
385 /* reduce frame duration */
386 static_ramp_variables->ramp_current_frame_duration_in_ns -=
387 ramp_rate_interpolated;
388
389 /* min frame duration */
390 frame_duration = ((unsigned int) (div64_u64(
391 (1000000000ULL * 1000000),
392 core_freesync->map[index].state.
393 nominal_refresh_rate_in_micro_hz)));
394
395 /* adjust for frame duration below min */
396 if (static_ramp_variables->ramp_current_frame_duration_in_ns <=
397 frame_duration) {
398
399 static_ramp_variables->ramp_is_active = false;
400 static_ramp_variables->
401 ramp_current_frame_duration_in_ns =
402 frame_duration;
403 }
404 /* Going to a lower refresh rate (larger frame duration) */
405 } else {
406 /* increase frame duration */
407 static_ramp_variables->ramp_current_frame_duration_in_ns +=
408 ramp_rate_interpolated;
409
410 /* max frame duration */
411 frame_duration = ((unsigned int) (div64_u64(
412 (1000000000ULL * 1000000),
413 core_freesync->map[index].caps->min_refresh_in_micro_hz)));
414
415 /* adjust for frame duration above max */
416 if (static_ramp_variables->ramp_current_frame_duration_in_ns >=
417 frame_duration) {
418
419 static_ramp_variables->ramp_is_active = false;
420 static_ramp_variables->
421 ramp_current_frame_duration_in_ns =
422 frame_duration;
423 }
424 }
425
426 calc_v_total_from_duration(stream, static_ramp_variables->
427 ramp_current_frame_duration_in_ns, v_total);
428}
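
A rough worked example of one ramp step near 60 Hz, with ramp_current_frame_duration_in_ns = 16,666,666 (all divisions truncate): STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME evaluates to (1000 / 60) * 65 = 16 * 65 = 1040 in integer arithmetic, so

    frame_duration_ratio   = 1,000,000 / (1000 + 1040 * 16,666,666 / 10^9) = 1,000,000 / 1017 = 983
    frame_duration_delta   = 16,666,666 * (1000 - 983) / 1000 = 283,333 ns
    ramp_rate_interpolated = 283,333 * 16,666,666 / 16,666,666 = 283,333 ns

so the frame duration moves by roughly 0.28 ms per vertical update at this point of the ramp.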
429
430static void reset_freesync_state_variables(struct freesync_state* state)
431{
432 state->static_ramp.ramp_is_active = false;
433 if (state->nominal_refresh_rate_in_micro_hz)
434 state->static_ramp.ramp_current_frame_duration_in_ns =
435 ((unsigned int) (div64_u64(
436 (1000000000ULL * 1000000),
437 state->nominal_refresh_rate_in_micro_hz)));
438
439 state->btr.btr_active = false;
440 state->btr.frame_counter = 0;
441 state->btr.frames_to_insert = 0;
442 state->btr.inserted_frame_duration_in_us = 0;
443 state->btr.program_btr = false;
444
445 state->fixed_refresh.fixed_refresh_active = false;
446 state->fixed_refresh.program_fixed_refresh = false;
447}
448/*
449 * Sets freesync mode on a stream depending on current freesync state.
450 */
451static bool set_freesync_on_streams(struct core_freesync *core_freesync,
452 const struct dc_stream **streams, int num_streams)
453{
454 int v_total_nominal = 0, v_total_min = 0, v_total_max = 0;
455 unsigned int stream_idx, map_index = 0;
456 struct freesync_state *state;
457
458 if (num_streams == 0 || streams == NULL || num_streams > 1)
459 return false;
460
461 for (stream_idx = 0; stream_idx < num_streams; stream_idx++) {
462
463 map_index = map_index_from_stream(core_freesync,
464 streams[stream_idx]);
465
466 state = &core_freesync->map[map_index].state;
467
468 if (core_freesync->map[map_index].caps->supported) {
469
470 /* Fullscreen has the topmost priority. If the
471 * fullscreen bit is set, we are in a fullscreen
472 * application where it should not matter if it is
473 * static screen. We should not check the static_screen
474 * or video bit.
475 *
476 * Special cases of fullscreen include BTR and fixed
477 * refresh. BTR is programmed on every flip and involves
478 * programming the full range right before the last inserted frame.
479 * However, we do not want to program the full freesync range
480 * when fixed refresh is active, because that logic is programmed
481 * only once and this would override it.
482 */
483 if (core_freesync->map[map_index].user_enable.
484 enable_for_gaming == true &&
485 state->fullscreen == true &&
486 state->fixed_refresh.fixed_refresh_active == false) {
487 /* Enable freesync */
488
489 calc_vmin_vmax(core_freesync,
490 streams[stream_idx],
491 &v_total_min, &v_total_max);
492
493 /* Update the freesync context for the stream */
494 update_stream_freesync_context(core_freesync,
495 streams[stream_idx]);
496
497 core_freesync->dc->stream_funcs.
498 adjust_vmin_vmax(core_freesync->dc, streams,
499 num_streams, v_total_min,
500 v_total_max);
501
502 return true;
503
504 } else if (core_freesync->map[map_index].user_enable.
505 enable_for_video && state->video == true) {
506 /* Enable 48Hz feature */
507
508 calc_v_total_from_duration(streams[stream_idx],
509 state->time.update_duration_in_ns,
510 &v_total_nominal);
511
512 /* Program only if v_total_nominal is in range*/
513 if (v_total_nominal >=
514 streams[stream_idx]->timing.v_total) {
515
516 /* Update the freesync context for
517 * the stream
518 */
519 update_stream_freesync_context(
520 core_freesync,
521 streams[stream_idx]);
522
523 core_freesync->dc->stream_funcs.
524 adjust_vmin_vmax(
525 core_freesync->dc, streams,
526 num_streams, v_total_nominal,
527 v_total_nominal);
528 }
529 return true;
530
531 } else {
532 /* Disable freesync */
533 v_total_nominal = streams[stream_idx]->
534 timing.v_total;
535
536 /* Update the freesync context for
537 * the stream
538 */
539 update_stream_freesync_context(
540 core_freesync,
541 streams[stream_idx]);
542
543 core_freesync->dc->stream_funcs.
544 adjust_vmin_vmax(
545 core_freesync->dc, streams,
546 num_streams, v_total_nominal,
547 v_total_nominal);
548
549 /* Reset the cached variables */
550 reset_freesync_state_variables(state);
551
552 return true;
553 }
554 } else {
555 /* Disable freesync */
556 v_total_nominal = streams[stream_idx]->
557 timing.v_total;
558			/*
559			 * We always have to reset DRR, even if the sink does
560			 * not support freesync, because a former stream may
561			 * have been programmed.
562			 */
563 core_freesync->dc->stream_funcs.
564 adjust_vmin_vmax(
565 core_freesync->dc, streams,
566 num_streams, v_total_nominal,
567 v_total_nominal);
568 /* Reset the cached variables */
569 reset_freesync_state_variables(state);
570 }
571
572 }
573
574 return false;
575}
576
577static void set_static_ramp_variables(struct core_freesync *core_freesync,
578 unsigned int index, bool enable_static_screen)
579{
580 unsigned int frame_duration = 0;
581
582 struct gradual_static_ramp *static_ramp_variables =
583 &core_freesync->map[index].state.static_ramp;
584
585 /* If ramp is not active, set initial frame duration depending on
586 * whether we are enabling/disabling static screen mode. If the ramp is
587 * already active, ramp should continue in the opposite direction
588 * starting with the current frame duration
589 */
590 if (!static_ramp_variables->ramp_is_active) {
591
592 static_ramp_variables->ramp_is_active = true;
593
594 if (enable_static_screen == true) {
595 /* Going to lower refresh rate, so start from max
596 * refresh rate (min frame duration)
597 */
598 frame_duration = ((unsigned int) (div64_u64(
599 (1000000000ULL * 1000000),
600 core_freesync->map[index].state.
601 nominal_refresh_rate_in_micro_hz)));
602 } else {
603 /* Going to higher refresh rate, so start from min
604 * refresh rate (max frame duration)
605 */
606 frame_duration = ((unsigned int) (div64_u64(
607 (1000000000ULL * 1000000),
608 core_freesync->map[index].caps->min_refresh_in_micro_hz)));
609 }
610
611 static_ramp_variables->
612 ramp_current_frame_duration_in_ns = frame_duration;
613 }
614
615 /* If we are ENABLING static screen, refresh rate should go DOWN.
616 * If we are DISABLING static screen, refresh rate should go UP.
617 */
618 static_ramp_variables->ramp_direction_is_up = !enable_static_screen;
619}
620
621void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
622 const struct dc_stream **streams, int num_streams)
623{
624 struct core_freesync *core_freesync =
625 MOD_FREESYNC_TO_CORE(mod_freesync);
626
627 unsigned int index, v_total = 0;
628 struct freesync_state *state;
629
630 if (core_freesync->num_entities == 0)
631 return;
632
633 index = map_index_from_stream(core_freesync,
634 streams[0]);
635
636 if (core_freesync->map[index].caps->supported == false)
637 return;
638
639 state = &core_freesync->map[index].state;
640
641 /* Below the Range Logic */
642
643 /* Only execute if in fullscreen mode */
644 if (state->fullscreen == true &&
645 core_freesync->map[index].user_enable.enable_for_gaming) {
646
647		if (state->btr.btr_active &&
648				state->btr.frame_counter > 0)
649			state->btr.frame_counter--;
650
651
652 if (state->btr.frame_counter == 1) {
653
654 /* Restore FreeSync */
655 set_freesync_on_streams(core_freesync, streams,
656 num_streams);
657 }
658 }
659
660 /* If in fullscreen freesync mode or in video, do not program
661 * static screen ramp values
662 */
663 if (state->fullscreen == true || state->video == true) {
664
665 state->static_ramp.ramp_is_active = false;
666
667 return;
668 }
669
670 /* Gradual Static Screen Ramping Logic */
671
672 /* Execute if ramp is active and user enabled freesync static screen*/
673 if (state->static_ramp.ramp_is_active &&
674 core_freesync->map[index].user_enable.enable_for_static) {
675
676 calc_v_total_for_static_ramp(core_freesync, streams[0],
677 index, &v_total);
678
679 /* Update the freesync context for the stream */
680 update_stream_freesync_context(core_freesync, streams[0]);
681
682 /* Program static screen ramp values */
683 core_freesync->dc->stream_funcs.adjust_vmin_vmax(
684 core_freesync->dc, streams,
685 num_streams, v_total,
686 v_total);
687 }
688}
689
690void mod_freesync_update_state(struct mod_freesync *mod_freesync,
691 const struct dc_stream **streams, int num_streams,
692 struct mod_freesync_params *freesync_params)
693{
694 struct core_freesync *core_freesync =
695 MOD_FREESYNC_TO_CORE(mod_freesync);
696 bool freesync_program_required = false;
697 unsigned int stream_index;
698 struct freesync_state *state;
699
700 if (core_freesync->num_entities == 0)
701 return;
702
703	for (stream_index = 0; stream_index < num_streams; stream_index++) {
704
705 unsigned int map_index = map_index_from_stream(core_freesync,
706 streams[stream_index]);
707
708 state = &core_freesync->map[map_index].state;
709
710		switch (freesync_params->state) {
711 case FREESYNC_STATE_FULLSCREEN:
712 state->fullscreen = freesync_params->enable;
713 freesync_program_required = true;
714 state->windowed_fullscreen =
715 freesync_params->windowed_fullscreen;
716 break;
717 case FREESYNC_STATE_STATIC_SCREEN:
718 /* Static screen ramp is only enabled for embedded
719 * panels. Also change core variables only if there
720 * is a change.
721 */
722 if (dc_is_embedded_signal(
723 streams[stream_index]->sink->sink_signal) &&
724 state->static_screen !=
725 freesync_params->enable) {
726
727 /* Change the state flag */
728 state->static_screen = freesync_params->enable;
729
730 /* Change static screen ramp variables */
731 set_static_ramp_variables(core_freesync,
732 map_index,
733 freesync_params->enable);
734 }
735 /* We program the ramp starting next VUpdate */
736 break;
737 case FREESYNC_STATE_VIDEO:
738 /* Change core variables only if there is a change*/
739			if (freesync_params->update_duration_in_ns !=
740 state->time.update_duration_in_ns) {
741
742 state->video = freesync_params->enable;
743 state->time.update_duration_in_ns =
744 freesync_params->update_duration_in_ns;
745
746 freesync_program_required = true;
747 }
748 break;
749 }
750 }
751
752 if (freesync_program_required)
753 /* Program freesync according to current state*/
754 set_freesync_on_streams(core_freesync, streams, num_streams);
755}
756
757
758bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
759 const struct dc_stream *stream,
760 struct mod_freesync_params *freesync_params)
761{
762 struct core_freesync *core_freesync =
763 MOD_FREESYNC_TO_CORE(mod_freesync);
764
765 unsigned int index = map_index_from_stream(core_freesync, stream);
766
767 if (core_freesync->map[index].state.fullscreen) {
768 freesync_params->state = FREESYNC_STATE_FULLSCREEN;
769 freesync_params->enable = true;
770 } else if (core_freesync->map[index].state.static_screen) {
771 freesync_params->state = FREESYNC_STATE_STATIC_SCREEN;
772 freesync_params->enable = true;
773 } else if (core_freesync->map[index].state.video) {
774 freesync_params->state = FREESYNC_STATE_VIDEO;
775 freesync_params->enable = true;
776 } else {
777 freesync_params->state = FREESYNC_STATE_NONE;
778 freesync_params->enable = false;
779 }
780
781 freesync_params->update_duration_in_ns =
782 core_freesync->map[index].state.time.update_duration_in_ns;
783
784 return true;
785}
786
787bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
788 const struct dc_stream **streams, int num_streams,
789 struct mod_freesync_user_enable *user_enable)
790{
791 struct core_freesync *core_freesync =
792 MOD_FREESYNC_TO_CORE(mod_freesync);
793 struct core_dc *core_dc = DC_TO_CORE(core_freesync->dc);
794
795 unsigned int stream_index, map_index;
796 int persistent_data = 0;
797 struct persistent_data_flag flag;
798
799 flag.save_per_edid = true;
800 flag.save_per_link = false;
801
802	for (stream_index = 0; stream_index < num_streams;
803			stream_index++) {
804
805 map_index = map_index_from_stream(core_freesync,
806 streams[stream_index]);
807
808 core_freesync->map[map_index].user_enable = *user_enable;
809
810 /* Write persistent data in registry*/
811 if (core_freesync->map[map_index].user_enable.
812 enable_for_gaming)
813 persistent_data = persistent_data | 1;
814 if (core_freesync->map[map_index].user_enable.
815 enable_for_static)
816 persistent_data = persistent_data | 2;
817 if (core_freesync->map[map_index].user_enable.
818 enable_for_video)
819 persistent_data = persistent_data | 4;
820
821 dm_write_persistent_data(core_dc->ctx,
822 streams[stream_index]->sink,
823 FREESYNC_REGISTRY_NAME,
824 "userenable",
825 &persistent_data,
826 sizeof(int),
827 &flag);
828 }
829
830 set_freesync_on_streams(core_freesync, streams, num_streams);
831
832 return true;
833}
834
835bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
836 const struct dc_stream *stream,
837 struct mod_freesync_user_enable *user_enable)
838{
839 struct core_freesync *core_freesync =
840 MOD_FREESYNC_TO_CORE(mod_freesync);
841
842 unsigned int index = map_index_from_stream(core_freesync, stream);
843
844 *user_enable = core_freesync->map[index].user_enable;
845
846 return true;
847}
848
849void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
850 const struct dc_stream **streams, int num_streams)
851{
852 struct core_freesync *core_freesync =
853 MOD_FREESYNC_TO_CORE(mod_freesync);
854
855 unsigned int stream_index, map_index;
856	unsigned int min_frame_duration_in_ns, max_frame_duration_in_ns;
857 struct freesync_state *state;
858
859 for (stream_index = 0; stream_index < num_streams; stream_index++) {
860
861 map_index = map_index_from_stream(core_freesync,
862 streams[stream_index]);
863
864 state = &core_freesync->map[map_index].state;
865
866 if (core_freesync->map[map_index].caps->supported) {
867 /* Update the field rate for new timing */
868 state->nominal_refresh_rate_in_micro_hz = 1000000 *
869 div64_u64(div64_u64((streams[stream_index]->
870 timing.pix_clk_khz * 1000),
871 streams[stream_index]->timing.v_total),
872 streams[stream_index]->timing.h_total);
873
874 /* Update the stream */
875 update_stream(core_freesync, streams[stream_index]);
876
877 /* Determine whether BTR can be supported */
878 min_frame_duration_in_ns = ((unsigned int) (div64_u64(
879 (1000000000ULL * 1000000),
880 state->nominal_refresh_rate_in_micro_hz)));
881
882 max_frame_duration_in_ns = ((unsigned int) (div64_u64(
883 (1000000000ULL * 1000000),
884 core_freesync->map[map_index].caps->min_refresh_in_micro_hz)));
885
886 if (max_frame_duration_in_ns >=
887 2 * min_frame_duration_in_ns)
888 core_freesync->map[map_index].caps->btr_supported = true;
889 else
890 core_freesync->map[map_index].caps->btr_supported = false;
891
892 /* Cache the time variables */
893 state->time.max_render_time_in_us =
894 max_frame_duration_in_ns / 1000;
895 state->time.min_render_time_in_us =
896 min_frame_duration_in_ns / 1000;
897 state->btr.mid_point_in_us =
898 (max_frame_duration_in_ns +
899 min_frame_duration_in_ns) / 2000;
900
901 }
902 }
903
904 /* Program freesync according to current state*/
905 set_freesync_on_streams(core_freesync, streams, num_streams);
906}
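/* Worked example for the refresh rate bookkeeping above (illustrative only;
 * the timing numbers are assumptions, not taken from a real panel): for a
 * 1080p-like timing with pix_clk_khz = 148500, v_total = 1125 and
 * h_total = 2200, nominal_refresh_rate_in_micro_hz becomes
 * 1000000 * ((148500 * 1000 / 1125) / 2200) = 60000000, i.e. 60 Hz.
 * With caps->min_refresh_in_micro_hz = 40000000 (40 Hz), the frame durations
 * are ~16666666 ns (min) and 25000000 ns (max); 25 ms is less than twice
 * 16.7 ms, so btr_supported would be false for such a 40-60 Hz range.
 */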
907
908/* Add the timestamps to the cache and determine whether BTR programming
909 * is required, depending on the times calculated
910 */
911static void update_timestamps(struct core_freesync *core_freesync,
912 const struct dc_stream *stream, unsigned int map_index,
913 unsigned int last_render_time_in_us)
914{
915 struct freesync_state *state = &core_freesync->map[map_index].state;
916
917 state->time.render_times[state->time.render_times_index] =
918 last_render_time_in_us;
919 state->time.render_times_index++;
920
921 if (state->time.render_times_index >= RENDER_TIMES_MAX_COUNT)
922 state->time.render_times_index = 0;
923
924 if (last_render_time_in_us + BTR_EXIT_MARGIN <
925 state->time.max_render_time_in_us) {
926
927 /* Exit Below the Range */
928 if (state->btr.btr_active) {
929
930 state->btr.program_btr = true;
931 state->btr.btr_active = false;
932 state->btr.frame_counter = 0;
933
934 /* Exit Fixed Refresh mode */
935 } else if (state->fixed_refresh.fixed_refresh_active) {
936
937 state->fixed_refresh.program_fixed_refresh = true;
938 state->fixed_refresh.fixed_refresh_active = false;
939
940 }
941
942 } else if (last_render_time_in_us > state->time.max_render_time_in_us) {
943
944 /* Enter Below the Range */
945 if (!state->btr.btr_active &&
946 core_freesync->map[map_index].caps->btr_supported) {
947
948 state->btr.program_btr = true;
949 state->btr.btr_active = true;
950
951 /* Enter Fixed Refresh mode */
952 } else if (!state->fixed_refresh.fixed_refresh_active &&
953 !core_freesync->map[map_index].caps->btr_supported) {
954
955 state->fixed_refresh.program_fixed_refresh = true;
956 state->fixed_refresh.fixed_refresh_active = true;
957
958 }
959 }
960
961 /* When Below the Range is active, must react on every frame */
962 if (state->btr.btr_active)
963 state->btr.program_btr = true;
964}
965
966static void apply_below_the_range(struct core_freesync *core_freesync,
967 const struct dc_stream *stream, unsigned int map_index,
968 unsigned int last_render_time_in_us)
969{
970 unsigned int inserted_frame_duration_in_us = 0;
971 unsigned int mid_point_frames_ceil = 0;
972 unsigned int mid_point_frames_floor = 0;
973 unsigned int frame_time_in_us = 0;
974 unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
975 unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
976 unsigned int frames_to_insert = 0;
977 unsigned int inserted_frame_v_total = 0;
978 unsigned int vmin = 0, vmax = 0;
979 unsigned int min_frame_duration_in_ns = 0;
980 struct freesync_state *state = &core_freesync->map[map_index].state;
981
982 if (!state->btr.program_btr)
983 return;
984
985 state->btr.program_btr = false;
986
987 min_frame_duration_in_ns = ((unsigned int) (div64_u64(
988 (1000000000ULL * 1000000),
989 state->nominal_refresh_rate_in_micro_hz)));
990
991 /* Program BTR */
992
993	/* BTR set to "not active" so disengage */
994	if (!state->btr.btr_active) {
995		/* Restore FreeSync */
996		set_freesync_on_streams(core_freesync, &stream, 1);
997
998	/* BTR set to "active" so engage */
999	} else {
1000
1001
1002 /* Calculate number of midPoint frames that could fit within
1003		 * the render time interval - take the ceil of this value
1004		 */
1005		mid_point_frames_ceil = (last_render_time_in_us +
1006				state->btr.mid_point_in_us - 1) /
1007 state->btr.mid_point_in_us;
1008
1009 if (mid_point_frames_ceil > 0) {
1010
1011 frame_time_in_us = last_render_time_in_us /
1012 mid_point_frames_ceil;
1013 delta_from_mid_point_in_us_1 = (state->btr.mid_point_in_us >
1014 frame_time_in_us) ?
1015 (state->btr.mid_point_in_us - frame_time_in_us):
1016 (frame_time_in_us - state->btr.mid_point_in_us);
1017 }
1018
1019 /* Calculate number of midPoint frames that could fit within
1020		 * the render time interval - take the floor of this value
1021 */
1022 mid_point_frames_floor = last_render_time_in_us /
1023 state->btr.mid_point_in_us;
1024
1025 if (mid_point_frames_floor > 0) {
1026
1027 frame_time_in_us = last_render_time_in_us /
1028 mid_point_frames_floor;
1029 delta_from_mid_point_in_us_2 = (state->btr.mid_point_in_us >
1030 frame_time_in_us) ?
1031 (state->btr.mid_point_in_us - frame_time_in_us):
1032 (frame_time_in_us - state->btr.mid_point_in_us);
1033 }
1034
1035 /* Choose number of frames to insert based on how close it
1036 * can get to the mid point of the variable range.
1037 */
1038 if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2)
1039 frames_to_insert = mid_point_frames_ceil;
1040 else
1041 frames_to_insert = mid_point_frames_floor;
1042
1043 /* Either we've calculated the number of frames to insert,
1044 * or we need to insert min duration frames
1045 */
1046 if (frames_to_insert > 0)
1047 inserted_frame_duration_in_us = last_render_time_in_us /
1048 frames_to_insert;
1049
1050		if (inserted_frame_duration_in_us <
1051				state->time.min_render_time_in_us)
1052			inserted_frame_duration_in_us =
1053				state->time.min_render_time_in_us;
1054
1055
1056 /* We need the v_total_min from capability */
1057 calc_vmin_vmax(core_freesync, stream, &vmin, &vmax);
1058
1059 inserted_frame_v_total = vmin;
1060 if (min_frame_duration_in_ns / 1000)
1061 inserted_frame_v_total = inserted_frame_duration_in_us *
1062 vmin / (min_frame_duration_in_ns / 1000);
1063
1064 /* Set length of inserted frames as v_total_max*/
1065 vmax = inserted_frame_v_total;
1066
1067 /* Program V_TOTAL */
1068 core_freesync->dc->stream_funcs.adjust_vmin_vmax(
1069 core_freesync->dc, &stream,
1070 1, vmin,
1071 vmax);
1072
1073 /* Cache the calculated variables */
1074 state->btr.inserted_frame_duration_in_us =
1075 inserted_frame_duration_in_us;
1076 state->btr.frames_to_insert = frames_to_insert;
1077 state->btr.frame_counter = frames_to_insert;
1078
1079 }
1080}
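/* Worked example for the mid-point selection above (illustrative only; the
 * panel numbers are assumptions): with a 40 Hz minimum refresh and a 144 Hz
 * nominal refresh, max_render_time is 25000 us, min_render_time is ~6944 us
 * and mid_point_in_us is ~15972 us. If the last render took 35000 us (below
 * 40 Hz), the ceil candidate is 3 inserted frames (35000 / 3 = 11666 us,
 * 4306 us from the mid point) and the floor candidate is 2 frames (17500 us,
 * 1528 us from the mid point), so BTR inserts 2 frames of ~17.5 ms each and
 * stretches v_total through the vmin scaling above.
 */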
1081
1082static void apply_fixed_refresh(struct core_freesync *core_freesync,
1083 const struct dc_stream *stream, unsigned int map_index)
1084{
1085 unsigned int vmin = 0, vmax = 0;
1086 struct freesync_state *state = &core_freesync->map[map_index].state;
1087
1088 if (!state->fixed_refresh.program_fixed_refresh)
1089 return;
1090
1091 state->fixed_refresh.program_fixed_refresh = false;
1092
1093 /* Program Fixed Refresh */
1094
1095 /* Fixed Refresh set to "not active" so disengage */
1096 if (!state->fixed_refresh.fixed_refresh_active) {
1097 set_freesync_on_streams(core_freesync, &stream, 1);
1098
1099 /* Fixed Refresh set to "active" so engage (fix to max) */
1100 } else {
1101
1102 calc_vmin_vmax(core_freesync, stream, &vmin, &vmax);
1103
1104 vmax = vmin;
1105
1106 core_freesync->dc->stream_funcs.adjust_vmin_vmax(
1107 core_freesync->dc, &stream,
1108 1, vmin,
1109 vmax);
1110 }
1111}
1112
1113void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
1114 const struct dc_stream **streams, int num_streams,
1115 unsigned int curr_time_stamp_in_us)
1116{
1117 unsigned int stream_index, map_index, last_render_time_in_us = 0;
1118 struct core_freesync *core_freesync =
1119 MOD_FREESYNC_TO_CORE(mod_freesync);
1120
1121 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1122
1123 map_index = map_index_from_stream(core_freesync,
1124 streams[stream_index]);
1125
1126 if (core_freesync->map[map_index].caps->supported) {
1127
1128 last_render_time_in_us = curr_time_stamp_in_us -
1129 core_freesync->map[map_index].state.time.
1130 prev_time_stamp_in_us;
1131
1132 /* Add the timestamps to the cache and determine
1133 * whether BTR program is required
1134 */
1135 update_timestamps(core_freesync, streams[stream_index],
1136 map_index, last_render_time_in_us);
1137
1138 if (core_freesync->map[map_index].state.fullscreen &&
1139 core_freesync->map[map_index].user_enable.
1140 enable_for_gaming) {
1141
1142 if (core_freesync->map[map_index].caps->btr_supported) {
1143
1144 apply_below_the_range(core_freesync,
1145 streams[stream_index], map_index,
1146 last_render_time_in_us);
1147 } else {
1148 apply_fixed_refresh(core_freesync,
1149 streams[stream_index], map_index);
1150 }
1151 }
1152
1153 core_freesync->map[map_index].state.time.
1154 prev_time_stamp_in_us = curr_time_stamp_in_us;
1155 }
1156
1157 }
1158}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_color.h b/drivers/gpu/drm/amd/display/modules/inc/mod_color.h
new file mode 100644
index 000000000000..e54fe2cb8611
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_color.h
@@ -0,0 +1,179 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26
27#ifndef MOD_COLOR_H_
28#define MOD_COLOR_H_
29
30#include "dm_services.h"
31
32struct mod_color {
33 int dummy;
34};
35
36struct color_space_coordinates {
37 unsigned int redX;
38 unsigned int redY;
39 unsigned int greenX;
40 unsigned int greenY;
41 unsigned int blueX;
42 unsigned int blueY;
43 unsigned int whiteX;
44 unsigned int whiteY;
45};
46
47struct gamut_space_coordinates {
48 unsigned int redX;
49 unsigned int redY;
50 unsigned int greenX;
51 unsigned int greenY;
52 unsigned int blueX;
53 unsigned int blueY;
54};
55
56struct gamut_space_entry {
57 unsigned int index;
58 unsigned int redX;
59 unsigned int redY;
60 unsigned int greenX;
61 unsigned int greenY;
62 unsigned int blueX;
63 unsigned int blueY;
64
65 int a0;
66 int a1;
67 int a2;
68 int a3;
69 int gamma;
70};
71
72struct white_point_coodinates {
73 unsigned int whiteX;
74 unsigned int whiteY;
75};
76
77struct white_point_coodinates_entry {
78 unsigned int index;
79 unsigned int whiteX;
80 unsigned int whiteY;
81};
82
83struct color_range {
84 int current;
85 int min;
86 int max;
87};
88
89struct mod_color *mod_color_create(struct dc *dc);
90
91void mod_color_destroy(struct mod_color *mod_color);
92
93bool mod_color_add_sink(struct mod_color *mod_color,
94 const struct dc_sink *sink);
95
96bool mod_color_remove_sink(struct mod_color *mod_color,
97 const struct dc_sink *sink);
98
99bool mod_color_update_gamut_to_stream(struct mod_color *mod_color,
100 const struct dc_stream **streams, int num_streams);
101
102bool mod_color_set_white_point(struct mod_color *mod_color,
103 const struct dc_stream **streams, int num_streams,
104 struct white_point_coodinates *white_point);
105
106bool mod_color_adjust_source_gamut(struct mod_color *mod_color,
107 const struct dc_stream **streams, int num_streams,
108 struct gamut_space_coordinates *input_gamut_coordinates,
109 struct white_point_coodinates *input_white_point_coordinates);
110
111bool mod_color_adjust_destination_gamut(struct mod_color *mod_color,
112 const struct dc_stream **streams, int num_streams,
113 struct gamut_space_coordinates *input_gamut_coordinates,
114 struct white_point_coodinates *input_white_point_coordinates);
115
116bool mod_color_get_user_enable(struct mod_color *mod_color,
117 const struct dc_sink *sink,
118 bool *user_enable);
119
120bool mod_color_set_user_enable(struct mod_color *mod_color,
121 const struct dc_stream **streams, int num_streams,
122 bool user_enable);
123
124bool mod_color_get_custom_color_temperature(struct mod_color *mod_color,
125 const struct dc_sink *sink,
126 int *color_temperature);
127
128bool mod_color_set_custom_color_temperature(struct mod_color *mod_color,
129 const struct dc_stream **streams, int num_streams,
130 int color_temperature);
131
132bool mod_color_get_color_saturation(struct mod_color *mod_color,
133 const struct dc_sink *sink,
134 struct color_range *color_saturation);
135
136bool mod_color_get_color_contrast(struct mod_color *mod_color,
137 const struct dc_sink *sink,
138 struct color_range *color_contrast);
139
140bool mod_color_get_color_brightness(struct mod_color *mod_color,
141 const struct dc_sink *sink,
142 struct color_range *color_brightness);
143
144bool mod_color_get_color_hue(struct mod_color *mod_color,
145 const struct dc_sink *sink,
146 struct color_range *color_hue);
147
148bool mod_color_get_source_gamut(struct mod_color *mod_color,
149 const struct dc_sink *sink,
150 struct color_space_coordinates *source_gamut);
151
152bool mod_color_notify_mode_change(struct mod_color *mod_color,
153 const struct dc_stream **streams, int num_streams);
154
155bool mod_color_set_brightness(struct mod_color *mod_color,
156 const struct dc_stream **streams, int num_streams,
157 int brightness_value);
158
159bool mod_color_set_contrast(struct mod_color *mod_color,
160 const struct dc_stream **streams, int num_streams,
161 int contrast_value);
162
163bool mod_color_set_hue(struct mod_color *mod_color,
164 const struct dc_stream **streams, int num_streams,
165 int hue_value);
166
167bool mod_color_set_saturation(struct mod_color *mod_color,
168 const struct dc_stream **streams, int num_streams,
169 int saturation_value);
170
171bool mod_color_set_preferred_quantization_range(struct mod_color *mod_color,
172 const struct dc_sink *sink,
173 enum dc_quantization_range quantization_range);
174
175bool mod_color_get_preferred_quantization_range(struct mod_color *mod_color,
176 const struct dc_sink *sink,
177 enum dc_quantization_range *quantization_range);
178
179#endif /* MOD_COLOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
new file mode 100644
index 000000000000..7abfe34dc2d9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
54#ifndef MOD_FREESYNC_H_
55#define MOD_FREESYNC_H_
56
57#include "dm_services.h"
58
59struct mod_freesync *mod_freesync_create(struct dc *dc);
60void mod_freesync_destroy(struct mod_freesync *mod_freesync);
61
62struct mod_freesync {
63 int dummy;
64};
65
66enum mod_freesync_state {
67 FREESYNC_STATE_NONE,
68 FREESYNC_STATE_FULLSCREEN,
69 FREESYNC_STATE_STATIC_SCREEN,
70 FREESYNC_STATE_VIDEO
71};
72
73enum mod_freesync_user_enable_mask {
74 FREESYNC_USER_ENABLE_STATIC = 0x1,
75 FREESYNC_USER_ENABLE_VIDEO = 0x2,
76 FREESYNC_USER_ENABLE_GAMING = 0x4
77};
78
79struct mod_freesync_user_enable {
80 bool enable_for_static;
81 bool enable_for_video;
82 bool enable_for_gaming;
83};
84
85struct mod_freesync_caps {
86 bool supported;
87 unsigned int min_refresh_in_micro_hz;
88 unsigned int max_refresh_in_micro_hz;
89
90 bool btr_supported;
91};
92
93struct mod_freesync_params {
94 enum mod_freesync_state state;
95 bool enable;
96 unsigned int update_duration_in_ns;
97 bool windowed_fullscreen;
98};
99
100/*
101 * Add stream to be tracked by module
102 */
103bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
104 const struct dc_stream *stream, struct mod_freesync_caps *caps);
105
106/*
107 * Remove stream to be tracked by module
108 */
109bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
110 const struct dc_stream *stream);
111
112/*
113 * Build additional parameters for dc_stream when creating stream for
114 * sink to support freesync
115 */
116void mod_freesync_update_stream(struct mod_freesync *mod_freesync,
117 struct dc_stream *stream);
118
119/*
120 * Update the freesync state flags for each display and program
121 * freesync accordingly
122 */
123void mod_freesync_update_state(struct mod_freesync *mod_freesync,
124 const struct dc_stream **streams, int num_streams,
125 struct mod_freesync_params *freesync_params);
126
127bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
128 const struct dc_stream *stream,
129 struct mod_freesync_params *freesync_params);
130
131bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
132 const struct dc_stream **streams, int num_streams,
133 struct mod_freesync_user_enable *user_enable);
134
135bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
136 const struct dc_stream *stream,
137 struct mod_freesync_user_enable *user_enable);
138
139void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
140 const struct dc_stream **streams, int num_streams);
141
142void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
143 const struct dc_stream **streams, int num_streams);
144
145void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
146 const struct dc_stream **streams, int num_streams,
147 unsigned int curr_time_stamp);
148
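/* Minimal usage sketch for this interface (illustrative only; the dc, stream
 * and timestamp handling are assumptions and not part of this header):
 *
 *	struct mod_freesync *fs = mod_freesync_create(dc);
 *	struct mod_freesync_caps caps = {
 *		.supported = true,
 *		.min_refresh_in_micro_hz = 40000000,
 *		.max_refresh_in_micro_hz = 144000000,
 *	};
 *	struct mod_freesync_params params = {
 *		.state = FREESYNC_STATE_FULLSCREEN,
 *		.enable = true,
 *	};
 *
 *	mod_freesync_add_stream(fs, stream, &caps);
 *	mod_freesync_update_state(fs, &stream, 1, &params);
 *	// on every flip:
 *	mod_freesync_pre_update_plane_addresses(fs, &stream, 1, now_in_us);
 *	// on every vertical update interrupt:
 *	mod_freesync_handle_v_update(fs, &stream, 1);
 *	mod_freesync_destroy(fs);
 */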
149#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_power.h b/drivers/gpu/drm/amd/display/modules/inc/mod_power.h
new file mode 100644
index 000000000000..a204e8d6cd23
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_power.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef MODULES_INC_MOD_POWER_H_
27#define MODULES_INC_MOD_POWER_H_
28
29#include "dm_services.h"
30
31struct mod_power {
32 int dummy;
33};
34
35/* VariBright related commands */
36enum varibright_command {
37 VariBright_Cmd__SetVBLevel = 0,
38 VariBright_Cmd__UserEnable,
39 VariBright_Cmd__PreDisplayConfigChange,
40 VariBright_Cmd__PostDisplayConfigChange,
41 VariBright_Cmd__SuspendABM,
42 VariBright_Cmd__ResumeABM,
43
44 VariBright_Cmd__Unknown,
45};
46
47/* VariBright settings structure */
48struct varibright_info {
49 enum varibright_command cmd;
50
51 unsigned int level;
52 bool enable;
53 bool activate;
54};
55
56enum dmcu_block_psr_reason {
57 /* This is a bitfield mask */
58 dmcu_block_psr_reason_invalid = 0x0,
59 dmcu_block_psr_reason_vsync_int = 0x1,
60 dmcu_block_psr_reason_shared_primary = 0x2,
61 dmcu_block_psr_reason_unsupported_link_rate = 0x4
62};
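/* Usage sketch (illustrative only): callers OR a reason into the block mask
 * and clear it again when the condition goes away, e.g.
 *	mod_power_block_psr(true, dmcu_block_psr_reason_vsync_int);
 *	...
 *	mod_power_block_psr(false, dmcu_block_psr_reason_vsync_int);
 * mod_power_set_psr_enable() only reaches the hardware once every blocking
 * reason has been cleared.
 */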
63
64struct mod_power *mod_power_create(struct dc *dc);
65
66void mod_power_destroy(struct mod_power *mod_power);
67
68bool mod_power_add_sink(struct mod_power *mod_power,
69 const struct dc_sink *sink);
70
71bool mod_power_remove_sink(struct mod_power *mod_power,
72 const struct dc_sink *sink);
73
74bool mod_power_set_backlight(struct mod_power *mod_power,
75 const struct dc_stream **streams, int num_streams,
76 unsigned int backlight_8bit);
77
78bool mod_power_get_backlight(struct mod_power *mod_power,
79 const struct dc_sink *sink,
80 unsigned int *backlight_8bit);
81
82void mod_power_initialize_backlight_caps
83 (struct mod_power *mod_power);
84
85unsigned int mod_power_backlight_level_percentage_to_signal
86 (struct mod_power *mod_power, unsigned int percentage);
87
88unsigned int mod_power_backlight_level_signal_to_percentage
89 (struct mod_power *mod_power, unsigned int signalLevel8bit);
90
91bool mod_power_get_panel_backlight_boundaries
92 (struct mod_power *mod_power,
93 unsigned int *min_backlight,
94 unsigned int *max_backlight,
95 unsigned int *output_ac_level_percentage,
96 unsigned int *output_dc_level_percentage);
97
98bool mod_power_set_smooth_brightness(struct mod_power *mod_power,
99 const struct dc_sink *sink, bool enable_brightness);
100
101bool mod_power_notify_mode_change(struct mod_power *mod_power,
102 const struct dc_stream *stream);
103
104bool mod_power_varibright_control(struct mod_power *mod_power,
105 struct varibright_info *input_varibright_info);
106
107bool mod_power_block_psr(bool block_enable, enum dmcu_block_psr_reason reason);
108
109bool mod_power_set_psr_enable(struct mod_power *mod_power,
110 bool psr_enable);
111
112#endif /* MODULES_INC_MOD_POWER_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/power/power.c b/drivers/gpu/drm/amd/display/modules/power/power.c
new file mode 100644
index 000000000000..ea07e847da0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power.c
@@ -0,0 +1,784 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "mod_power.h"
27#include "dm_services.h"
28#include "dc.h"
29#include "core_types.h"
30#include "core_dc.h"
31
32#define MOD_POWER_MAX_CONCURRENT_SINKS 32
33#define SMOOTH_BRIGHTNESS_ADJUSTMENT_TIME_IN_MS 500
34
35struct sink_caps {
36 const struct dc_sink *sink;
37};
38
39struct backlight_state {
40 unsigned int backlight;
41 unsigned int frame_ramp;
42 bool smooth_brightness_enabled;
43};
44
45struct core_power {
46 struct mod_power public;
47 struct dc *dc;
48 int num_sinks;
49 struct sink_caps *caps;
50 struct backlight_state *state;
51};
52
53union dmcu_abm_set_bl_params {
54 struct {
55 unsigned int gradual_change : 1; /* [0:0] */
56 unsigned int reserved : 15; /* [15:1] */
57 unsigned int frame_ramp : 16; /* [31:16] */
58 } bits;
59 unsigned int u32All;
60};
61
62/* Backlight cached properties */
63static unsigned int backlight_8bit_lut_array[101];
64static unsigned int ac_level_percentage;
65static unsigned int dc_level_percentage;
66static bool backlight_caps_valid;
67/* we use lazy initialization of backlight capabilities cache */
68static bool backlight_caps_initialized;
69/* AC/DC levels initialized later in separate context */
70static bool backlight_def_levels_valid;
71
72/* ABM cached properties */
73static unsigned int abm_level;
74static bool abm_user_enable;
75static bool abm_active;
76
77/*PSR cached properties*/
78static unsigned int block_psr;
79
80/* Defines the default backlight curve F(x) = A(x*x) + Bx + C.
81 *
82 * The backlight curve should always satisfy F(0) = min and F(100) = max,
83 * so the polynomial coefficients are:
84 * A is 0.0255 - B/100 - min/10000 - (255-max)/10000 = (max - min)/10000 - B/100
85 * B is an adjustable factor used to modify the curve.
86 * A bigger B results in a less concave curve. B range is [0..(max-min)/100].
87 * C is the backlight minimum.
88 */
89static const unsigned int backlight_curve_coeff_a_factor = 10000;
90static const unsigned int backlight_curve_coeff_b = 100;
91static const unsigned int backlight_curve_coeff_b_factor = 100;
92
93/* Minimum and maximum backlight input signal levels */
94static const unsigned int default_min_backlight = 12;
95static const unsigned int default_max_backlight = 255;
96
97/* Other backlight constants */
98static const unsigned int absolute_backlight_max = 255;
99
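/* Worked example of the default curve (illustrative only; it mirrors the
 * default-curve branch in mod_power_initialize_backlight_caps() below):
 * with min = 12, max = 255 and B = 100, delta = max - min = 243,
 * coeffB = 100 and coeffA = delta - coeffB = 143, so
 *	F(0)   = 12
 *	F(50)  = 143 * 2500 / 10000 + 100 * 50 / 100 + 12 = 97
 *	F(100) = 143 + 100 + 12 = 255
 * i.e. F(0) = min and F(100) = max as required.
 */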
100#define MOD_POWER_TO_CORE(mod_power)\
101 container_of(mod_power, struct core_power, public)
102
103static bool check_dc_support(const struct dc *dc)
104{
105 if (dc->stream_funcs.set_backlight == NULL)
106 return false;
107
108 return true;
109}
110
111/* Given a specific dc_sink *, this function finds its equivalent
112 * in the dc_sink array and returns the corresponding index.
113 */
114static unsigned int sink_index_from_sink(struct core_power *core_power,
115 const struct dc_sink *sink)
116{
117 unsigned int index = 0;
118
119 for (index = 0; index < core_power->num_sinks; index++)
120 if (core_power->caps[index].sink == sink)
121 return index;
122
123 /* Could not find sink requested */
124 ASSERT(false);
125 return index;
126}
127
128static unsigned int convertBL8to17(unsigned int backlight_8bit)
129{
130 unsigned int temp_ulong = backlight_8bit * 0x10101;
131 unsigned char temp_uchar =
132 (unsigned char)(((temp_ulong & 0x80) >> 7) & 1);
133
134 temp_ulong = (temp_ulong >> 8) + temp_uchar;
135
136 return temp_ulong;
137}
138
139static uint16_t convertBL8to16(unsigned int backlight_8bit)
140{
141 return (uint16_t)((backlight_8bit * 0x10101) >> 8);
142}
143
144/* This is used when the OS wants to retrieve the current backlight.
145 * We return the 8-bit value to the OS.
146 */
147static unsigned int convertBL17to8(unsigned int backlight_17bit)
148{
149 if (backlight_17bit & 0x10000)
150 return default_max_backlight;
151 else
152 return (backlight_17bit >> 8);
153}
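/* Illustrative round trips through the helpers above (values computed by
 * hand, not taken from hardware): convertBL8to17(0x80) = 0x8081 and
 * convertBL17to8(0x8081) = 0x80, while convertBL8to17(0xFF) = 0x10000,
 * which convertBL17to8() clamps back to default_max_backlight (255).
 */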
154
155struct mod_power *mod_power_create(struct dc *dc)
156{
157 struct core_power *core_power =
158 dm_alloc(sizeof(struct core_power));
159
160 struct core_dc *core_dc = DC_TO_CORE(dc);
161
162 int i = 0;
163
164 if (core_power == NULL)
165 goto fail_alloc_context;
166
167 core_power->caps = dm_alloc(sizeof(struct sink_caps) *
168 MOD_POWER_MAX_CONCURRENT_SINKS);
169
170 if (core_power->caps == NULL)
171 goto fail_alloc_caps;
172
173 for (i = 0; i < MOD_POWER_MAX_CONCURRENT_SINKS; i++)
174 core_power->caps[i].sink = NULL;
175
176 core_power->state = dm_alloc(sizeof(struct backlight_state) *
177 MOD_POWER_MAX_CONCURRENT_SINKS);
178
179 if (core_power->state == NULL)
180 goto fail_alloc_state;
181
182 core_power->num_sinks = 0;
183 backlight_caps_valid = false;
184
185 if (dc == NULL)
186 goto fail_construct;
187
188 core_power->dc = dc;
189
190 if (!check_dc_support(dc))
191 goto fail_construct;
192
193 abm_user_enable = false;
194 abm_active = false;
195
196 return &core_power->public;
197
198fail_construct:
199 dm_free(core_power->state);
200
201fail_alloc_state:
202 dm_free(core_power->caps);
203
204fail_alloc_caps:
205 dm_free(core_power);
206
207fail_alloc_context:
208 return NULL;
209}
210
211
212void mod_power_destroy(struct mod_power *mod_power)
213{
214 if (mod_power != NULL) {
215 int i;
216 struct core_power *core_power =
217 MOD_POWER_TO_CORE(mod_power);
218
219 dm_free(core_power->state);
220
221 for (i = 0; i < core_power->num_sinks; i++)
222 dc_sink_release(core_power->caps[i].sink);
223
224 dm_free(core_power->caps);
225
226 dm_free(core_power);
227 }
228}
229
230bool mod_power_add_sink(struct mod_power *mod_power,
231 const struct dc_sink *sink)
232{
233	struct core_power *core_power =
234		MOD_POWER_TO_CORE(mod_power);
235
236	/* Virtual sinks carry no physical backlight; nothing to track */
237	if (sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
238		return false;
239
240 if (core_power->num_sinks < MOD_POWER_MAX_CONCURRENT_SINKS) {
241 dc_sink_retain(sink);
242 core_power->caps[core_power->num_sinks].sink = sink;
243 core_power->state[core_power->num_sinks].
244 smooth_brightness_enabled = false;
245 core_power->state[core_power->num_sinks].
246 backlight = 100;
247 core_power->num_sinks++;
248 return true;
249 }
250
251 return false;
252}
253
254bool mod_power_remove_sink(struct mod_power *mod_power,
255 const struct dc_sink *sink)
256{
257 int i = 0, j = 0;
258 struct core_power *core_power =
259 MOD_POWER_TO_CORE(mod_power);
260
261 for (i = 0; i < core_power->num_sinks; i++) {
262 if (core_power->caps[i].sink == sink) {
263 /* To remove this sink, shift everything after down */
264 for (j = i; j < core_power->num_sinks - 1; j++) {
265 core_power->caps[j].sink =
266 core_power->caps[j + 1].sink;
267
268 memcpy(&core_power->state[j],
269 &core_power->state[j + 1],
270 sizeof(struct backlight_state));
271 }
272 core_power->num_sinks--;
273 dc_sink_release(sink);
274 return true;
275 }
276 }
277 return false;
278}
279
280bool mod_power_set_backlight(struct mod_power *mod_power,
281 const struct dc_stream **streams, int num_streams,
282 unsigned int backlight_8bit)
283{
284 struct core_power *core_power =
285 MOD_POWER_TO_CORE(mod_power);
286
287 unsigned int frame_ramp = 0;
288
289 unsigned int stream_index, sink_index, vsync_rate_hz;
290
291 union dmcu_abm_set_bl_params params;
292
293 for (stream_index = 0; stream_index < num_streams; stream_index++) {
294		/* Virtual sinks are never added to the sink array, so skip
295		 * them here; writing state[] for them would index with an
296		 * uninitialized sink_index.
297		 */
298		if (streams[stream_index]->sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
299			continue;
300
301 sink_index = sink_index_from_sink(core_power,
302 streams[stream_index]->sink);
303
304 vsync_rate_hz = div64_u64(div64_u64((streams[stream_index]->
305 timing.pix_clk_khz * 1000),
306 streams[stream_index]->timing.v_total),
307 streams[stream_index]->timing.h_total);
308
309 core_power->state[sink_index].backlight = backlight_8bit;
310
311 if (core_power->state[sink_index].smooth_brightness_enabled)
312 frame_ramp = ((vsync_rate_hz *
313 SMOOTH_BRIGHTNESS_ADJUSTMENT_TIME_IN_MS) + 500)
314 / 1000;
315 else
316 frame_ramp = 0;
317
318 core_power->state[sink_index].frame_ramp = frame_ramp;
319 }
320
321 params.u32All = 0;
322 params.bits.gradual_change = (frame_ramp > 0);
323 params.bits.frame_ramp = frame_ramp;
324
325 core_power->dc->stream_funcs.set_backlight
326 (core_power->dc, backlight_8bit, params.u32All, streams[0]);
327
328 return true;
329}
330
331bool mod_power_get_backlight(struct mod_power *mod_power,
332 const struct dc_sink *sink,
333 unsigned int *backlight_8bit)
334{
335	struct core_power *core_power =
336		MOD_POWER_TO_CORE(mod_power);
337	unsigned int sink_index;
338
339	if (sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
340		return false;
341
342	sink_index = sink_index_from_sink(core_power, sink);
343 *backlight_8bit = core_power->state[sink_index].backlight;
344
345 return true;
346}
347
348/* Build the backlight LUT from ACPI caps; fall back to the default curve. */
349void mod_power_initialize_backlight_caps(struct mod_power
350 *mod_power)
351{
352 struct core_power *core_power =
353 MOD_POWER_TO_CORE(mod_power);
354 struct core_dc *core_dc = DC_TO_CORE(core_power->dc);
355	unsigned int i;
356	struct dm_acpi_atif_backlight_caps *pExtCaps = NULL;
357	bool customCurvePresent = false;
358	bool customMinMaxPresent = false;
359	bool customDefLevelsPresent = false;
360
361	backlight_caps_initialized = true;
362
363
364 /* Allocate memory for ATIF output
365 * (do not want to use 256 bytes on the stack)
366 */
367 pExtCaps = (struct dm_acpi_atif_backlight_caps *)
368 (dm_alloc(sizeof(struct dm_acpi_atif_backlight_caps)));
369 if (pExtCaps == NULL)
370 return;
371
372 /* Retrieve ACPI extended brightness caps */
373 if (dm_query_extended_brightness_caps
374 (core_dc->ctx, AcpiDisplayType_LCD1, pExtCaps)) {
375 ac_level_percentage = pExtCaps->acLevelPercentage;
376 dc_level_percentage = pExtCaps->dcLevelPercentage;
377 customMinMaxPresent = true;
378 customDefLevelsPresent = true;
379 customCurvePresent = (pExtCaps->numOfDataPoints > 0);
380
381 ASSERT(pExtCaps->numOfDataPoints <= 99);
382 } else {
383 dm_free(pExtCaps);
384 return;
385 }
386
387 if (customMinMaxPresent)
388 backlight_8bit_lut_array[0] = pExtCaps->minInputSignal;
389 else
390 backlight_8bit_lut_array[0] = default_min_backlight;
391
392 if (customMinMaxPresent)
393 backlight_8bit_lut_array[100] = pExtCaps->maxInputSignal;
394 else
395 backlight_8bit_lut_array[100] = default_max_backlight;
396
397 ASSERT(backlight_8bit_lut_array[100] <= absolute_backlight_max);
398 ASSERT(backlight_8bit_lut_array[0] <=
399 backlight_8bit_lut_array[100]);
400
401 /* Just to make sure we use valid values */
402 if (backlight_8bit_lut_array[100] > absolute_backlight_max)
403 backlight_8bit_lut_array[100] = absolute_backlight_max;
404 if (backlight_8bit_lut_array[0] > backlight_8bit_lut_array[100]) {
405 unsigned int swap;
406
407 swap = backlight_8bit_lut_array[0];
408 backlight_8bit_lut_array[0] = backlight_8bit_lut_array[100];
409 backlight_8bit_lut_array[100] = swap;
410 }
411
412 /* Build backlight translation table for custom curve */
413 if (customCurvePresent) {
414 unsigned int index = 1;
415 unsigned int numOfDataPoints =
416 (pExtCaps->numOfDataPoints <= 99 ?
417 pExtCaps->numOfDataPoints : 99);
418
419 /* Filling translation table from data points -
420 * between every two provided data points we
421		 * linearly interpolate the missing values
422 */
423 for (i = 0; i < numOfDataPoints; i++) {
424			/* Clamp signal level between min and max
425			 * (since min and max might come from another
426			 * source, such as the registry)
427 */
428 unsigned int luminance =
429 pExtCaps->dataPoints[i].luminance;
430 unsigned int signalLevel =
431 pExtCaps->dataPoints[i].signalLevel;
432
433 if (signalLevel < backlight_8bit_lut_array[0])
434 signalLevel = backlight_8bit_lut_array[0];
435 if (signalLevel > backlight_8bit_lut_array[100])
436 signalLevel = backlight_8bit_lut_array[100];
437
438			/* Linearly interpolate missing values */
439 if (index < luminance) {
440 unsigned int baseValue =
441 backlight_8bit_lut_array[index-1];
442 unsigned int deltaSignal =
443 signalLevel - baseValue;
444 unsigned int deltaLuma =
445 luminance - index + 1;
446 unsigned int step = deltaSignal;
447
448 for (; index < luminance; index++) {
449 backlight_8bit_lut_array[index] =
450 baseValue + (step / deltaLuma);
451 step += deltaSignal;
452 }
453 }
454
455 /* Now [index == luminance],
456 * so we can add data point to the translation table
457 */
458 backlight_8bit_lut_array[index++] = signalLevel;
459 }
460
461 /* Complete the final segment of interpolation -
462 * between last datapoint and maximum value
463 */
464 if (index < 100) {
465 unsigned int baseValue =
466 backlight_8bit_lut_array[index-1];
467 unsigned int deltaSignal =
468 backlight_8bit_lut_array[100] -
469 baseValue;
470 unsigned int deltaLuma = 100 - index + 1;
471 unsigned int step = deltaSignal;
472
473 for (; index < 100; index++) {
474 backlight_8bit_lut_array[index] =
475 baseValue + (step / deltaLuma);
476 step += deltaSignal;
477 }
478 }
479 /* Build backlight translation table based on default curve */
480 } else {
481 unsigned int delta =
482 backlight_8bit_lut_array[100] -
483 backlight_8bit_lut_array[0];
484 unsigned int coeffC = backlight_8bit_lut_array[0];
485 unsigned int coeffB =
486 (backlight_curve_coeff_b < delta ?
487 backlight_curve_coeff_b : delta);
488 unsigned int coeffA = delta - coeffB; /* coeffB is B*100 */
489
490 for (i = 1; i < 100; i++) {
491 backlight_8bit_lut_array[i] =
492 (coeffA * i * i) /
493 backlight_curve_coeff_a_factor +
494 (coeffB * i) /
495 backlight_curve_coeff_b_factor +
496 coeffC;
497 }
498 }
499
500 if (pExtCaps != NULL)
501 dm_free(pExtCaps);
502
503 /* Successfully initialized */
504 backlight_caps_valid = true;
505 backlight_def_levels_valid = customDefLevelsPresent;
506}
507
508unsigned int mod_power_backlight_level_percentage_to_signal(
509 struct mod_power *mod_power, unsigned int percentage)
510{
511 /* Do lazy initialization of backlight capabilities*/
512 if (!backlight_caps_initialized)
513 mod_power_initialize_backlight_caps(mod_power);
514
515 /* Since the translation table is indexed by percentage,
516 * we simply return backlight value at given percent
517 */
518 if (backlight_caps_valid && percentage <= 100)
519 return backlight_8bit_lut_array[percentage];
520
521 return -1;
522}
523
524unsigned int mod_power_backlight_level_signal_to_percentage(
525 struct mod_power *mod_power,
526 unsigned int signalLevel8bit)
527{
528 unsigned int invalid_backlight = (unsigned int)(-1);
529 /* Do lazy initialization of backlight capabilities */
530 if (!backlight_caps_initialized)
531 mod_power_initialize_backlight_caps(mod_power);
532
533	/* If the custom curve cannot convert to a differentiated value near
534	 * the minimum, report 0 for the minimum signal in order to pass the
535	 * "Dimmed" setting in the HCK brightness2 tests.
536 */
537 if (signalLevel8bit <= backlight_8bit_lut_array[0])
538 return 0;
539
540	/* Since the translation table is indexed by percentage,
541	 * we need to do a binary search over the array.
542	 * Another option would be to guess the entry based on a linear
543	 * distribution and then do a linear search in the correct direction.
544	 */
545 if (backlight_caps_valid && signalLevel8bit <=
546 absolute_backlight_max) {
547 unsigned int min = 0;
548 unsigned int max = 100;
549 unsigned int mid = invalid_backlight;
550
551 while (max >= min) {
552 mid = (min + max) / 2; /* floor of half range */
553
554 if (backlight_8bit_lut_array[mid] < signalLevel8bit)
555 min = mid + 1;
556 else if (backlight_8bit_lut_array[mid] >
557 signalLevel8bit)
558 max = mid - 1;
559 else
560 break;
561
562 if (max == 0 || max == 1)
563 return invalid_backlight;
564 }
565 return mid;
566 }
567
568 return invalid_backlight;
569}
570
571
572bool mod_power_get_panel_backlight_boundaries(
573 struct mod_power *mod_power,
574 unsigned int *min_backlight,
575 unsigned int *max_backlight,
576 unsigned int *output_ac_level_percentage,
577 unsigned int *output_dc_level_percentage)
578{
579 /* Do lazy initialization of backlight capabilities */
580 if (!backlight_caps_initialized)
581 mod_power_initialize_backlight_caps(mod_power);
582
583 /* If cache was successfully updated,
584 * copy the values to output structure and return success
585 */
586 if (backlight_caps_valid) {
587 *min_backlight = backlight_8bit_lut_array[0];
588 *max_backlight = backlight_8bit_lut_array[100];
589
590 *output_ac_level_percentage = ac_level_percentage;
591 *output_dc_level_percentage = dc_level_percentage;
592
593 return true;
594 }
595
596 return false;
597}
598
599bool mod_power_set_smooth_brightness(struct mod_power *mod_power,
600 const struct dc_sink *sink, bool enable_brightness)
601{
602	struct core_power *core_power =
603		MOD_POWER_TO_CORE(mod_power);
604	unsigned int sink_index;
605
606	if (sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
607		return false;
608
609	sink_index = sink_index_from_sink(core_power, sink);
610	core_power->state[sink_index].smooth_brightness_enabled = enable_brightness;
611 return true;
612}
613
614bool mod_power_notify_mode_change(struct mod_power *mod_power,
615 const struct dc_stream *stream)
616{
617	struct core_power *core_power =
618		MOD_POWER_TO_CORE(mod_power);
619	unsigned int sink_index;
620	unsigned int frame_ramp;
621	union dmcu_abm_set_bl_params params;
622
623	if (stream->sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
624		return false;
625
626	sink_index = sink_index_from_sink(core_power, stream->sink);
627	frame_ramp = core_power->state[sink_index].frame_ramp;
628 params.u32All = 0;
629 params.bits.gradual_change = (frame_ramp > 0);
630 params.bits.frame_ramp = frame_ramp;
631
632 core_power->dc->stream_funcs.set_backlight
633 (core_power->dc,
634 core_power->state[sink_index].backlight,
635 params.u32All, stream);
636
637 core_power->dc->stream_funcs.setup_psr
638 (core_power->dc, stream);
639
640 return true;
641}
642
643
644static bool mod_power_abm_feature_enable(struct mod_power
645 *mod_power, bool enable)
646{
647 struct core_power *core_power =
648 MOD_POWER_TO_CORE(mod_power);
649 if (abm_user_enable == enable)
650 return true;
651
652 abm_user_enable = enable;
653
654 if (enable) {
655 if (abm_level != 0 && abm_active)
656 core_power->dc->stream_funcs.set_abm_level
657 (core_power->dc, abm_level);
658 } else {
659 if (abm_level != 0 && abm_active) {
660 abm_level = 0;
661 core_power->dc->stream_funcs.set_abm_level
662 (core_power->dc, abm_level);
663 }
664 }
665
666 return true;
667}
668
669static bool mod_power_abm_activate(struct mod_power
670 *mod_power, bool activate)
671{
672 struct core_power *core_power =
673 MOD_POWER_TO_CORE(mod_power);
674 if (abm_active == activate)
675 return true;
676
677 abm_active = activate;
678
679 if (activate) {
680 if (abm_level != 0 && abm_user_enable)
681 core_power->dc->stream_funcs.set_abm_level
682 (core_power->dc, abm_level);
683 } else {
684 if (abm_level != 0 && abm_user_enable) {
685 abm_level = 0;
686 core_power->dc->stream_funcs.set_abm_level
687 (core_power->dc, abm_level);
688 }
689 }
690
691 return true;
692}
693
694static bool mod_power_abm_set_level(struct mod_power *mod_power,
695 unsigned int level)
696{
697 struct core_power *core_power =
698 MOD_POWER_TO_CORE(mod_power);
699 if (abm_level == level)
700 return true;
701
702 if (abm_active && abm_user_enable && level == 0)
703 core_power->dc->stream_funcs.set_abm_level
704 (core_power->dc, 0);
705 else if (abm_active && abm_user_enable && level != 0)
706 core_power->dc->stream_funcs.set_abm_level
707 (core_power->dc, level);
708
709 abm_level = level;
710
711 return true;
712}
713
714bool mod_power_varibright_control(struct mod_power *mod_power,
715 struct varibright_info *input_varibright_info)
716{
717 switch (input_varibright_info->cmd) {
718 case VariBright_Cmd__SetVBLevel:
719 {
720 /* Set VariBright user level. */
721 mod_power_abm_set_level(mod_power,
722 input_varibright_info->level);
723 }
724 break;
725
726 case VariBright_Cmd__UserEnable:
727 {
728 /* Set VariBright user enable state. */
729 mod_power_abm_feature_enable(mod_power,
730 input_varibright_info->enable);
731 }
732 break;
733
734 case VariBright_Cmd__PostDisplayConfigChange:
735 {
736 /* Set VariBright user level. */
737 mod_power_abm_set_level(mod_power,
738 input_varibright_info->level);
739
740 /* Set VariBright user enable state. */
741 mod_power_abm_feature_enable(mod_power,
742 input_varibright_info->enable);
743
744 /* Set VariBright activate based on power state. */
745 mod_power_abm_activate(mod_power,
746 input_varibright_info->activate);
747 }
748 break;
749
750 default:
751 {
752 return false;
753 }
754 break;
755 }
756
757 return true;
758}
759
760bool mod_power_block_psr(bool block_enable, enum dmcu_block_psr_reason reason)
761{
762 if (block_enable)
763 block_psr |= reason;
764 else
765 block_psr &= ~reason;
766
767 return true;
768}
769
770
771bool mod_power_set_psr_enable(struct mod_power *mod_power,
772 bool psr_enable)
773{
774 struct core_power *core_power =
775 MOD_POWER_TO_CORE(mod_power);
776
777 if (block_psr == 0)
778 return core_power->dc->stream_funcs.set_psr_enable
779 (core_power->dc, psr_enable);
780
781 return false;
782}
783
784