author	Dave Airlie <airlied@redhat.com>	2018-07-09 20:57:02 -0400
committer	Dave Airlie <airlied@redhat.com>	2018-07-09 20:57:08 -0400
commit	ba7ca97d73b437eec883eba785495b8c9759b5f2 (patch)
tree	1fa28063ba726d21af566796751805a3a4db21cc /drivers/gpu/drm/amd
parent	61a3a9d6c9c9a017decadb56669b66066612d728 (diff)
parent	43911fb68b19f7c37ab69eff8e6a3c1370bc0cb5 (diff)
Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next
More features for 4.19:
- Use core pcie functionality rather than duplicating our own for pcie gens and lanes
- Scheduler function naming cleanups
- More documentation
- Reworked DC/Powerplay interfaces to improve power savings
- Initial stutter mode support for RV (power feature)
- Vega12 powerplay updates
- GFXOFF fixes
- Misc fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180705221447.2807-1-alexander.deucher@amd.com
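For context on the first item: the "core pcie functionality" is the PCI core's capability helpers pcie_get_speed_cap() and pcie_get_width_cap(), adopted in amdgpu_device_get_pcie_info() below in place of the old drm_pcie_get_speed_cap_mask()/drm_pcie_get_max_link_width() wrappers. A minimal sketch of the query pattern; the reporting function is illustrative, not part of this series:

	#include <linux/pci.h>

	/* Hypothetical helper: report a device's PCIe speed/width caps. */
	static void example_report_pcie_caps(struct pci_dev *pdev)
	{
		enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
		enum pcie_link_width width = pcie_get_width_cap(pdev);

		if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
			dev_info(&pdev->dev, "PCIe caps unknown, using defaults\n");
		else
			dev_info(&pdev->dev, "PCIe speed cap %d, width cap x%d\n",
				 speed, width);
	}

As the amdgpu_device.c hunks below show, the driver queries both the asic (adev->pdev) and the upstream bridge (bus->self) and folds the results into the CAIL_* mask bits.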
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 165
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 241
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 61
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 170
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h (renamed from drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h) | 13
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 535
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 324
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 196
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 218
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 194
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 718
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 93
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 175
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 362
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 315
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_defs.h | 46
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_id.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 8
-rw-r--r--  drivers/gpu/drm/amd/include/amd_pcie.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 46
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/dm_pp_interface.h | 37
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 100
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 96
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 42
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 1055
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 23
121 files changed, 4136 insertions(+), 2761 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 71b9b861f60e..8a440b9fa0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -190,6 +190,7 @@ struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -683,8 +684,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
 
@@ -930,6 +931,11 @@ struct amdgpu_ngg {
 	bool init;
 };
 
+struct sq_work {
+	struct work_struct work;
+	unsigned ih_data;
+};
+
 struct amdgpu_gfx {
 	struct mutex gpu_clock_mutex;
 	struct amdgpu_gfx_config config;
@@ -970,6 +976,8 @@ struct amdgpu_gfx {
 	struct amdgpu_irq_src priv_inst_irq;
 	struct amdgpu_irq_src cp_ecc_error_irq;
 	struct amdgpu_irq_src sq_irq;
+	struct sq_work sq_work;
+
 	/* gfx status */
 	uint32_t gfx_current_status;
 	/* ce ram size*/
@@ -1271,43 +1279,6 @@ struct amdgpu_vram_scratch {
 /*
  * ACPI
  */
-struct amdgpu_atif_notification_cfg {
-	bool enabled;
-	int command_code;
-};
-
-struct amdgpu_atif_notifications {
-	bool display_switch;
-	bool expansion_mode_change;
-	bool thermal_state;
-	bool forced_power_state;
-	bool system_power_state;
-	bool display_conf_change;
-	bool px_gfx_switch;
-	bool brightness_change;
-	bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
-	bool system_params;
-	bool sbios_requests;
-	bool select_active_disp;
-	bool lid_state;
-	bool get_tv_standard;
-	bool set_tv_standard;
-	bool get_panel_expansion_mode;
-	bool set_panel_expansion_mode;
-	bool temperature_change;
-	bool graphics_device_types;
-};
-
-struct amdgpu_atif {
-	struct amdgpu_atif_notifications notifications;
-	struct amdgpu_atif_functions functions;
-	struct amdgpu_atif_notification_cfg notification_cfg;
-	struct amdgpu_encoder *encoder_for_bl;
-};
-
 struct amdgpu_atcs_functions {
 	bool get_ext_state;
 	bool pcie_perf_req;
@@ -1468,7 +1439,7 @@ struct amdgpu_device {
 #if defined(CONFIG_DEBUG_FS)
 	struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
-	struct amdgpu_atif atif;
+	struct amdgpu_atif *atif;
 	struct amdgpu_atcs atcs;
 	struct mutex srbm_mutex;
 	/* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1896,6 +1867,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
 static inline bool amdgpu_has_atpx(void) { return false; }
 #endif
 
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
 /*
  * KMS
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 8fa850a070e0..0d8c3fc6eace 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -34,6 +34,45 @@
 #include "amd_acpi.h"
 #include "atom.h"
 
+struct amdgpu_atif_notification_cfg {
+	bool enabled;
+	int command_code;
+};
+
+struct amdgpu_atif_notifications {
+	bool display_switch;
+	bool expansion_mode_change;
+	bool thermal_state;
+	bool forced_power_state;
+	bool system_power_state;
+	bool display_conf_change;
+	bool px_gfx_switch;
+	bool brightness_change;
+	bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+	bool system_params;
+	bool sbios_requests;
+	bool select_active_disp;
+	bool lid_state;
+	bool get_tv_standard;
+	bool set_tv_standard;
+	bool get_panel_expansion_mode;
+	bool set_panel_expansion_mode;
+	bool temperature_change;
+	bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+	acpi_handle handle;
+
+	struct amdgpu_atif_notifications notifications;
+	struct amdgpu_atif_functions functions;
+	struct amdgpu_atif_notification_cfg notification_cfg;
+	struct amdgpu_encoder *encoder_for_bl;
+};
+
 /* Call the ATIF method
  */
 /**
@@ -46,8 +85,9 @@
  * Executes the requested ATIF function (all asics).
  * Returns a pointer to the acpi output buffer.
  */
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
-					   struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+					   int function,
+					   struct acpi_buffer *params)
 {
 	acpi_status status;
 	union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
 		atif_arg_elements[1].integer.value = 0;
 	}
 
-	status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+	status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+				      &buffer);
 
 	/* Fail only if calling the method fails and ATIF is supported */
 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
  * (all asics).
  * returns 0 on success, error on failure.
  */
-static int amdgpu_atif_verify_interface(acpi_handle handle,
-					struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
 {
 	union acpi_object *info;
 	struct atif_verify_interface output;
 	size_t size;
 	int err = 0;
 
-	info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+	info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
 	if (!info)
 		return -EIO;
 
@@ -176,6 +216,35 @@ out:
 	return err;
 }
 
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+	acpi_handle handle = NULL;
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+	acpi_status status;
+
+	/* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only
+	 * systems, ATIF is in the dGPU's namespace.
+	 */
+	status = acpi_get_handle(dhandle, "ATIF", &handle);
+	if (ACPI_SUCCESS(status))
+		goto out;
+
+	if (amdgpu_has_atpx()) {
+		status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+					 &handle);
+		if (ACPI_SUCCESS(status))
+			goto out;
+	}
+
+	DRM_DEBUG_DRIVER("No ATIF handle found\n");
+	return NULL;
+out:
+	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+	DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+	return handle;
+}
+
 /**
  * amdgpu_atif_get_notification_params - determine notify configuration
  *
@@ -188,15 +257,16 @@ out:
  * where n is specified in the result if a notifier is used.
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
-					       struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
 {
 	union acpi_object *info;
+	struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
 	struct atif_system_params params;
 	size_t size;
 	int err = 0;
 
-	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+	info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+				NULL);
 	if (!info) {
 		err = -EIO;
 		goto out;
@@ -250,14 +320,15 @@ out:
  * (all asics).
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
 					  struct atif_sbios_requests *req)
 {
 	union acpi_object *info;
 	size_t size;
 	int count = 0;
 
-	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+	info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+				NULL);
 	if (!info)
 		return -EIO;
 
@@ -290,11 +361,10 @@ out:
  * Returns NOTIFY code
  */
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
 			       struct acpi_bus_event *event)
 {
-	struct amdgpu_atif *atif = &adev->atif;
+	struct amdgpu_atif *atif = adev->atif;
 	struct atif_sbios_requests req;
-	acpi_handle handle;
 	int count;
 
 	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
 		return NOTIFY_DONE;
 
-	if (!atif->notification_cfg.enabled ||
+	if (!atif ||
+	    !atif->notification_cfg.enabled ||
 	    event->type != atif->notification_cfg.command_code)
 		/* Not our event */
 		return NOTIFY_DONE;
 
 	/* Check pending SBIOS requests */
-	handle = ACPI_HANDLE(&adev->pdev->dev);
-	count = amdgpu_atif_get_sbios_requests(handle, &req);
+	count = amdgpu_atif_get_sbios_requests(atif, &req);
 
 	if (count <= 0)
 		return NOTIFY_DONE;
@@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
  */
 int amdgpu_acpi_init(struct amdgpu_device *adev)
 {
-	acpi_handle handle;
-	struct amdgpu_atif *atif = &adev->atif;
+	acpi_handle handle, atif_handle;
+	struct amdgpu_atif *atif;
 	struct amdgpu_atcs *atcs = &adev->atcs;
 	int ret;
 
@@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
 	}
 
+	/* Probe for ATIF, and initialize it if found */
+	atif_handle = amdgpu_atif_probe_handle(handle);
+	if (!atif_handle)
+		goto out;
+
+	atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+	if (!atif) {
+		DRM_WARN("Not enough memory to initialize ATIF\n");
+		goto out;
+	}
+	atif->handle = atif_handle;
+
 	/* Call the ATIF method */
-	ret = amdgpu_atif_verify_interface(handle, atif);
+	ret = amdgpu_atif_verify_interface(atif);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+		kfree(atif);
 		goto out;
 	}
+	adev->atif = atif;
 
 	if (atif->notifications.brightness_change) {
 		struct drm_encoder *tmp;
@@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 	}
 
 	if (atif->functions.system_params) {
-		ret = amdgpu_atif_get_notification_params(handle,
-							  &atif->notification_cfg);
+		ret = amdgpu_atif_get_notification_params(atif);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
 					 ret);
@@ -720,4 +803,6 @@ out:
 void amdgpu_acpi_fini(struct amdgpu_device *adev)
 {
 	unregister_acpi_notifier(&adev->acpi_nb);
+	if (adev->atif)
+		kfree(adev->atif);
 }
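The refactor above makes struct amdgpu_atif private to amdgpu_acpi.c and keys it on an explicit acpi_handle. A minimal sketch of the ACPI lookup it rests on (dhandle is a placeholder device handle; note that once a handle points at the ATIF method itself, evaluation passes a NULL pathname, as in the acpi_evaluate_object() hunk above):

	#include <linux/acpi.h>

	/* Hypothetical probe: find an ATIF method under a device handle. */
	static acpi_handle example_find_atif(acpi_handle dhandle)
	{
		acpi_handle method = NULL;

		/* Look up "ATIF" relative to the device's namespace node. */
		if (ACPI_SUCCESS(acpi_get_handle(dhandle, "ATIF", &method)))
			return method;
		return NULL;	/* caller may fall back, e.g. to the ATPX namespace */
	}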
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index daa06e7c5bb7..b33f1680c9a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -32,7 +32,7 @@ struct amdgpu_atpx_functions {
 	bool switch_start;
 	bool switch_end;
 	bool disp_connectors_mapping;
-	bool disp_detetion_ports;
+	bool disp_detection_ports;
 };
 
 struct amdgpu_atpx {
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
 	return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
 }
 
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+	return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -156,7 +162,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
 	f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
 	f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
 	f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
-	f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+	f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index e950730f1933..693ec5ea4950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		    (adev->pdev->revision == 0x81) ||
 		    (adev->pdev->device == 0x665f)) {
 			info->is_kicker = true;
-			strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+			strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
 		} else {
-			strcpy(fw_name, "radeon/bonaire_smc.bin");
+			strcpy(fw_name, "amdgpu/bonaire_smc.bin");
 		}
 		break;
 	case CHIP_HAWAII:
 		if (adev->pdev->revision == 0x80) {
 			info->is_kicker = true;
-			strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+			strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
 		} else {
-			strcpy(fw_name, "radeon/hawaii_smc.bin");
+			strcpy(fw_name, "amdgpu/hawaii_smc.bin");
 		}
 		break;
 	case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 64b3a1ed04dc..0120b24fae1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_fini(&adev->rings[j]->sched,
+		drm_sched_entity_destroy(&adev->rings[j]->sched,
 				      &ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
@@ -178,7 +178,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
 
-		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
 				      &ctx->rings[i].entity);
 	}
 
@@ -444,7 +444,7 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 	idr_init(&mgr->ctx_handles);
 }
 
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
@@ -466,14 +466,14 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
 					  &ctx->rings[i].entity, max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);
 }
 
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
@@ -492,7 +492,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
 			continue;
 
 		if (kref_read(&ctx->refcount) == 1)
-			drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+			drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
 					  &ctx->rings[i].entity);
 		else
 			DRM_ERROR("ctx %p is still alive\n", ctx);
@@ -506,7 +506,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	struct idr *idp;
 	uint32_t id;
 
-	amdgpu_ctx_mgr_entity_cleanup(mgr);
+	amdgpu_ctx_mgr_entity_fini(mgr);
 
 	idp = &mgr->ctx_handles;
 
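These call-site changes follow the scheduler naming cleanup mentioned in the merge message: drm_sched_entity_do_release() became drm_sched_entity_flush(), drm_sched_entity_cleanup() became drm_sched_entity_fini(), and the old drm_sched_entity_fini() (flush plus teardown) became drm_sched_entity_destroy(). A minimal sketch of the two-phase teardown under the new names, with signatures inferred from the call sites above; the wrapper itself is illustrative:

	#include <drm/gpu_scheduler.h>
	#include <linux/jiffies.h>

	/* Hypothetical wrapper: flush outstanding jobs, then tear down. */
	static void example_entity_teardown(struct drm_gpu_scheduler *sched,
					    struct drm_sched_entity *entity)
	{
		long timeout = msecs_to_jiffies(100);	/* arbitrary example budget */

		drm_sched_entity_flush(sched, entity, timeout);	/* was _do_release() */
		drm_sched_entity_fini(sched, entity);		/* was _cleanup() */
	}

Where the flush-then-fini pair is always wanted, drm_sched_entity_destroy(sched, entity) performs both in one call, as the amdgpu_ctx_do_release() hunk uses it.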
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d82d0d314285..9883fa9bb41b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1076,7 +1076,7 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
 /**
  * amdgpu_device_ip_set_clockgating_state - set the CG state
  *
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
  * @state: clockgating state (gate or ungate)
  *
@@ -1110,7 +1110,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
 /**
  * amdgpu_device_ip_set_powergating_state - set the PG state
  *
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
  * @state: powergating state (gate or ungate)
  *
@@ -1221,7 +1221,7 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
  *
  * @adev: amdgpu_device pointer
- * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
  *
  * Returns a pointer to the hardware IP block structure
  * if it exists for the asic, otherwise NULL.
@@ -1707,10 +1707,6 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 	if (amdgpu_emu_mode == 1)
 		return 0;
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
-
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -1730,17 +1726,34 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 		}
 	}
 
-	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
-		/* enable gfx powergating */
-		amdgpu_device_ip_set_powergating_state(adev,
-						       AMD_IP_BLOCK_TYPE_GFX,
-						       AMD_PG_STATE_GATE);
-		/* enable gfxoff */
-		amdgpu_device_ip_set_powergating_state(adev,
-						       AMD_IP_BLOCK_TYPE_SMC,
-						       AMD_PG_STATE_GATE);
-	}
+	return 0;
+}
+
+static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+{
+	int i = 0, r;
 
+	if (amdgpu_emu_mode == 1)
+		return 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		/* skip CG for VCE/UVD, it's handled specially */
+		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
+			/* enable powergating to save power */
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+										     AMD_PG_STATE_GATE);
+			if (r) {
+				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				return r;
+			}
+		}
+	}
 	return 0;
 }
 
@@ -1774,6 +1787,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		}
 	}
 
+	amdgpu_device_ip_late_set_cg_state(adev);
+	amdgpu_device_ip_late_set_pg_state(adev);
+
 	queue_delayed_work(system_wq, &adev->late_init_work,
 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 
@@ -1812,6 +1828,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
+		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
@@ -1900,7 +1918,11 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, late_init_work.work);
-	amdgpu_device_ip_late_set_cg_state(adev);
+	int r;
+
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
 }
 
 /**
@@ -1921,12 +1943,6 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_request_full_gpu(adev, false);
 
-	/* ungate SMC block powergating */
-	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
-		amdgpu_device_ip_set_powergating_state(adev,
-						       AMD_IP_BLOCK_TYPE_SMC,
-						       AMD_PG_STATE_UNGATE);
-
 	/* ungate SMC block first */
 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 						   AMD_CG_STATE_UNGATE);
@@ -1934,6 +1950,10 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
 	}
 
+	/* call smu to disable gfx off feature first when suspend */
+	if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -2209,7 +2229,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
  * amdgpu_device_init - initialize the driver
  *
  * @adev: amdgpu_device pointer
- * @pdev: drm dev pointer
+ * @ddev: drm dev pointer
  * @pdev: pci dev pointer
  * @flags: driver flags
  *
@@ -2582,8 +2602,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 /**
  * amdgpu_device_suspend - initiate device suspend
  *
- * @pdev: drm dev pointer
- * @state: suspend state
+ * @dev: drm dev pointer
+ * @suspend: suspend state
+ * @fbcon : notify the fbdev of suspend
  *
  * Puts the hw in the suspend state (all asics).
  * Returns 0 for success or an error on failure.
@@ -2681,7 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 /**
  * amdgpu_device_resume - initiate device resume
  *
- * @pdev: drm dev pointer
+ * @dev: drm dev pointer
+ * @resume: resume state
+ * @fbcon : notify the fbdev of resume
  *
  * Bring the hw back to operating state (all asics).
  * Returns 0 for success or an error on failure.
@@ -3144,6 +3167,7 @@ out:
  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
  *
  * @adev: amdgpu device pointer
+ * @from_hypervisor: request from hypervisor
  *
  * do VF FLR and reinitialize Asic
  * return 0 means successed otherwise failed
@@ -3191,7 +3215,7 @@ error:
  *
  * @adev: amdgpu device pointer
  * @job: which job trigger hang
- * @force forces reset regardless of amdgpu_gpu_recovery
+ * @force: forces reset regardless of amdgpu_gpu_recovery
  *
  * Attempt to reset the GPU if it has hung (all asics).
  * Returns 0 for success or an error on failure.
@@ -3291,8 +3315,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
  */
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 {
-	u32 mask;
-	int ret;
+	struct pci_dev *pdev;
+	enum pci_bus_speed speed_cap;
+	enum pcie_link_width link_width;
 
 	if (amdgpu_pcie_gen_cap)
 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3310,27 +3335,61 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 	}
 
 	if (adev->pm.pcie_gen_mask == 0) {
-		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-		if (!ret) {
-			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
-						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
-						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
-			if (mask & DRM_PCIE_SPEED_25)
-				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
-			if (mask & DRM_PCIE_SPEED_50)
-				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
-			if (mask & DRM_PCIE_SPEED_80)
-				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+		/* asic caps */
+		pdev = adev->pdev;
+		speed_cap = pcie_get_speed_cap(pdev);
+		if (speed_cap == PCI_SPEED_UNKNOWN) {
+			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
 		} else {
-			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+			if (speed_cap == PCIE_SPEED_16_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
+			else if (speed_cap == PCIE_SPEED_8_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+			else if (speed_cap == PCIE_SPEED_5_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
+			else
+				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
+		}
+		/* platform caps */
+		pdev = adev->ddev->pdev->bus->self;
+		speed_cap = pcie_get_speed_cap(pdev);
+		if (speed_cap == PCI_SPEED_UNKNOWN) {
+			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+		} else {
+			if (speed_cap == PCIE_SPEED_16_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
+			else if (speed_cap == PCIE_SPEED_8_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
+			else if (speed_cap == PCIE_SPEED_5_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+			else
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+
 		}
 	}
 	if (adev->pm.pcie_mlw_mask == 0) {
-		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
-		if (!ret) {
-			switch (mask) {
-			case 32:
+		pdev = adev->ddev->pdev->bus->self;
+		link_width = pcie_get_width_cap(pdev);
+		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
+		} else {
+			switch (link_width) {
+			case PCIE_LNK_X32:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
@@ -3339,7 +3398,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 16:
+			case PCIE_LNK_X16:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
@@ -3347,36 +3406,34 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 12:
+			case PCIE_LNK_X12:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 8:
+			case PCIE_LNK_X8:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 4:
+			case PCIE_LNK_X4:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 2:
+			case PCIE_LNK_X2:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 1:
+			case PCIE_LNK_X1:
 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
 				break;
 			default:
 				break;
 			}
-		} else {
-			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 77ad59ade85c..1c4595562f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -28,6 +28,7 @@
 #include "amdgpu_i2c.h"
 #include "amdgpu_dpm.h"
 #include "atom.h"
+#include "amd_pcie.h"
 
 void amdgpu_dpm_print_class_info(u32 class, u32 class2)
 {
@@ -936,9 +937,11 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
 	case AMDGPU_PCIE_GEN3:
 		return AMDGPU_PCIE_GEN3;
 	default:
-		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+		    (default_gen == AMDGPU_PCIE_GEN3))
 			return AMDGPU_PCIE_GEN3;
-		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+			 (default_gen == AMDGPU_PCIE_GEN2))
 			return AMDGPU_PCIE_GEN2;
 		else
 			return AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 9acfbee91c40..ff24e1cc5b65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -287,12 +287,6 @@ enum amdgpu_pcie_gen {
 #define amdgpu_dpm_force_performance_level(adev, l) \
 		((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
 
-#define amdgpu_dpm_powergate_uvd(adev, g) \
-		((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
-		((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
-
 #define amdgpu_dpm_get_current_power_state(adev) \
 		((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
 
@@ -347,6 +341,10 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
 			(adev)->powerplay.pp_handle, msg_id))
 
+#define amdgpu_dpm_set_powergating_by_smu(adev, block_type, gate) \
+		((adev)->powerplay.pp_funcs->set_powergating_by_smu(\
+			(adev)->powerplay.pp_handle, block_type, gate))
+
 #define amdgpu_dpm_get_power_profile_mode(adev, buf) \
 		((adev)->powerplay.pp_funcs->get_power_profile_mode(\
 			(adev)->powerplay.pp_handle, buf))
@@ -359,10 +357,6 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
 			(adev)->powerplay.pp_handle, type, parameter, size))
 
-#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
-	((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
-		(adev)->powerplay.pp_handle))
-
 struct amdgpu_dpm {
 	struct amdgpu_ps *ps;
 	/* number of valid power states */
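The new amdgpu_dpm_set_powergating_by_smu() macro added above supersedes the per-block powergate_uvd/powergate_vce wrappers it replaces. A minimal sketch of the guarded call pattern its callers in this series use (the wrapper function name is made up for illustration):

	/* Hypothetical helper: ask the SMU to ungate GFX, e.g. before suspend. */
	static void example_disable_gfxoff(struct amdgpu_device *adev)
	{
		/* Callers check the hook first, as amdgpu_device_ip_suspend() does. */
		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev,
							  AMD_IP_BLOCK_TYPE_GFX,
							  false);
	}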
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index a549483032b0..06aede194bf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1,10 +1,3 @@
-/**
- * \file amdgpu_drv.c
- * AMD Amdgpu driver
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
 /*
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -122,7 +115,8 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
+/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
 int amdgpu_ngg = 0;
 int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
@@ -135,102 +129,239 @@ int amdgpu_gpu_recovery = -1; /* auto */
135int amdgpu_emu_mode = 0; 129int amdgpu_emu_mode = 0;
136uint amdgpu_smu_memory_pool_size = 0; 130uint amdgpu_smu_memory_pool_size = 0;
137 131
132/**
133 * DOC: vramlimit (int)
134 * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
135 */
138MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 136MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
139module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 137module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
140 138
139/**
140 * DOC: vis_vramlimit (int)
141 * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
142 */
141MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); 143MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
142module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); 144module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
143 145
146/**
147 * DOC: gartsize (uint)
148 * Restrict the size of GART in Mib (32, 64, etc.) for testing. The default is -1 (The size depends on asic).
149 */
144MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)"); 150MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
145module_param_named(gartsize, amdgpu_gart_size, uint, 0600); 151module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
146 152
153/**
154 * DOC: gttsize (int)
155 * Restrict the size of GTT domain in MiB for testing. The default is -1 (It's VRAM size if 3GB < VRAM < 3/4 RAM,
156 * otherwise 3/4 RAM size).
157 */
147MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); 158MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
148module_param_named(gttsize, amdgpu_gtt_size, int, 0600); 159module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
149 160
161/**
162 * DOC: moverate (int)
163 * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
164 */
150MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)"); 165MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
151module_param_named(moverate, amdgpu_moverate, int, 0600); 166module_param_named(moverate, amdgpu_moverate, int, 0600);
152 167
168/**
169 * DOC: benchmark (int)
170 * Run benchmarks. The default is 0 (Skip benchmarks).
171 */
153MODULE_PARM_DESC(benchmark, "Run benchmark"); 172MODULE_PARM_DESC(benchmark, "Run benchmark");
154module_param_named(benchmark, amdgpu_benchmarking, int, 0444); 173module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
155 174
175/**
176 * DOC: test (int)
177 * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (Skip test, only set 1 to run test).
178 */
156MODULE_PARM_DESC(test, "Run tests"); 179MODULE_PARM_DESC(test, "Run tests");
157module_param_named(test, amdgpu_testing, int, 0444); 180module_param_named(test, amdgpu_testing, int, 0444);
158 181
182/**
183 * DOC: audio (int)
184 * Set HDMI/DPAudio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disabled it.
185 */
159MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)"); 186MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
160module_param_named(audio, amdgpu_audio, int, 0444); 187module_param_named(audio, amdgpu_audio, int, 0444);
161 188
189/**
190 * DOC: disp_priority (int)
191 * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
192 */
162MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); 193MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
163module_param_named(disp_priority, amdgpu_disp_priority, int, 0444); 194module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
164 195
196/**
197 * DOC: hw_i2c (int)
198 * To enable hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
199 */
165MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); 200MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
166module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444); 201module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
167 202
203/**
204 * DOC: pcie_gen2 (int)
205 * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
206 */
168MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)"); 207MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
169module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); 208module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
170 209
210/**
211 * DOC: msi (int)
212 * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
213 */
171MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); 214MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
172module_param_named(msi, amdgpu_msi, int, 0444); 215module_param_named(msi, amdgpu_msi, int, 0444);
173 216
217/**
218 * DOC: lockup_timeout (int)
219 * Set GPU scheduler timeout value in ms. Value 0 is invalidated, will be adjusted to 10000.
220 * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
221 */
174MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)"); 222MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
175module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444); 223module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);

+/**
+ * DOC: dpm (int)
+ * Override the dynamic power management setting (1 = enable, 0 = disable). The default is -1 (auto).
+ */
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(dpm, amdgpu_dpm, int, 0444);

+/**
+ * DOC: fw_load_type (int)
+ * Set a different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
+ */
 MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
 module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);

+/**
+ * DOC: aspm (int)
+ * Disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(aspm, amdgpu_aspm, int, 0444);

+/**
+ * DOC: runpm (int)
+ * Override runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
+ * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ */
 MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);

+/**
+ * DOC: ip_block_mask (uint)
+ * Override which IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
+ * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
+ * some IPs or may include multiple instances of an IP, so the ordering varies from asic to asic. See the driver output in
+ * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
+ */
 MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
 module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
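A sketch of how a per-index mask like this is typically consumed when IP blocks are registered (illustrative only; the exact check lives in amdgpu_device.c and may differ in detail):

    /* Illustrative: skip an IP block whose bit is cleared in the mask. */
    if (!(amdgpu_ip_block_mask & (1U << adev->num_ip_blocks)))
        return 0; /* block disabled by the user, do not register it */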

+/**
+ * DOC: bapm (int)
+ * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between CPU and GPU. Set the value to 0 to disable it.
+ * The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(bapm, amdgpu_bapm, int, 0444);

+/**
+ * DOC: deep_color (int)
+ * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
 module_param_named(deep_color, amdgpu_deep_color, int, 0444);

+/**
+ * DOC: vm_size (int)
+ * Override the size of the GPU's per-client virtual address space in GiB. The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
 module_param_named(vm_size, amdgpu_vm_size, int, 0444);

+/**
+ * DOC: vm_fragment_size (int)
+ * Override the VM fragment size in bits (4, 5, etc.; 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
 module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
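The fragment size is a power-of-two multiple of the 4K GPU page, which is where the documented 4 = 64K and 9 = 2M values come from; a quick sketch of the arithmetic (helper name illustrative):

    /* Sketch: a fragment of N bits spans (4K << N) bytes; 4 -> 64K, 9 -> 2M. */
    static inline u64 sketch_vm_fragment_bytes(unsigned int frag_bits)
    {
        return 4096ULL << frag_bits;
    }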

+/**
+ * DOC: vm_block_size (int)
+ * Override the VM page table size in bits (the default depends on vm_size and the hw setup). The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
 module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);

+/**
+ * DOC: vm_fault_stop (int)
+ * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (no stop).
+ */
 MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
 module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);

+/**
+ * DOC: vm_debug (int)
+ * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
 module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);

+/**
+ * DOC: vm_update_mode (int)
+ * Override the VM update mode, i.e. whether VM tables are updated with the CPU (0 = never, 1 = graphics only, 2 = compute only, 3 = both). The default
+ * is -1 (compute VM tables are updated by the CPU only on large BAR (LB) systems; otherwise 0, never).
+ */
 MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both)");
 module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
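The mode value is effectively a two-bit field; a sketch of the decomposition (macro names are illustrative, modeled on the driver's AMDGPU_VM_USE_CPU_FOR_* flags):

    /* Illustrative: bit 0 selects CPU updates for graphics VMs (modes 1 and 3),
     * bit 1 selects CPU updates for compute VMs (modes 2 and 3).
     */
    #define SKETCH_VM_USE_CPU_FOR_GFX     (1 << 0)
    #define SKETCH_VM_USE_CPU_FOR_COMPUTE (1 << 1)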

+/**
+ * DOC: vram_page_split (int)
+ * Override the number of pages after which we split VRAM allocations (default 512, -1 = disable). The default is 512.
+ */
 MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
 module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);

+/**
+ * DOC: exp_hw_support (int)
+ * Enable experimental hw support (1 = enable). The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);

+/**
+ * DOC: dc (int)
+ * Disable/enable the Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(dc, amdgpu_dc, int, 0444);

 MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty)");
 module_param_named(dc_log, amdgpu_dc_log, int, 0444);

+/**
+ * DOC: sched_jobs (int)
+ * Override the max number of jobs supported in the sw queue. The default is 32.
+ */
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);

+/**
+ * DOC: sched_hw_submission (int)
+ * Override the max number of HW submissions. The default is 2.
+ */
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);

+/**
+ * DOC: ppfeaturemask (uint)
+ * Override which power features are enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable power features.
+ */
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);

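A sketch of how such a feature mask is usually consumed (PP_SCLK_DPM_MASK is one member of the real PP_FEATURE_MASK enum; the surrounding call is illustrative):

    /* Illustrative: gate an optional power feature on its mask bit. */
    if (amdgpu_pp_feature_mask & PP_SCLK_DPM_MASK)
        enable_sclk_dpm(hwmgr); /* hypothetical helper */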
@@ -240,58 +371,135 @@ module_param_named(no_evict, amdgpu_no_evict, int, 0444);
 MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
 module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);

+/**
+ * DOC: pcie_gen_cap (uint)
+ * Override the PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
 module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);

+/**
+ * DOC: pcie_lane_cap (uint)
+ * Override the PCIE lane capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
 MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
 module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);

+/**
+ * DOC: cg_mask (uint)
+ * Override which clockgating features are enabled on the GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
 MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
 module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);

+/**
+ * DOC: pg_mask (uint)
+ * Override which powergating features are enabled on the GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
 MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
 module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
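Both masks act as filters on the per-asic capability flags; a sketch of the typical application during early init (illustrative, the real assignment sits in the device init path):

    /* Illustrative: intersect asic capabilities with the user-supplied masks. */
    adev->cg_flags &= amdgpu_cg_mask;
    adev->pg_flags &= amdgpu_pg_mask;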

+/**
+ * DOC: sdma_phase_quantum (uint)
+ * Override the SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
+ */
 MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
 module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);

+/**
+ * DOC: disable_cu (charp)
+ * Disable specific CUs, given as a comma-separated list of se.sh.cu values. The default is NULL.
+ */
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);

+/**
+ * DOC: virtual_display (charp)
+ * Set to enable the virtual display feature. This feature provides virtual display hardware on headless boards
+ * or in virtualized environments. It is set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x: the pci address of
+ * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
+ * device at 26:00.0. The default is NULL.
+ */
 MODULE_PARM_DESC(virtual_display,
 		 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+/**
+ * DOC: ngg (int)
+ * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable (default depending on gfx))");
 module_param_named(ngg, amdgpu_ngg, int, 0444);

+/**
+ * DOC: prim_buf_per_se (int)
+ * Override the size of the Primitive Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
 module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);

+/**
+ * DOC: pos_buf_per_se (int)
+ * Override the size of the Position Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
 module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);

+/**
+ * DOC: cntl_sb_buf_per_se (int)
+ * Override the size of the Control Sideband per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
 module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);

+/**
+ * DOC: param_buf_per_se (int)
+ * Override the size of the Off-Chip Parameter Cache per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
 module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);

+/**
+ * DOC: job_hang_limit (int)
+ * Set how many times a job is allowed to hang before it is dropped. The default is 0.
+ */
 MODULE_PARM_DESC(job_hang_limit, "how many times a job can hang before it is dropped (default 0)");
 module_param_named(job_hang_limit, amdgpu_job_hang_limit, int, 0444);

+/**
+ * DOC: lbpw (int)
+ * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(lbpw, amdgpu_lbpw, int, 0444);

 MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);

+/**
+ * DOC: gpu_recovery (int)
+ * Set to enable the GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except for SRIOV).
+ */
 MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);

+/**
+ * DOC: emu_mode (int)
+ * Set 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
 module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);

+/**
+ * DOC: si_support (int)
+ * Select the driver for SI asics. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. When the radeon driver
+ * is also enabled, set 0 to use the radeon driver or 1 to use the amdgpu driver. The default is to use the radeon driver when
+ * it is available, otherwise the amdgpu driver.
+ */
 #ifdef CONFIG_DRM_AMDGPU_SI

 #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -305,6 +513,12 @@ MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)")
 module_param_named(si_support, amdgpu_si_support, int, 0444);
 #endif

+/**
+ * DOC: cik_support (int)
+ * Select the driver for CIK asics. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. When the radeon driver
+ * is also enabled, set 0 to use the radeon driver or 1 to use the amdgpu driver. The default is to use the radeon driver when
+ * it is available, otherwise the amdgpu driver.
+ */
 #ifdef CONFIG_DRM_AMDGPU_CIK

 #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -318,6 +532,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
 module_param_named(cik_support, amdgpu_cik_support, int, 0444);
 #endif

+/**
+ * DOC: smu_memory_pool_size (uint)
+ * Reserve GTT for SMU debug usage; set 0 to disable it. The actual size is value * 256MiB,
+ * e.g. 0x1 = 256MiB, 0x2 = 512MiB, 0x4 = 1GiB, 0x8 = 2GiB. The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(smu_memory_pool_size,
 	"reserve gtt for smu debug usage, 0 = disable,"
 		"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
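The parameter is a multiplier on 256MiB, so the conversion to bytes is a single shift (variable name illustrative):

    /* Illustrative: 256MiB == 1 << 28, so scale the multiplier up by 28 bits. */
    u64 pool_bytes = (u64)amdgpu_smu_memory_pool_size << 28;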
@@ -664,7 +883,7 @@ retry_init:
 err_pci:
 	pci_disable_device(pdev);
 err_free:
-	drm_dev_unref(dev);
+	drm_dev_put(dev);
 	return ret;
 }

@@ -674,7 +893,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);

 	drm_dev_unregister(dev);
-	drm_dev_unref(dev);
+	drm_dev_put(dev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -860,7 +1079,7 @@ static int amdgpu_flush(struct file *f, fl_owner_t id)
 	struct drm_file *file_priv = f->private_data;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;

-	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+	amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);

 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index bc5fd8ebab5d..811c62927c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 				       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 				       AMDGPU_GEM_CREATE_VRAM_CLEARED,
-				       true, NULL, &gobj);
+				       ttm_bo_type_kernel, NULL, &gobj);
 	if (ret) {
 		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index dd11b7313ca0..36113cb60ca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -234,7 +234,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	}

 	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 	for (i = 0; i < pages; i++, p++) {
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = NULL;
@@ -243,7 +243,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 		if (!adev->gart.ptr)
 			continue;

-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
 			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
 					       t, page_base, flags);
 			page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -282,7 +282,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,

 	for (i = 0; i < pages; i++) {
 		page_base = dma_addr[i];
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
 			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
 			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
@@ -319,7 +319,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,

 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 	for (i = 0; i < pages; i++, p++)
 		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 456295c00291..9f9e9dc87da1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -37,6 +37,8 @@ struct amdgpu_bo;
 #define AMDGPU_GPU_PAGE_SHIFT 12
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)

+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
 struct amdgpu_gart {
 	u64 table_addr;
 	struct amdgpu_bo *robj;
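The new macro simply names the PAGE_SIZE/GPU-page ratio that the gart.c call sites were computing inline; a standalone sketch of the values it takes, assuming the 4K GPU page implied by AMDGPU_GPU_PAGE_SHIFT:

    #include <stdio.h>

    /* Sketch: GPU page table entries per CPU page for two common page sizes. */
    int main(void)
    {
        const unsigned long gpu_page = 4096; /* 1 << AMDGPU_GPU_PAGE_SHIFT */

        printf("4K CPU pages:  %lu\n", 4096UL / gpu_page);  /* 1, e.g. x86 */
        printf("64K CPU pages: %lu\n", 65536UL / gpu_page); /* 16, e.g. some arm64 */
        return 0;
    }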
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 89743cdc1c2c..bcbdcf997d20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -265,7 +265,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,

 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
 				     (u32)(0xffffffff & args->in.domains),
-				     flags, false, resv, &gobj);
+				     flags, ttm_bo_type_device, resv, &gobj);
 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 		if (!r) {
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -317,7 +317,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,

 	/* create a gem object to contain this object in */
 	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
-				     0, 0, NULL, &gobj);
+				     0, ttm_bo_type_device, NULL, &gobj);
 	if (r)
 		return r;

@@ -766,7 +766,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 				    amdgpu_display_supported_domains(adev));
 	r = amdgpu_gem_object_create(adev, args->size, 0, domain,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     false, NULL, &gobj);
+				     ttm_bo_type_device, NULL, &gobj);
 	if (r)
 		return -ENOMEM;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 31f8170313b4..ce7739832d29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
 		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

+	/* wrap the last IB with fence */
+	if (job && job->uf_addr) {
+		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+	}
+
 	r = amdgpu_fence_emit(ring, f, fence_flags);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ring->funcs->insert_end)
 		ring->funcs->insert_end(ring);

-	/* wrap the last IB with fence */
-	if (job && job->uf_addr) {
-		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
-	}
-
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);

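Taken together, these two hunks move the user fence write ahead of the scheduler fence, so the value at job->uf_addr is already written by the time the scheduler fence signals. A sketch of the resulting emission order (mirrors the calls in the diff above, not a standalone function):

    if (job && job->uf_addr) /* 1. user fence for the last IB */
        amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                               fence_flags | AMDGPU_FENCE_FLAG_64BIT);
    r = amdgpu_fence_emit(ring, f, fence_flags); /* 2. then the scheduler fence */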
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index a1c78f90eadf..3a072a7a39f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -578,11 +578,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
 		}
 	}
-
-	adev->vm_manager.fence_context =
-		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		adev->vm_manager.seqno[i] = 0;
 }

 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 72a3e8c68876..a365ea2383d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -58,7 +58,8 @@
  *
  * @adev: amdgpu device pointer
  * @mm: process address space
- * @mn: MMU notifier structur
+ * @mn: MMU notifier structure
+ * @type: type of MMU notifier
  * @work: destruction work item
  * @node: hash table node to find structure by adev and mn
  * @lock: rw semaphore protecting the notifier nodes
@@ -266,7 +267,7 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  * @start: start of updated range
  * @end: end of updated range
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 113edffb5960..f1404adc3a90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -918,6 +918,36 @@ fail:
 	return -EINVAL;
 }

+/**
+ * DOC: busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the GPU
+ * is as a percentage. The file gpu_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_busy_percent(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	int r, value, size = sizeof(value);
+
+	/* sanity check PP is enabled */
+	if (!(adev->powerplay.pp_funcs &&
+	      adev->powerplay.pp_funcs->read_sensor))
+		return -EINVAL;
+
+	/* read the IP busy sensor */
+	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
+				   (void *)&value, &size);
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
 		   amdgpu_get_dpm_forced_performance_level,
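A small userspace sketch of reading the new attribute (the card0 path is an assumption; substitute the cardN entry that belongs to the amdgpu device):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/class/drm/card0/device/gpu_busy_percent", "r");
        int busy;

        if (!f) {
            perror("gpu_busy_percent");
            return 1;
        }
        if (fscanf(f, "%d", &busy) != 1) {
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("GPU busy: %d%%\n", busy);
        return 0;
    }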
@@ -951,6 +981,8 @@ static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
 		amdgpu_get_pp_od_clk_voltage,
 		amdgpu_set_pp_od_clk_voltage);
+static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
+		amdgpu_get_busy_percent, NULL);

 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 				      struct device_attribute *attr,
@@ -1697,10 +1729,10 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)

 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->powerplay.pp_funcs->powergate_uvd) {
+	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
 		/* enable/disable UVD */
 		mutex_lock(&adev->pm.mutex);
-		amdgpu_dpm_powergate_uvd(adev, !enable);
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
 		mutex_unlock(&adev->pm.mutex);
 	} else {
 		if (enable) {
@@ -1719,10 +1751,10 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)

 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->powerplay.pp_funcs->powergate_vce) {
+	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
 		/* enable/disable VCE */
 		mutex_lock(&adev->pm.mutex);
-		amdgpu_dpm_powergate_vce(adev, !enable);
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
 		mutex_unlock(&adev->pm.mutex);
 	} else {
 		if (enable) {
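Both call sites now funnel through a single powerplay callback instead of the per-block powergate_uvd/powergate_vce hooks. Its shape, as implemented by ci_set_powergating_by_smu later in this diff:

    /* Unified powergating entry point in the powerplay function table. */
    int (*set_powergating_by_smu)(void *handle, uint32_t block_type, bool gate);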
@@ -1854,6 +1886,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 			"pp_od_clk_voltage\n");
 		return ret;
 	}
+	ret = device_create_file(adev->dev,
+			&dev_attr_gpu_busy_percent);
+	if (ret) {
+		DRM_ERROR("failed to create device file "
+				"gpu_busy_percent\n");
+		return ret;
+	}
 	ret = amdgpu_debugfs_pm_init(adev);
 	if (ret) {
 		DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1889,6 +1928,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 			  &dev_attr_pp_power_profile_mode);
 	device_remove_file(adev->dev,
 			  &dev_attr_pp_od_clk_voltage);
+	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
 }

 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1919,7 +1959,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 	if (!amdgpu_device_has_dc_support(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_get_active_displays(adev);
-		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
 		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
 		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
 		/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index b2286bc41aec..df7226ad64b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -191,7 +191,6 @@ error:
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: shared DMA buffer
- * @target_dev: target device
  * @attach: DMA-buf attachment
  *
  * Makes sure that the shared DMA buffer can be accessed by the target device.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index e3878256743a..57b14dccd8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
  * Copyright 2009 VMware, Inc.
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0c084d3d0865..0246cb87d9e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -162,7 +162,7 @@ error_mem:
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 {
 	if (adev->mman.mem_global_referenced) {
-		drm_sched_entity_fini(adev->mman.entity.sched,
+		drm_sched_entity_destroy(adev->mman.entity.sched,
 				      &adev->mman.entity);
 		mutex_destroy(&adev->mman.gtt_window_lock);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index cc15d3230402..3e70eb61a960 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -53,11 +53,11 @@

 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
-#define FIRMWARE_KABINI	"radeon/kabini_uvd.bin"
-#define FIRMWARE_KAVERI	"radeon/kaveri_uvd.bin"
-#define FIRMWARE_HAWAII	"radeon/hawaii_uvd.bin"
-#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
+#define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
+#define FIRMWARE_KABINI	"amdgpu/kabini_uvd.bin"
+#define FIRMWARE_KAVERI	"amdgpu/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII	"amdgpu/hawaii_uvd.bin"
+#define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
@@ -309,7 +309,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);

-		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+		drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
 				      &adev->uvd.inst[j].gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 23d960ec1cf2..6ae1ad7e83b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -40,11 +40,11 @@

 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
-#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
-#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
-#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
-#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
+#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
+#define FIRMWARE_KABINI	"amdgpu/kabini_vce.bin"
+#define FIRMWARE_KAVERI	"amdgpu/kaveri_vce.bin"
+#define FIRMWARE_HAWAII	"amdgpu/hawaii_vce.bin"
+#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
@@ -222,7 +222,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;

-	drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);

 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 		(void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 422d1a434db4..712af5c1a5d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1082,7 +1082,7 @@ restart:
 					   struct amdgpu_vm_bo_base,
 					   vm_status);
 		bo_base->moved = false;
-		list_move(&bo_base->vm_status, &vm->idle);
+		list_del_init(&bo_base->vm_status);

 		bo = bo_base->bo->parent;
 		if (!bo)
@@ -1567,7 +1567,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (nodes) {
 			addr = nodes->start << PAGE_SHIFT;
 			max_entries = (nodes->size - pfn) *
-				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 		} else {
 			addr = 0;
 			max_entries = S64_MAX;
@@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,

 		max_entries = min(max_entries, 16ull * 1024ull);
 		for (count = 1;
-		     count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+		     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 		     ++count) {
 			uint64_t idx = pfn + count;

@@ -1592,7 +1592,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				dma_addr = pages_addr;
 			} else {
 				addr = pages_addr[pfn];
-				max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 			}

 		} else if (flags & AMDGPU_PTE_VALID) {
@@ -1607,7 +1607,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (r)
 			return r;

-		pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 		if (nodes && nodes->size == pfn) {
 			pfn = 0;
 			++nodes;
@@ -2643,7 +2643,7 @@ error_free_root:
 	vm->root.base.bo = NULL;

 error_free_sched_entity:
-	drm_sched_entity_fini(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&ring->sched, &vm->entity);

 	return r;
 }
@@ -2780,7 +2780,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}

-	drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);

 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index b6333f92ba45..f7a4bd5885a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -112,7 +112,7 @@ u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
 	unsigned pages = mem->num_pages;
 	u64 usage = 0;

-	if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return 0;

 	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index b18c31a701e2..e9934de1b9cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1221,7 +1221,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
 	ectx.abort = false;
 	ectx.last_jump = 0;
 	if (ws)
-		ectx.ws = kcalloc(4, ws, GFP_ATOMIC);
+		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
 	else
 		ectx.ws = NULL;

diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index c9d45cffca56..d2469453dca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -49,10 +49,10 @@
 #include "gmc/gmc_7_1_d.h"
 #include "gmc/gmc_7_1_sh_mask.h"

-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");

 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
@@ -5815,7 +5815,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}

-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
 	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
@@ -5846,8 +5846,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 	adev->pm.dpm.priv = pi;

 	pi->sys_pcie_mask =
-		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
-		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;

 	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

@@ -6767,6 +6766,19 @@ static int ci_dpm_read_sensor(void *handle, int idx,
 	}
 }

+static int ci_set_powergating_by_smu(void *handle,
+				     uint32_t block_type, bool gate)
+{
+	switch (block_type) {
+	case AMD_IP_BLOCK_TYPE_UVD:
+		ci_dpm_powergate_uvd(handle, gate);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
 static const struct amd_ip_funcs ci_dpm_ip_funcs = {
 	.name = "ci_dpm",
 	.early_init = ci_dpm_early_init,
@@ -6804,7 +6816,7 @@ static const struct amd_pm_funcs ci_dpm_funcs = {
 	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
 	.force_performance_level = &ci_dpm_force_performance_level,
 	.vblank_too_short = &ci_dpm_vblank_too_short,
-	.powergate_uvd = &ci_dpm_powergate_uvd,
+	.set_powergating_by_smu = &ci_set_powergating_by_smu,
 	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
 	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
 	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index a7576255cc30..d0fa2aac2388 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
 static int cik_sdma_soft_reset(void *handle);

-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");

 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);

@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)

 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (i == 0)
-			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 		else
-			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
 		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 		if (err)
 			goto out;
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

-	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
 }

 /**
@@ -192,9 +191,8 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

-	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
+	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
 	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
 }

@@ -248,7 +246,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
 	u32 ref_and_mask;

-	if (ring == &ring->adev->sdma.instance[0].ring)
+	if (ring->me == 0)
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
 	else
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -1290,8 +1288,10 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;

-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+		adev->sdma.instance[i].ring.me = i;
+	}
 }

 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index cd6bf291a853..de184a886057 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);

-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
-MODULE_FIRMWARE("radeon/tahiti_me.bin");
-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");

-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");

-MODULE_FIRMWARE("radeon/verde_pfp.bin");
-MODULE_FIRMWARE("radeon/verde_me.bin");
-MODULE_FIRMWARE("radeon/verde_ce.bin");
-MODULE_FIRMWARE("radeon/verde_rlc.bin");
+MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
+MODULE_FIRMWARE("amdgpu/verde_me.bin");
+MODULE_FIRMWARE("amdgpu/verde_ce.bin");
+MODULE_FIRMWARE("amdgpu/verde_rlc.bin");

-MODULE_FIRMWARE("radeon/oland_pfp.bin");
-MODULE_FIRMWARE("radeon/oland_me.bin");
-MODULE_FIRMWARE("radeon/oland_ce.bin");
-MODULE_FIRMWARE("radeon/oland_rlc.bin");
+MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
+MODULE_FIRMWARE("amdgpu/oland_me.bin");
+MODULE_FIRMWARE("amdgpu/oland_ce.bin");
+MODULE_FIRMWARE("amdgpu/oland_rlc.bin");

-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
-MODULE_FIRMWARE("radeon/hainan_me.bin");
-MODULE_FIRMWARE("radeon/hainan_ce.bin");
-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hainan_me.bin");
+MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
+MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");

 static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
 static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}

-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
 	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 	if (err)
 		goto out;
@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
 	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
 	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
 	if (err)
 		goto out;
@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
 	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
 	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
 	if (err)
 		goto out;
@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
 	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
369 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 369 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
370 370
371 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); 371 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
372 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 372 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
373 if (err) 373 if (err)
374 goto out; 374 goto out;
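The gfx_v6_0, gfx_v7_0, gmc and si_dpm hunks in this series move the SI/CIK firmware files from the radeon/ to the amdgpu/ directory under /lib/firmware; the load mechanism itself is unchanged, and updating the MODULE_FIRMWARE() tags keeps modinfo accurate so initramfs generators copy the files from the new location. A small standalone sketch of the name construction ("tahiti" is just an example chip):

#include <stdio.h>

int main(void)
{
	const char *chip_name = "tahiti";	/* example */
	char fw_name[40];

	/* same construction as the init_microcode hunks; the kernel then
	 * resolves this name under /lib/firmware via request_firmware() */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	printf("%s\n", fw_name);		/* prints amdgpu/tahiti_pfp.bin */
	return 0;
}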
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 42b6144c1fd5..95452c5a9df6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
57static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); 57static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
58static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); 58static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
59 59
60MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); 60MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
61MODULE_FIRMWARE("radeon/bonaire_me.bin"); 61MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
62MODULE_FIRMWARE("radeon/bonaire_ce.bin"); 62MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
63MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); 63MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
64MODULE_FIRMWARE("radeon/bonaire_mec.bin"); 64MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
65 65
66MODULE_FIRMWARE("radeon/hawaii_pfp.bin"); 66MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
67MODULE_FIRMWARE("radeon/hawaii_me.bin"); 67MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
68MODULE_FIRMWARE("radeon/hawaii_ce.bin"); 68MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
69MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); 69MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
70MODULE_FIRMWARE("radeon/hawaii_mec.bin"); 70MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
71 71
72MODULE_FIRMWARE("radeon/kaveri_pfp.bin"); 72MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
73MODULE_FIRMWARE("radeon/kaveri_me.bin"); 73MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
74MODULE_FIRMWARE("radeon/kaveri_ce.bin"); 74MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
75MODULE_FIRMWARE("radeon/kaveri_rlc.bin"); 75MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
76MODULE_FIRMWARE("radeon/kaveri_mec.bin"); 76MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
77MODULE_FIRMWARE("radeon/kaveri_mec2.bin"); 77MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
78 78
79MODULE_FIRMWARE("radeon/kabini_pfp.bin"); 79MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
80MODULE_FIRMWARE("radeon/kabini_me.bin"); 80MODULE_FIRMWARE("amdgpu/kabini_me.bin");
81MODULE_FIRMWARE("radeon/kabini_ce.bin"); 81MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
82MODULE_FIRMWARE("radeon/kabini_rlc.bin"); 82MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
83MODULE_FIRMWARE("radeon/kabini_mec.bin"); 83MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
84 84
85MODULE_FIRMWARE("radeon/mullins_pfp.bin"); 85MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
86MODULE_FIRMWARE("radeon/mullins_me.bin"); 86MODULE_FIRMWARE("amdgpu/mullins_me.bin");
87MODULE_FIRMWARE("radeon/mullins_ce.bin"); 87MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
88MODULE_FIRMWARE("radeon/mullins_rlc.bin"); 88MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
89MODULE_FIRMWARE("radeon/mullins_mec.bin"); 89MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
90 90
91static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = 91static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
92{ 92{
@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
925 default: BUG(); 925 default: BUG();
926 } 926 }
927 927
928 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 928 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
929 err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); 929 err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
930 if (err) 930 if (err)
931 goto out; 931 goto out;
@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
933 if (err) 933 if (err)
934 goto out; 934 goto out;
935 935
936 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 936 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
937 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); 937 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
938 if (err) 938 if (err)
939 goto out; 939 goto out;
@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
941 if (err) 941 if (err)
942 goto out; 942 goto out;
943 943
944 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 944 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
945 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); 945 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
946 if (err) 946 if (err)
947 goto out; 947 goto out;
@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
949 if (err) 949 if (err)
950 goto out; 950 goto out;
951 951
952 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); 952 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
953 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); 953 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
954 if (err) 954 if (err)
955 goto out; 955 goto out;
@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
958 goto out; 958 goto out;
959 959
960 if (adev->asic_type == CHIP_KAVERI) { 960 if (adev->asic_type == CHIP_KAVERI) {
961 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name); 961 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
962 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 962 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
963 if (err) 963 if (err)
964 goto out; 964 goto out;
@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
967 goto out; 967 goto out;
968 } 968 }
969 969
970 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); 970 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
971 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 971 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
972 if (err) 972 if (err)
973 goto out; 973 goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 807ee0dd623c..551f21bad6d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -704,6 +704,17 @@ static const u32 stoney_mgcg_cgcg_init[] =
704 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, 704 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
705}; 705};
706 706
707
708static const char * const sq_edc_source_names[] = {
709 "SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
710 "SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
711 "SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
712 "SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
713 "SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
714 "SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
715 "SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
716};
717
707static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); 718static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
708static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev); 719static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
709static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); 720static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
@@ -2006,6 +2017,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2006 return 0; 2017 return 0;
2007} 2018}
2008 2019
2020static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
2021
2009static int gfx_v8_0_sw_init(void *handle) 2022static int gfx_v8_0_sw_init(void *handle)
2010{ 2023{
2011 int i, j, k, r, ring_id; 2024 int i, j, k, r, ring_id;
@@ -2069,6 +2082,8 @@ static int gfx_v8_0_sw_init(void *handle)
2069 return r; 2082 return r;
2070 } 2083 }
2071 2084
2085 INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
2086
2072 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 2087 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2073 2088
2074 gfx_v8_0_scratch_init(adev); 2089 gfx_v8_0_scratch_init(adev);
@@ -5581,24 +5596,18 @@ static int gfx_v8_0_late_init(void *handle)
5581 return r; 5596 return r;
5582 } 5597 }
5583 5598
5584 amdgpu_device_ip_set_powergating_state(adev,
5585 AMD_IP_BLOCK_TYPE_GFX,
5586 AMD_PG_STATE_GATE);
5587
5588 return 0; 5599 return 0;
5589} 5600}
5590 5601
5591static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, 5602static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5592 bool enable) 5603 bool enable)
5593{ 5604{
5594 if ((adev->asic_type == CHIP_POLARIS11) || 5605 if (((adev->asic_type == CHIP_POLARIS11) ||
5595 (adev->asic_type == CHIP_POLARIS12) || 5606 (adev->asic_type == CHIP_POLARIS12) ||
5596 (adev->asic_type == CHIP_VEGAM)) 5607 (adev->asic_type == CHIP_VEGAM)) &&
5608 adev->powerplay.pp_funcs->set_powergating_by_smu)
5597 /* Send msg to SMU via Powerplay */ 5609 /* Send msg to SMU via Powerplay */
5598 amdgpu_device_ip_set_powergating_state(adev, 5610 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
5599 AMD_IP_BLOCK_TYPE_SMC,
5600 enable ?
5601 AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
5602 5611
5603 WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0); 5612 WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5604} 5613}
@@ -6955,16 +6964,14 @@ static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
6955 return 0; 6964 return 0;
6956} 6965}
6957 6966
6958static int gfx_v8_0_sq_irq(struct amdgpu_device *adev, 6967static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
6959 struct amdgpu_irq_src *source,
6960 struct amdgpu_iv_entry *entry)
6961{ 6968{
6962 u8 enc, se_id; 6969 u32 enc, se_id, sh_id, cu_id;
6963 char type[20]; 6970 char type[20];
6971 int sq_edc_source = -1;
6964 6972
6965 /* Parse all fields according to SQ_INTERRUPT* registers */ 6973 enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
6966 enc = (entry->src_data[0] >> 26) & 0x3; 6974 se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
6967 se_id = (entry->src_data[0] >> 24) & 0x3;
6968 6975
6969 switch (enc) { 6976 switch (enc) {
6970 case 0: 6977 case 0:
@@ -6974,19 +6981,37 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
6974 "reg_timestamp %d, thread_trace_buff_full %d," 6981 "reg_timestamp %d, thread_trace_buff_full %d,"
6975 "wlt %d, thread_trace %d.\n", 6982 "wlt %d, thread_trace %d.\n",
6976 se_id, 6983 se_id,
6977 (entry->src_data[0] >> 7) & 0x1, 6984 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
6978 (entry->src_data[0] >> 6) & 0x1, 6985 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
6979 (entry->src_data[0] >> 5) & 0x1, 6986 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
6980 (entry->src_data[0] >> 4) & 0x1, 6987 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
6981 (entry->src_data[0] >> 3) & 0x1, 6988 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
6982 (entry->src_data[0] >> 2) & 0x1, 6989 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
6983 (entry->src_data[0] >> 1) & 0x1, 6990 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
6984 entry->src_data[0] & 0x1 6991 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
6985 ); 6992 );
6986 break; 6993 break;
6987 case 1: 6994 case 1:
6988 case 2: 6995 case 2:
6989 6996
6997 cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
6998 sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
6999
7000 /*
 7001 * This function can be called either directly from the ISR
 7002 * or from a BH (the work handler below), in which case we can
 7003 * safely access the indexed SQ_EDC_INFO register instance.
7004 */
7005 if (in_task()) {
7006 mutex_lock(&adev->grbm_idx_mutex);
7007 gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
7008
7009 sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
7010
7011 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7012 mutex_unlock(&adev->grbm_idx_mutex);
7013 }
7014
6990 if (enc == 1) 7015 if (enc == 1)
6991 sprintf(type, "instruction intr"); 7016 sprintf(type, "instruction intr");
6992 else 7017 else
@@ -6994,17 +7019,46 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
6994 7019
6995 DRM_INFO( 7020 DRM_INFO(
6996 "SQ %s detected: " 7021 "SQ %s detected: "
6997 "se_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d\n", 7022 "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
 6998 type, se_id, 7023 "trap %s, sq_edc_info.source %s.\n",
6999 (entry->src_data[0] >> 20) & 0xf, 7024 type, se_id, sh_id, cu_id,
7000 (entry->src_data[0] >> 18) & 0x3, 7025 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
7001 (entry->src_data[0] >> 14) & 0xf, 7026 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
7002 (entry->src_data[0] >> 10) & 0xf 7027 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
7003 ); 7028 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
7029 (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
7030 );
7004 break; 7031 break;
7005 default: 7032 default:
 7006 DRM_ERROR("SQ invalid encoding type.\n"); 7033 DRM_ERROR("SQ invalid encoding type.\n");
7007 return -EINVAL; 7034 }
7035}
7036
7037static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
7038{
7039
7040 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
7041 struct sq_work *sq_work = container_of(work, struct sq_work, work);
7042
7043 gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
7044}
7045
7046static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
7047 struct amdgpu_irq_src *source,
7048 struct amdgpu_iv_entry *entry)
7049{
7050 unsigned ih_data = entry->src_data[0];
7051
7052 /*
7053 * Try to submit work so SQ_EDC_INFO can be accessed from
 7054 * the BH. If the previous work submission hasn't finished yet,
 7055 * just print whatever info is possible directly from the ISR.
7056 */
7057 if (work_pending(&adev->gfx.sq_work.work)) {
7058 gfx_v8_0_parse_sq_irq(adev, ih_data);
7059 } else {
7060 adev->gfx.sq_work.ih_data = ih_data;
7061 schedule_work(&adev->gfx.sq_work.work);
7008 } 7062 }
7009 7063
7010 return 0; 7064 return 0;
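The gfx_v8_0 hunks above split SQ interrupt handling in two: the ISR stashes the IH payload and schedules a work item, and only the work handler (task context) takes grbm_idx_mutex to read the indexed SQ_EDC_INFO register; if the previous work item is still pending, the ISR parses inline with reduced detail rather than dropping the event. A compilable module-style sketch of that deferral shape, assuming nothing amdgpu-specific:

#include <linux/module.h>
#include <linux/workqueue.h>

struct sq_work_demo {
	struct work_struct work;
	unsigned int ih_data;
};

static struct sq_work_demo demo;

static void demo_parse(unsigned int ih_data, bool task_ctx)
{
	/* only task context may sleep (mutexes, indexed register reads
	 * such as SQ_EDC_INFO); the ISR path must stay lock-free */
	pr_info("sq demo: ih_data=0x%x task_ctx=%d\n", ih_data, task_ctx);
}

static void demo_work_func(struct work_struct *work)
{
	struct sq_work_demo *d = container_of(work, struct sq_work_demo, work);

	demo_parse(d->ih_data, true);
}

/* stand-in for the interrupt handler */
static void demo_isr(unsigned int ih_data)
{
	if (work_pending(&demo.work)) {
		/* previous payload not consumed yet: degrade gracefully */
		demo_parse(ih_data, false);
	} else {
		demo.ih_data = ih_data;
		schedule_work(&demo.work);
	}
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.work, demo_work_func);
	demo_isr(0x1234);		/* simulate one interrupt */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo.work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");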
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a69153435ea7..ac46eabe3bcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3714,6 +3714,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
3714 3714
3715 /* update mgcg state */ 3715 /* update mgcg state */
3716 gfx_v9_0_update_gfx_mg_power_gating(adev, enable); 3716 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3717
3718 /* set gfx off through smu */
3719 if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
3720 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
3717 break; 3721 break;
3718 default: 3722 default:
3719 break; 3723 break;
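The gfx_v9_0 hunk requests GFXOFF through the SMU only when the powerplay backend implements set_powergating_by_smu; the same guarded optional-callback shape appears in the gfx_v8_0 and mmhub_v1_0 hunks. A tiny userspace sketch of the idiom, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pp_funcs {
	int (*set_powergating_by_smu)(void *handle, int block, bool gate);
};

static int smu_gate(void *handle, int block, bool gate)
{
	printf("SMU: block %d -> %s\n", block, gate ? "gated" : "ungated");
	return 0;
}

static void set_powergating(const struct pp_funcs *funcs, int block, bool gate)
{
	/* the op is optional: only dispatch when the backend provides it */
	if (funcs->set_powergating_by_smu)
		funcs->set_powergating_by_smu(NULL, block, gate);
}

int main(void)
{
	struct pp_funcs with_smu = { .set_powergating_by_smu = smu_gate };
	struct pp_funcs without_smu = { NULL };

	set_powergating(&with_smu, 1, true);
	set_powergating(&without_smu, 1, true);	/* quietly skipped */
	return 0;
}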
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 79f9ac29019b..75317f283c69 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -41,11 +41,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
41static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev); 41static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
42static int gmc_v6_0_wait_for_idle(void *handle); 42static int gmc_v6_0_wait_for_idle(void *handle);
43 43
44MODULE_FIRMWARE("radeon/tahiti_mc.bin"); 44MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
45MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); 45MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
46MODULE_FIRMWARE("radeon/verde_mc.bin"); 46MODULE_FIRMWARE("amdgpu/verde_mc.bin");
47MODULE_FIRMWARE("radeon/oland_mc.bin"); 47MODULE_FIRMWARE("amdgpu/oland_mc.bin");
48MODULE_FIRMWARE("radeon/si58_mc.bin"); 48MODULE_FIRMWARE("amdgpu/si58_mc.bin");
49 49
50#define MC_SEQ_MISC0__MT__MASK 0xf0000000 50#define MC_SEQ_MISC0__MT__MASK 0xf0000000
51#define MC_SEQ_MISC0__MT__GDDR1 0x10000000 51#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -134,9 +134,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
134 is_58_fw = true; 134 is_58_fw = true;
135 135
136 if (is_58_fw) 136 if (is_58_fw)
137 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); 137 snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
138 else 138 else
139 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 139 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
140 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); 140 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
141 if (err) 141 if (err)
142 goto out; 142 goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7147bfe25a23..78339309a00c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -47,8 +47,8 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
47static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 47static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
48static int gmc_v7_0_wait_for_idle(void *handle); 48static int gmc_v7_0_wait_for_idle(void *handle);
49 49
50MODULE_FIRMWARE("radeon/bonaire_mc.bin"); 50MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
51MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 51MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
52MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); 52MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
53 53
54static const u32 golden_settings_iceland_a11[] = 54static const u32 golden_settings_iceland_a11[] =
@@ -147,10 +147,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
147 default: BUG(); 147 default: BUG();
148 } 148 }
149 149
150 if (adev->asic_type == CHIP_TOPAZ) 150 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
151 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
152 else
153 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
154 151
155 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); 152 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
156 if (err) 153 if (err)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 46de1fd18a7b..3f57f6463dc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -3306,6 +3306,19 @@ static int kv_dpm_read_sensor(void *handle, int idx,
3306 } 3306 }
3307} 3307}
3308 3308
3309static int kv_set_powergating_by_smu(void *handle,
3310 uint32_t block_type, bool gate)
3311{
3312 switch (block_type) {
3313 case AMD_IP_BLOCK_TYPE_UVD:
3314 kv_dpm_powergate_uvd(handle, gate);
3315 break;
3316 default:
3317 break;
3318 }
3319 return 0;
3320}
3321
3309static const struct amd_ip_funcs kv_dpm_ip_funcs = { 3322static const struct amd_ip_funcs kv_dpm_ip_funcs = {
3310 .name = "kv_dpm", 3323 .name = "kv_dpm",
3311 .early_init = kv_dpm_early_init, 3324 .early_init = kv_dpm_early_init,
@@ -3342,7 +3355,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
3342 .print_power_state = &kv_dpm_print_power_state, 3355 .print_power_state = &kv_dpm_print_power_state,
3343 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 3356 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
3344 .force_performance_level = &kv_dpm_force_performance_level, 3357 .force_performance_level = &kv_dpm_force_performance_level,
3345 .powergate_uvd = &kv_dpm_powergate_uvd, 3358 .set_powergating_by_smu = kv_set_powergating_by_smu,
3346 .enable_bapm = &kv_dpm_enable_bapm, 3359 .enable_bapm = &kv_dpm_enable_bapm,
3347 .get_vce_clock_state = amdgpu_get_vce_clock_state, 3360 .get_vce_clock_state = amdgpu_get_vce_clock_state,
3348 .check_state_equal = kv_check_state_equal, 3361 .check_state_equal = kv_check_state_equal,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3d53c4413f13..e70a0d4d6db4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -471,8 +471,8 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
471 RENG_EXECUTE_ON_REG_UPDATE, 1); 471 RENG_EXECUTE_ON_REG_UPDATE, 1);
472 WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); 472 WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
473 473
474 if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu) 474 if (adev->powerplay.pp_funcs->set_powergating_by_smu)
475 amdgpu_dpm_set_mmhub_powergating_by_smu(adev); 475 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
476 476
477 } else { 477 } else {
478 pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, 478 pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c7190c39c4f5..cee4fae76d20 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -202,8 +202,7 @@ static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
202static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) 202static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
203{ 203{
204 struct amdgpu_device *adev = ring->adev; 204 struct amdgpu_device *adev = ring->adev;
205 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; 205 u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
206 u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
207 206
208 return wptr; 207 return wptr;
209} 208}
@@ -218,9 +217,8 @@ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
218static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) 217static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
219{ 218{
220 struct amdgpu_device *adev = ring->adev; 219 struct amdgpu_device *adev = ring->adev;
221 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
222 220
223 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2); 221 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
224} 222}
225 223
226static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 224static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -273,7 +271,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
273{ 271{
274 u32 ref_and_mask = 0; 272 u32 ref_and_mask = 0;
275 273
276 if (ring == &ring->adev->sdma.instance[0].ring) 274 if (ring->me == 0)
277 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); 275 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
278 else 276 else
279 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); 277 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1213,8 +1211,10 @@ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
1213{ 1211{
1214 int i; 1212 int i;
1215 1213
1216 for (i = 0; i < adev->sdma.num_instances; i++) 1214 for (i = 0; i < adev->sdma.num_instances; i++) {
1217 adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs; 1215 adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
1216 adev->sdma.instance[i].ring.me = i;
1217 }
1218} 1218}
1219 1219
1220static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = { 1220static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index aa9ab299fd32..99616dd9594f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -365,9 +365,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
365 /* XXX check if swapping is necessary on BE */ 365 /* XXX check if swapping is necessary on BE */
366 wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2; 366 wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
367 } else { 367 } else {
368 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; 368 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
369
370 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
371 } 369 }
372 370
373 return wptr; 371 return wptr;
@@ -394,9 +392,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
394 392
395 WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2)); 393 WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
396 } else { 394 } else {
397 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; 395 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
398
399 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
400 } 396 }
401} 397}
402 398
@@ -450,7 +446,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
450{ 446{
451 u32 ref_and_mask = 0; 447 u32 ref_and_mask = 0;
452 448
453 if (ring == &ring->adev->sdma.instance[0].ring) 449 if (ring->me == 0)
454 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); 450 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
455 else 451 else
456 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); 452 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1655,8 +1651,10 @@ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
1655{ 1651{
1656 int i; 1652 int i;
1657 1653
1658 for (i = 0; i < adev->sdma.num_instances; i++) 1654 for (i = 0; i < adev->sdma.num_instances; i++) {
1659 adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs; 1655 adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
1656 adev->sdma.instance[i].ring.me = i;
1657 }
1660} 1658}
1661 1659
1662static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { 1660static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ca53b3fba422..572ca63cf676 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -296,13 +296,12 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
296 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); 296 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
297 } else { 297 } else {
298 u32 lowbit, highbit; 298 u32 lowbit, highbit;
299 int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
300 299
301 lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2; 300 lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
302 highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; 301 highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
303 302
304 DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", 303 DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
305 me, highbit, lowbit); 304 ring->me, highbit, lowbit);
306 wptr = highbit; 305 wptr = highbit;
307 wptr = wptr << 32; 306 wptr = wptr << 32;
308 wptr |= lowbit; 307 wptr |= lowbit;
@@ -339,17 +338,15 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
339 ring->doorbell_index, ring->wptr << 2); 338 ring->doorbell_index, ring->wptr << 2);
340 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); 339 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
341 } else { 340 } else {
342 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
343
344 DRM_DEBUG("Not using doorbell -- " 341 DRM_DEBUG("Not using doorbell -- "
345 "mmSDMA%i_GFX_RB_WPTR == 0x%08x " 342 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
346 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", 343 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
347 me, 344 ring->me,
348 lower_32_bits(ring->wptr << 2), 345 lower_32_bits(ring->wptr << 2),
349 me, 346 ring->me,
350 upper_32_bits(ring->wptr << 2)); 347 upper_32_bits(ring->wptr << 2));
351 WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); 348 WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
352 WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); 349 WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
353 } 350 }
354} 351}
355 352
@@ -430,7 +427,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
430 u32 ref_and_mask = 0; 427 u32 ref_and_mask = 0;
431 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 428 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
432 429
433 if (ring == &ring->adev->sdma.instance[0].ring) 430 if (ring->me == 0)
434 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0; 431 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
435 else 432 else
436 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1; 433 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
@@ -1651,8 +1648,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
1651{ 1648{
1652 int i; 1649 int i;
1653 1650
1654 for (i = 0; i < adev->sdma.num_instances; i++) 1651 for (i = 0; i < adev->sdma.num_instances; i++) {
1655 adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; 1652 adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
1653 adev->sdma.instance[i].ring.me = i;
1654 }
1656} 1655}
1657 1656
1658static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = { 1657static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index d51318c695e6..db327b412562 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,16 +56,16 @@
56 56
57#define BIOS_SCRATCH_4 0x5cd 57#define BIOS_SCRATCH_4 0x5cd
58 58
59MODULE_FIRMWARE("radeon/tahiti_smc.bin"); 59MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
60MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); 60MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
61MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); 61MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
62MODULE_FIRMWARE("radeon/verde_smc.bin"); 62MODULE_FIRMWARE("amdgpu/verde_smc.bin");
63MODULE_FIRMWARE("radeon/verde_k_smc.bin"); 63MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
64MODULE_FIRMWARE("radeon/oland_smc.bin"); 64MODULE_FIRMWARE("amdgpu/oland_smc.bin");
65MODULE_FIRMWARE("radeon/oland_k_smc.bin"); 65MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
66MODULE_FIRMWARE("radeon/hainan_smc.bin"); 66MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
67MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 67MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
68MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 68MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
69 69
70static const struct amd_pm_funcs si_dpm_funcs; 70static const struct amd_pm_funcs si_dpm_funcs;
71 71
@@ -7318,8 +7318,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
7318 pi = &eg_pi->rv7xx; 7318 pi = &eg_pi->rv7xx;
7319 7319
7320 si_pi->sys_pcie_mask = 7320 si_pi->sys_pcie_mask =
7321 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> 7321 adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
7322 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
7323 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 7322 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
7324 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); 7323 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
7325 7324
@@ -7667,7 +7666,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
7667 default: BUG(); 7666 default: BUG();
7668 } 7667 }
7669 7668
7670 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 7669 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
7671 err = request_firmware(&adev->pm.fw, fw_name, adev->dev); 7670 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
7672 if (err) 7671 if (err)
7673 goto out; 7672 goto out;
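The si_dpm hunk stores the PCIe link-speed support bits unshifted, matching the move to the core PCIe helpers mentioned in the merge description: a consumer that tests the stored mask against the CAIL bit definitions directly would see nothing if the value had already been shifted down. A sketch of that failure mode; the mask and bit values below are hypothetical stand-ins:

#include <stdio.h>

#define LINK_SPEED_SUPPORT_MASK		0x00070000u	/* hypothetical */
#define LINK_SPEED_SUPPORT_SHIFT	16		/* hypothetical */
#define LINK_SPEED_GEN3			0x00040000u	/* hypothetical */

int main(void)
{
	unsigned int pcie_gen_mask = 0x00070000u;	/* gens 1-3 supported */
	unsigned int shifted = (pcie_gen_mask & LINK_SPEED_SUPPORT_MASK)
				>> LINK_SPEED_SUPPORT_SHIFT;
	unsigned int unshifted = pcie_gen_mask & LINK_SPEED_SUPPORT_MASK;

	/* a consumer written against the unshifted bit definitions: */
	printf("shifted:   gen3? %d\n", !!(shifted & LINK_SPEED_GEN3));	/* 0: bug */
	printf("unshifted: gen3? %d\n", !!(unshifted & LINK_SPEED_GEN3));	/* 1 */
	return 0;
}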
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bfddf97dd13e..8ee1c2eaaa14 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -470,7 +470,7 @@ static int uvd_v6_0_sw_fini(void *handle)
470 return r; 470 return r;
471 471
472 if (uvd_v6_0_enc_support(adev)) { 472 if (uvd_v6_0_enc_support(adev)) {
473 drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); 473 drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
474 474
475 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 475 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
476 amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); 476 amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
@@ -1569,7 +1569,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
1569static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { 1569static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1570 .type = AMDGPU_RING_TYPE_UVD, 1570 .type = AMDGPU_RING_TYPE_UVD,
1571 .align_mask = 0xf, 1571 .align_mask = 0xf,
1572 .nop = PACKET0(mmUVD_NO_OP, 0),
1573 .support_64bit_ptrs = false, 1572 .support_64bit_ptrs = false,
1574 .get_rptr = uvd_v6_0_ring_get_rptr, 1573 .get_rptr = uvd_v6_0_ring_get_rptr,
1575 .get_wptr = uvd_v6_0_ring_get_wptr, 1574 .get_wptr = uvd_v6_0_ring_get_wptr,
@@ -1587,7 +1586,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1587 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, 1586 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1588 .test_ring = uvd_v6_0_ring_test_ring, 1587 .test_ring = uvd_v6_0_ring_test_ring,
1589 .test_ib = amdgpu_uvd_ring_test_ib, 1588 .test_ib = amdgpu_uvd_ring_test_ib,
1590 .insert_nop = amdgpu_ring_insert_nop, 1589 .insert_nop = uvd_v6_0_ring_insert_nop,
1591 .pad_ib = amdgpu_ring_generic_pad_ib, 1590 .pad_ib = amdgpu_ring_generic_pad_ib,
1592 .begin_use = amdgpu_uvd_ring_begin_use, 1591 .begin_use = amdgpu_uvd_ring_begin_use,
1593 .end_use = amdgpu_uvd_ring_end_use, 1592 .end_use = amdgpu_uvd_ring_end_use,
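The uvd_v6_0 hunks drop the static .nop template from the vm_funcs table in favour of a ring-specific insert_nop hook. The real uvd_v6_0_ring_insert_nop is defined elsewhere in the patch; the sketch below only illustrates the likely shape, padding with two-dword register-write packets instead of raw filler dwords (PACKET0 layout simplified, mmUVD_NO_OP offset hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PACKET0(reg, n)	(((reg) & 0xFFFF) | ((n) << 16))	/* simplified type-0 header */
#define mmUVD_NO_OP	0x25ff					/* hypothetical offset */

struct ring {
	uint32_t buf[64];
	unsigned int wptr;
};

static void ring_write(struct ring *r, uint32_t v)
{
	r->buf[r->wptr++ % 64] = v;
}

static void insert_nop(struct ring *r, uint32_t count)
{
	uint32_t i;

	/* pad with two-dword register-write NOP packets, since the UVD
	 * block parses the ring as packets rather than raw dwords */
	for (i = 0; i < count / 2; i++) {
		ring_write(r, PACKET0(mmUVD_NO_OP, 0));
		ring_write(r, 0);
	}
}

int main(void)
{
	struct ring r = { {0}, 0 };

	insert_nop(&r, 4);
	printf("wptr after 4-dword pad: %u\n", r.wptr);
	return 0;
}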
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 57d32f21b3a6..ba244d3b74db 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -491,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle)
491 return r; 491 return r;
492 492
493 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 493 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
494 drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc); 494 drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
495 495
496 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 496 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
497 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); 497 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 47f70827195b..d48e877b682e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -56,7 +56,7 @@ static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
56{ 56{
57 struct amdgpu_device *adev = ring->adev; 57 struct amdgpu_device *adev = ring->adev;
58 58
59 if (ring == &adev->vce.ring[0]) 59 if (ring->me == 0)
60 return RREG32(mmVCE_RB_RPTR); 60 return RREG32(mmVCE_RB_RPTR);
61 else 61 else
62 return RREG32(mmVCE_RB_RPTR2); 62 return RREG32(mmVCE_RB_RPTR2);
@@ -73,7 +73,7 @@ static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
73{ 73{
74 struct amdgpu_device *adev = ring->adev; 74 struct amdgpu_device *adev = ring->adev;
75 75
76 if (ring == &adev->vce.ring[0]) 76 if (ring->me == 0)
77 return RREG32(mmVCE_RB_WPTR); 77 return RREG32(mmVCE_RB_WPTR);
78 else 78 else
79 return RREG32(mmVCE_RB_WPTR2); 79 return RREG32(mmVCE_RB_WPTR2);
@@ -90,7 +90,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
90{ 90{
91 struct amdgpu_device *adev = ring->adev; 91 struct amdgpu_device *adev = ring->adev;
92 92
93 if (ring == &adev->vce.ring[0]) 93 if (ring->me == 0)
94 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 94 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
95 else 95 else
96 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 96 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
@@ -627,8 +627,10 @@ static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
627{ 627{
628 int i; 628 int i;
629 629
630 for (i = 0; i < adev->vce.num_rings; i++) 630 for (i = 0; i < adev->vce.num_rings; i++) {
631 adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs; 631 adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
632 adev->vce.ring[i].me = i;
633 }
632} 634}
633 635
634static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = { 636static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 0999c843f623..99604d0262ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -86,9 +86,9 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
86 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 86 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
87 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 87 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
88 88
89 if (ring == &adev->vce.ring[0]) 89 if (ring->me == 0)
90 v = RREG32(mmVCE_RB_RPTR); 90 v = RREG32(mmVCE_RB_RPTR);
91 else if (ring == &adev->vce.ring[1]) 91 else if (ring->me == 1)
92 v = RREG32(mmVCE_RB_RPTR2); 92 v = RREG32(mmVCE_RB_RPTR2);
93 else 93 else
94 v = RREG32(mmVCE_RB_RPTR3); 94 v = RREG32(mmVCE_RB_RPTR3);
@@ -118,9 +118,9 @@ static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
118 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 118 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
119 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 119 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
120 120
121 if (ring == &adev->vce.ring[0]) 121 if (ring->me == 0)
122 v = RREG32(mmVCE_RB_WPTR); 122 v = RREG32(mmVCE_RB_WPTR);
123 else if (ring == &adev->vce.ring[1]) 123 else if (ring->me == 1)
124 v = RREG32(mmVCE_RB_WPTR2); 124 v = RREG32(mmVCE_RB_WPTR2);
125 else 125 else
126 v = RREG32(mmVCE_RB_WPTR3); 126 v = RREG32(mmVCE_RB_WPTR3);
@@ -149,9 +149,9 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
149 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 149 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
150 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 150 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
151 151
152 if (ring == &adev->vce.ring[0]) 152 if (ring->me == 0)
153 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 153 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
154 else if (ring == &adev->vce.ring[1]) 154 else if (ring->me == 1)
155 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 155 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
156 else 156 else
157 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 157 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
900 .emit_frame_size = 900 .emit_frame_size =
901 4 + /* vce_v3_0_emit_pipeline_sync */ 901 4 + /* vce_v3_0_emit_pipeline_sync */
902 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ 902 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
903 .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ 903 .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
904 .emit_ib = amdgpu_vce_ring_emit_ib, 904 .emit_ib = amdgpu_vce_ring_emit_ib,
905 .emit_fence = amdgpu_vce_ring_emit_fence, 905 .emit_fence = amdgpu_vce_ring_emit_fence,
906 .test_ring = amdgpu_vce_ring_test_ring, 906 .test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
924 6 + /* vce_v3_0_emit_vm_flush */ 924 6 + /* vce_v3_0_emit_vm_flush */
925 4 + /* vce_v3_0_emit_pipeline_sync */ 925 4 + /* vce_v3_0_emit_pipeline_sync */
926 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ 926 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
927 .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ 927 .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
928 .emit_ib = vce_v3_0_ring_emit_ib, 928 .emit_ib = vce_v3_0_ring_emit_ib,
929 .emit_vm_flush = vce_v3_0_emit_vm_flush, 929 .emit_vm_flush = vce_v3_0_emit_vm_flush,
930 .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, 930 .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -942,12 +942,16 @@ static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
942 int i; 942 int i;
943 943
944 if (adev->asic_type >= CHIP_STONEY) { 944 if (adev->asic_type >= CHIP_STONEY) {
945 for (i = 0; i < adev->vce.num_rings; i++) 945 for (i = 0; i < adev->vce.num_rings; i++) {
946 adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs; 946 adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
947 adev->vce.ring[i].me = i;
948 }
947 DRM_INFO("VCE enabled in VM mode\n"); 949 DRM_INFO("VCE enabled in VM mode\n");
948 } else { 950 } else {
949 for (i = 0; i < adev->vce.num_rings; i++) 951 for (i = 0; i < adev->vce.num_rings; i++) {
950 adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs; 952 adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
953 adev->vce.ring[i].me = i;
954 }
951 DRM_INFO("VCE enabled in physical mode\n"); 955 DRM_INFO("VCE enabled in physical mode\n");
952 } 956 }
953} 957}
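The paired emit_ib_size hunks above fix a swap: the phys table pairs amdgpu_vce_ring_emit_ib, which writes 4 dwords, with .emit_ib_size = 4, while the vm table pairs vce_v3_0_ring_emit_ib, which also emits a VMID and needs 5. Since emit_ib_size feeds ring-space reservation, an undersized value risks overrunning the reserved space. A toy consistency check; the dword layouts are illustrative, not the exact VCE command formats:

#include <stdint.h>
#include <assert.h>

static int emit_ib_phys(uint32_t *out)
{
	int n = 0;

	/* cmd, addr lo, addr hi, size: 4 dwords */
	out[n++] = 0x02; out[n++] = 0; out[n++] = 0; out[n++] = 16;
	return n;
}

static int emit_ib_vm(uint32_t *out)
{
	int n = 0;

	/* cmd, vmid, addr lo, addr hi, size: 5 dwords */
	out[n++] = 0x02; out[n++] = 1; out[n++] = 0; out[n++] = 0; out[n++] = 16;
	return n;
}

int main(void)
{
	uint32_t buf[8];

	assert(emit_ib_phys(buf) == 4);	/* matches .emit_ib_size = 4 */
	assert(emit_ib_vm(buf) == 5);	/* matches .emit_ib_size = 5 */
	return 0;
}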
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 8fd1b742985a..575bf9709389 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -60,9 +60,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
60{ 60{
61 struct amdgpu_device *adev = ring->adev; 61 struct amdgpu_device *adev = ring->adev;
62 62
63 if (ring == &adev->vce.ring[0]) 63 if (ring->me == 0)
64 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR)); 64 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
65 else if (ring == &adev->vce.ring[1]) 65 else if (ring->me == 1)
66 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2)); 66 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
67 else 67 else
68 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3)); 68 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
@@ -82,9 +82,9 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
82 if (ring->use_doorbell) 82 if (ring->use_doorbell)
83 return adev->wb.wb[ring->wptr_offs]; 83 return adev->wb.wb[ring->wptr_offs];
84 84
85 if (ring == &adev->vce.ring[0]) 85 if (ring->me == 0)
86 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR)); 86 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
87 else if (ring == &adev->vce.ring[1]) 87 else if (ring->me == 1)
88 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2)); 88 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
89 else 89 else
90 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3)); 90 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
@@ -108,10 +108,10 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
108 return; 108 return;
109 } 109 }
110 110
111 if (ring == &adev->vce.ring[0]) 111 if (ring->me == 0)
112 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), 112 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
113 lower_32_bits(ring->wptr)); 113 lower_32_bits(ring->wptr));
114 else if (ring == &adev->vce.ring[1]) 114 else if (ring->me == 1)
115 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), 115 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
116 lower_32_bits(ring->wptr)); 116 lower_32_bits(ring->wptr));
117 else 117 else
@@ -1088,8 +1088,10 @@ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
1088{ 1088{
1089 int i; 1089 int i;
1090 1090
1091 for (i = 0; i < adev->vce.num_rings; i++) 1091 for (i = 0; i < adev->vce.num_rings; i++) {
1092 adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs; 1092 adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
1093 adev->vce.ring[i].me = i;
1094 }
1093 DRM_INFO("VCE enabled in VM mode\n"); 1095 DRM_INFO("VCE enabled in VM mode\n");
1094} 1096}
1095 1097
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index af16973f2c41..94911871eb9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -28,11 +28,11 @@
28AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o 28AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
29 29
30ifneq ($(CONFIG_DRM_AMD_DC),) 30ifneq ($(CONFIG_DRM_AMD_DC),)
31AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o 31AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
32endif 32endif
33 33
34ifneq ($(CONFIG_DEBUG_FS),) 34ifneq ($(CONFIG_DEBUG_FS),)
35AMDGPUDM += amdgpu_dm_crc.o 35AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
36endif 36endif
37 37
38subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc 38subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 66bd3cc3e387..ca017c1dd4da 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -39,6 +39,9 @@
39#include "dm_helpers.h" 39#include "dm_helpers.h"
40#include "dm_services_types.h" 40#include "dm_services_types.h"
41#include "amdgpu_dm_mst_types.h" 41#include "amdgpu_dm_mst_types.h"
42#if defined(CONFIG_DEBUG_FS)
43#include "amdgpu_dm_debugfs.h"
44#endif
42 45
43#include "ivsrcid/ivsrcid_vislands30.h" 46#include "ivsrcid/ivsrcid_vislands30.h"
44 47
@@ -1532,7 +1535,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1532 /* 1535 /*
1533 * Temporary disable until pplib/smu interaction is implemented 1536 * Temporary disable until pplib/smu interaction is implemented
1534 */ 1537 */
1535 dm->dc->debug.disable_stutter = true; 1538 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1536 break; 1539 break;
1537#endif 1540#endif
1538 default: 1541 default:
@@ -2173,6 +2176,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2173 return color_space; 2176 return color_space;
2174} 2177}
2175 2178
2179static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2180{
2181 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2182 return;
2183
2184 timing_out->display_color_depth--;
2185}
2186
2187static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2188 const struct drm_display_info *info)
2189{
2190 int normalized_clk;
2191 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2192 return;
2193 do {
2194 normalized_clk = timing_out->pix_clk_khz;
2195 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2196 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2197 normalized_clk /= 2;
2198 /* Adjusting pix clock following on HDMI spec based on colour depth */
2199 switch (timing_out->display_color_depth) {
2200 case COLOR_DEPTH_101010:
2201 normalized_clk = (normalized_clk * 30) / 24;
2202 break;
2203 case COLOR_DEPTH_121212:
2204 normalized_clk = (normalized_clk * 36) / 24;
2205 break;
2206 case COLOR_DEPTH_161616:
2207 normalized_clk = (normalized_clk * 48) / 24;
2208 break;
2209 default:
2210 return;
2211 }
2212 if (normalized_clk <= info->max_tmds_clock)
2213 return;
2214 reduce_mode_colour_depth(timing_out);
2215
2216 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
2217
2218}
2176/*****************************************************************************/ 2219/*****************************************************************************/
2177 2220
2178static void 2221static void
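adjust_colour_depth_from_display_info() above scales the pixel clock by bits-per-component over 24 (30/24, 36/24, 48/24), halves it first for YCbCr 4:2:0, and steps the colour depth down until the result fits the sink's max_tmds_clock. Rerunning that arithmetic standalone with illustrative numbers (4K@60 at 594 MHz, 4:2:0, a 340 MHz TMDS sink):

#include <stdio.h>

int main(void)
{
	int pix_clk_khz = 594000, max_tmds_clock = 340000;
	int bpc = 12;				/* start at COLOR_DEPTH_121212 */

	while (bpc > 8) {
		int clk = pix_clk_khz / 2;	/* YCbCr 4:2:0 halves the rate */

		switch (bpc) {			/* HDMI deep-colour scaling */
		case 10: clk = clk * 30 / 24; break;
		case 12: clk = clk * 36 / 24; break;
		case 16: clk = clk * 48 / 24; break;
		}
		if (clk <= max_tmds_clock)
			break;
		bpc -= 2;			/* reduce_mode_colour_depth() */
	}
	/* 12 bpc -> 445500, 10 bpc -> 371250, both over 340000: ends at 8 */
	printf("chosen depth: %d bpc\n", bpc);
	return 0;
}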
@@ -2181,6 +2224,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2181 const struct drm_connector *connector) 2224 const struct drm_connector *connector)
2182{ 2225{
2183 struct dc_crtc_timing *timing_out = &stream->timing; 2226 struct dc_crtc_timing *timing_out = &stream->timing;
2227 const struct drm_display_info *info = &connector->display_info;
2184 2228
2185 memset(timing_out, 0, sizeof(struct dc_crtc_timing)); 2229 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2186 2230
@@ -2189,8 +2233,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2189 timing_out->v_border_top = 0; 2233 timing_out->v_border_top = 0;
2190 timing_out->v_border_bottom = 0; 2234 timing_out->v_border_bottom = 0;
2191 /* TODO: un-hardcode */ 2235 /* TODO: un-hardcode */
2192 2236 if (drm_mode_is_420_only(info, mode_in)
2193 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) 2237 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2238 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
2239 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2194 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) 2240 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2195 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 2241 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2196 else 2242 else
@@ -2226,6 +2272,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2226 2272
2227 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 2273 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2228 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 2274 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2275 if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2276 adjust_colour_depth_from_display_info(timing_out, info);
2229} 2277}
2230 2278
2231static void fill_audio_info(struct audio_info *audio_info, 2279static void fill_audio_info(struct audio_info *audio_info,
@@ -3619,6 +3667,13 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3619 &aconnector->base, &aencoder->base); 3667 &aconnector->base, &aencoder->base);
3620 3668
3621 drm_connector_register(&aconnector->base); 3669 drm_connector_register(&aconnector->base);
3670#if defined(CONFIG_DEBUG_FS)
3671 res = connector_debugfs_init(aconnector);
3672 if (res) {
3673 DRM_ERROR("Failed to create debugfs for connector");
3674 goto out_free;
3675 }
3676#endif
3622 3677
3623 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 3678 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3624 || connector_type == DRM_MODE_CONNECTOR_eDP) 3679 || connector_type == DRM_MODE_CONNECTOR_eDP)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
new file mode 100644
index 000000000000..cf5ea69e46ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -0,0 +1,170 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/debugfs.h>
27
28#include "dc.h"
29#include "dc_link.h"
30
31#include "amdgpu.h"
32#include "amdgpu_dm.h"
33#include "amdgpu_dm_debugfs.h"
34
35static ssize_t dp_link_rate_debugfs_read(struct file *f, char __user *buf,
36 size_t size, loff_t *pos)
37{
38 /* TODO: create method to read link rate */
39 return 1;
40}
41
42static ssize_t dp_link_rate_debugfs_write(struct file *f, const char __user *buf,
43 size_t size, loff_t *pos)
44{
45 /* TODO: create method to write link rate */
46 return 1;
47}
48
49static ssize_t dp_lane_count_debugfs_read(struct file *f, char __user *buf,
50 size_t size, loff_t *pos)
51{
52 /* TODO: create method to read lane count */
53 return 1;
54}
55
56static ssize_t dp_lane_count_debugfs_write(struct file *f, const char __user *buf,
57 size_t size, loff_t *pos)
58{
59 /* TODO: create method to write lane count */
60 return 1;
61}
62
63static ssize_t dp_voltage_swing_debugfs_read(struct file *f, char __user *buf,
64 size_t size, loff_t *pos)
65{
66 /* TODO: create method to read voltage swing */
67 return 1;
68}
69
70static ssize_t dp_voltage_swing_debugfs_write(struct file *f, const char __user *buf,
71 size_t size, loff_t *pos)
72{
73 /* TODO: create method to write voltage swing */
74 return 1;
75}
76
77static ssize_t dp_pre_emphasis_debugfs_read(struct file *f, char __user *buf,
78 size_t size, loff_t *pos)
79{
80 /* TODO: create method to read pre-emphasis */
81 return 1;
82}
83
84static ssize_t dp_pre_emphasis_debugfs_write(struct file *f, const char __user *buf,
85 size_t size, loff_t *pos)
86{
87 /* TODO: create method to write pre-emphasis */
88 return 1;
89}
90
91static ssize_t dp_phy_test_pattern_debugfs_read(struct file *f, char __user *buf,
92 size_t size, loff_t *pos)
93{
94 /* TODO: create method to read PHY test pattern */
95 return 1;
96}
97
98static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
99 size_t size, loff_t *pos)
100{
101 /* TODO: create method to write PHY test pattern */
102 return 1;
103}
104
105static const struct file_operations dp_link_rate_fops = {
106 .owner = THIS_MODULE,
107 .read = dp_link_rate_debugfs_read,
108 .write = dp_link_rate_debugfs_write,
109 .llseek = default_llseek
110};
111
112static const struct file_operations dp_lane_count_fops = {
113 .owner = THIS_MODULE,
114 .read = dp_lane_count_debugfs_read,
115 .write = dp_lane_count_debugfs_write,
116 .llseek = default_llseek
117};
118
119static const struct file_operations dp_voltage_swing_fops = {
120 .owner = THIS_MODULE,
121 .read = dp_voltage_swing_debugfs_read,
122 .write = dp_voltage_swing_debugfs_write,
123 .llseek = default_llseek
124};
125
126static const struct file_operations dp_pre_emphasis_fops = {
127 .owner = THIS_MODULE,
128 .read = dp_pre_emphasis_debugfs_read,
129 .write = dp_pre_emphasis_debugfs_write,
130 .llseek = default_llseek
131};
132
133static const struct file_operations dp_phy_test_pattern_fops = {
134 .owner = THIS_MODULE,
135 .read = dp_phy_test_pattern_debugfs_read,
136 .write = dp_phy_test_pattern_debugfs_write,
137 .llseek = default_llseek
138};
139
140static const struct {
141 char *name;
142 const struct file_operations *fops;
143} dp_debugfs_entries[] = {
144 {"link_rate", &dp_link_rate_fops},
145 {"lane_count", &dp_lane_count_fops},
146 {"voltage_swing", &dp_voltage_swing_fops},
147 {"pre_emphasis", &dp_pre_emphasis_fops},
148 {"phy_test_pattern", &dp_phy_test_pattern_fops}
149};
150
151int connector_debugfs_init(struct amdgpu_dm_connector *connector)
152{
153 int i;
154 struct dentry *ent, *dir = connector->base.debugfs_entry;
155
156 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
157 for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
158 ent = debugfs_create_file(dp_debugfs_entries[i].name,
159 0644,
160 dir,
161 connector,
162 dp_debugfs_entries[i].fops);
163 if (IS_ERR(ent))
164 return PTR_ERR(ent);
165 }
166 }
167
168 return 0;
169}
170
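One plausible way to fill in the TODO read stubs above, sketched under assumptions: the connector pointer passed to debugfs_create_file() is retrievable through the inode's i_private, and the current link settings are reachable via the connector's dc_link (the field names follow DC's dc_link_settings but are not verified against this tree):

static ssize_t dp_link_rate_debugfs_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	/* debugfs_create_file() stored the connector in i_private */
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	char kbuf[32];
	int len;

	len = snprintf(kbuf, sizeof(kbuf), "%d\n",
		       connector->dc_link->cur_link_settings.link_rate);
	/* handles *pos bookkeeping and short reads */
	return simple_read_from_buffer(buf, size, pos, kbuf, len);
}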
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 7a65206a6d21..d9ed1b2aa811 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,13 +23,12 @@
23 * 23 *
24 */ 24 */
25 25
26#ifndef __SOC_BOUNDING_BOX_H__ 26#ifndef __AMDGPU_DM_DEBUGFS_H__
27#define __SOC_BOUNDING_BOX_H__ 27#define __AMDGPU_DM_DEBUGFS_H__
28 28
29#include "dml_common_defs.h" 29#include "amdgpu.h"
30#include "amdgpu_dm.h"
30 31
31void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box); 32int connector_debugfs_init(struct amdgpu_dm_connector *connector);
32voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
33double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
34 33
35#endif 34#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
new file mode 100644
index 000000000000..50e863024f58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -0,0 +1,535 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24#include <linux/string.h>
25#include <linux/acpi.h>
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29#include <drm/amdgpu_drm.h>
30#include "dm_services.h"
31#include "amdgpu.h"
32#include "amdgpu_dm.h"
33#include "amdgpu_dm_irq.h"
34#include "amdgpu_pm.h"
35#include "dm_pp_smu.h"
36
37
38bool dm_pp_apply_display_requirements(
39 const struct dc_context *ctx,
40 const struct dm_pp_display_configuration *pp_display_cfg)
41{
42 struct amdgpu_device *adev = ctx->driver_context;
43 int i;
44
45 if (adev->pm.dpm_enabled) {
46
47 memset(&adev->pm.pm_display_cfg, 0,
48 sizeof(adev->pm.pm_display_cfg));
49
50 adev->pm.pm_display_cfg.cpu_cc6_disable =
51 pp_display_cfg->cpu_cc6_disable;
52
53 adev->pm.pm_display_cfg.cpu_pstate_disable =
54 pp_display_cfg->cpu_pstate_disable;
55
56 adev->pm.pm_display_cfg.cpu_pstate_separation_time =
57 pp_display_cfg->cpu_pstate_separation_time;
58
59 adev->pm.pm_display_cfg.nb_pstate_switch_disable =
60 pp_display_cfg->nb_pstate_switch_disable;
61
62 adev->pm.pm_display_cfg.num_display =
63 pp_display_cfg->display_count;
64 adev->pm.pm_display_cfg.num_path_including_non_display =
65 pp_display_cfg->display_count;
66
67 adev->pm.pm_display_cfg.min_core_set_clock =
68 pp_display_cfg->min_engine_clock_khz/10;
69 adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
70 pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
71 adev->pm.pm_display_cfg.min_mem_set_clock =
72 pp_display_cfg->min_memory_clock_khz/10;
73
74 adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
75 pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
76 adev->pm.pm_display_cfg.min_dcef_set_clk =
77 pp_display_cfg->min_dcfclock_khz/10;
78
79 adev->pm.pm_display_cfg.multi_monitor_in_sync =
80 pp_display_cfg->all_displays_in_sync;
81 adev->pm.pm_display_cfg.min_vblank_time =
82 pp_display_cfg->avail_mclk_switch_time_us;
83
84 adev->pm.pm_display_cfg.display_clk =
85 pp_display_cfg->disp_clk_khz/10;
86
87 adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
88 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
89
90 adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
91 adev->pm.pm_display_cfg.line_time_in_us =
92 pp_display_cfg->line_time_in_us;
93
94 adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
95 adev->pm.pm_display_cfg.crossfire_display_index = -1;
96 adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
97
98 for (i = 0; i < pp_display_cfg->display_count; i++) {
99 const struct dm_pp_single_disp_config *dc_cfg =
100 &pp_display_cfg->disp_configs[i];
101 adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
102 }
103
104 /* TODO: complete implementation of
105 * pp_display_configuration_change().
106 * Follow example of:
107 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
108 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
109 if (adev->powerplay.pp_funcs->display_configuration_change)
110 adev->powerplay.pp_funcs->display_configuration_change(
111 adev->powerplay.pp_handle,
112 &adev->pm.pm_display_cfg);
113
114 /* TODO: replace by a separate call to 'apply display cfg'? */
115 amdgpu_pm_compute_clocks(adev);
116 }
117
118 return true;
119}
120
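Every "/10" above converts the kHz values DM carries into the 10 kHz units that pplib's pm_display_cfg expects. A hedged helper, not part of this patch, that would make the unit change explicit:

static inline uint32_t khz_to_pplib_10khz(uint32_t khz)
{
	return khz / 10;	/* e.g. 600000 kHz (600 MHz) -> 60000 */
}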
121static void get_default_clock_levels(
122 enum dm_pp_clock_type clk_type,
123 struct dm_pp_clock_levels *clks)
124{
125 uint32_t disp_clks_in_khz[6] = {
126 300000, 400000, 496560, 626090, 685720, 757900 };
127 uint32_t sclks_in_khz[6] = {
128 300000, 360000, 423530, 514290, 626090, 720000 };
129 uint32_t mclks_in_khz[2] = { 333000, 800000 };
130
131 switch (clk_type) {
132 case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
133 clks->num_levels = 6;
134 memmove(clks->clocks_in_khz, disp_clks_in_khz,
135 sizeof(disp_clks_in_khz));
136 break;
137 case DM_PP_CLOCK_TYPE_ENGINE_CLK:
138 clks->num_levels = 6;
139 memmove(clks->clocks_in_khz, sclks_in_khz,
140 sizeof(sclks_in_khz));
141 break;
142 case DM_PP_CLOCK_TYPE_MEMORY_CLK:
143 clks->num_levels = 2;
144 memmove(clks->clocks_in_khz, mclks_in_khz,
145 sizeof(mclks_in_khz));
146 break;
147 default:
148 clks->num_levels = 0;
149 break;
150 }
151}
152
153static enum amd_pp_clock_type dc_to_pp_clock_type(
154 enum dm_pp_clock_type dm_pp_clk_type)
155{
156 enum amd_pp_clock_type amd_pp_clk_type = 0;
157
158 switch (dm_pp_clk_type) {
159 case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
160 amd_pp_clk_type = amd_pp_disp_clock;
161 break;
162 case DM_PP_CLOCK_TYPE_ENGINE_CLK:
163 amd_pp_clk_type = amd_pp_sys_clock;
164 break;
165 case DM_PP_CLOCK_TYPE_MEMORY_CLK:
166 amd_pp_clk_type = amd_pp_mem_clock;
167 break;
168 case DM_PP_CLOCK_TYPE_DCEFCLK:
169 amd_pp_clk_type = amd_pp_dcef_clock;
170 break;
171 case DM_PP_CLOCK_TYPE_DCFCLK:
172 amd_pp_clk_type = amd_pp_dcf_clock;
173 break;
174 case DM_PP_CLOCK_TYPE_PIXELCLK:
175 amd_pp_clk_type = amd_pp_pixel_clock;
176 break;
177 case DM_PP_CLOCK_TYPE_FCLK:
178 amd_pp_clk_type = amd_pp_f_clock;
179 break;
180 case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
181 amd_pp_clk_type = amd_pp_phy_clock;
182 break;
183 case DM_PP_CLOCK_TYPE_DPPCLK:
184 amd_pp_clk_type = amd_pp_dpp_clock;
185 break;
186 default:
187 DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
188 dm_pp_clk_type);
189 break;
190 }
191
192 return amd_pp_clk_type;
193}
194
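dc_to_pp_clock_type() falls through to 0 for unmapped types, and dm_pp_apply_clock_for_voltage_request() further down relies on that 0 as an "invalid" sentinel. A small wrapper, not in the patch, that names the contract:

static bool dc_clock_type_is_mapped(enum dm_pp_clock_type clk_type)
{
	return dc_to_pp_clock_type(clk_type) != 0;
}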
195static void pp_to_dc_clock_levels(
196 const struct amd_pp_clocks *pp_clks,
197 struct dm_pp_clock_levels *dc_clks,
198 enum dm_pp_clock_type dc_clk_type)
199{
200 uint32_t i;
201
202 if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
203 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
204 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
205 pp_clks->count,
206 DM_PP_MAX_CLOCK_LEVELS);
207
208 dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
209 } else
210 dc_clks->num_levels = pp_clks->count;
211
212 DRM_INFO("DM_PPLIB: values for %s clock\n",
213 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
214
215 for (i = 0; i < dc_clks->num_levels; i++) {
216 DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
217 dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
218 }
219}
220
221static void pp_to_dc_clock_levels_with_latency(
222 const struct pp_clock_levels_with_latency *pp_clks,
223 struct dm_pp_clock_levels_with_latency *clk_level_info,
224 enum dm_pp_clock_type dc_clk_type)
225{
226 uint32_t i;
227
228 if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
229 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
230 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
231 pp_clks->num_levels,
232 DM_PP_MAX_CLOCK_LEVELS);
233
234 clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
235 } else
236 clk_level_info->num_levels = pp_clks->num_levels;
237
238 DRM_DEBUG("DM_PPLIB: values for %s clock\n",
239 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
240
241 for (i = 0; i < clk_level_info->num_levels; i++) {
242 DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
243 clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
244 clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
245 }
246}
247
248static void pp_to_dc_clock_levels_with_voltage(
249 const struct pp_clock_levels_with_voltage *pp_clks,
250 struct dm_pp_clock_levels_with_voltage *clk_level_info,
251 enum dm_pp_clock_type dc_clk_type)
252{
253 uint32_t i;
254
255 if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
256 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
257 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
258 pp_clks->num_levels,
259 DM_PP_MAX_CLOCK_LEVELS);
260
261 clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
262 } else
263 clk_level_info->num_levels = pp_clks->num_levels;
264
265 DRM_INFO("DM_PPLIB: values for %s clock\n",
266 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
267
268 for (i = 0; i < clk_level_info->num_levels; i++) {
269 DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
270 clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
271 clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
272 }
273}
274
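All three pp_to_dc_clock_levels*() translators above share the same guard: copy at most DM_PP_MAX_CLOCK_LEVELS entries and warn when pplib reports more. The clamp in isolation, as an illustrative helper:

static uint32_t dm_pp_clamp_num_levels(uint32_t reported)
{
	return reported > DM_PP_MAX_CLOCK_LEVELS ?
			DM_PP_MAX_CLOCK_LEVELS : reported;
}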
275bool dm_pp_get_clock_levels_by_type(
276 const struct dc_context *ctx,
277 enum dm_pp_clock_type clk_type,
278 struct dm_pp_clock_levels *dc_clks)
279{
280 struct amdgpu_device *adev = ctx->driver_context;
281 void *pp_handle = adev->powerplay.pp_handle;
282 struct amd_pp_clocks pp_clks = { 0 };
283 struct amd_pp_simple_clock_info validation_clks = { 0 };
284 uint32_t i;
285
286 if (adev->powerplay.pp_funcs->get_clock_by_type) {
287 if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
288 dc_to_pp_clock_type(clk_type), &pp_clks)) {
289 /* Error in pplib. Provide default values. */
290 get_default_clock_levels(clk_type, dc_clks);
291 return true;
292 }
293 }
294
295 pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
296
297 if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
298 if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
299 pp_handle, &validation_clks)) {
300 /* Error in pplib. Provide default values. */
301 DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
302 validation_clks.engine_max_clock = 72000;
303 validation_clks.memory_max_clock = 80000;
304 validation_clks.level = 0;
305 }
306 }
307
308 DRM_INFO("DM_PPLIB: Validation clocks:\n");
309 DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
310 validation_clks.engine_max_clock);
311 DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
312 validation_clks.memory_max_clock);
313 DRM_INFO("DM_PPLIB: level : %d\n",
314 validation_clks.level);
315
316 /* Translate 10 kHz to kHz. */
317 validation_clks.engine_max_clock *= 10;
318 validation_clks.memory_max_clock *= 10;
319
320 /* Determine the highest non-boosted level from the Validation Clocks */
321 if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
322 for (i = 0; i < dc_clks->num_levels; i++) {
323 if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
324				/* This clock is higher than the validation clock.
325				 * That means the previous one is the highest
326				 * non-boosted one. */
327 DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
328 dc_clks->num_levels, i);
329 dc_clks->num_levels = i > 0 ? i : 1;
330 break;
331 }
332 }
333 } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
334 for (i = 0; i < dc_clks->num_levels; i++) {
335 if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
336 DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
337 dc_clks->num_levels, i);
338 dc_clks->num_levels = i > 0 ? i : 1;
339 break;
340 }
341 }
342 }
343
344 return true;
345}
346
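A worked example of the "highest non-boosted level" truncation above, extracted into a standalone helper with made-up numbers. pplib's validation clock is in 10 kHz units, so engine_max_clock = 72000 becomes 720000 kHz after the *10 translation:

#include <stdint.h>

static uint32_t highest_non_boosted_levels(const uint32_t *clks_khz,
					   uint32_t num, uint32_t max_khz)
{
	uint32_t i;

	for (i = 0; i < num; i++)
		if (clks_khz[i] > max_khz)
			return i > 0 ? i : 1;	/* keep at least one level */
	return num;				/* nothing exceeded the limit */
}

/* With clks = {300000, 600000, 720000, 757900} and max_khz = 720000,
 * this returns 3: the boosted 757900 kHz top level is dropped, matching
 * the loop in dm_pp_get_clock_levels_by_type(). */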
347bool dm_pp_get_clock_levels_by_type_with_latency(
348 const struct dc_context *ctx,
349 enum dm_pp_clock_type clk_type,
350 struct dm_pp_clock_levels_with_latency *clk_level_info)
351{
352 struct amdgpu_device *adev = ctx->driver_context;
353 void *pp_handle = adev->powerplay.pp_handle;
354 struct pp_clock_levels_with_latency pp_clks = { 0 };
355 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
356
357 if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
358 return false;
359
360 if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
361 dc_to_pp_clock_type(clk_type),
362 &pp_clks))
363 return false;
364
365 pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
366
367 return true;
368}
369
370bool dm_pp_get_clock_levels_by_type_with_voltage(
371 const struct dc_context *ctx,
372 enum dm_pp_clock_type clk_type,
373 struct dm_pp_clock_levels_with_voltage *clk_level_info)
374{
375 struct amdgpu_device *adev = ctx->driver_context;
376 void *pp_handle = adev->powerplay.pp_handle;
377 struct pp_clock_levels_with_voltage pp_clk_info = {0};
378 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
379
380 if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
381 dc_to_pp_clock_type(clk_type),
382 &pp_clk_info))
383 return false;
384
385 pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
386
387 return true;
388}
389
390bool dm_pp_notify_wm_clock_changes(
391 const struct dc_context *ctx,
392 struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
393{
394 /* TODO: to be implemented */
395 return false;
396}
397
398bool dm_pp_apply_power_level_change_request(
399 const struct dc_context *ctx,
400 struct dm_pp_power_level_change_request *level_change_req)
401{
402 /* TODO: to be implemented */
403 return false;
404}
405
406bool dm_pp_apply_clock_for_voltage_request(
407 const struct dc_context *ctx,
408 struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
409{
410 struct amdgpu_device *adev = ctx->driver_context;
411 struct pp_display_clock_request pp_clock_request = {0};
412 int ret = 0;
413
414 pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
415 pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
416
417 if (!pp_clock_request.clock_type)
418 return false;
419
420 if (adev->powerplay.pp_funcs->display_clock_voltage_request)
421 ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
422 adev->powerplay.pp_handle,
423 &pp_clock_request);
424 if (ret)
425 return false;
426 return true;
427}
428
429bool dm_pp_get_static_clocks(
430 const struct dc_context *ctx,
431 struct dm_pp_static_clock_info *static_clk_info)
432{
433 struct amdgpu_device *adev = ctx->driver_context;
434 struct amd_pp_clock_info pp_clk_info = {0};
435 int ret = 0;
436
437 if (adev->powerplay.pp_funcs->get_current_clocks)
438 ret = adev->powerplay.pp_funcs->get_current_clocks(
439 adev->powerplay.pp_handle,
440 &pp_clk_info);
441 if (ret)
442 return false;
443
444 static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
445 static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
446 static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
447
448 return true;
449}
450
451void pp_rv_set_display_requirement(struct pp_smu *pp,
452 struct pp_smu_display_requirement_rv *req)
453{
454 struct dc_context *ctx = pp->ctx;
455 struct amdgpu_device *adev = ctx->driver_context;
456 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
457
458 if (!pp_funcs || !pp_funcs->display_configuration_changed)
459 return;
460
461 amdgpu_dpm_display_configuration_changed(adev);
462}
463
464void pp_rv_set_wm_ranges(struct pp_smu *pp,
465 struct pp_smu_wm_range_sets *ranges)
466{
467 struct dc_context *ctx = pp->ctx;
468 struct amdgpu_device *adev = ctx->driver_context;
469 void *pp_handle = adev->powerplay.pp_handle;
470 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
471 struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
472 struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
473 struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
474 int32_t i;
475
476 wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
477 wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
478
479 for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
480 if (ranges->reader_wm_sets[i].wm_inst > 3)
481 wm_dce_clocks[i].wm_set_id = WM_SET_A;
482 else
483 wm_dce_clocks[i].wm_set_id =
484 ranges->reader_wm_sets[i].wm_inst;
485 wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
486 ranges->reader_wm_sets[i].max_drain_clk_khz;
487 wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
488 ranges->reader_wm_sets[i].min_drain_clk_khz;
489 wm_dce_clocks[i].wm_max_mem_clk_in_khz =
490 ranges->reader_wm_sets[i].max_fill_clk_khz;
491 wm_dce_clocks[i].wm_min_mem_clk_in_khz =
492 ranges->reader_wm_sets[i].min_fill_clk_khz;
493 }
494
495 for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
496 if (ranges->writer_wm_sets[i].wm_inst > 3)
497 wm_soc_clocks[i].wm_set_id = WM_SET_A;
498 else
499 wm_soc_clocks[i].wm_set_id =
500 ranges->writer_wm_sets[i].wm_inst;
501 wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
502 ranges->writer_wm_sets[i].max_fill_clk_khz;
503 wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
504 ranges->writer_wm_sets[i].min_fill_clk_khz;
505 wm_soc_clocks[i].wm_max_mem_clk_in_khz =
506 ranges->writer_wm_sets[i].max_drain_clk_khz;
507 wm_soc_clocks[i].wm_min_mem_clk_in_khz =
508 ranges->writer_wm_sets[i].min_drain_clk_khz;
509 }
510
511 pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
512}
513
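A condensed view of the wm_inst -> wm_set_id mapping used in both loops above: instances 0..3 map straight through, and anything out of range falls back to WM_SET_A (the first set). As an illustrative helper:

static uint32_t wm_inst_to_set_id(uint32_t wm_inst)
{
	return wm_inst > 3 ? WM_SET_A : wm_inst;
}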
514void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
515{
516 struct dc_context *ctx = pp->ctx;
517 struct amdgpu_device *adev = ctx->driver_context;
518 void *pp_handle = adev->powerplay.pp_handle;
519 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
520
521 if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
522 return;
523
524 pp_funcs->notify_smu_enable_pwe(pp_handle);
525}
526
527void dm_pp_get_funcs_rv(
528 struct dc_context *ctx,
529 struct pp_smu_funcs_rv *funcs)
530{
531 funcs->pp_smu.ctx = ctx;
532 funcs->set_display_requirement = pp_rv_set_display_requirement;
533 funcs->set_wm_ranges = pp_rv_set_wm_ranges;
534 funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
535}
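dm_pp_get_funcs_rv() now wires real callbacks in place of the empty stub removed from amdgpu_dm_services.c below. A hedged sketch of how a DC-side caller would go through the vtable; the function name here is illustrative:

static void example_notify_pme_wa(struct pp_smu_funcs_rv *funcs)
{
	if (funcs->set_pme_wa_enable)
		funcs->set_pme_wa_enable(&funcs->pp_smu);
}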
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index e861929dd981..9f0a217603ad 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -35,6 +35,8 @@
35#include "amdgpu_dm_irq.h" 35#include "amdgpu_dm_irq.h"
36#include "amdgpu_pm.h" 36#include "amdgpu_pm.h"
37 37
38
39
38unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx, 40unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
39 unsigned long long current_time_stamp, 41 unsigned long long current_time_stamp,
40 unsigned long long last_time_stamp) 42 unsigned long long last_time_stamp)
@@ -72,326 +74,4 @@ bool dm_read_persistent_data(struct dc_context *ctx,
72 74
73/**** power component interfaces ****/ 75/**** power component interfaces ****/
74 76
75bool dm_pp_apply_display_requirements(
76 const struct dc_context *ctx,
77 const struct dm_pp_display_configuration *pp_display_cfg)
78{
79 struct amdgpu_device *adev = ctx->driver_context;
80
81 if (adev->pm.dpm_enabled) {
82
83 memset(&adev->pm.pm_display_cfg, 0,
84 sizeof(adev->pm.pm_display_cfg));
85
86 adev->pm.pm_display_cfg.cpu_cc6_disable =
87 pp_display_cfg->cpu_cc6_disable;
88
89 adev->pm.pm_display_cfg.cpu_pstate_disable =
90 pp_display_cfg->cpu_pstate_disable;
91
92 adev->pm.pm_display_cfg.cpu_pstate_separation_time =
93 pp_display_cfg->cpu_pstate_separation_time;
94
95 adev->pm.pm_display_cfg.nb_pstate_switch_disable =
96 pp_display_cfg->nb_pstate_switch_disable;
97
98 adev->pm.pm_display_cfg.num_display =
99 pp_display_cfg->display_count;
100 adev->pm.pm_display_cfg.num_path_including_non_display =
101 pp_display_cfg->display_count;
102
103 adev->pm.pm_display_cfg.min_core_set_clock =
104 pp_display_cfg->min_engine_clock_khz/10;
105 adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
106 pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
107 adev->pm.pm_display_cfg.min_mem_set_clock =
108 pp_display_cfg->min_memory_clock_khz/10;
109
110 adev->pm.pm_display_cfg.multi_monitor_in_sync =
111 pp_display_cfg->all_displays_in_sync;
112 adev->pm.pm_display_cfg.min_vblank_time =
113 pp_display_cfg->avail_mclk_switch_time_us;
114
115 adev->pm.pm_display_cfg.display_clk =
116 pp_display_cfg->disp_clk_khz/10;
117
118 adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
119 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
120
121 adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
122 adev->pm.pm_display_cfg.line_time_in_us =
123 pp_display_cfg->line_time_in_us;
124
125 adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
126 adev->pm.pm_display_cfg.crossfire_display_index = -1;
127 adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
128
129 /* TODO: complete implementation of
130 * pp_display_configuration_change().
131 * Follow example of:
132 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
133 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
134 if (adev->powerplay.pp_funcs->display_configuration_change)
135 adev->powerplay.pp_funcs->display_configuration_change(
136 adev->powerplay.pp_handle,
137 &adev->pm.pm_display_cfg);
138
139 /* TODO: replace by a separate call to 'apply display cfg'? */
140 amdgpu_pm_compute_clocks(adev);
141 }
142
143 return true;
144}
145
146static void get_default_clock_levels(
147 enum dm_pp_clock_type clk_type,
148 struct dm_pp_clock_levels *clks)
149{
150 uint32_t disp_clks_in_khz[6] = {
151 300000, 400000, 496560, 626090, 685720, 757900 };
152 uint32_t sclks_in_khz[6] = {
153 300000, 360000, 423530, 514290, 626090, 720000 };
154 uint32_t mclks_in_khz[2] = { 333000, 800000 };
155
156 switch (clk_type) {
157 case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
158 clks->num_levels = 6;
159 memmove(clks->clocks_in_khz, disp_clks_in_khz,
160 sizeof(disp_clks_in_khz));
161 break;
162 case DM_PP_CLOCK_TYPE_ENGINE_CLK:
163 clks->num_levels = 6;
164 memmove(clks->clocks_in_khz, sclks_in_khz,
165 sizeof(sclks_in_khz));
166 break;
167 case DM_PP_CLOCK_TYPE_MEMORY_CLK:
168 clks->num_levels = 2;
169 memmove(clks->clocks_in_khz, mclks_in_khz,
170 sizeof(mclks_in_khz));
171 break;
172 default:
173 clks->num_levels = 0;
174 break;
175 }
176}
177
178static enum amd_pp_clock_type dc_to_pp_clock_type(
179 enum dm_pp_clock_type dm_pp_clk_type)
180{
181 enum amd_pp_clock_type amd_pp_clk_type = 0;
182
183 switch (dm_pp_clk_type) {
184 case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
185 amd_pp_clk_type = amd_pp_disp_clock;
186 break;
187 case DM_PP_CLOCK_TYPE_ENGINE_CLK:
188 amd_pp_clk_type = amd_pp_sys_clock;
189 break;
190 case DM_PP_CLOCK_TYPE_MEMORY_CLK:
191 amd_pp_clk_type = amd_pp_mem_clock;
192 break;
193 default:
194 DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
195 dm_pp_clk_type);
196 break;
197 }
198
199 return amd_pp_clk_type;
200}
201
202static void pp_to_dc_clock_levels(
203 const struct amd_pp_clocks *pp_clks,
204 struct dm_pp_clock_levels *dc_clks,
205 enum dm_pp_clock_type dc_clk_type)
206{
207 uint32_t i;
208
209 if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
210 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
211 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
212 pp_clks->count,
213 DM_PP_MAX_CLOCK_LEVELS);
214
215 dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
216 } else
217 dc_clks->num_levels = pp_clks->count;
218
219 DRM_INFO("DM_PPLIB: values for %s clock\n",
220 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
221
222 for (i = 0; i < dc_clks->num_levels; i++) {
223 DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
224 /* translate 10kHz to kHz */
225 dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
226 }
227}
228
229static void pp_to_dc_clock_levels_with_latency(
230 const struct pp_clock_levels_with_latency *pp_clks,
231 struct dm_pp_clock_levels_with_latency *clk_level_info,
232 enum dm_pp_clock_type dc_clk_type)
233{
234 uint32_t i;
235
236 if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
237 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
238 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
239 pp_clks->num_levels,
240 DM_PP_MAX_CLOCK_LEVELS);
241
242 clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
243 } else
244 clk_level_info->num_levels = pp_clks->num_levels;
245
246 DRM_DEBUG("DM_PPLIB: values for %s clock\n",
247 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
248
249 for (i = 0; i < clk_level_info->num_levels; i++) {
250 DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
251 clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
252 clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
253 }
254}
255
256bool dm_pp_get_clock_levels_by_type(
257 const struct dc_context *ctx,
258 enum dm_pp_clock_type clk_type,
259 struct dm_pp_clock_levels *dc_clks)
260{
261 struct amdgpu_device *adev = ctx->driver_context;
262 void *pp_handle = adev->powerplay.pp_handle;
263 struct amd_pp_clocks pp_clks = { 0 };
264 struct amd_pp_simple_clock_info validation_clks = { 0 };
265 uint32_t i;
266
267 if (adev->powerplay.pp_funcs->get_clock_by_type) {
268 if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
269 dc_to_pp_clock_type(clk_type), &pp_clks)) {
270 /* Error in pplib. Provide default values. */
271 get_default_clock_levels(clk_type, dc_clks);
272 return true;
273 }
274 }
275
276 pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
277
278 if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
279 if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
280 pp_handle, &validation_clks)) {
281 /* Error in pplib. Provide default values. */
282 DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
283 validation_clks.engine_max_clock = 72000;
284 validation_clks.memory_max_clock = 80000;
285 validation_clks.level = 0;
286 }
287 }
288
289 DRM_INFO("DM_PPLIB: Validation clocks:\n");
290 DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
291 validation_clks.engine_max_clock);
292 DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
293 validation_clks.memory_max_clock);
294 DRM_INFO("DM_PPLIB: level : %d\n",
295 validation_clks.level);
296
297 /* Translate 10 kHz to kHz. */
298 validation_clks.engine_max_clock *= 10;
299 validation_clks.memory_max_clock *= 10;
300
301 /* Determine the highest non-boosted level from the Validation Clocks */
302 if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
303 for (i = 0; i < dc_clks->num_levels; i++) {
304 if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
305 /* This clock is higher the validation clock.
306 * Than means the previous one is the highest
307 * non-boosted one. */
308 DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
309 dc_clks->num_levels, i);
310 dc_clks->num_levels = i > 0 ? i : 1;
311 break;
312 }
313 }
314 } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
315 for (i = 0; i < dc_clks->num_levels; i++) {
316 if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
317 DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
318 dc_clks->num_levels, i);
319 dc_clks->num_levels = i > 0 ? i : 1;
320 break;
321 }
322 }
323 }
324
325 return true;
326}
327
328bool dm_pp_get_clock_levels_by_type_with_latency(
329 const struct dc_context *ctx,
330 enum dm_pp_clock_type clk_type,
331 struct dm_pp_clock_levels_with_latency *clk_level_info)
332{
333 struct amdgpu_device *adev = ctx->driver_context;
334 void *pp_handle = adev->powerplay.pp_handle;
335 struct pp_clock_levels_with_latency pp_clks = { 0 };
336 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
337
338 if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
339 return false;
340
341 if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
342 dc_to_pp_clock_type(clk_type),
343 &pp_clks))
344 return false;
345
346 pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
347
348 return true;
349}
350
351bool dm_pp_get_clock_levels_by_type_with_voltage(
352 const struct dc_context *ctx,
353 enum dm_pp_clock_type clk_type,
354 struct dm_pp_clock_levels_with_voltage *clk_level_info)
355{
356 /* TODO: to be implemented */
357 return false;
358}
359
360bool dm_pp_notify_wm_clock_changes(
361 const struct dc_context *ctx,
362 struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
363{
364 /* TODO: to be implemented */
365 return false;
366}
367
368bool dm_pp_apply_power_level_change_request(
369 const struct dc_context *ctx,
370 struct dm_pp_power_level_change_request *level_change_req)
371{
372 /* TODO: to be implemented */
373 return false;
374}
375
376bool dm_pp_apply_clock_for_voltage_request(
377 const struct dc_context *ctx,
378 struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
379{
380 /* TODO: to be implemented */
381 return false;
382}
383
384bool dm_pp_get_static_clocks(
385 const struct dc_context *ctx,
386 struct dm_pp_static_clock_info *static_clk_info)
387{
388 /* TODO: to be implemented */
389 return false;
390}
391
392void dm_pp_get_funcs_rv(
393 struct dc_context *ctx,
394 struct pp_smu_funcs_rv *funcs)
395{}
396 77
397/**** end of power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index c7f0b27e457e..be8a2494355a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -3762,6 +3762,200 @@ static struct integrated_info *bios_parser_create_integrated_info(
3762 return NULL; 3762 return NULL;
3763} 3763}
3764 3764
3765static enum bp_result update_slot_layout_info(
3766 struct dc_bios *dcb,
3767 unsigned int i,
3768 struct slot_layout_info *slot_layout_info,
3769 unsigned int record_offset)
3770{
3771 unsigned int j;
3772 struct bios_parser *bp;
3773 ATOM_BRACKET_LAYOUT_RECORD *record;
3774 ATOM_COMMON_RECORD_HEADER *record_header;
3775 enum bp_result result = BP_RESULT_NORECORD;
3776
3777 bp = BP_FROM_DCB(dcb);
3778 record = NULL;
3779 record_header = NULL;
3780
3781 for (;;) {
3782
3783 record_header = (ATOM_COMMON_RECORD_HEADER *)
3784 GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
3785 if (record_header == NULL) {
3786 result = BP_RESULT_BADBIOSTABLE;
3787 break;
3788 }
3789
3790 /* the end of the list */
3791 if (record_header->ucRecordType == 0xff ||
3792 record_header->ucRecordSize == 0) {
3793 break;
3794 }
3795
3796 if (record_header->ucRecordType ==
3797 ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
3798 sizeof(ATOM_BRACKET_LAYOUT_RECORD)
3799 <= record_header->ucRecordSize) {
3800 record = (ATOM_BRACKET_LAYOUT_RECORD *)
3801 (record_header);
3802 result = BP_RESULT_OK;
3803 break;
3804 }
3805
3806 record_offset += record_header->ucRecordSize;
3807 }
3808
3809	/* return if the record was not found */
3810 if (result != BP_RESULT_OK)
3811 return result;
3812
3813 /* get slot sizes */
3814 slot_layout_info->length = record->ucLength;
3815 slot_layout_info->width = record->ucWidth;
3816
3817 /* get info for each connector in the slot */
3818 slot_layout_info->num_of_connectors = record->ucConnNum;
3819 for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
3820 slot_layout_info->connectors[j].connector_type =
3821 (enum connector_layout_type)
3822 (record->asConnInfo[j].ucConnectorType);
3823 switch (record->asConnInfo[j].ucConnectorType) {
3824 case CONNECTOR_TYPE_DVI_D:
3825 slot_layout_info->connectors[j].connector_type =
3826 CONNECTOR_LAYOUT_TYPE_DVI_D;
3827 slot_layout_info->connectors[j].length =
3828 CONNECTOR_SIZE_DVI;
3829 break;
3830
3831 case CONNECTOR_TYPE_HDMI:
3832 slot_layout_info->connectors[j].connector_type =
3833 CONNECTOR_LAYOUT_TYPE_HDMI;
3834 slot_layout_info->connectors[j].length =
3835 CONNECTOR_SIZE_HDMI;
3836 break;
3837
3838 case CONNECTOR_TYPE_DISPLAY_PORT:
3839 slot_layout_info->connectors[j].connector_type =
3840 CONNECTOR_LAYOUT_TYPE_DP;
3841 slot_layout_info->connectors[j].length =
3842 CONNECTOR_SIZE_DP;
3843 break;
3844
3845 case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
3846 slot_layout_info->connectors[j].connector_type =
3847 CONNECTOR_LAYOUT_TYPE_MINI_DP;
3848 slot_layout_info->connectors[j].length =
3849 CONNECTOR_SIZE_MINI_DP;
3850 break;
3851
3852 default:
3853 slot_layout_info->connectors[j].connector_type =
3854 CONNECTOR_LAYOUT_TYPE_UNKNOWN;
3855 slot_layout_info->connectors[j].length =
3856 CONNECTOR_SIZE_UNKNOWN;
3857 }
3858
3859 slot_layout_info->connectors[j].position =
3860 record->asConnInfo[j].ucPosition;
3861 slot_layout_info->connectors[j].connector_id =
3862 object_id_from_bios_object_id(
3863 record->asConnInfo[j].usConnectorObjectId);
3864 }
3865 return result;
3866}
3867
3868
3869static enum bp_result get_bracket_layout_record(
3870 struct dc_bios *dcb,
3871 unsigned int bracket_layout_id,
3872 struct slot_layout_info *slot_layout_info)
3873{
3874 unsigned int i;
3875 unsigned int record_offset;
3876 struct bios_parser *bp;
3877 enum bp_result result;
3878 ATOM_OBJECT *object;
3879 ATOM_OBJECT_TABLE *object_table;
3880 unsigned int genericTableOffset;
3881
3882 bp = BP_FROM_DCB(dcb);
3883 object = NULL;
3884 if (slot_layout_info == NULL) {
3885 DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
3886 return BP_RESULT_BADINPUT;
3887 }
3888
3889
3890 genericTableOffset = bp->object_info_tbl_offset +
3891 bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
3892 object_table = (ATOM_OBJECT_TABLE *)
3893 GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
3894 if (!object_table)
3895 return BP_RESULT_FAILURE;
3896
3897 result = BP_RESULT_NORECORD;
3898 for (i = 0; i < object_table->ucNumberOfObjects; ++i) {
3899
3900 if (bracket_layout_id ==
3901 object_table->asObjects[i].usObjectID) {
3902
3903 object = &object_table->asObjects[i];
3904 record_offset = object->usRecordOffset +
3905 bp->object_info_tbl_offset;
3906
3907 result = update_slot_layout_info(dcb, i,
3908 slot_layout_info, record_offset);
3909 break;
3910 }
3911 }
3912 return result;
3913}
3914
3915static enum bp_result bios_get_board_layout_info(
3916 struct dc_bios *dcb,
3917 struct board_layout_info *board_layout_info)
3918{
3919 unsigned int i;
3920 struct bios_parser *bp;
3921 enum bp_result record_result;
3922
3923 const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
3924 GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
3925 GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
3926 0, 0
3927 };
3928
3929 bp = BP_FROM_DCB(dcb);
3930 if (board_layout_info == NULL) {
3931 DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
3932 return BP_RESULT_BADINPUT;
3933 }
3934
3935 board_layout_info->num_of_slots = 0;
3936
3937 for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
3938 record_result = get_bracket_layout_record(dcb,
3939 slot_index_to_vbios_id[i],
3940 &board_layout_info->slots[i]);
3941
3942 if (record_result == BP_RESULT_NORECORD && i > 0)
3943 break; /* no more slots present in bios */
3944 else if (record_result != BP_RESULT_OK)
3945 return record_result; /* fail */
3946
3947 ++board_layout_info->num_of_slots;
3948 }
3949
3950 /* all data is valid */
3951 board_layout_info->is_number_of_slots_valid = 1;
3952 board_layout_info->is_slots_size_valid = 1;
3953 board_layout_info->is_connector_offsets_valid = 1;
3954 board_layout_info->is_connector_lengths_valid = 1;
3955
3956 return BP_RESULT_OK;
3957}
3958
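The for (;;) walk in update_slot_layout_info() is the generic ATOM record-list pattern: records are variable-length, and the list ends at a 0xff record type or a zero record size. A minimal standalone sketch of the same walk; the types are illustrative, not the real ATOM structs:

#include <stddef.h>

struct record_hdr {
	unsigned char type;
	unsigned char size;
};

static const struct record_hdr *find_record(const unsigned char *image,
					    unsigned int offset,
					    unsigned char wanted_type)
{
	const struct record_hdr *hdr;

	for (;;) {
		hdr = (const struct record_hdr *)(image + offset);
		if (hdr->type == 0xff || hdr->size == 0)
			return NULL;	/* end of list: not found */
		if (hdr->type == wanted_type)
			return hdr;	/* caller casts to the full record */
		offset += hdr->size;	/* size covers the whole record */
	}
}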
3765/******************************************************************************/ 3959/******************************************************************************/
3766 3960
3767static const struct dc_vbios_funcs vbios_funcs = { 3961static const struct dc_vbios_funcs vbios_funcs = {
@@ -3836,6 +4030,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
3836 .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */ 4030 .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
3837 4031
3838 .bios_parser_destroy = bios_parser_destroy, 4032 .bios_parser_destroy = bios_parser_destroy,
4033
4034 .get_board_layout_info = bios_get_board_layout_info,
3839}; 4035};
3840 4036
3841static bool bios_parser_construct( 4037static bool bios_parser_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index b8cef7af3c4a..aeb56e402ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -43,6 +43,29 @@
43#include "bios_parser_interface.h" 43#include "bios_parser_interface.h"
44 44
45#include "bios_parser_common.h" 45#include "bios_parser_common.h"
46
47/* Temporary defines until the matching ObjectID.h update lands */
48#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
49#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
50#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
51
52#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
53#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
54 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
55 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
56 GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
57#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
58
59#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
60#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
61 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
62 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
63 GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
64#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
65
66#define DC_LOGGER \
67 bp->base.ctx->logger
68
46#define LAST_RECORD_TYPE 0xff 69#define LAST_RECORD_TYPE 0xff
47#define SMU9_SYSPLL0_ID 0 70#define SMU9_SYSPLL0_ID 0
48 71
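A worked composition of the bracket-layout object IDs defined above, assuming the usual ObjectID.h values (OBJECT_ID_SHIFT = 0, ENUM_ID_SHIFT = 8, OBJECT_TYPE_SHIFT = 12, GRAPH_OBJECT_TYPE_GENERIC = 0x7; worth verifying against the tree):

#include <assert.h>

int main(void)
{
	/* type 0x7 (generic), enum 1 or 2, object id 0x05 */
	unsigned int id1 = (0x7u << 12) | (0x1u << 8) | 0x05u;
	unsigned int id2 = (0x7u << 12) | (0x2u << 8) | 0x05u;

	assert(id1 == 0x7105 && id2 == 0x7205);
	return 0;
}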
@@ -86,7 +109,6 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
86 109
87#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table) 110#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
88 111
89
90static void destruct(struct bios_parser *bp) 112static void destruct(struct bios_parser *bp)
91{ 113{
92 kfree(bp->base.bios_local_image); 114 kfree(bp->base.bios_local_image);
@@ -1854,6 +1876,198 @@ static struct integrated_info *bios_parser_create_integrated_info(
1854 return NULL; 1876 return NULL;
1855} 1877}
1856 1878
1879static enum bp_result update_slot_layout_info(
1880 struct dc_bios *dcb,
1881 unsigned int i,
1882 struct slot_layout_info *slot_layout_info)
1883{
1884 unsigned int record_offset;
1885 unsigned int j;
1886 struct atom_display_object_path_v2 *object;
1887 struct atom_bracket_layout_record *record;
1888 struct atom_common_record_header *record_header;
1889 enum bp_result result;
1890 struct bios_parser *bp;
1891 struct object_info_table *tbl;
1892 struct display_object_info_table_v1_4 *v1_4;
1893
1894 record = NULL;
1895 record_header = NULL;
1896 result = BP_RESULT_NORECORD;
1897
1898 bp = BP_FROM_DCB(dcb);
1899 tbl = &bp->object_info_tbl;
1900 v1_4 = tbl->v1_4;
1901
1902 object = &v1_4->display_path[i];
1903 record_offset = (unsigned int)
1904 (object->disp_recordoffset) +
1905 (unsigned int)(bp->object_info_tbl_offset);
1906
1907 for (;;) {
1908
1909 record_header = (struct atom_common_record_header *)
1910 GET_IMAGE(struct atom_common_record_header,
1911 record_offset);
1912 if (record_header == NULL) {
1913 result = BP_RESULT_BADBIOSTABLE;
1914 break;
1915 }
1916
1917 /* the end of the list */
1918 if (record_header->record_type == 0xff ||
1919 record_header->record_size == 0) {
1920 break;
1921 }
1922
1923 if (record_header->record_type ==
1924 ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
1925 sizeof(struct atom_bracket_layout_record)
1926 <= record_header->record_size) {
1927 record = (struct atom_bracket_layout_record *)
1928 (record_header);
1929 result = BP_RESULT_OK;
1930 break;
1931 }
1932
1933 record_offset += record_header->record_size;
1934 }
1935
1936	/* return if the record was not found */
1937 if (result != BP_RESULT_OK)
1938 return result;
1939
1940 /* get slot sizes */
1941 slot_layout_info->length = record->bracketlen;
1942 slot_layout_info->width = record->bracketwidth;
1943
1944 /* get info for each connector in the slot */
1945 slot_layout_info->num_of_connectors = record->conn_num;
1946 for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
1947 slot_layout_info->connectors[j].connector_type =
1948 (enum connector_layout_type)
1949 (record->conn_info[j].connector_type);
1950 switch (record->conn_info[j].connector_type) {
1951 case CONNECTOR_TYPE_DVI_D:
1952 slot_layout_info->connectors[j].connector_type =
1953 CONNECTOR_LAYOUT_TYPE_DVI_D;
1954 slot_layout_info->connectors[j].length =
1955 CONNECTOR_SIZE_DVI;
1956 break;
1957
1958 case CONNECTOR_TYPE_HDMI:
1959 slot_layout_info->connectors[j].connector_type =
1960 CONNECTOR_LAYOUT_TYPE_HDMI;
1961 slot_layout_info->connectors[j].length =
1962 CONNECTOR_SIZE_HDMI;
1963 break;
1964
1965 case CONNECTOR_TYPE_DISPLAY_PORT:
1966 slot_layout_info->connectors[j].connector_type =
1967 CONNECTOR_LAYOUT_TYPE_DP;
1968 slot_layout_info->connectors[j].length =
1969 CONNECTOR_SIZE_DP;
1970 break;
1971
1972 case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
1973 slot_layout_info->connectors[j].connector_type =
1974 CONNECTOR_LAYOUT_TYPE_MINI_DP;
1975 slot_layout_info->connectors[j].length =
1976 CONNECTOR_SIZE_MINI_DP;
1977 break;
1978
1979 default:
1980 slot_layout_info->connectors[j].connector_type =
1981 CONNECTOR_LAYOUT_TYPE_UNKNOWN;
1982 slot_layout_info->connectors[j].length =
1983 CONNECTOR_SIZE_UNKNOWN;
1984 }
1985
1986 slot_layout_info->connectors[j].position =
1987 record->conn_info[j].position;
1988 slot_layout_info->connectors[j].connector_id =
1989 object_id_from_bios_object_id(
1990 record->conn_info[j].connectorobjid);
1991 }
1992 return result;
1993}
1994
1995
1996static enum bp_result get_bracket_layout_record(
1997 struct dc_bios *dcb,
1998 unsigned int bracket_layout_id,
1999 struct slot_layout_info *slot_layout_info)
2000{
2001 unsigned int i;
2002 struct bios_parser *bp = BP_FROM_DCB(dcb);
2003 enum bp_result result;
2004 struct object_info_table *tbl;
2005 struct display_object_info_table_v1_4 *v1_4;
2006
2007 if (slot_layout_info == NULL) {
2008 DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
2009 return BP_RESULT_BADINPUT;
2010 }
2011 tbl = &bp->object_info_tbl;
2012 v1_4 = tbl->v1_4;
2013
2014 result = BP_RESULT_NORECORD;
2015 for (i = 0; i < v1_4->number_of_path; ++i) {
2016
2017 if (bracket_layout_id ==
2018 v1_4->display_path[i].display_objid) {
2019 result = update_slot_layout_info(dcb, i,
2020 slot_layout_info);
2021 break;
2022 }
2023 }
2024 return result;
2025}
2026
2027static enum bp_result bios_get_board_layout_info(
2028 struct dc_bios *dcb,
2029 struct board_layout_info *board_layout_info)
2030{
2031 unsigned int i;
2032 struct bios_parser *bp;
2033 enum bp_result record_result;
2034
2035 const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
2036 GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
2037 GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
2038 0, 0
2039 };
2040
2041 bp = BP_FROM_DCB(dcb);
2042 if (board_layout_info == NULL) {
2043 DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
2044 return BP_RESULT_BADINPUT;
2045 }
2046
2047 board_layout_info->num_of_slots = 0;
2048
2049 for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
2050 record_result = get_bracket_layout_record(dcb,
2051 slot_index_to_vbios_id[i],
2052 &board_layout_info->slots[i]);
2053
2054 if (record_result == BP_RESULT_NORECORD && i > 0)
2055 break; /* no more slots present in bios */
2056 else if (record_result != BP_RESULT_OK)
2057 return record_result; /* fail */
2058
2059 ++board_layout_info->num_of_slots;
2060 }
2061
2062 /* all data is valid */
2063 board_layout_info->is_number_of_slots_valid = 1;
2064 board_layout_info->is_slots_size_valid = 1;
2065 board_layout_info->is_connector_offsets_valid = 1;
2066 board_layout_info->is_connector_lengths_valid = 1;
2067
2068 return BP_RESULT_OK;
2069}
2070
1857static const struct dc_vbios_funcs vbios_funcs = { 2071static const struct dc_vbios_funcs vbios_funcs = {
1858 .get_connectors_number = bios_parser_get_connectors_number, 2072 .get_connectors_number = bios_parser_get_connectors_number,
1859 2073
@@ -1925,6 +2139,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
1925 .bios_parser_destroy = firmware_parser_destroy, 2139 .bios_parser_destroy = firmware_parser_destroy,
1926 2140
1927 .get_smu_clock_info = bios_parser_get_smu_clock_info, 2141 .get_smu_clock_info = bios_parser_get_smu_clock_info,
2142
2143 .get_board_layout_info = bios_get_board_layout_info,
1928}; 2144};
1929 2145
1930static bool bios_parser_construct( 2146static bool bios_parser_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 752b08a42d3e..2b5dc499a35e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -59,36 +59,7 @@
59 bios_cmd_table_para_revision(bp->base.ctx->driver_context, \ 59 bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
60 GET_INDEX_INTO_MASTER_TABLE(command, fname)) 60 GET_INDEX_INTO_MASTER_TABLE(command, fname))
61 61
62static void init_dig_encoder_control(struct bios_parser *bp);
63static void init_transmitter_control(struct bios_parser *bp);
64static void init_set_pixel_clock(struct bios_parser *bp);
65 62
66static void init_set_crtc_timing(struct bios_parser *bp);
67
68static void init_select_crtc_source(struct bios_parser *bp);
69static void init_enable_crtc(struct bios_parser *bp);
70
71static void init_external_encoder_control(struct bios_parser *bp);
72static void init_enable_disp_power_gating(struct bios_parser *bp);
73static void init_set_dce_clock(struct bios_parser *bp);
74static void init_get_smu_clock_info(struct bios_parser *bp);
75
76void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
77{
78 init_dig_encoder_control(bp);
79 init_transmitter_control(bp);
80 init_set_pixel_clock(bp);
81
82 init_set_crtc_timing(bp);
83
84 init_select_crtc_source(bp);
85 init_enable_crtc(bp);
86
87 init_external_encoder_control(bp);
88 init_enable_disp_power_gating(bp);
89 init_set_dce_clock(bp);
90 init_get_smu_clock_info(bp);
91}
92 63
93static uint32_t bios_cmd_table_para_revision(void *dev, 64static uint32_t bios_cmd_table_para_revision(void *dev,
94 uint32_t index) 65 uint32_t index)
@@ -829,3 +800,20 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
829 return 0; 800 return 0;
830} 801}
831 802
803void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
804{
805 init_dig_encoder_control(bp);
806 init_transmitter_control(bp);
807 init_set_pixel_clock(bp);
808
809 init_set_crtc_timing(bp);
810
811 init_select_crtc_source(bp);
812 init_enable_crtc(bp);
813
814 init_external_encoder_control(bp);
815 init_enable_disp_power_gating(bp);
816 init_set_dce_clock(bp);
817 init_get_smu_clock_info(bp);
818
819}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 49a4ea45466d..e44b8d3d6891 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -31,6 +31,8 @@
31 31
32#include "resource.h" 32#include "resource.h"
33#include "dcn10/dcn10_resource.h" 33#include "dcn10/dcn10_resource.h"
34#include "dcn10/dcn10_hubbub.h"
35
34#include "dcn_calc_math.h" 36#include "dcn_calc_math.h"
35 37
36#define DC_LOGGER \ 38#define DC_LOGGER \
@@ -423,6 +425,10 @@ static void dcn_bw_calc_rq_dlg_ttu(
423 int total_flip_bytes = 0; 425 int total_flip_bytes = 0;
424 int i; 426 int i;
425 427
428 memset(dlg_regs, 0, sizeof(*dlg_regs));
429 memset(ttu_regs, 0, sizeof(*ttu_regs));
430 memset(rq_regs, 0, sizeof(*rq_regs));
431
426 for (i = 0; i < number_of_planes; i++) { 432 for (i = 0; i < number_of_planes; i++) {
427 total_active_bw += v->read_bandwidth[i]; 433 total_active_bw += v->read_bandwidth[i];
428 total_prefetch_bw += v->prefetch_bandwidth[i]; 434 total_prefetch_bw += v->prefetch_bandwidth[i];
@@ -501,6 +507,7 @@ static void split_stream_across_pipes(
501 resource_build_scaling_params(secondary_pipe); 507 resource_build_scaling_params(secondary_pipe);
502} 508}
503 509
510#if 0
504static void calc_wm_sets_and_perf_params( 511static void calc_wm_sets_and_perf_params(
505 struct dc_state *context, 512 struct dc_state *context,
506 struct dcn_bw_internal_vars *v) 513 struct dcn_bw_internal_vars *v)
@@ -582,6 +589,7 @@ static void calc_wm_sets_and_perf_params(
582 if (v->voltage_level >= 3) 589 if (v->voltage_level >= 3)
583 context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a; 590 context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
584} 591}
592#endif
585 593
586static bool dcn_bw_apply_registry_override(struct dc *dc) 594static bool dcn_bw_apply_registry_override(struct dc *dc)
587{ 595{
@@ -883,7 +891,26 @@ bool dcn_validate_bandwidth(
883 ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value 891 ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
884 || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]); 892 || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
885 } 893 }
886 v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no; 894
895 if (dc->debug.optimized_watermark) {
896 /*
897		 * this method requires us to always re-calculate the watermark
898		 * when DCC changes between flips.
899 */
900 v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
901 } else {
902 /*
903		 * allows us to disable DCC on the fly without re-calculating the WM
904		 *
905		 * the extra overhead for DCC is quite small: for 1080p, the WM without
906		 * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us).
907 */
908 unsigned int bpe;
909
910 v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
911 pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no;
912 }
913
887 v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs( 914 v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
888 pipe->plane_state->format); 915 pipe->plane_state->format);
889 v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs( 916 v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
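The branch added above, condensed into an illustrative decision helper to make the trade-off explicit: with dc->debug.optimized_watermark the live per-plane DCC state feeds the calculation (so toggling DCC forces a WM recalc), otherwise only the format's DCC capability does, letting DCC be disabled between flips at the stated ~0.417us urgent-WM cost:

static int dcc_input_for_bw_calc(bool optimized_wm, bool plane_dcc_on,
				 bool format_supports_dcc)
{
	return (optimized_wm ? plane_dcc_on : format_supports_dcc)
		? dcn_bw_yes : dcn_bw_no;
}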
@@ -976,43 +1003,60 @@ bool dcn_validate_bandwidth(
976 bw_consumed = v->fabric_and_dram_bandwidth; 1003 bw_consumed = v->fabric_and_dram_bandwidth;
977 1004
978 display_pipe_configuration(v); 1005 display_pipe_configuration(v);
979 calc_wm_sets_and_perf_params(context, v); 1006 /*calc_wm_sets_and_perf_params(context, v);*/
980 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 1007 /* Only 1 set is used by dcn since no noticeable
1008 * performance improvement was measured and due to hw bug DEGVIDCN10-254
1009 */
1010 dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
1011
1012 context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
1013 v->stutter_exit_watermark * 1000;
1014 context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
1015 v->stutter_enter_plus_exit_watermark * 1000;
1016 context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
1017 v->dram_clock_change_watermark * 1000;
1018 context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
1019 context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
1020 context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
1021 context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
1022 context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
1023
1024 context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
981 (ddr4_dram_factor_single_Channel * v->number_of_channels)); 1025 (ddr4_dram_factor_single_Channel * v->number_of_channels));
982 if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) { 1026 if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
983 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); 1027 context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
984 } 1028 }
985 1029
986 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); 1030 context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
987 context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000); 1031 context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
988 1032
989 context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000); 1033 context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
990 if (dc->debug.max_disp_clk == true) 1034 if (dc->debug.max_disp_clk == true)
991 context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000); 1035 context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
992 1036
993 if (context->bw.dcn.calc_clk.dispclk_khz < 1037 if (context->bw.dcn.clk.dispclk_khz <
994 dc->debug.min_disp_clk_khz) { 1038 dc->debug.min_disp_clk_khz) {
995 context->bw.dcn.calc_clk.dispclk_khz = 1039 context->bw.dcn.clk.dispclk_khz =
996 dc->debug.min_disp_clk_khz; 1040 dc->debug.min_disp_clk_khz;
997 } 1041 }
998 1042
999 context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio; 1043 context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
1000 1044 context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
1001 switch (v->voltage_level) { 1045 switch (v->voltage_level) {
1002 case 0: 1046 case 0:
1003 context->bw.dcn.calc_clk.max_supported_dppclk_khz = 1047 context->bw.dcn.clk.max_supported_dppclk_khz =
1004 (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000); 1048 (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
1005 break; 1049 break;
1006 case 1: 1050 case 1:
1007 context->bw.dcn.calc_clk.max_supported_dppclk_khz = 1051 context->bw.dcn.clk.max_supported_dppclk_khz =
1008 (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000); 1052 (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
1009 break; 1053 break;
1010 case 2: 1054 case 2:
1011 context->bw.dcn.calc_clk.max_supported_dppclk_khz = 1055 context->bw.dcn.clk.max_supported_dppclk_khz =
1012 (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000); 1056 (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
1013 break; 1057 break;
1014 default: 1058 default:
1015 context->bw.dcn.calc_clk.max_supported_dppclk_khz = 1059 context->bw.dcn.clk.max_supported_dppclk_khz =
1016 (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000); 1060 (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
1017 break; 1061 break;
1018 } 1062 }
@@ -1225,27 +1269,27 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
1225 1269
1226unsigned int dcn_find_dcfclk_suits_all( 1270unsigned int dcn_find_dcfclk_suits_all(
1227 const struct dc *dc, 1271 const struct dc *dc,
1228 struct clocks_value *clocks) 1272 struct dc_clocks *clocks)
1229{ 1273{
1230 unsigned vdd_level, vdd_level_temp; 1274 unsigned vdd_level, vdd_level_temp;
1231 unsigned dcf_clk; 1275 unsigned dcf_clk;
1232 1276
1233 /*find a common supported voltage level*/ 1277 /*find a common supported voltage level*/
1234 vdd_level = dcn_find_normalized_clock_vdd_Level( 1278 vdd_level = dcn_find_normalized_clock_vdd_Level(
1235 dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz); 1279 dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
1236 vdd_level_temp = dcn_find_normalized_clock_vdd_Level( 1280 vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
1237 dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz); 1281 dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
1238 1282
1239 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); 1283 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
1240 vdd_level_temp = dcn_find_normalized_clock_vdd_Level( 1284 vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
1241 dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz); 1285 dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
1242 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); 1286 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
1243 1287
1244 vdd_level_temp = dcn_find_normalized_clock_vdd_Level( 1288 vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
1245 dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz); 1289 dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
1246 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); 1290 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
1247 vdd_level_temp = dcn_find_normalized_clock_vdd_Level( 1291 vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
1248 dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz); 1292 dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
1249 1293
 1250 /* find the dcfclk corresponding to that level */ 1294 /* find the dcfclk corresponding to that level */
1251 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); 1295 vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
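
The shape of dcn_find_dcfclk_suits_all() is a max-reduce: each requested clock is mapped to the lowest voltage level that can carry it, the levels are folded with max, and the dcfclk for the worst-case level wins. A hedged standalone model of the pattern (table values are invented, not SoC data):

#include <stddef.h>

/* lowest level whose limit covers the request; illustrative only */
static unsigned level_for(unsigned clk_khz, const unsigned *max_khz, size_t n)
{
	unsigned lvl = 0;

	while (lvl < n - 1 && clk_khz > max_khz[lvl])
		lvl++;
	return lvl;
}

static unsigned dcfclk_suits_all(unsigned dispclk_khz, unsigned phyclk_khz,
				 unsigned dppclk_khz)
{
	/* hypothetical per-level clock limits and dcfclk table */
	static const unsigned max_clk[4] = { 400000, 600000, 900000, 1200000 };
	static const unsigned dcf_by_level[4] = { 300000, 600000, 650000, 750000 };
	unsigned lvl = level_for(dispclk_khz, max_clk, 4);
	unsigned tmp;

	tmp = level_for(phyclk_khz, max_clk, 4);
	if (tmp > lvl)
		lvl = tmp;
	tmp = level_for(dppclk_khz, max_clk, 4);
	if (tmp > lvl)
		lvl = tmp;
	/* dcfclk that satisfies every requested clock */
	return dcf_by_level[lvl];
}
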
@@ -1331,21 +1375,14 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
1331{ 1375{
1332 struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu; 1376 struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
1333 struct pp_smu_wm_range_sets ranges = {0}; 1377 struct pp_smu_wm_range_sets ranges = {0};
1334 int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz; 1378 int min_fclk_khz, min_dcfclk_khz, socclk_khz;
1335 int max_dcfclk_khz, min_dcfclk_khz;
1336 int socclk_khz;
1337 const int overdrive = 5000000; /* 5 GHz to cover Overdrive */ 1379 const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
1338 unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
1339 1380
1340 if (!pp->set_wm_ranges) 1381 if (!pp->set_wm_ranges)
1341 return; 1382 return;
1342 1383
1343 kernel_fpu_begin(); 1384 kernel_fpu_begin();
1344 max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
1345 nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
1346 mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
1347 min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32; 1385 min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
1348 max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
1349 min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000; 1386 min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
1350 socclk_khz = dc->dcn_soc->socclk * 1000; 1387 socclk_khz = dc->dcn_soc->socclk * 1000;
1351 kernel_fpu_end(); 1388 kernel_fpu_end();
@@ -1353,105 +1390,46 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
1353 /* Now notify PPLib/SMU about which Watermarks sets they should select 1390 /* Now notify PPLib/SMU about which Watermarks sets they should select
1354 * depending on DPM state they are in. And update BW MGR GFX Engine and 1391 * depending on DPM state they are in. And update BW MGR GFX Engine and
1355 * Memory clock member variables for Watermarks calculations for each 1392 * Memory clock member variables for Watermarks calculations for each
1356 * Watermark Set 1393 * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254.
1357 */ 1394 */
 1358 /* SOCCLK does not affect anything but writeback for DCN, so for now we don't 1395 /* SOCCLK does not affect anything but writeback for DCN, so for now we don't
1359 * care what the value is, hence min to overdrive level 1396 * care what the value is, hence min to overdrive level
1360 */ 1397 */
1361 ranges.num_reader_wm_sets = WM_COUNT; 1398 ranges.num_reader_wm_sets = WM_SET_COUNT;
1362 ranges.num_writer_wm_sets = WM_COUNT; 1399 ranges.num_writer_wm_sets = WM_SET_COUNT;
1363 ranges.reader_wm_sets[0].wm_inst = WM_A; 1400 ranges.reader_wm_sets[0].wm_inst = WM_A;
1364 ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz; 1401 ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
1365 ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz; 1402 ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
1366 ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz; 1403 ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
1367 ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz; 1404 ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
1368 ranges.writer_wm_sets[0].wm_inst = WM_A; 1405 ranges.writer_wm_sets[0].wm_inst = WM_A;
1369 ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz; 1406 ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
1370 ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive; 1407 ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
1371 ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz; 1408 ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
1372 ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz; 1409 ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
1373
1374 ranges.reader_wm_sets[1].wm_inst = WM_B;
1375 ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
1376 ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
1377 ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
1378 ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
1379 ranges.writer_wm_sets[1].wm_inst = WM_B;
1380 ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
1381 ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
1382 ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
1383 ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
1384
1385
1386 ranges.reader_wm_sets[2].wm_inst = WM_C;
1387 ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
1388 ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
1389 ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
1390 ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
1391 ranges.writer_wm_sets[2].wm_inst = WM_C;
1392 ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
1393 ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
1394 ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
1395 ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
1396
1397 ranges.reader_wm_sets[3].wm_inst = WM_D;
1398 ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
1399 ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
1400 ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
1401 ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
1402 ranges.writer_wm_sets[3].wm_inst = WM_D;
1403 ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
1404 ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
1405 ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
1406 ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
1407 1410
1408 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { 1411 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
1409 ranges.reader_wm_sets[0].wm_inst = WM_A; 1412 ranges.reader_wm_sets[0].wm_inst = WM_A;
1410 ranges.reader_wm_sets[0].min_drain_clk_khz = 300000; 1413 ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
1411 ranges.reader_wm_sets[0].max_drain_clk_khz = 654000; 1414 ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
1412 ranges.reader_wm_sets[0].min_fill_clk_khz = 800000; 1415 ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
1413 ranges.reader_wm_sets[0].max_fill_clk_khz = 800000; 1416 ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
1414 ranges.writer_wm_sets[0].wm_inst = WM_A; 1417 ranges.writer_wm_sets[0].wm_inst = WM_A;
1415 ranges.writer_wm_sets[0].min_fill_clk_khz = 200000; 1418 ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
1416 ranges.writer_wm_sets[0].max_fill_clk_khz = 757000; 1419 ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
1417 ranges.writer_wm_sets[0].min_drain_clk_khz = 800000; 1420 ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
1418 ranges.writer_wm_sets[0].max_drain_clk_khz = 800000; 1421 ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
1419
1420 ranges.reader_wm_sets[1].wm_inst = WM_B;
1421 ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
1422 ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
1423 ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
1424 ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
1425 ranges.writer_wm_sets[1].wm_inst = WM_B;
1426 ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
1427 ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
1428 ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
1429 ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
1430
1431
1432 ranges.reader_wm_sets[2].wm_inst = WM_C;
1433 ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
1434 ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
1435 ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
1436 ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
1437 ranges.writer_wm_sets[2].wm_inst = WM_C;
1438 ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
1439 ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
1440 ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
1441 ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
1442
1443 ranges.reader_wm_sets[3].wm_inst = WM_D;
1444 ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
1445 ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
1446 ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
1447 ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
1448 ranges.writer_wm_sets[3].wm_inst = WM_D;
1449 ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
1450 ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
1451 ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
1452 ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
1453 } 1422 }
1454 1423
1424 ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
1425 ranges.reader_wm_sets[1].wm_inst = WM_B;
1426
1427 ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0];
1428 ranges.reader_wm_sets[2].wm_inst = WM_C;
1429
1430 ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0];
1431 ranges.reader_wm_sets[3].wm_inst = WM_D;
1432
1455 /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ 1433 /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
1456 pp->set_wm_ranges(&pp->pp_smu, &ranges); 1434 pp->set_wm_ranges(&pp->pp_smu, &ranges);
1457} 1435}
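
The net effect of this hunk is that only one real range survives: set A spans min to overdrive, and B, C and D become copies that differ only in instance id. A simplified model of the resulting shape (struct and field names are placeholders, not the pp_smu types):

struct wm_range {
	int inst;
	int min_fill_khz, max_fill_khz;
	int min_drain_khz, max_drain_khz;
};

static void build_single_wm_range(struct wm_range sets[4], int min_fclk_khz,
				  int min_dcfclk_khz, int overdrive_khz)
{
	int i;

	sets[0].inst = 0; /* WM_A */
	sets[0].min_fill_khz = min_fclk_khz;
	sets[0].max_fill_khz = overdrive_khz;
	sets[0].min_drain_khz = min_dcfclk_khz;
	sets[0].max_drain_khz = overdrive_khz;

	/* B, C and D are clones of the same wide range; DCN1 effectively
	 * uses a single set because of hw bug DEGVIDCN10-254 */
	for (i = 1; i < 4; i++) {
		sets[i] = sets[0];
		sets[i].inst = i;
	}
}
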
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 53ce7fa864b4..2a785bbf2b8f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -944,12 +944,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
944 944
945 dc->optimized_required = false; 945 dc->optimized_required = false;
946 946
947 /* 3rd param should be true, temp w/a for RV*/
948#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
949 dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
950#else
951 dc->hwss.set_bandwidth(dc, context, true); 947 dc->hwss.set_bandwidth(dc, context, true);
952#endif
953 return true; 948 return true;
954} 949}
955 950
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 267c76766dea..e1ebdf7b5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -352,19 +352,19 @@ void context_clock_trace(
352 DC_LOGGER_INIT(dc->ctx->logger); 352 DC_LOGGER_INIT(dc->ctx->logger);
353 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 353 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
354 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", 354 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
355 context->bw.dcn.calc_clk.dispclk_khz, 355 context->bw.dcn.clk.dispclk_khz,
356 context->bw.dcn.calc_clk.dppclk_khz, 356 context->bw.dcn.clk.dppclk_khz,
357 context->bw.dcn.calc_clk.dcfclk_khz, 357 context->bw.dcn.clk.dcfclk_khz,
358 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 358 context->bw.dcn.clk.dcfclk_deep_sleep_khz,
359 context->bw.dcn.calc_clk.fclk_khz, 359 context->bw.dcn.clk.fclk_khz,
360 context->bw.dcn.calc_clk.socclk_khz); 360 context->bw.dcn.clk.socclk_khz);
361 CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 361 CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
362 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", 362 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
363 context->bw.dcn.calc_clk.dispclk_khz, 363 context->bw.dcn.clk.dispclk_khz,
364 context->bw.dcn.calc_clk.dppclk_khz, 364 context->bw.dcn.clk.dppclk_khz,
365 context->bw.dcn.calc_clk.dcfclk_khz, 365 context->bw.dcn.clk.dcfclk_khz,
366 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 366 context->bw.dcn.clk.dcfclk_deep_sleep_khz,
367 context->bw.dcn.calc_clk.fclk_khz, 367 context->bw.dcn.clk.fclk_khz,
368 context->bw.dcn.calc_clk.socclk_khz); 368 context->bw.dcn.clk.socclk_khz);
369#endif 369#endif
370} 370}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 08b7ee526f0f..fa56c0fc02bf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
33#include "dc_link_dp.h" 33#include "dc_link_dp.h"
34#include "dc_link_ddc.h" 34#include "dc_link_ddc.h"
35#include "link_hwss.h" 35#include "link_hwss.h"
36#include "opp.h"
36 37
37#include "link_encoder.h" 38#include "link_encoder.h"
38#include "hw_sequencer.h" 39#include "hw_sequencer.h"
@@ -1284,29 +1285,15 @@ static enum dc_status enable_link_dp(
1284 max_link_rate = LINK_RATE_HIGH3; 1285 max_link_rate = LINK_RATE_HIGH3;
1285 1286
1286 if (link_settings.link_rate == max_link_rate) { 1287 if (link_settings.link_rate == max_link_rate) {
1287 if (state->dis_clk->funcs->set_min_clocks_state) { 1288 struct dc_clocks clocks = state->bw.dcn.clk;
1288 if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL) 1289
1289 state->dis_clk->funcs->set_min_clocks_state( 1290 /* dce/dcn compat, do not update dispclk */
1290 state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL); 1291 clocks.dispclk_khz = 0;
 1291 } else { 1292 /* 27 MHz = 27000000 Hz = 27000 kHz */
1292 uint32_t dp_phyclk_in_khz; 1293 clocks.phyclk_khz = link_settings.link_rate * 27000;
1293 const struct clocks_value clocks_value = 1294
1294 state->dis_clk->cur_clocks_value; 1295 state->dis_clk->funcs->update_clocks(
1295 1296 state->dis_clk, &clocks, false);
1296 /* 27mhz = 27000000hz= 27000khz */
1297 dp_phyclk_in_khz = link_settings.link_rate * 27000;
1298
1299 if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
1300 (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
1301 (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
1302 state->dis_clk->funcs->apply_clock_voltage_request(
1303 state->dis_clk,
1304 DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
1305 dp_phyclk_in_khz,
1306 false,
1307 true);
1308 }
1309 }
1310 } 1297 }
1311 1298
1312 dp_enable_link_phy( 1299 dp_enable_link_phy(
@@ -2396,9 +2383,10 @@ void core_link_enable_stream(
2396 core_dc->hwss.enable_audio_stream(pipe_ctx); 2383 core_dc->hwss.enable_audio_stream(pipe_ctx);
2397 2384
2398 /* turn off otg test pattern if enable */ 2385 /* turn off otg test pattern if enable */
2399 pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, 2386 if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
2400 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, 2387 pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
2401 COLOR_DEPTH_UNDEFINED); 2388 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
2389 COLOR_DEPTH_UNDEFINED);
2402 2390
2403 core_dc->hwss.enable_stream(pipe_ctx); 2391 core_dc->hwss.enable_stream(pipe_ctx);
2404 2392
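
The guarded call above is the usual optional-hook idiom in DC: a timing generator that does not implement set_test_pattern simply skips the programming. A generic sketch of the pattern, with placeholder types:

struct tg_funcs {
	void (*set_test_pattern)(void *tg, int pattern, int color_depth);
};

struct timing_gen {
	struct tg_funcs *funcs;
};

static void tg_clear_test_pattern(struct timing_gen *tg)
{
	/* only call the hook if this hardware provides it */
	if (tg->funcs->set_test_pattern)
		tg->funcs->set_test_pattern(tg, 0 /* video mode */,
					    -1 /* depth undefined */);
}
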
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 509f265663d2..84586b679d73 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3,6 +3,7 @@
3#include "dc.h" 3#include "dc.h"
4#include "dc_link_dp.h" 4#include "dc_link_dp.h"
5#include "dm_helpers.h" 5#include "dm_helpers.h"
6#include "opp.h"
6 7
7#include "inc/core_types.h" 8#include "inc/core_types.h"
8#include "link_hwss.h" 9#include "link_hwss.h"
@@ -1999,7 +2000,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
1999{ 2000{
2000 union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}}; 2001 union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
2001 union device_service_irq device_service_clear = { { 0 } }; 2002 union device_service_irq device_service_clear = { { 0 } };
2002 enum dc_status result = DDC_RESULT_UNKNOWN; 2003 enum dc_status result;
2003 bool status = false; 2004 bool status = false;
2004 /* For use cases related to down stream connection status change, 2005 /* For use cases related to down stream connection status change,
2005 * PSR and device auto test, refer to function handle_sst_hpd_irq 2006 * PSR and device auto test, refer to function handle_sst_hpd_irq
@@ -2511,8 +2512,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
2511 pipe_ctx->stream->bit_depth_params = params; 2512 pipe_ctx->stream->bit_depth_params = params;
2512 pipe_ctx->stream_res.opp->funcs-> 2513 pipe_ctx->stream_res.opp->funcs->
2513 opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params); 2514 opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
2514 2515 if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
2515 pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, 2516 pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
2516 controller_test_pattern, color_depth); 2517 controller_test_pattern, color_depth);
2517 } 2518 }
2518 break; 2519 break;
@@ -2524,8 +2525,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
2524 pipe_ctx->stream->bit_depth_params = params; 2525 pipe_ctx->stream->bit_depth_params = params;
2525 pipe_ctx->stream_res.opp->funcs-> 2526 pipe_ctx->stream_res.opp->funcs->
2526 opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params); 2527 opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
2527 2528 if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
2528 pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, 2529 pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
2529 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, 2530 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
2530 color_depth); 2531 color_depth);
2531 } 2532 }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index fca22550417a..c5fc5250e2bf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1948,7 +1948,7 @@ void dc_resource_state_construct(
1948 const struct dc *dc, 1948 const struct dc *dc,
1949 struct dc_state *dst_ctx) 1949 struct dc_state *dst_ctx)
1950{ 1950{
1951 dst_ctx->dis_clk = dc->res_pool->display_clock; 1951 dst_ctx->dis_clk = dc->res_pool->dccg;
1952} 1952}
1953 1953
1954enum dc_status dc_validate_global_state( 1954enum dc_status dc_validate_global_state(
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7ebce7669eea..0cb7e10d2505 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
38#include "inc/compressor.h" 38#include "inc/compressor.h"
39#include "dml/display_mode_lib.h" 39#include "dml/display_mode_lib.h"
40 40
41#define DC_VER "3.1.47" 41#define DC_VER "3.1.52"
42 42
43#define MAX_SURFACES 3 43#define MAX_SURFACES 3
44#define MAX_STREAMS 6 44#define MAX_STREAMS 6
@@ -186,6 +186,10 @@ enum wm_report_mode {
186 WM_REPORT_OVERRIDE = 1, 186 WM_REPORT_OVERRIDE = 1,
187}; 187};
188 188
189/*
190 * For any clocks that may differ per pipe
191 * only the max is stored in this structure
192 */
189struct dc_clocks { 193struct dc_clocks {
190 int dispclk_khz; 194 int dispclk_khz;
191 int max_supported_dppclk_khz; 195 int max_supported_dppclk_khz;
@@ -194,6 +198,7 @@ struct dc_clocks {
194 int socclk_khz; 198 int socclk_khz;
195 int dcfclk_deep_sleep_khz; 199 int dcfclk_deep_sleep_khz;
196 int fclk_khz; 200 int fclk_khz;
201 int phyclk_khz;
197}; 202};
198 203
199struct dc_debug { 204struct dc_debug {
@@ -228,6 +233,7 @@ struct dc_debug {
228 int urgent_latency_ns; 233 int urgent_latency_ns;
229 int percent_of_ideal_drambw; 234 int percent_of_ideal_drambw;
230 int dram_clock_change_latency_ns; 235 int dram_clock_change_latency_ns;
236 bool optimized_watermark;
231 int always_scale; 237 int always_scale;
232 bool disable_pplib_clock_request; 238 bool disable_pplib_clock_request;
233 bool disable_clock_gate; 239 bool disable_clock_gate;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index d9b84ec7954c..90082bab71f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -198,6 +198,10 @@ struct dc_vbios_funcs {
198 void (*post_init)(struct dc_bios *bios); 198 void (*post_init)(struct dc_bios *bios);
199 199
200 void (*bios_parser_destroy)(struct dc_bios **dcb); 200 void (*bios_parser_destroy)(struct dc_bios **dcb);
201
202 enum bp_result (*get_board_layout_info)(
203 struct dc_bios *dcb,
204 struct board_layout_info *board_layout_info);
201}; 205};
202 206
203struct bios_registers { 207struct bios_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index d31023d57b58..14afbc5c0a62 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -199,6 +199,7 @@ enum surface_pixel_format {
199 SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb, 199 SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
200 SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr, 200 SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
201 SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb, 201 SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
202 SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
202 SURFACE_PIXEL_FORMAT_INVALID 203 SURFACE_PIXEL_FORMAT_INVALID
203 204
204 /*grow 444 video here if necessary */ 205 /*grow 444 video here if necessary */
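
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END reads like a sentinel: values before it are the subsampled video formats. A hedged range-check sketch with a simplified enum (the real enum has more members and a VIDEO_BEGIN marker):

enum pix_fmt {
	FMT_GRAPHICS_ARGB8888,
	FMT_VIDEO_BEGIN,                  /* first subsampled video format */
	FMT_VIDEO_420_YCbCr = FMT_VIDEO_BEGIN,
	FMT_VIDEO_420_YCrCb,
	FMT_SUBSAMPLE_END,                /* sentinel: one past the last one */
	FMT_INVALID
};

static int format_is_subsampled(enum pix_fmt f)
{
	return f >= FMT_VIDEO_BEGIN && f < FMT_SUBSAMPLE_END;
}
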
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 88b09dd758ba..ca137757a69e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -133,7 +133,7 @@ static bool calculate_fb_and_fractional_fb_divider(
133 uint64_t feedback_divider; 133 uint64_t feedback_divider;
134 134
135 feedback_divider = 135 feedback_divider =
136 (uint64_t)(target_pix_clk_khz * ref_divider * post_divider); 136 (uint64_t)target_pix_clk_khz * ref_divider * post_divider;
137 feedback_divider *= 10; 137 feedback_divider *= 10;
138 /* additional factor, since we divide by 10 afterwards */ 138 /* additional factor, since we divide by 10 afterwards */
139 feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor); 139 feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
@@ -145,8 +145,8 @@ static bool calculate_fb_and_fractional_fb_divider(
145 * of fractional feedback decimal point and the fractional FB Divider precision 145 * of fractional feedback decimal point and the fractional FB Divider precision
146 * is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/ 146 * is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
147 147
148 feedback_divider += (uint64_t) 148 feedback_divider += 5ULL *
149 (5 * calc_pll_cs->fract_fb_divider_precision_factor); 149 calc_pll_cs->fract_fb_divider_precision_factor;
150 feedback_divider = 150 feedback_divider =
151 div_u64(feedback_divider, 151 div_u64(feedback_divider,
152 calc_pll_cs->fract_fb_divider_precision_factor * 10); 152 calc_pll_cs->fract_fb_divider_precision_factor * 10);
@@ -203,8 +203,8 @@ static bool calc_fb_divider_checking_tolerance(
203 &fract_feedback_divider); 203 &fract_feedback_divider);
204 204
205 /*Actual calculated value*/ 205 /*Actual calculated value*/
206 actual_calc_clk_khz = (uint64_t)(feedback_divider * 206 actual_calc_clk_khz = (uint64_t)feedback_divider *
207 calc_pll_cs->fract_fb_divider_factor) + 207 calc_pll_cs->fract_fb_divider_factor +
208 fract_feedback_divider; 208 fract_feedback_divider;
209 actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz; 209 actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
210 actual_calc_clk_khz = 210 actual_calc_clk_khz =
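
All three fixes in this hunk are the same overflow repair: (uint64_t)(a * b * c) multiplies in 32 bits and only widens the possibly wrapped result, while (uint64_t)a * b * c widens first. A small demonstration with plausible clock values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pix_clk_khz = 600000, ref_div = 200, post_div = 40;

	/* wrong: 32-bit multiply wraps before the cast widens it */
	uint64_t bad = (uint64_t)(pix_clk_khz * ref_div * post_div);
	/* right: widening the first operand keeps the whole chain 64-bit */
	uint64_t good = (uint64_t)pix_clk_khz * ref_div * post_div;

	/* prints bad=505032704 good=4800000000 */
	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}
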
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 8a581c67bf2d..6882dc953a2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -38,7 +38,7 @@
38#include "dal_asic_id.h" 38#include "dal_asic_id.h"
39 39
40#define TO_DCE_CLOCKS(clocks)\ 40#define TO_DCE_CLOCKS(clocks)\
41 container_of(clocks, struct dce_disp_clk, base) 41 container_of(clocks, struct dce_dccg, base)
42 42
43#define REG(reg) \ 43#define REG(reg) \
44 (clk_dce->regs->reg) 44 (clk_dce->regs->reg)
@@ -101,99 +101,78 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
101/*ClocksStatePerformance*/ 101/*ClocksStatePerformance*/
102{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; 102{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
103 103
104/* Starting point for each divider range.*/ 104/* Starting DID for each range */
105enum dce_divider_range_start { 105enum dentist_base_divider_id {
106 DIVIDER_RANGE_01_START = 200, /* 2.00*/ 106 DENTIST_BASE_DID_1 = 0x08,
107 DIVIDER_RANGE_02_START = 1600, /* 16.00*/ 107 DENTIST_BASE_DID_2 = 0x40,
108 DIVIDER_RANGE_03_START = 3200, /* 32.00*/ 108 DENTIST_BASE_DID_3 = 0x60,
109 DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/ 109 DENTIST_MAX_DID = 0x80
110}; 110};
111 111
112/* Ranges for divider identifiers (Divider ID or DID) 112/* Starting point and step size for each divider range.*/
113 mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/ 113enum dentist_divider_range {
114enum dce_divider_id_register_setting { 114 DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
115 DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08, 115 DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
116 DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40, 116 DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
117 DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60, 117 DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
118 DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80 118 DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
119 DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
120 DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
119}; 121};
120 122
121/* Step size between each divider within a range. 123static int dentist_get_divider_from_did(int did)
122 Incrementing the DENTIST_DISPCLK_WDIVIDER by one
123 will increment the divider by this much.*/
124enum dce_divider_range_step_size {
125 DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
126 DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
127 DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
128};
129
130static bool dce_divider_range_construct(
131 struct dce_divider_range *div_range,
132 int range_start,
133 int range_step,
134 int did_min,
135 int did_max)
136{ 124{
137 div_range->div_range_start = range_start; 125 if (did < DENTIST_BASE_DID_1)
138 div_range->div_range_step = range_step; 126 did = DENTIST_BASE_DID_1;
139 div_range->did_min = did_min; 127 if (did > DENTIST_MAX_DID)
140 div_range->did_max = did_max; 128 did = DENTIST_MAX_DID;
141 129
142 if (div_range->div_range_step == 0) { 130 if (did < DENTIST_BASE_DID_2) {
143 div_range->div_range_step = 1; 131 return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
144 /*div_range_step cannot be zero*/ 132 * (did - DENTIST_BASE_DID_1);
145 BREAK_TO_DEBUGGER(); 133 } else if (did < DENTIST_BASE_DID_3) {
134 return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
135 * (did - DENTIST_BASE_DID_2);
136 } else {
137 return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
138 * (did - DENTIST_BASE_DID_3);
146 } 139 }
147 /* Calculate this based on the other inputs.*/
148 /* See DividerRange.h for explanation of */
149 /* the relationship between divider id (DID) and a divider.*/
150 /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
151 /* Maximum divider identified in this range =
152 * (Number of Divider IDs)*Step size between dividers
153 * + The start of this range.*/
154 div_range->div_range_end = (did_max - did_min) * range_step
155 + range_start;
156 return true;
157}
158
159static int dce_divider_range_calc_divider(
160 struct dce_divider_range *div_range,
161 int did)
162{
163 /* Is this DID within our range?*/
164 if ((did < div_range->did_min) || (did >= div_range->did_max))
165 return INVALID_DIVIDER;
166
167 return ((did - div_range->did_min) * div_range->div_range_step)
168 + div_range->div_range_start;
169
170} 140}
171 141
172static int dce_divider_range_get_divider( 142/* SW will adjust the DP REF clock average value for all purposes
173 struct dce_divider_range *div_range, 143 * (DP DTO / DP Audio DTO and DP GTC)
174 int ranges_num, 144 * if the clock is spread, for all cases:
175 int did) 145 * - if SS is enabled on the DP ref clock and HW de-spreading is enabled with SW
 146 *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
 147 * - if SS is enabled on the DP ref clock and HW de-spreading is enabled with HW
 148 *   calculations (not planned to be used, but the average clock should still
 149 *   be valid)
 150 * - if SS is enabled on the DP ref clock and HW de-spreading is disabled
 151 *   (should not be the case with CIK), then SW should program all rates
 152 *   generated according to the average value (as with previous ASICs)
 153 */
154static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
176{ 155{
177 int div = INVALID_DIVIDER; 156 if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
178 int i; 157 struct fixed31_32 ss_percentage = dc_fixpt_div_int(
158 dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
159 clk_dce->dprefclk_ss_divider), 200);
160 struct fixed31_32 adj_dp_ref_clk_khz;
179 161
180 for (i = 0; i < ranges_num; i++) { 162 ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
181 /* Calculate divider with given divider ID*/ 163 adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
182 div = dce_divider_range_calc_divider(&div_range[i], did); 164 dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
183 /* Found a valid return divider*/
184 if (div != INVALID_DIVIDER)
185 break;
186 } 165 }
187 return div; 166 return dp_ref_clk_khz;
188} 167}
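
The spread-spectrum adjustment is a small fixed-point product: the stored percentage is divided by its divider and by 200, subtracted from 1, and applied to the clock. A floating-point model with invented values (the driver uses fixed31_32):

#include <math.h>
#include <stdio.h>

static int adjust_dp_ref_for_ss(int dp_ref_clk_khz, int ss_percentage,
				int ss_divider)
{
	double frac = ((double)ss_percentage / ss_divider) / 200.0;

	return (int)floor((1.0 - frac) * dp_ref_clk_khz);
}

int main(void)
{
	/* e.g. ss_percentage = 30, divider = 1000: a 0.015% downspread
	 * takes a 600000 kHz reference down to 599910 kHz */
	printf("%d\n", adjust_dp_ref_for_ss(600000, 30, 1000));
	return 0;
}
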
189 168
190static int dce_clocks_get_dp_ref_freq(struct display_clock *clk) 169static int dce_get_dp_ref_freq_khz(struct dccg *clk)
191{ 170{
192 struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk); 171 struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
193 int dprefclk_wdivider; 172 int dprefclk_wdivider;
194 int dprefclk_src_sel; 173 int dprefclk_src_sel;
195 int dp_ref_clk_khz = 600000; 174 int dp_ref_clk_khz = 600000;
196 int target_div = INVALID_DIVIDER; 175 int target_div;
197 176
198 /* ASSERT DP Reference Clock source is from DFS*/ 177 /* ASSERT DP Reference Clock source is from DFS*/
199 REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); 178 REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
@@ -204,80 +183,27 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
204 REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); 183 REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
205 184
 206 /* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider */ 185 /* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider */
207 target_div = dce_divider_range_get_divider( 186 target_div = dentist_get_divider_from_did(dprefclk_wdivider);
208 clk_dce->divider_ranges,
209 DIVIDER_RANGE_MAX,
210 dprefclk_wdivider);
211
212 if (target_div != INVALID_DIVIDER) {
213 /* Calculate the current DFS clock, in kHz.*/
214 dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
215 * clk_dce->dentist_vco_freq_khz) / target_div;
216 }
217 187
218 /* SW will adjust DP REF Clock average value for all purposes 188 /* Calculate the current DFS clock, in kHz.*/
219 * (DP DTO / DP Audio DTO and DP GTC) 189 dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
220 if clock is spread for all cases: 190 * clk_dce->dentist_vco_freq_khz) / target_div;
221 -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
222 calculations for DS_INCR/DS_MODULO (this is planned to be default case)
223 -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
224 calculations (not planned to be used, but average clock should still
225 be valid)
226 -if SS enabled on DP Ref clock and HW de-spreading disabled
227 (should not be case with CIK) then SW should program all rates
228 generated according to average value (case as with previous ASICs)
229 */
230 if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
231 struct fixed31_32 ss_percentage = dc_fixpt_div_int(
232 dc_fixpt_from_fraction(
233 clk_dce->dprefclk_ss_percentage,
234 clk_dce->dprefclk_ss_divider), 200);
235 struct fixed31_32 adj_dp_ref_clk_khz;
236 191
237 ss_percentage = dc_fixpt_sub(dc_fixpt_one, 192 return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
238 ss_percentage);
239 adj_dp_ref_clk_khz =
240 dc_fixpt_mul_int(
241 ss_percentage,
242 dp_ref_clk_khz);
243 dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
244 }
245
246 return dp_ref_clk_khz;
247} 193}
248 194
249/* TODO: This is DCN DPREFCLK: it could be programmed by DENTIST via VBIOS 195static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
250 * or CLK0_CLK11 by SMU. For DCE120, it is always 600 MHz. Will re-visit
251 * clock implementation
252 */
253static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
254{ 196{
255 struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk); 197 struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
256 int dp_ref_clk_khz = 600000;
257
258 if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
259 struct fixed31_32 ss_percentage = dc_fixpt_div_int(
260 dc_fixpt_from_fraction(
261 clk_dce->dprefclk_ss_percentage,
262 clk_dce->dprefclk_ss_divider), 200);
263 struct fixed31_32 adj_dp_ref_clk_khz;
264 198
265 ss_percentage = dc_fixpt_sub(dc_fixpt_one, 199 return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
266 ss_percentage);
267 adj_dp_ref_clk_khz =
268 dc_fixpt_mul_int(
269 ss_percentage,
270 dp_ref_clk_khz);
271 dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
272 }
273
274 return dp_ref_clk_khz;
275} 200}
201
276static enum dm_pp_clocks_state dce_get_required_clocks_state( 202static enum dm_pp_clocks_state dce_get_required_clocks_state(
277 struct display_clock *clk, 203 struct dccg *clk,
278 struct state_dependent_clocks *req_clocks) 204 struct dc_clocks *req_clocks)
279{ 205{
280 struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk); 206 struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
281 int i; 207 int i;
282 enum dm_pp_clocks_state low_req_clk; 208 enum dm_pp_clocks_state low_req_clk;
283 209
@@ -286,53 +212,30 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
286 * all required clocks 212 * all required clocks
287 */ 213 */
288 for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) 214 for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
289 if (req_clocks->display_clk_khz > 215 if (req_clocks->dispclk_khz >
290 clk_dce->max_clks_by_state[i].display_clk_khz 216 clk_dce->max_clks_by_state[i].display_clk_khz
291 || req_clocks->pixel_clk_khz > 217 || req_clocks->phyclk_khz >
292 clk_dce->max_clks_by_state[i].pixel_clk_khz) 218 clk_dce->max_clks_by_state[i].pixel_clk_khz)
293 break; 219 break;
294 220
295 low_req_clk = i + 1; 221 low_req_clk = i + 1;
296 if (low_req_clk > clk->max_clks_state) { 222 if (low_req_clk > clk->max_clks_state) {
 297 DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d", 223 /* set max clock state for a high phyclk; invalid if the display clock limit is exceeded */
298 __func__, 224 if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
299 req_clocks->display_clk_khz, 225 < req_clocks->dispclk_khz)
300 req_clocks->pixel_clk_khz); 226 low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
301 low_req_clk = DM_PP_CLOCKS_STATE_INVALID; 227 else
228 low_req_clk = clk->max_clks_state;
302 } 229 }
303 230
304 return low_req_clk; 231 return low_req_clk;
305} 232}
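
dce_get_required_clocks_state() scans from the highest state downward and stops at the first state that can no longer cover the request, so i + 1 is the lowest sufficient state. A condensed model (limits invented):

/* pick the lowest power state whose limit covers the requested clock */
static int lowest_sufficient_state(const int *max_disp_khz, int nstates,
				   int req_disp_khz)
{
	int i;

	for (i = nstates - 1; i >= 0; i--)
		if (req_disp_khz > max_disp_khz[i])
			break; /* state i is too slow */
	return i + 1;
}

/* example: limits {300000, 600000, 900000}, request 650000 -> state 2 */
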
306 233
307static bool dce_clock_set_min_clocks_state(
308 struct display_clock *clk,
309 enum dm_pp_clocks_state clocks_state)
310{
311 struct dm_pp_power_level_change_request level_change_req = {
312 clocks_state };
313
314 if (clocks_state > clk->max_clks_state) {
315 /*Requested state exceeds max supported state.*/
316 DC_LOG_WARNING("Requested state exceeds max supported state");
317 return false;
318 } else if (clocks_state == clk->cur_min_clks_state) {
319 /*if we're trying to set the same state, we can just return
320 * since nothing needs to be done*/
321 return true;
322 }
323
324 /* get max clock state from PPLIB */
325 if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
326 clk->cur_min_clks_state = clocks_state;
327
328 return true;
329}
330
331static int dce_set_clock( 234static int dce_set_clock(
332 struct display_clock *clk, 235 struct dccg *clk,
333 int requested_clk_khz) 236 int requested_clk_khz)
334{ 237{
335 struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk); 238 struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
336 struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; 239 struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
337 struct dc_bios *bp = clk->ctx->dc_bios; 240 struct dc_bios *bp = clk->ctx->dc_bios;
338 int actual_clock = requested_clk_khz; 241 int actual_clock = requested_clk_khz;
@@ -364,10 +267,10 @@ static int dce_set_clock(
364} 267}
365 268
366static int dce_psr_set_clock( 269static int dce_psr_set_clock(
367 struct display_clock *clk, 270 struct dccg *clk,
368 int requested_clk_khz) 271 int requested_clk_khz)
369{ 272{
370 struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk); 273 struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
371 struct dc_context *ctx = clk_dce->base.ctx; 274 struct dc_context *ctx = clk_dce->base.ctx;
372 struct dc *core_dc = ctx->dc; 275 struct dc *core_dc = ctx->dc;
373 struct dmcu *dmcu = core_dc->res_pool->dmcu; 276 struct dmcu *dmcu = core_dc->res_pool->dmcu;
@@ -380,10 +283,10 @@ static int dce_psr_set_clock(
380} 283}
381 284
382static int dce112_set_clock( 285static int dce112_set_clock(
383 struct display_clock *clk, 286 struct dccg *clk,
384 int requested_clk_khz) 287 int requested_clk_khz)
385{ 288{
386 struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk); 289 struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
387 struct bp_set_dce_clock_parameters dce_clk_params; 290 struct bp_set_dce_clock_parameters dce_clk_params;
388 struct dc_bios *bp = clk->ctx->dc_bios; 291 struct dc_bios *bp = clk->ctx->dc_bios;
389 struct dc *core_dc = clk->ctx->dc; 292 struct dc *core_dc = clk->ctx->dc;
@@ -432,7 +335,7 @@ static int dce112_set_clock(
432 return actual_clock; 335 return actual_clock;
433} 336}
434 337
435static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce) 338static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
436{ 339{
437 struct dc_debug *debug = &clk_dce->base.ctx->dc->debug; 340 struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
438 struct dc_bios *bp = clk_dce->base.ctx->dc_bios; 341 struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
@@ -488,11 +391,9 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
488 if (!debug->disable_dfs_bypass && bp->integrated_info) 391 if (!debug->disable_dfs_bypass && bp->integrated_info)
489 if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) 392 if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
490 clk_dce->dfs_bypass_enabled = true; 393 clk_dce->dfs_bypass_enabled = true;
491
492 clk_dce->use_max_disp_clk = debug->max_disp_clk;
493} 394}
494 395
495static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce) 396static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
496{ 397{
497 struct dc_bios *bp = clk_dce->base.ctx->dc_bios; 398 struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
498 int ss_info_num = bp->funcs->get_ss_entry_number( 399 int ss_info_num = bp->funcs->get_ss_entry_number(
@@ -548,139 +449,263 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
548 } 449 }
549} 450}
550 451
551static bool dce_apply_clock_voltage_request( 452static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
552 struct display_clock *clk, 453{
553 enum dm_pp_clock_type clocks_type, 454 return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
554 int clocks_in_khz, 455}
555 bool pre_mode_set, 456
556 bool update_dp_phyclk) 457static void dce12_update_clocks(struct dccg *dccg,
458 struct dc_clocks *new_clocks,
459 bool safe_to_lower)
557{ 460{
558 bool send_request = false;
559 struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; 461 struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
560 462
561 switch (clocks_type) { 463 if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
562 case DM_PP_CLOCK_TYPE_DISPLAY_CLK: 464 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
563 case DM_PP_CLOCK_TYPE_PIXELCLK: 465 clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
564 case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: 466 dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
565 break; 467 dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
566 default: 468
567 BREAK_TO_DEBUGGER(); 469 dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
568 return false;
569 } 470 }
570 471
571 clock_voltage_req.clk_type = clocks_type; 472 if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
572 clock_voltage_req.clocks_in_khz = clocks_in_khz; 473 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
573 474 clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
574 /* to pplib */ 475 dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
575 if (pre_mode_set) { 476
576 switch (clocks_type) { 477 dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
577 case DM_PP_CLOCK_TYPE_DISPLAY_CLK: 478 }
578 if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) { 479}
579 clk->cur_clocks_value.dispclk_notify_pplib_done = true; 480
580 send_request = true; 481#ifdef CONFIG_DRM_AMD_DC_DCN1_0
581 } else 482static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
582 clk->cur_clocks_value.dispclk_notify_pplib_done = false; 483{
583 /* no matter incrase or decrase clock, update current clock value */ 484 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
584 clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz; 485 bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
585 break; 486 int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
586 case DM_PP_CLOCK_TYPE_PIXELCLK: 487 bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
587 if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) { 488
588 clk->cur_clocks_value.pixelclk_notify_pplib_done = true; 489 /* increase clock, looking for div is 0 for current, request div is 1*/
589 send_request = true; 490 if (dispclk_increase) {
590 } else 491 /* already divided by 2, no need to reach target clk with 2 steps*/
591 clk->cur_clocks_value.pixelclk_notify_pplib_done = false; 492 if (cur_dpp_div)
592 /* no matter incrase or decrase clock, update current clock value */ 493 return new_clocks->dispclk_khz;
593 clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz; 494
594 break; 495 /* request disp clk is lower than maximum supported dpp clk,
595 case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: 496 * no need to reach target clk with two steps.
596 if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) { 497 */
597 clk->cur_clocks_value.phyclk_notigy_pplib_done = true; 498 if (new_clocks->dispclk_khz <= disp_clk_threshold)
598 send_request = true; 499 return new_clocks->dispclk_khz;
599 } else 500
 600 clk->cur_clocks_value.phyclk_notigy_pplib_done = false; 501 /* target dpp clk is not requested to be divided by 2, still within threshold */
 601 /* no matter increase or decrease clock, update current clock value */ 502 if (!request_dpp_div)
602 clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz; 503 return new_clocks->dispclk_khz;
603 break;
604 default:
605 ASSERT(0);
606 break;
607 }
608 504
609 } else { 505 } else {
 610 switch (clocks_type) { 506 /* decreasing the clock: current dppclk is divided by 2,
 611 case DM_PP_CLOCK_TYPE_DISPLAY_CLK: 507 * requested dppclk is not divided by 2.
612 if (!clk->cur_clocks_value.dispclk_notify_pplib_done) 508 */
613 send_request = true; 509
 614 break; 510 /* current dpp clk is not divided by 2, no need to ramp */
615 case DM_PP_CLOCK_TYPE_PIXELCLK: 511 if (!cur_dpp_div)
616 if (!clk->cur_clocks_value.pixelclk_notify_pplib_done) 512 return new_clocks->dispclk_khz;
617 send_request = true; 513
618 break; 514 /* current disp clk is lower than current maximum dpp clk,
619 case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: 515 * no need to ramp
620 if (!clk->cur_clocks_value.phyclk_notigy_pplib_done) 516 */
621 send_request = true; 517 if (dccg->clks.dispclk_khz <= disp_clk_threshold)
622 break; 518 return new_clocks->dispclk_khz;
623 default: 519
 624 ASSERT(0); 520 /* requested dpp clk needs to be divided by 2 */
625 break; 521 if (request_dpp_div)
626 } 522 return new_clocks->dispclk_khz;
627 } 523 }
628 if (send_request) { 524
629#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 525 return disp_clk_threshold;
630 if (clk->ctx->dce_version >= DCN_VERSION_1_0) { 526}
631 struct dc *core_dc = clk->ctx->dc; 527
632 /*use dcfclk request voltage*/ 528static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
633 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; 529{
634 clock_voltage_req.clocks_in_khz = 530 struct dc *dc = dccg->ctx->dc;
635 dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value); 531 int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
636 } 532 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
533 int i;
534
535 /* set disp clk to dpp clk threshold */
536 dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
537
538 /* update request dpp clk division option */
539 for (i = 0; i < dc->res_pool->pipe_count; i++) {
540 struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
541
542 if (!pipe_ctx->plane_state)
543 continue;
544
545 pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
546 pipe_ctx->plane_res.dpp,
547 request_dpp_div,
548 true);
549 }
550
551 /* If target clk not same as dppclk threshold, set to target clock */
552 if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
553 dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
554
555 dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
556 dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
557 dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
558}
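
The ramp keeps dppclk within its supported range while dispclk moves: dispclk is first parked at the dpp threshold, the per-pipe divide-by-2 option is flipped, and only then does dispclk land on its target. A hedged model with stub hooks standing in for the dccg/dpp function pointers:

struct clks { int dispclk_khz, dppclk_khz, max_dppclk_khz; };

/* stubs standing in for dccg->funcs->set_dispclk and the dpp divider hook */
static void set_dispclk(int khz) { (void)khz; }
static void set_dpp_div_on_all_pipes(int div_by_2) { (void)div_by_2; }

static void ramp_dispclk_with_dpp(struct clks *cur, const struct clks *new_clks)
{
	/* simplified threshold; the driver refines this per ramp direction */
	int threshold = new_clks->max_dppclk_khz;
	int dpp_div = new_clks->dispclk_khz > new_clks->dppclk_khz;

	set_dispclk(threshold);            /* step 1: safe for the old divider */
	set_dpp_div_on_all_pipes(dpp_div); /* step 2: flip the divider */
	if (threshold != new_clks->dispclk_khz)
		set_dispclk(new_clks->dispclk_khz); /* step 3: final target */
	*cur = *new_clks;
}
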
559
560static void dcn1_update_clocks(struct dccg *dccg,
561 struct dc_clocks *new_clocks,
562 bool safe_to_lower)
563{
564 struct dc *dc = dccg->ctx->dc;
565 struct pp_smu_display_requirement_rv *smu_req_cur =
566 &dc->res_pool->pp_smu_req;
567 struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
568 struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
569 struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
570 bool send_request_to_increase = false;
571 bool send_request_to_lower = false;
572
573 if (new_clocks->phyclk_khz)
574 smu_req.display_count = 1;
575 else
576 smu_req.display_count = 0;
577
578 if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
579 || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
580 || new_clocks->fclk_khz > dccg->clks.fclk_khz
581 || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
582 send_request_to_increase = true;
583
584 if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
585 dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
586
587 send_request_to_lower = true;
588 }
589
590 if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
591 dccg->clks.fclk_khz = new_clocks->fclk_khz;
592 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
593 clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
594 smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
595
596 dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
597 send_request_to_lower = true;
598 }
599
600 if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
601 dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
602 smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
603
604 send_request_to_lower = true;
605 }
606
607 if (should_set_clock(safe_to_lower,
608 new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
609 dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
610 smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
611
612 send_request_to_lower = true;
613 }
614
 615 /* make sure the dcfclk request comes before the dppclk change, so
 616 * that we have enough voltage to run the dppclk
617 */
618 if (send_request_to_increase) {
619 /*use dcfclk to request voltage*/
620 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
621 clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
622 dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
623 if (pp_smu->set_display_requirement)
624 pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
625 }
626
627 /* dcn1 dppclk is tied to dispclk */
628 if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
629 dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
630 dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
631
632 send_request_to_lower = true;
633 }
634
635 if (!send_request_to_increase && send_request_to_lower) {
636 /*use dcfclk to request voltage*/
637 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
638 clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
639 dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
640 if (pp_smu->set_display_requirement)
641 pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
642 }
643
644
645 *smu_req_cur = smu_req;
646}
637#endif 647#endif
638 dm_pp_apply_clock_for_voltage_request( 648
639 clk->ctx, &clock_voltage_req); 649static void dce_update_clocks(struct dccg *dccg,
650 struct dc_clocks *new_clocks,
651 bool safe_to_lower)
652{
653 struct dm_pp_power_level_change_request level_change_req;
654
655 level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
656 /* get max clock state from PPLIB */
657 if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
658 || level_change_req.power_level > dccg->cur_min_clks_state) {
659 if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
660 dccg->cur_min_clks_state = level_change_req.power_level;
640 } 661 }
641 if (update_dp_phyclk && (clocks_in_khz >
642 clk->cur_clocks_value.max_dp_phyclk_in_khz))
643 clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
644 662
645 return true; 663 if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
664 dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
665 dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
666 }
646} 667}
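
should_set_clock() is the gate both update paths rely on: increases always pass, decreases pass only once safe_to_lower says the old state is no longer needed. A compact self-check reusing the helper as defined above:

#include <assert.h>
#include <stdbool.h>

static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}

int main(void)
{
	assert(should_set_clock(false, 700000, 600000));  /* raise: always ok */
	assert(!should_set_clock(false, 500000, 600000)); /* lower: deferred  */
	assert(should_set_clock(true, 500000, 600000));   /* lower when safe  */
	assert(!should_set_clock(true, 600000, 600000));  /* equal: no-op     */
	return 0;
}
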
647 668
669#ifdef CONFIG_DRM_AMD_DC_DCN1_0
670static const struct display_clock_funcs dcn1_funcs = {
671 .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
672 .set_dispclk = dce112_set_clock,
673 .update_clocks = dcn1_update_clocks
674};
675#endif
648 676
649static const struct display_clock_funcs dce120_funcs = { 677static const struct display_clock_funcs dce120_funcs = {
650 .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround, 678 .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
651 .apply_clock_voltage_request = dce_apply_clock_voltage_request, 679 .set_dispclk = dce112_set_clock,
652 .set_clock = dce112_set_clock 680 .update_clocks = dce12_update_clocks
653}; 681};
654 682
655static const struct display_clock_funcs dce112_funcs = { 683static const struct display_clock_funcs dce112_funcs = {
656 .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq, 684 .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
657 .get_required_clocks_state = dce_get_required_clocks_state, 685 .set_dispclk = dce112_set_clock,
658 .set_min_clocks_state = dce_clock_set_min_clocks_state, 686 .update_clocks = dce_update_clocks
659 .set_clock = dce112_set_clock
660}; 687};
661 688
662static const struct display_clock_funcs dce110_funcs = { 689static const struct display_clock_funcs dce110_funcs = {
663 .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq, 690 .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
664 .get_required_clocks_state = dce_get_required_clocks_state, 691 .set_dispclk = dce_psr_set_clock,
665 .set_min_clocks_state = dce_clock_set_min_clocks_state, 692 .update_clocks = dce_update_clocks
666 .set_clock = dce_psr_set_clock
667}; 693};
668 694
669static const struct display_clock_funcs dce_funcs = { 695static const struct display_clock_funcs dce_funcs = {
670 .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq, 696 .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
671 .get_required_clocks_state = dce_get_required_clocks_state, 697 .set_dispclk = dce_set_clock,
672 .set_min_clocks_state = dce_clock_set_min_clocks_state, 698 .update_clocks = dce_update_clocks
673 .set_clock = dce_set_clock
674}; 699};
675 700
676static void dce_disp_clk_construct( 701static void dce_dccg_construct(
677 struct dce_disp_clk *clk_dce, 702 struct dce_dccg *clk_dce,
678 struct dc_context *ctx, 703 struct dc_context *ctx,
679 const struct dce_disp_clk_registers *regs, 704 const struct dccg_registers *regs,
680 const struct dce_disp_clk_shift *clk_shift, 705 const struct dccg_shift *clk_shift,
681 const struct dce_disp_clk_mask *clk_mask) 706 const struct dccg_mask *clk_mask)
682{ 707{
683 struct display_clock *base = &clk_dce->base; 708 struct dccg *base = &clk_dce->base;
684 709
685 base->ctx = ctx; 710 base->ctx = ctx;
686 base->funcs = &dce_funcs; 711 base->funcs = &dce_funcs;
@@ -700,34 +725,15 @@ static void dce_disp_clk_construct(
 
 	dce_clock_read_integrated_info(clk_dce);
 	dce_clock_read_ss_info(clk_dce);
-
-	dce_divider_range_construct(
-		&clk_dce->divider_ranges[DIVIDER_RANGE_01],
-		DIVIDER_RANGE_01_START,
-		DIVIDER_RANGE_01_STEP_SIZE,
-		DIVIDER_RANGE_01_BASE_DIVIDER_ID,
-		DIVIDER_RANGE_02_BASE_DIVIDER_ID);
-	dce_divider_range_construct(
-		&clk_dce->divider_ranges[DIVIDER_RANGE_02],
-		DIVIDER_RANGE_02_START,
-		DIVIDER_RANGE_02_STEP_SIZE,
-		DIVIDER_RANGE_02_BASE_DIVIDER_ID,
-		DIVIDER_RANGE_03_BASE_DIVIDER_ID);
-	dce_divider_range_construct(
-		&clk_dce->divider_ranges[DIVIDER_RANGE_03],
-		DIVIDER_RANGE_03_START,
-		DIVIDER_RANGE_03_STEP_SIZE,
-		DIVIDER_RANGE_03_BASE_DIVIDER_ID,
-		DIVIDER_RANGE_MAX_DIVIDER_ID);
 }
 
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
 	struct dc_context *ctx,
-	const struct dce_disp_clk_registers *regs,
-	const struct dce_disp_clk_shift *clk_shift,
-	const struct dce_disp_clk_mask *clk_mask)
+	const struct dccg_registers *regs,
+	const struct dccg_shift *clk_shift,
+	const struct dccg_mask *clk_mask)
 {
-	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
 	if (clk_dce == NULL) {
 		BREAK_TO_DEBUGGER();
@@ -738,19 +744,19 @@ struct display_clock *dce_disp_clk_create(
 		dce80_max_clks_by_state,
 		sizeof(dce80_max_clks_by_state));
 
-	dce_disp_clk_construct(
+	dce_dccg_construct(
 		clk_dce, ctx, regs, clk_shift, clk_mask);
 
 	return &clk_dce->base;
 }
 
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
 	struct dc_context *ctx,
-	const struct dce_disp_clk_registers *regs,
-	const struct dce_disp_clk_shift *clk_shift,
-	const struct dce_disp_clk_mask *clk_mask)
+	const struct dccg_registers *regs,
+	const struct dccg_shift *clk_shift,
+	const struct dccg_mask *clk_mask)
 {
-	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
 	if (clk_dce == NULL) {
 		BREAK_TO_DEBUGGER();
@@ -761,7 +767,7 @@ struct display_clock *dce110_disp_clk_create(
 		dce110_max_clks_by_state,
 		sizeof(dce110_max_clks_by_state));
 
-	dce_disp_clk_construct(
+	dce_dccg_construct(
 		clk_dce, ctx, regs, clk_shift, clk_mask);
 
 	clk_dce->base.funcs = &dce110_funcs;
@@ -769,13 +775,13 @@ struct display_clock *dce110_disp_clk_create(
 	return &clk_dce->base;
 }
 
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
 	struct dc_context *ctx,
-	const struct dce_disp_clk_registers *regs,
-	const struct dce_disp_clk_shift *clk_shift,
-	const struct dce_disp_clk_mask *clk_mask)
+	const struct dccg_registers *regs,
+	const struct dccg_shift *clk_shift,
+	const struct dccg_mask *clk_mask)
 {
-	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
 	if (clk_dce == NULL) {
 		BREAK_TO_DEBUGGER();
@@ -786,7 +792,7 @@ struct display_clock *dce112_disp_clk_create(
 		dce112_max_clks_by_state,
 		sizeof(dce112_max_clks_by_state));
 
-	dce_disp_clk_construct(
+	dce_dccg_construct(
 		clk_dce, ctx, regs, clk_shift, clk_mask);
 
 	clk_dce->base.funcs = &dce112_funcs;
@@ -794,10 +800,9 @@ struct display_clock *dce112_disp_clk_create(
 	return &clk_dce->base;
 }
 
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+struct dccg *dce120_dccg_create(struct dc_context *ctx)
 {
-	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-	struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
 	if (clk_dce == NULL) {
 		BREAK_TO_DEBUGGER();
@@ -808,28 +813,59 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
 		dce120_max_clks_by_state,
 		sizeof(dce120_max_clks_by_state));
 
-	dce_disp_clk_construct(
+	dce_dccg_construct(
 		clk_dce, ctx, NULL, NULL, NULL);
 
 	clk_dce->base.funcs = &dce120_funcs;
 
-	/* new in dce120 */
-	if (!ctx->dc->debug.disable_pplib_clock_request &&
-			dm_pp_get_clock_levels_by_type_with_voltage(
-			ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
-			&& clk_level_info.num_levels)
-		clk_dce->max_displ_clk_in_khz =
-			clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
-	else
-		clk_dce->max_displ_clk_in_khz = 1133000;
+	return &clk_dce->base;
+}
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+{
+	struct dc_debug *debug = &ctx->dc->debug;
+	struct dc_bios *bp = ctx->dc_bios;
+	struct dc_firmware_info fw_info = { { 0 } };
+	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+	if (clk_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	clk_dce->base.ctx = ctx;
+	clk_dce->base.funcs = &dcn1_funcs;
+
+	clk_dce->dfs_bypass_disp_clk = 0;
+
+	clk_dce->dprefclk_ss_percentage = 0;
+	clk_dce->dprefclk_ss_divider = 1000;
+	clk_dce->ss_on_dprefclk = false;
+
+	if (bp->integrated_info)
+		clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+	if (clk_dce->dentist_vco_freq_khz == 0) {
+		bp->funcs->get_firmware_info(bp, &fw_info);
+		clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+		if (clk_dce->dentist_vco_freq_khz == 0)
+			clk_dce->dentist_vco_freq_khz = 3600000;
+	}
+
+	if (!debug->disable_dfs_bypass && bp->integrated_info)
+		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+			clk_dce->dfs_bypass_enabled = true;
+
+	dce_clock_read_ss_info(clk_dce);
 
 	return &clk_dce->base;
 }
+#endif
 
-void dce_disp_clk_destroy(struct display_clock **disp_clk)
+void dce_dccg_destroy(struct dccg **dccg)
 {
-	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
 
 	kfree(clk_dce);
-	*disp_clk = NULL;
+	*dccg = NULL;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
index 0e717e0dc8f0..7ce0a54e548f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -33,6 +33,9 @@
 	.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
 	.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
 
+#define CLK_COMMON_REG_LIST_DCN_BASE() \
+	SR(DENTIST_DISPCLK_CNTL)
+
 #define CLK_SF(reg_name, field_name, post_fix)\
 	.field_name = reg_name ## __ ## field_name ## post_fix
 
@@ -40,58 +43,41 @@
 	CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
 	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
 
+#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh)
+
 #define CLK_REG_FIELD_LIST(type) \
 	type DPREFCLK_SRC_SEL; \
-	type DENTIST_DPREFCLK_WDIVIDER;
+	type DENTIST_DPREFCLK_WDIVIDER; \
+	type DENTIST_DISPCLK_WDIVIDER; \
+	type DENTIST_DPPCLK_WDIVIDER; \
+	type DENTIST_DISPCLK_CHG_DONE; \
+	type DENTIST_DPPCLK_CHG_DONE;
 
-struct dce_disp_clk_shift {
+struct dccg_shift {
 	CLK_REG_FIELD_LIST(uint8_t)
 };
 
-struct dce_disp_clk_mask {
+struct dccg_mask {
 	CLK_REG_FIELD_LIST(uint32_t)
 };
 
-struct dce_disp_clk_registers {
+struct dccg_registers {
 	uint32_t DPREFCLK_CNTL;
 	uint32_t DENTIST_DISPCLK_CNTL;
 };
 
-/* Array identifiers and count for the divider ranges.*/
-enum dce_divider_range_count {
-	DIVIDER_RANGE_01 = 0,
-	DIVIDER_RANGE_02,
-	DIVIDER_RANGE_03,
-	DIVIDER_RANGE_MAX /* == 3*/
-};
-
-enum dce_divider_error_types {
-	INVALID_DID = 0,
-	INVALID_DIVIDER = 1
-};
-
-struct dce_divider_range {
-	int div_range_start;
-	/* The end of this range of dividers.*/
-	int div_range_end;
-	/* The distance between each divider in this range.*/
-	int div_range_step;
-	/* The divider id for the lowest divider.*/
-	int did_min;
-	/* The divider id for the highest divider.*/
-	int did_max;
-};
-
-struct dce_disp_clk {
-	struct display_clock base;
-	const struct dce_disp_clk_registers *regs;
-	const struct dce_disp_clk_shift *clk_shift;
-	const struct dce_disp_clk_mask *clk_mask;
+struct dce_dccg {
+	struct dccg base;
+	const struct dccg_registers *regs;
+	const struct dccg_shift *clk_shift;
+	const struct dccg_mask *clk_mask;
 
 	struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
-	struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
 
-	bool use_max_disp_clk;
 	int dentist_vco_freq_khz;
 
 	/* Cache the status of DFS-bypass feature*/
@@ -106,32 +92,33 @@ struct dce_disp_clk {
 	int dprefclk_ss_percentage;
 	/* DPREFCLK SS percentage Divider (100 or 1000) */
 	int dprefclk_ss_divider;
-
-	/* max disp_clk from PPLIB for max validation display clock*/
-	int max_displ_clk_in_khz;
 };
 
 
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
 	struct dc_context *ctx,
-	const struct dce_disp_clk_registers *regs,
-	const struct dce_disp_clk_shift *clk_shift,
-	const struct dce_disp_clk_mask *clk_mask);
+	const struct dccg_registers *regs,
+	const struct dccg_shift *clk_shift,
+	const struct dccg_mask *clk_mask);
 
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
 	struct dc_context *ctx,
-	const struct dce_disp_clk_registers *regs,
-	const struct dce_disp_clk_shift *clk_shift,
-	const struct dce_disp_clk_mask *clk_mask);
+	const struct dccg_registers *regs,
+	const struct dccg_shift *clk_shift,
+	const struct dccg_mask *clk_mask);
 
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
 	struct dc_context *ctx,
-	const struct dce_disp_clk_registers *regs,
-	const struct dce_disp_clk_shift *clk_shift,
-	const struct dce_disp_clk_mask *clk_mask);
+	const struct dccg_registers *regs,
+	const struct dccg_shift *clk_shift,
+	const struct dccg_mask *clk_mask);
+
+struct dccg *dce120_dccg_create(struct dc_context *ctx);
 
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+#endif
 
-void dce_disp_clk_destroy(struct display_clock **disp_clk);
+void dce_dccg_destroy(struct dccg **dccg);
 
 #endif /* _DCE_CLOCKS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 057b8afd74bc..057407892618 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -249,7 +249,6 @@ struct dce_hwseq_registers {
 	uint32_t DISPCLK_FREQ_CHANGE_CNTL;
 	uint32_t RBBMIF_TIMEOUT_DIS;
 	uint32_t RBBMIF_TIMEOUT_DIS_2;
-	uint32_t DENTIST_DISPCLK_CNTL;
 	uint32_t DCHUBBUB_CRC_CTRL;
 	uint32_t DPP_TOP0_DPP_CRC_CTRL;
 	uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
@@ -496,8 +495,6 @@ struct dce_hwseq_registers {
 	type DOMAIN7_PGFSM_PWR_STATUS; \
 	type DCFCLK_GATE_DIS; \
 	type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
-	type DENTIST_DPPCLK_WDIVIDER; \
-	type DENTIST_DISPCLK_WDIVIDER; \
 	type VGA_TEST_ENABLE; \
 	type VGA_TEST_RENDER_START; \
 	type D1VGA_MODE_ENABLE; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 41f83ecd7469..ec3221333011 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -125,17 +125,54 @@ static void dce100_pplib_apply_display_requirements(
 	dc->prev_display_config = *pp_display_cfg;
 }
 
+/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+ * may not be programmed yet
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	uint32_t max_pix_clk = 0;
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe_ctx->stream == NULL)
+			continue;
+
+		/* do not check under lay */
+		if (pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+			max_pix_clk =
+				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+	}
+
+	if (max_pix_clk == 0)
+		ASSERT(0);
+
+	return max_pix_clk;
+}
+
 void dce100_set_bandwidth(
 	struct dc *dc,
 	struct dc_state *context,
 	bool decrease_allowed)
 {
-	if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
-		dc->res_pool->display_clock->funcs->set_clock(
-			dc->res_pool->display_clock,
-			context->bw.dce.dispclk_khz * 115 / 100);
-		dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
-	}
+	struct dc_clocks req_clks;
+
+	req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+	req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+	dc->res_pool->dccg->funcs->update_clocks(
+			dc->res_pool->dccg,
+			&req_clks,
+			decrease_allowed);
+
 	dce100_pplib_apply_display_requirements(dc, context);
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 38ec0d609297..8ed8eace42be 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -135,15 +135,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
 	CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -644,8 +644,8 @@ static void destruct(struct dce110_resource_pool *pool)
 		dce_aud_destroy(&pool->base.audios[i]);
 	}
 
-	if (pool->base.display_clock != NULL)
-		dce_disp_clk_destroy(&pool->base.display_clock);
+	if (pool->base.dccg != NULL)
+		dce_dccg_destroy(&pool->base.dccg);
 
 	if (pool->base.abm != NULL)
 		dce_abm_destroy(&pool->base.abm);
@@ -817,11 +817,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.display_clock = dce_disp_clk_create(ctx,
+	pool->base.dccg = dce_dccg_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.display_clock == NULL) {
+	if (pool->base.dccg == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -851,7 +851,7 @@ static bool construct(
 	 * max_clock_state
 	 */
 	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.display_clock->max_clks_state =
+		pool->base.dccg->max_clks_state =
 				static_clk_info.max_clocks_state;
 	{
 		struct irq_service_init_data init_data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index df027013e50c..1f7f25013217 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
 	struct dce110_compressor *cp110,
 	bool enabled)
 {
-	uint16_t counter = 0;
+	uint32_t counter = 0;
 	uint32_t addr = mmFBC_STATUS;
 	uint32_t value;
 
@@ -158,7 +158,7 @@ static void wait_for_fbc_state_changed(
 		counter++;
 	}
 
-	if (counter == 10) {
+	if (counter == 1000) {
 		DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
 			__func__);
 	} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 353ffcbdf5ba..9cbd5036db07 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1475,7 +1475,7 @@ static void power_down_controllers(struct dc *dc)
 {
 	int i;
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
 		dc->res_pool->timing_generators[i]->funcs->disable_crtc(
 				dc->res_pool->timing_generators[i]);
 	}
@@ -1515,12 +1515,13 @@ static void disable_vga_and_power_gate_all_controllers(
 	struct timing_generator *tg;
 	struct dc_context *ctx = dc->ctx;
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
 		tg = dc->res_pool->timing_generators[i];
 
 		if (tg->funcs->disable_vga)
 			tg->funcs->disable_vga(tg);
-
+	}
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		/* Enable CLOCK gating for each pipe BEFORE controller
 		 * powergating. */
 		enable_display_pipe_clock_gating(ctx,
@@ -1663,7 +1664,7 @@ static void dce110_set_displaymarks(
 	}
 }
 
-static void set_safe_displaymarks(
+void dce110_set_safe_displaymarks(
 	struct resource_context *res_ctx,
 	const struct resource_pool *pool)
 {
@@ -1755,23 +1756,15 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
 }
 
 /* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet.
- * TODO: after mode set, pre_mode_set = false,
- * may read PLL register to get pixel clock
+ * may not be programmed yet
  */
 static uint32_t get_max_pixel_clock_for_all_paths(
 	struct dc *dc,
-	struct dc_state *context,
-	bool pre_mode_set)
+	struct dc_state *context)
 {
 	uint32_t max_pix_clk = 0;
 	int i;
 
-	if (!pre_mode_set) {
-		/* TODO: read ASIC register to get pixel clock */
-		ASSERT(0);
-	}
-
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
@@ -1787,95 +1780,10 @@ static uint32_t get_max_pixel_clock_for_all_paths(
 				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
 	}
 
-	if (max_pix_clk == 0)
-		ASSERT(0);
-
 	return max_pix_clk;
 }
 
 /*
- * Find clock state based on clock requested. if clock value is 0, simply
- * set clock state as requested without finding clock state by clock value
- */
-
-static void apply_min_clocks(
-	struct dc *dc,
-	struct dc_state *context,
-	enum dm_pp_clocks_state *clocks_state,
-	bool pre_mode_set)
-{
-	struct state_dependent_clocks req_clocks = {0};
-
-	if (!pre_mode_set) {
-		/* set clock_state without verification */
-		if (context->dis_clk->funcs->set_min_clocks_state) {
-			context->dis_clk->funcs->set_min_clocks_state(
-				context->dis_clk, *clocks_state);
-			return;
-		}
-
-		/* TODO: This is incorrect. Figure out how to fix. */
-		context->dis_clk->funcs->apply_clock_voltage_request(
-			context->dis_clk,
-			DM_PP_CLOCK_TYPE_DISPLAY_CLK,
-			context->dis_clk->cur_clocks_value.dispclk_in_khz,
-			pre_mode_set,
-			false);
-
-		context->dis_clk->funcs->apply_clock_voltage_request(
-			context->dis_clk,
-			DM_PP_CLOCK_TYPE_PIXELCLK,
-			context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
-			pre_mode_set,
-			false);
-
-		context->dis_clk->funcs->apply_clock_voltage_request(
-			context->dis_clk,
-			DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
-			context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
-			pre_mode_set,
-			false);
-		return;
-	}
-
-	/* get the required state based on state dependent clocks:
-	 * display clock and pixel clock
-	 */
-	req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
-
-	req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
-			dc, context, true);
-
-	if (context->dis_clk->funcs->get_required_clocks_state) {
-		*clocks_state = context->dis_clk->funcs->get_required_clocks_state(
-			context->dis_clk, &req_clocks);
-		context->dis_clk->funcs->set_min_clocks_state(
-			context->dis_clk, *clocks_state);
-	} else {
-		context->dis_clk->funcs->apply_clock_voltage_request(
-			context->dis_clk,
-			DM_PP_CLOCK_TYPE_DISPLAY_CLK,
-			req_clocks.display_clk_khz,
-			pre_mode_set,
-			false);
-
-		context->dis_clk->funcs->apply_clock_voltage_request(
-			context->dis_clk,
-			DM_PP_CLOCK_TYPE_PIXELCLK,
-			req_clocks.pixel_clk_khz,
-			pre_mode_set,
-			false);
-
-		context->dis_clk->funcs->apply_clock_voltage_request(
-			context->dis_clk,
-			DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
-			req_clocks.pixel_clk_khz,
-			pre_mode_set,
-			false);
-	}
-}
-
-/*
  * Check if FBC can be enabled
  */
 static bool should_enable_fbc(struct dc *dc,
@@ -2093,7 +2001,6 @@ enum dc_status dce110_apply_ctx_to_hw(
 	struct dc_bios *dcb = dc->ctx->dc_bios;
 	enum dc_status status;
 	int i;
-	enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
 
 	/* Reset old context */
 	/* look up the targets that have been removed since last commit */
@@ -2127,55 +2034,9 @@ enum dc_status dce110_apply_ctx_to_hw(
 				PIPE_GATING_CONTROL_DISABLE);
 	}
 
-	set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
 	if (dc->fbc_compressor)
 		dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
 
-	/*TODO: when pplib works*/
-	apply_min_clocks(dc, context, &clocks_state, true);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
-		if (context->bw.dcn.calc_clk.fclk_khz
-				> dc->current_state->bw.dcn.cur_clk.fclk_khz) {
-			struct dm_pp_clock_for_voltage_req clock;
-
-			clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
-			clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
-			dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
-			dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
-			context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
-		}
-		if (context->bw.dcn.calc_clk.dcfclk_khz
-				> dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
-			struct dm_pp_clock_for_voltage_req clock;
-
-			clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-			clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
-			dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
-			dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
-			context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
-		}
-		if (context->bw.dcn.calc_clk.dispclk_khz
-				> dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
-			dc->res_pool->display_clock->funcs->set_clock(
-				dc->res_pool->display_clock,
-				context->bw.dcn.calc_clk.dispclk_khz);
-			dc->current_state->bw.dcn.cur_clk.dispclk_khz =
-				context->bw.dcn.calc_clk.dispclk_khz;
-			context->bw.dcn.cur_clk.dispclk_khz =
-				context->bw.dcn.calc_clk.dispclk_khz;
-		}
-	} else
-#endif
-	if (context->bw.dce.dispclk_khz
-			> dc->current_state->bw.dce.dispclk_khz) {
-		dc->res_pool->display_clock->funcs->set_clock(
-			dc->res_pool->display_clock,
-			context->bw.dce.dispclk_khz * 115 / 100);
-	}
-
 	dce110_setup_audio_dto(dc, context);
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2204,9 +2065,6 @@ enum dc_status dce110_apply_ctx_to_hw(
 			return status;
 	}
 
-	/* to save power */
-	apply_min_clocks(dc, context, &clocks_state, false);
-
 	dcb->funcs->set_scratch_critical_state(dcb, false);
 
 	if (dc->fbc_compressor)
@@ -2694,15 +2552,20 @@ static void dce110_set_bandwidth(
 	struct dc_state *context,
 	bool decrease_allowed)
 {
-	dce110_set_displaymarks(dc, context);
+	struct dc_clocks req_clks;
 
-	if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
-		dc->res_pool->display_clock->funcs->set_clock(
-			dc->res_pool->display_clock,
-			context->bw.dce.dispclk_khz * 115 / 100);
-		dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
-	}
+	req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+	req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+	if (decrease_allowed)
+		dce110_set_displaymarks(dc, context);
+	else
+		dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
+	dc->res_pool->dccg->funcs->update_clocks(
+			dc->res_pool->dccg,
+			&req_clks,
+			decrease_allowed);
 	pplib_apply_display_requirements(dc, context);
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index f48d5a68d238..d6db3dbd9015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -60,6 +60,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
 
 void dce110_power_down(struct dc *dc);
 
+void dce110_set_safe_displaymarks(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool);
+
 void dce110_fill_display_configs(
 	const struct dc_state *context,
 	struct dm_pp_display_configuration *pp_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 20c029089551..3edaa006bd57 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -146,15 +146,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
 #define SRI(reg_name, block, id)\
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
 	CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -679,8 +679,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.display_clock != NULL)
-		dce_disp_clk_destroy(&pool->base.display_clock);
+	if (pool->base.dccg != NULL)
+		dce_dccg_destroy(&pool->base.dccg);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -1179,11 +1179,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.display_clock = dce110_disp_clk_create(ctx,
+	pool->base.dccg = dce110_dccg_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.display_clock == NULL) {
+	if (pool->base.dccg == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1213,7 +1213,7 @@ static bool construct(
 	 * max_clock_state
 	 */
 	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.display_clock->max_clks_state =
+		pool->base.dccg->max_clks_state =
 				static_clk_info.max_clocks_state;
 
 	{
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 00c0a1ef15eb..9e1afb11e6ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -146,15 +146,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
 	CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -668,8 +668,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.display_clock != NULL)
-		dce_disp_clk_destroy(&pool->base.display_clock);
+	if (pool->base.dccg != NULL)
+		dce_dccg_destroy(&pool->base.dccg);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -1000,7 +1000,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-	clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
 			mem_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1010,7 +1010,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
 	/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
-	clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
 			mem_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1020,7 +1020,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-	clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
 	/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -1030,7 +1030,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
 	/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
-	clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
 	/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -1124,11 +1124,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.display_clock = dce112_disp_clk_create(ctx,
+	pool->base.dccg = dce112_dccg_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.display_clock == NULL) {
+	if (pool->base.dccg == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1158,7 +1158,7 @@ static bool construct(
 	 * max_clock_state
 	 */
 	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.display_clock->max_clks_state =
+		pool->base.dccg->max_clks_state =
 				static_clk_info.max_clocks_state;
 
 	{
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 2d58daccc005..8381f27a2361 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -494,8 +494,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.display_clock != NULL)
-		dce_disp_clk_destroy(&pool->base.display_clock);
+	if (pool->base.dccg != NULL)
+		dce_dccg_destroy(&pool->base.dccg);
 }
 
 static void read_dce_straps(
@@ -775,7 +775,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-	clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
 			mem_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -785,7 +785,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
 	/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
-	clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
 			mem_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -795,7 +795,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[0].clocks_in_khz;
 	clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-	clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
 	/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -805,7 +805,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
 	/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
-	clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+	clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
 			mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
 	/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
 	clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -894,11 +894,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.display_clock = dce120_disp_clk_create(ctx);
-	if (pool->base.display_clock == NULL) {
+	pool->base.dccg = dce120_dccg_create(ctx);
+	if (pool->base.dccg == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
-		goto disp_clk_create_fail;
+		goto dccg_create_fail;
 	}
 
 	pool->base.dmcu = dce_dmcu_create(ctx,
@@ -1011,7 +1011,7 @@ static bool construct(
 
 irqs_create_fail:
 controller_create_fail:
-disp_clk_create_fail:
+dccg_create_fail:
 clk_src_create_fail:
 res_create_fail:
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 48a068964722..2ac95ec2bf96 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -153,15 +153,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
 	CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
 	CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -683,8 +683,8 @@ static void destruct(struct dce110_resource_pool *pool)
 		}
 	}
 
-	if (pool->base.display_clock != NULL)
-		dce_disp_clk_destroy(&pool->base.display_clock);
+	if (pool->base.dccg != NULL)
+		dce_dccg_destroy(&pool->base.dccg);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -822,11 +822,11 @@ static bool dce80_construct(
 		}
 	}
 
-	pool->base.display_clock = dce_disp_clk_create(ctx,
+	pool->base.dccg = dce_dccg_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.display_clock == NULL) {
+	if (pool->base.dccg == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -852,7 +852,7 @@ static bool dce80_construct(
 		goto res_create_fail;
 	}
 	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.display_clock->max_clks_state =
+		pool->base.dccg->max_clks_state =
 				static_clk_info.max_clocks_state;
 
 	{
@@ -1006,11 +1006,11 @@ static bool dce81_construct(
 		}
 	}
 
-	pool->base.display_clock = dce_disp_clk_create(ctx,
+	pool->base.dccg = dce_dccg_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.display_clock == NULL) {
+	if (pool->base.dccg == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1037,7 +1037,7 @@ static bool dce81_construct(
 	}
 
 	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.display_clock->max_clks_state =
+		pool->base.dccg->max_clks_state =
 				static_clk_info.max_clocks_state;
 
 	{
@@ -1187,11 +1187,11 @@ static bool dce83_construct(
 		}
 	}
 
-	pool->base.display_clock = dce_disp_clk_create(ctx,
+	pool->base.dccg = dce_dccg_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.display_clock == NULL) {
+	if (pool->base.dccg == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1218,7 +1218,7 @@ static bool dce83_construct(
 	}
 
 	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.display_clock->max_clks_state =
+		pool->base.dccg->max_clks_state =
 				static_clk_info.max_clocks_state;
 
 	{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index c69fa4bfab0a..742fd497ed00 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -145,10 +145,10 @@ static bool dpp_get_optimal_number_of_taps(
 		pixel_width = scl_data->viewport.width;
 
 	/* Some ASICs does not support FP16 scaling, so we reject modes require this*/
-	if (scl_data->viewport.width != scl_data->h_active &&
-		scl_data->viewport.height != scl_data->v_active &&
+	if (scl_data->format == PIXEL_FORMAT_FP16 &&
 		dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-		scl_data->format == PIXEL_FORMAT_FP16)
+		scl_data->ratios.horz.value != dc_fixpt_one.value &&
+		scl_data->ratios.vert.value != dc_fixpt_one.value)
 		return false;
 
 	if (scl_data->viewport.width > scl_data->h_active &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 943143efbb82..1ea91e153d3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -190,10 +190,17 @@ static uint32_t convert_and_clamp(
 }
 
 
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
+{
+	REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+}
+
 void hubbub1_program_watermarks(
 	struct hubbub *hubbub,
 	struct dcn_watermark_set *watermarks,
-	unsigned int refclk_mhz)
+	unsigned int refclk_mhz,
+	bool safe_to_lower)
 {
 	uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
 	/*
@@ -202,191 +209,259 @@ void hubbub1_program_watermarks(
 	 */
 	uint32_t prog_wm_value;
 
-	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
-			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
 
 	/* Repeat for water mark set A, B, C and D. */
 	/* clock state A */
-	prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
-			refclk_mhz, 0x1fffff);
-	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
-
-	DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
-		"HW register value = 0x%x\n",
-		watermarks->a.urgent_ns, prog_wm_value);
-
-	prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
-			refclk_mhz, 0x1fffff);
-	REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
-	DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
-		"HW register value = 0x%x\n",
-		watermarks->a.pte_meta_urgent_ns, prog_wm_value);
-
-	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
-		prog_wm_value = convert_and_clamp(
-				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
-		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
-		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
-			"HW register value = 0x%x\n",
-			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+	if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) {
+		hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+				refclk_mhz, 0x1fffff);
+		REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+			"HW register value = 0x%x\n",
+			watermarks->a.urgent_ns, prog_wm_value);
+	}
+
+	if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) {
+		hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
+		prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+				refclk_mhz, 0x1fffff);
+		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+			"HW register value = 0x%x\n",
+			watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+	}
+
+	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+		if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+				> hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+			hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+					watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+			prog_wm_value = convert_and_clamp(
+					watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+					refclk_mhz, 0x1fffff);
+			REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+			DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+				"HW register value = 0x%x\n",
+				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+		}
 
+		if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+				> hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) {
+			hubbub->watermarks.a.cstate_pstate.cstate_exit_ns =
+					watermarks->a.cstate_pstate.cstate_exit_ns;
+			prog_wm_value = convert_and_clamp(
+					watermarks->a.cstate_pstate.cstate_exit_ns,
+					refclk_mhz, 0x1fffff);
+			REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+			DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+				"HW register value = 0x%x\n",
+				watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+		}
+	}
 
+	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+			> hubbub->watermarks.a.cstate_pstate.pstate_change_ns) {
+		hubbub->watermarks.a.cstate_pstate.pstate_change_ns =
+				watermarks->a.cstate_pstate.pstate_change_ns;
-	prog_wm_value = convert_and_clamp(
-			watermarks->a.cstate_pstate.cstate_exit_ns,
-			refclk_mhz, 0x1fffff);
-	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
-	DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
-		"HW register value = 0x%x\n",
-		watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+		prog_wm_value = convert_and_clamp(
+				watermarks->a.cstate_pstate.pstate_change_ns,
+				refclk_mhz, 0x1fffff);
+		REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+			"HW register value = 0x%x\n\n",
+			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
 	}
 
-	prog_wm_value = convert_and_clamp(
-			watermarks->a.cstate_pstate.pstate_change_ns,
-			refclk_mhz, 0x1fffff);
-	REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
-	DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
-		"HW register value = 0x%x\n\n",
-		watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-
-
 	/* clock state B */
-	prog_wm_value = convert_and_clamp(
-			watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
-	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+	if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) {
+		hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
257 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" 281 refclk_mhz, 0x1fffff);
258 "HW register value = 0x%x\n", 282 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
259 watermarks->b.urgent_ns, prog_wm_value);
260
261
262 prog_wm_value = convert_and_clamp(
263 watermarks->b.pte_meta_urgent_ns,
264 refclk_mhz, 0x1fffff);
265 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
266 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
267 "HW register value = 0x%x\n",
268 watermarks->b.pte_meta_urgent_ns, prog_wm_value);
269 283
284 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
285 "HW register value = 0x%x\n",
286 watermarks->b.urgent_ns, prog_wm_value);
287 }
270 288
271 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) { 289 if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) {
272 prog_wm_value = convert_and_clamp( 290 hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
273 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, 291 prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
274 refclk_mhz, 0x1fffff); 292 refclk_mhz, 0x1fffff);
275 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); 293 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
276 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n" 294 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
277 "HW register value = 0x%x\n", 295 "HW register value = 0x%x\n",
278 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); 296 watermarks->b.pte_meta_urgent_ns, prog_wm_value);
297 }
298
299 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
300 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
301 > hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
302 hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
303 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
304 prog_wm_value = convert_and_clamp(
305 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
306 refclk_mhz, 0x1fffff);
307 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
308 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
309 "HW register value = 0x%x\n",
310 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
311 }
279 312
313 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
314 > hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) {
315 hubbub->watermarks.b.cstate_pstate.cstate_exit_ns =
316 watermarks->b.cstate_pstate.cstate_exit_ns;
317 prog_wm_value = convert_and_clamp(
318 watermarks->b.cstate_pstate.cstate_exit_ns,
319 refclk_mhz, 0x1fffff);
320 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
321 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
322 "HW register value = 0x%x\n",
323 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
324 }
325 }
280 326
327 if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
328 > hubbub->watermarks.b.cstate_pstate.pstate_change_ns) {
329 hubbub->watermarks.b.cstate_pstate.pstate_change_ns =
330 watermarks->b.cstate_pstate.pstate_change_ns;
281 prog_wm_value = convert_and_clamp( 331 prog_wm_value = convert_and_clamp(
282 watermarks->b.cstate_pstate.cstate_exit_ns, 332 watermarks->b.cstate_pstate.pstate_change_ns,
283 refclk_mhz, 0x1fffff); 333 refclk_mhz, 0x1fffff);
284 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); 334 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
285 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" 335 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
286 "HW register value = 0x%x\n", 336 "HW register value = 0x%x\n\n",
287 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); 337 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
288 } 338 }
289 339
290 prog_wm_value = convert_and_clamp(
291 watermarks->b.cstate_pstate.pstate_change_ns,
292 refclk_mhz, 0x1fffff);
293 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
294 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
295 "HW register value = 0x%x\n",
296 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
297
298 /* clock state C */ 340 /* clock state C */
299 prog_wm_value = convert_and_clamp( 341 if (safe_to_lower || watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) {
300 watermarks->c.urgent_ns, refclk_mhz, 0x1fffff); 342 hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
301 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); 343 prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
302 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" 344 refclk_mhz, 0x1fffff);
303 "HW register value = 0x%x\n", 345 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
304 watermarks->c.urgent_ns, prog_wm_value);
305
306
307 prog_wm_value = convert_and_clamp(
308 watermarks->c.pte_meta_urgent_ns,
309 refclk_mhz, 0x1fffff);
310 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
311 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
312 "HW register value = 0x%x\n",
313 watermarks->c.pte_meta_urgent_ns, prog_wm_value);
314 346
347 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
348 "HW register value = 0x%x\n",
349 watermarks->c.urgent_ns, prog_wm_value);
350 }
315 351
316 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) { 352 if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) {
317 prog_wm_value = convert_and_clamp( 353 hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
318 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, 354 prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
319 refclk_mhz, 0x1fffff); 355 refclk_mhz, 0x1fffff);
320 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); 356 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
321 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n" 357 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
322 "HW register value = 0x%x\n", 358 "HW register value = 0x%x\n",
323 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); 359 watermarks->c.pte_meta_urgent_ns, prog_wm_value);
360 }
324 361
362 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
363 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
364 > hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
365 hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
366 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
367 prog_wm_value = convert_and_clamp(
368 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
369 refclk_mhz, 0x1fffff);
370 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
371 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
372 "HW register value = 0x%x\n",
373 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
374 }
375
376 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
377 > hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) {
378 hubbub->watermarks.c.cstate_pstate.cstate_exit_ns =
379 watermarks->c.cstate_pstate.cstate_exit_ns;
380 prog_wm_value = convert_and_clamp(
381 watermarks->c.cstate_pstate.cstate_exit_ns,
382 refclk_mhz, 0x1fffff);
383 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
384 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
385 "HW register value = 0x%x\n",
386 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
387 }
388 }
325 389
390 if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
391 > hubbub->watermarks.c.cstate_pstate.pstate_change_ns) {
392 hubbub->watermarks.c.cstate_pstate.pstate_change_ns =
393 watermarks->c.cstate_pstate.pstate_change_ns;
326 prog_wm_value = convert_and_clamp( 394 prog_wm_value = convert_and_clamp(
327 watermarks->c.cstate_pstate.cstate_exit_ns, 395 watermarks->c.cstate_pstate.pstate_change_ns,
328 refclk_mhz, 0x1fffff); 396 refclk_mhz, 0x1fffff);
329 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); 397 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
330 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" 398 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
331 "HW register value = 0x%x\n", 399 "HW register value = 0x%x\n\n",
332 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); 400 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
333 } 401 }
334 402
335 prog_wm_value = convert_and_clamp(
336 watermarks->c.cstate_pstate.pstate_change_ns,
337 refclk_mhz, 0x1fffff);
338 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
339 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
340 "HW register value = 0x%x\n",
341 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
342
343 /* clock state D */ 403 /* clock state D */
344 prog_wm_value = convert_and_clamp( 404 if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) {
345 watermarks->d.urgent_ns, refclk_mhz, 0x1fffff); 405 hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
346 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); 406 prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
347 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
348 "HW register value = 0x%x\n",
349 watermarks->d.urgent_ns, prog_wm_value);
350
351 prog_wm_value = convert_and_clamp(
352 watermarks->d.pte_meta_urgent_ns,
353 refclk_mhz, 0x1fffff);
354 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
355 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
356 "HW register value = 0x%x\n",
357 watermarks->d.pte_meta_urgent_ns, prog_wm_value);
358
359
360 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
361 prog_wm_value = convert_and_clamp(
362 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
363 refclk_mhz, 0x1fffff); 407 refclk_mhz, 0x1fffff);
364 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); 408 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
365 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
366 "HW register value = 0x%x\n",
367 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
368 409
410 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
411 "HW register value = 0x%x\n",
412 watermarks->d.urgent_ns, prog_wm_value);
413 }
369 414
370 prog_wm_value = convert_and_clamp( 415 if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) {
371 watermarks->d.cstate_pstate.cstate_exit_ns, 416 hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
417 prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
372 refclk_mhz, 0x1fffff); 418 refclk_mhz, 0x1fffff);
373 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); 419 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
374 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" 420 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
375 "HW register value = 0x%x\n", 421 "HW register value = 0x%x\n",
376 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); 422 watermarks->d.pte_meta_urgent_ns, prog_wm_value);
377 } 423 }
378 424
425 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
426 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
427 > hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
428 hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
429 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
430 prog_wm_value = convert_and_clamp(
431 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
432 refclk_mhz, 0x1fffff);
433 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
434 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
435 "HW register value = 0x%x\n",
436 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
437 }
379 438
380 prog_wm_value = convert_and_clamp( 439 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
381 watermarks->d.cstate_pstate.pstate_change_ns, 440 > hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) {
382 refclk_mhz, 0x1fffff); 441 hubbub->watermarks.d.cstate_pstate.cstate_exit_ns =
383 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); 442 watermarks->d.cstate_pstate.cstate_exit_ns;
384 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" 443 prog_wm_value = convert_and_clamp(
385 "HW register value = 0x%x\n\n", 444 watermarks->d.cstate_pstate.cstate_exit_ns,
386 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); 445 refclk_mhz, 0x1fffff);
446 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
447 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
448 "HW register value = 0x%x\n",
449 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
450 }
451 }
387 452
388 REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, 453 if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
389 DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); 454 > hubbub->watermarks.d.cstate_pstate.pstate_change_ns) {
455 hubbub->watermarks.d.cstate_pstate.pstate_change_ns =
456 watermarks->d.cstate_pstate.pstate_change_ns;
457 prog_wm_value = convert_and_clamp(
458 watermarks->d.cstate_pstate.pstate_change_ns,
459 refclk_mhz, 0x1fffff);
460 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
461 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
462 "HW register value = 0x%x\n\n",
463 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
464 }
390 465
391 REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL, 466 REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
392 DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); 467 DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
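Each watermark above follows the same safe_to_lower idiom: the hubbub caches the value last programmed into hardware and only writes the register when the new watermark is higher (always safe) or when the caller has declared that lowering is safe. A minimal standalone sketch of the idiom, with stand-in types rather than the DC register macros:

#include <stdbool.h>
#include <stdint.h>

struct wm_cache { uint32_t urgent_ns; };

/* write_reg stands in for the convert_and_clamp() + REG_WRITE() pair above. */
static void program_urgent_wm(struct wm_cache *cached, uint32_t new_ns,
                              bool safe_to_lower,
                              void (*write_reg)(uint32_t ns))
{
        if (safe_to_lower || new_ns > cached->urgent_ns) {
                cached->urgent_ns = new_ns; /* mirror what the HW now holds */
                write_reg(new_ns);
        }
}

Raising a watermark immediately keeps the arbiter conservative; deferring decreases until safe_to_lower avoids dropping below what the still-active configuration needs.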
@@ -408,6 +483,11 @@ void hubbub1_update_dchub(
408 struct hubbub *hubbub, 483 struct hubbub *hubbub,
409 struct dchub_init_data *dh_data) 484 struct dchub_init_data *dh_data)
410{ 485{
486 if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
487 ASSERT(false);
488 /*should not come here*/
489 return;
490 }
411 /* TODO: port code from dal2 */ 491 /* TODO: port code from dal2 */
412 switch (dh_data->fb_mode) { 492 switch (dh_data->fb_mode) {
413 case FRAME_BUFFER_MODE_ZFB_ONLY: 493 case FRAME_BUFFER_MODE_ZFB_ONLY:
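The new early-out treats a zero REG() offset as "register not present on this ASIC variant": ZFB setup is skipped entirely rather than writing to a nonexistent SDPIF window. A standalone sketch of the guard, with a stand-in for DC's register lookup:

#include <assert.h>
#include <stdint.h>

/* Stand-in for REG(): a zero offset marks a register that is not wired
 * up in this variant's register list. */
static int program_zfb_window(uint32_t fb_top_offset)
{
        if (fb_top_offset == 0) {
                assert(0 && "ZFB registers absent; caller should not get here");
                return -1;
        }
        /* ... program the framebuffer window registers ... */
        return 0;
}

The same zero-offset convention guards the DOMAIN0/DOMAIN1 power-gate writes in dcn10_hw_sequencer.c further below.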
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 6315a0e6b0d6..d6e596eef4c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -185,6 +185,7 @@ struct hubbub {
185 const struct dcn_hubbub_shift *shifts; 185 const struct dcn_hubbub_shift *shifts;
186 const struct dcn_hubbub_mask *masks; 186 const struct dcn_hubbub_mask *masks;
187 unsigned int debug_test_index_pstate; 187 unsigned int debug_test_index_pstate;
188 struct dcn_watermark_set watermarks;
188}; 189};
189 190
190void hubbub1_update_dchub( 191void hubbub1_update_dchub(
@@ -194,10 +195,13 @@ void hubbub1_update_dchub(
194bool hubbub1_verify_allow_pstate_change_high( 195bool hubbub1_verify_allow_pstate_change_high(
195 struct hubbub *hubbub); 196 struct hubbub *hubbub);
196 197
198void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
199
197void hubbub1_program_watermarks( 200void hubbub1_program_watermarks(
198 struct hubbub *hubbub, 201 struct hubbub *hubbub,
199 struct dcn_watermark_set *watermarks, 202 struct dcn_watermark_set *watermarks,
200 unsigned int refclk_mhz); 203 unsigned int refclk_mhz,
204 bool safe_to_lower);
201 205
202void hubbub1_toggle_watermark_change_req( 206void hubbub1_toggle_watermark_change_req(
203 struct hubbub *hubbub); 207 struct hubbub *hubbub);
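A caller now pairs the workaround with the extended signature; this usage mirrors dcn10_apply_ctx_for_surface and dcn10_set_bandwidth in the hunks below:

if (dc->hwseq->wa.DEGVIDCN10_254)
        hubbub1_wm_change_req_wa(dc->res_pool->hubbub);

hubbub1_program_watermarks(dc->res_pool->hubbub,
                           &context->bw.dcn.watermarks,
                           dc->res_pool->ref_clock_inKhz / 1000,
                           true /* safe_to_lower */);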
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index c28085be39ff..93f52c58bc69 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -166,7 +166,7 @@ void hubp1_program_size_and_rotation(
166 /* Program data and meta surface pitch (calculation from addrlib) 166 /* Program data and meta surface pitch (calculation from addrlib)
167 * 444 or 420 luma 167 * 444 or 420 luma
168 */ 168 */
169 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 169 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
170 ASSERT(plane_size->video.chroma_pitch != 0); 170 ASSERT(plane_size->video.chroma_pitch != 0);
171 /* Chroma pitch zero can cause system hang! */ 171 /* Chroma pitch zero can cause system hang! */
172 172
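The tightened test assumes the surface-format enum keeps subsampled video formats in one contiguous range, so the chroma-pitch assertion only fires for formats that actually carry a separate chroma plane. A sketch of the idiom with a hypothetical enum layout:

/* Hypothetical layout: subsampled (4:2:0) formats sit between the two
 * markers, so a half-open range test selects exactly those formats. */
enum surface_pixel_format {
        SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
        SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
        SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr,
        SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
        SURFACE_PIXEL_FORMAT_VIDEO_ARGB8888, /* full-rate, no chroma plane */
};

static bool has_separate_chroma(enum surface_pixel_format f)
{
        return f >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN &&
               f <  SURFACE_PIXEL_FORMAT_SUBSAMPLE_END;
}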
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 03eb736a312f..3b2cb2d3b8a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -337,13 +337,13 @@ void dcn10_log_hw_state(struct dc *dc)
337 337
338 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n" 338 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
339 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n", 339 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
340 dc->current_state->bw.dcn.calc_clk.dcfclk_khz, 340 dc->current_state->bw.dcn.clk.dcfclk_khz,
341 dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 341 dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
342 dc->current_state->bw.dcn.calc_clk.dispclk_khz, 342 dc->current_state->bw.dcn.clk.dispclk_khz,
343 dc->current_state->bw.dcn.calc_clk.dppclk_khz, 343 dc->current_state->bw.dcn.clk.dppclk_khz,
344 dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz, 344 dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
345 dc->current_state->bw.dcn.calc_clk.fclk_khz, 345 dc->current_state->bw.dcn.clk.fclk_khz,
346 dc->current_state->bw.dcn.calc_clk.socclk_khz); 346 dc->current_state->bw.dcn.clk.socclk_khz);
347 347
348 log_mpc_crc(dc); 348 log_mpc_crc(dc);
349 349
@@ -415,6 +415,8 @@ static void dpp_pg_control(
415 415
416 if (hws->ctx->dc->debug.disable_dpp_power_gate) 416 if (hws->ctx->dc->debug.disable_dpp_power_gate)
417 return; 417 return;
418 if (REG(DOMAIN1_PG_CONFIG) == 0)
419 return;
418 420
419 switch (dpp_inst) { 421 switch (dpp_inst) {
420 case 0: /* DPP0 */ 422 case 0: /* DPP0 */
@@ -465,6 +467,8 @@ static void hubp_pg_control(
465 467
466 if (hws->ctx->dc->debug.disable_hubp_power_gate) 468 if (hws->ctx->dc->debug.disable_hubp_power_gate)
467 return; 469 return;
470 if (REG(DOMAIN0_PG_CONFIG) == 0)
471 return;
468 472
469 switch (hubp_inst) { 473 switch (hubp_inst) {
470 case 0: /* DCHUBP0 */ 474 case 0: /* DCHUBP0 */
@@ -865,7 +869,8 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
865 return; 869 return;
866 870
867 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); 871 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
868 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; 872 if (opp != NULL)
873 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
869 874
870 dc->optimized_required = true; 875 dc->optimized_required = true;
871 876
@@ -1010,7 +1015,7 @@ static void dcn10_init_hw(struct dc *dc)
1010 /* Reset all MPCC muxes */ 1015 /* Reset all MPCC muxes */
1011 dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc); 1016 dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
1012 1017
1013 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1018 for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1014 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 1019 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1015 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1020 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1016 struct hubp *hubp = dc->res_pool->hubps[i]; 1021 struct hubp *hubp = dc->res_pool->hubps[i];
@@ -1343,10 +1348,11 @@ static void dcn10_enable_per_frame_crtc_position_reset(
1343 1348
1344 DC_SYNC_INFO("Setting up\n"); 1349 DC_SYNC_INFO("Setting up\n");
1345 for (i = 0; i < group_size; i++) 1350 for (i = 0; i < group_size; i++)
1346 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset( 1351 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1347 grouped_pipes[i]->stream_res.tg, 1352 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1348 grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst, 1353 grouped_pipes[i]->stream_res.tg,
1349 &grouped_pipes[i]->stream->triggered_crtc_reset); 1354 grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
1355 &grouped_pipes[i]->stream->triggered_crtc_reset);
1350 1356
1351 DC_SYNC_INFO("Waiting for trigger\n"); 1357 DC_SYNC_INFO("Waiting for trigger\n");
1352 1358
@@ -1952,18 +1958,17 @@ static void update_dchubp_dpp(
1952 * divided by 2 1958 * divided by 2
1953 */ 1959 */
1954 if (plane_state->update_flags.bits.full_update) { 1960 if (plane_state->update_flags.bits.full_update) {
1955 bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <= 1961 bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
1956 context->bw.dcn.cur_clk.dispclk_khz / 2; 1962 dc->res_pool->dccg->clks.dispclk_khz / 2;
1957 1963
1958 dpp->funcs->dpp_dppclk_control( 1964 dpp->funcs->dpp_dppclk_control(
1959 dpp, 1965 dpp,
1960 should_divided_by_2, 1966 should_divided_by_2,
1961 true); 1967 true);
1962 1968
1963 dc->current_state->bw.dcn.cur_clk.dppclk_khz = 1969 dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
1964 should_divided_by_2 ? 1970 dc->res_pool->dccg->clks.dispclk_khz / 2 :
1965 context->bw.dcn.cur_clk.dispclk_khz / 2 : 1971 dc->res_pool->dccg->clks.dispclk_khz;
1966 context->bw.dcn.cur_clk.dispclk_khz;
1967 } 1972 }
1968 1973
1969 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG 1974 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
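The divide-by-2 decision above runs only on full updates: when the calculated dppclk fits under half of the current dispclk, the DPP is clocked at dispclk/2 and the dccg's cached dppclk is set to the effective rate. The same logic as a standalone helper:

/* Returns the effective dppclk and whether the DPP divider engages. */
static int pick_dppclk_khz(int calc_dppclk_khz, int cur_dispclk_khz,
                           bool *divide_by_2)
{
        *divide_by_2 = calc_dppclk_khz <= cur_dispclk_khz / 2;
        return *divide_by_2 ? cur_dispclk_khz / 2 : cur_dispclk_khz;
}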
@@ -2153,12 +2158,12 @@ static void dcn10_pplib_apply_display_requirements(
2153{ 2158{
2154 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; 2159 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
2155 2160
2156 pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 2161 pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
2157 pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz; 2162 pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
2158 pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 2163 pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
2159 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 2164 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
2160 pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 2165 pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
2161 pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz; 2166 pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
2162 dce110_fill_display_configs(context, pp_display_cfg); 2167 dce110_fill_display_configs(context, pp_display_cfg);
2163 2168
2164 if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( 2169 if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
@@ -2220,8 +2225,6 @@ static void dcn10_apply_ctx_for_surface(
2220 int i; 2225 int i;
2221 struct timing_generator *tg; 2226 struct timing_generator *tg;
2222 bool removed_pipe[4] = { false }; 2227 bool removed_pipe[4] = { false };
2223 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
2224 bool program_water_mark = false;
2225 struct pipe_ctx *top_pipe_to_program = 2228 struct pipe_ctx *top_pipe_to_program =
2226 find_top_pipe_for_stream(dc, context, stream); 2229 find_top_pipe_for_stream(dc, context, stream);
2227 DC_LOGGER_INIT(dc->ctx->logger); 2230 DC_LOGGER_INIT(dc->ctx->logger);
@@ -2269,8 +2272,7 @@ static void dcn10_apply_ctx_for_surface(
2269 hwss1_plane_atomic_disconnect(dc, old_pipe_ctx); 2272 hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
2270 removed_pipe[i] = true; 2273 removed_pipe[i] = true;
2271 2274
2272 DC_LOG_DC( 2275 DC_LOG_DC("Reset mpcc for pipe %d\n",
2273 "Reset mpcc for pipe %d\n",
2274 old_pipe_ctx->pipe_idx); 2276 old_pipe_ctx->pipe_idx);
2275 } 2277 }
2276 } 2278 }
@@ -2283,248 +2285,41 @@ static void dcn10_apply_ctx_for_surface(
2283 if (num_planes == 0) 2285 if (num_planes == 0)
2284 false_optc_underflow_wa(dc, stream, tg); 2286 false_optc_underflow_wa(dc, stream, tg);
2285 2287
2286 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2288 for (i = 0; i < dc->res_pool->pipe_count; i++)
2287 struct pipe_ctx *old_pipe_ctx =
2288 &dc->current_state->res_ctx.pipe_ctx[i];
2289 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2290
2291 if (pipe_ctx->stream == stream &&
2292 pipe_ctx->plane_state &&
2293 pipe_ctx->plane_state->update_flags.bits.full_update)
2294 program_water_mark = true;
2295
2296 if (removed_pipe[i]) 2289 if (removed_pipe[i])
2297 dcn10_disable_plane(dc, old_pipe_ctx); 2290 dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2298 }
2299
2300 if (program_water_mark) {
2301 if (dc->debug.sanity_checks) {
2302 /* pstate stuck check after watermark update */
2303 dcn10_verify_allow_pstate_change_high(dc);
2304 }
2305 2291
2306 /* watermark is for all pipes */ 2292 if (dc->hwseq->wa.DEGVIDCN10_254)
2307 hubbub1_program_watermarks(dc->res_pool->hubbub, 2293 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2308 &context->bw.dcn.watermarks, ref_clk_mhz);
2309
2310 if (dc->debug.sanity_checks) {
2311 /* pstate stuck check after watermark update */
2312 dcn10_verify_allow_pstate_change_high(dc);
2313 }
2314 }
2315/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2316 "\n============== Watermark parameters ==============\n"
2317 "a.urgent_ns: %d \n"
2318 "a.cstate_enter_plus_exit: %d \n"
2319 "a.cstate_exit: %d \n"
2320 "a.pstate_change: %d \n"
2321 "a.pte_meta_urgent: %d \n"
2322 "b.urgent_ns: %d \n"
2323 "b.cstate_enter_plus_exit: %d \n"
2324 "b.cstate_exit: %d \n"
2325 "b.pstate_change: %d \n"
2326 "b.pte_meta_urgent: %d \n",
2327 context->bw.dcn.watermarks.a.urgent_ns,
2328 context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
2329 context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
2330 context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
2331 context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
2332 context->bw.dcn.watermarks.b.urgent_ns,
2333 context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
2334 context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
2335 context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
2336 context->bw.dcn.watermarks.b.pte_meta_urgent_ns
2337 );
2338 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2339 "\nc.urgent_ns: %d \n"
2340 "c.cstate_enter_plus_exit: %d \n"
2341 "c.cstate_exit: %d \n"
2342 "c.pstate_change: %d \n"
2343 "c.pte_meta_urgent: %d \n"
2344 "d.urgent_ns: %d \n"
2345 "d.cstate_enter_plus_exit: %d \n"
2346 "d.cstate_exit: %d \n"
2347 "d.pstate_change: %d \n"
2348 "d.pte_meta_urgent: %d \n"
2349 "========================================================\n",
2350 context->bw.dcn.watermarks.c.urgent_ns,
2351 context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
2352 context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
2353 context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
2354 context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
2355 context->bw.dcn.watermarks.d.urgent_ns,
2356 context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
2357 context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
2358 context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
2359 context->bw.dcn.watermarks.d.pte_meta_urgent_ns
2360 );
2361*/
2362}
2363
2364static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
2365{
2366 return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
2367}
2368
2369static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
2370{
2371 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2372 context->bw.dcn.calc_clk.dppclk_khz;
2373 bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
2374 context->bw.dcn.cur_clk.dispclk_khz;
2375 int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2376 bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
2377 context->bw.dcn.cur_clk.dppclk_khz;
2378
2379 /* increase clock, looking for div is 0 for current, request div is 1*/
2380 if (dispclk_increase) {
2381 /* already divided by 2, no need to reach target clk with 2 steps*/
2382 if (cur_dpp_div)
2383 return context->bw.dcn.calc_clk.dispclk_khz;
2384
2385 /* request disp clk is lower than maximum supported dpp clk,
2386 * no need to reach target clk with two steps.
2387 */
2388 if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
2389 return context->bw.dcn.calc_clk.dispclk_khz;
2390
2391 /* target dpp clk not request divided by 2, still within threshold */
2392 if (!request_dpp_div)
2393 return context->bw.dcn.calc_clk.dispclk_khz;
2394
2395 } else {
2396 /* decrease clock, looking for current dppclk divided by 2,
2397 * request dppclk not divided by 2.
2398 */
2399
2400 /* current dpp clk not divided by 2, no need to ramp*/
2401 if (!cur_dpp_div)
2402 return context->bw.dcn.calc_clk.dispclk_khz;
2403
2404 /* current disp clk is lower than current maximum dpp clk,
2405 * no need to ramp
2406 */
2407 if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
2408 return context->bw.dcn.calc_clk.dispclk_khz;
2409
2410 /* request dpp clk need to be divided by 2 */
2411 if (request_dpp_div)
2412 return context->bw.dcn.calc_clk.dispclk_khz;
2413 }
2414
2415 return disp_clk_threshold;
2416}
2417
2418static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
2419{
2420 int i;
2421 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2422 context->bw.dcn.calc_clk.dppclk_khz;
2423
2424 int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
2425
2426 /* set disp clk to dpp clk threshold */
2427 dc->res_pool->display_clock->funcs->set_clock(
2428 dc->res_pool->display_clock,
2429 dispclk_to_dpp_threshold);
2430
2431 /* update request dpp clk division option */
2432 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2433 struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
2434
2435 if (!pipe_ctx->plane_state)
2436 continue;
2437
2438 pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
2439 pipe_ctx->plane_res.dpp,
2440 request_dpp_div,
2441 true);
2442 }
2443
2444 /* If target clk not same as dppclk threshold, set to target clock */
2445 if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
2446 dc->res_pool->display_clock->funcs->set_clock(
2447 dc->res_pool->display_clock,
2448 context->bw.dcn.calc_clk.dispclk_khz);
2449 }
2450
2451 context->bw.dcn.cur_clk.dispclk_khz =
2452 context->bw.dcn.calc_clk.dispclk_khz;
2453 context->bw.dcn.cur_clk.dppclk_khz =
2454 context->bw.dcn.calc_clk.dppclk_khz;
2455 context->bw.dcn.cur_clk.max_supported_dppclk_khz =
2456 context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2457} 2294}
2458 2295
2459static void dcn10_set_bandwidth( 2296static void dcn10_set_bandwidth(
2460 struct dc *dc, 2297 struct dc *dc,
2461 struct dc_state *context, 2298 struct dc_state *context,
2462 bool decrease_allowed) 2299 bool safe_to_lower)
2463{ 2300{
2464 struct pp_smu_display_requirement_rv *smu_req_cur = 2301 if (dc->debug.sanity_checks)
2465 &dc->res_pool->pp_smu_req;
2466 struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
2467 struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
2468
2469 if (dc->debug.sanity_checks) {
2470 dcn10_verify_allow_pstate_change_high(dc); 2302 dcn10_verify_allow_pstate_change_high(dc);
2471 }
2472
2473 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
2474 return;
2475
2476 if (should_set_clock(
2477 decrease_allowed,
2478 context->bw.dcn.calc_clk.dcfclk_khz,
2479 dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
2480 context->bw.dcn.cur_clk.dcfclk_khz =
2481 context->bw.dcn.calc_clk.dcfclk_khz;
2482 smu_req.hard_min_dcefclk_khz =
2483 context->bw.dcn.calc_clk.dcfclk_khz;
2484 }
2485
2486 if (should_set_clock(
2487 decrease_allowed,
2488 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
2489 dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
2490 context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
2491 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
2492 }
2493
2494 if (should_set_clock(
2495 decrease_allowed,
2496 context->bw.dcn.calc_clk.fclk_khz,
2497 dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
2498 context->bw.dcn.cur_clk.fclk_khz =
2499 context->bw.dcn.calc_clk.fclk_khz;
2500 smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
2501 }
2502
2503 smu_req.display_count = context->stream_count;
2504
2505 if (pp_smu->set_display_requirement)
2506 pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
2507 2303
2508 *smu_req_cur = smu_req; 2304 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2305 if (context->stream_count == 0)
2306 context->bw.dcn.clk.phyclk_khz = 0;
2509 2307
2510 /* make sure dcf clk is before dpp clk to 2308 dc->res_pool->dccg->funcs->update_clocks(
2511 * make sure we have enough voltage to run dpp clk 2309 dc->res_pool->dccg,
2512 */ 2310 &context->bw.dcn.clk,
2513 if (should_set_clock( 2311 safe_to_lower);
2514 decrease_allowed,
2515 context->bw.dcn.calc_clk.dispclk_khz,
2516 dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
2517 2312
2518 ramp_up_dispclk_with_dpp(dc, context); 2313 dcn10_pplib_apply_display_requirements(dc, context);
2519 } 2314 }
2520 2315
2521 dcn10_pplib_apply_display_requirements(dc, context); 2316 hubbub1_program_watermarks(dc->res_pool->hubbub,
2317 &context->bw.dcn.watermarks,
2318 dc->res_pool->ref_clock_inKhz / 1000,
2319 true);
2522 2320
2523 if (dc->debug.sanity_checks) { 2321 if (dc->debug.sanity_checks)
2524 dcn10_verify_allow_pstate_change_high(dc); 2322 dcn10_verify_allow_pstate_change_high(dc);
2525 }
2526
2527 /* need to fix this function. not doing the right thing here */
2528} 2323}
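The dispclk/dppclk ramp logic removed above moves behind the dccg's update_clocks hook. The direction rule that the removed should_set_clock() helper expressed carries over with decrease_allowed renamed to safe_to_lower; a sketch of that rule:

/* Increases always apply; decreases apply only when the caller permits. */
static bool should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
{
        return (safe_to_lower && new_khz < cur_khz) || new_khz > cur_khz;
}

Watermarks are then reprogrammed with safe_to_lower = true, the hubbub's per-watermark cache keeping the repeat writes cheap.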
2529 2324
2530static void set_drr(struct pipe_ctx **pipe_ctx, 2325static void set_drr(struct pipe_ctx **pipe_ctx,
@@ -2707,8 +2502,14 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2707 2502
2708static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) 2503static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2709{ 2504{
2710 if (hws->ctx->dc->res_pool->hubbub != NULL) 2505 if (hws->ctx->dc->res_pool->hubbub != NULL) {
2711 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data); 2506 struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
2507
2508 if (hubp->funcs->hubp_update_dchub)
2509 hubp->funcs->hubp_update_dchub(hubp, dh_data);
2510 else
2511 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
2512 }
2712} 2513}
2713 2514
2714static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) 2515static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 21fa40ac0786..fd9dc70190a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -995,6 +995,8 @@ void dcn10_link_encoder_disable_output(
995 995
996 if (!dcn10_is_dig_enabled(enc)) { 996 if (!dcn10_is_dig_enabled(enc)) {
997 /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ 997 /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
998 /* in DP_Alt_No_Connect case, the DIG is already turned off
999 after executing the PHY w/a sequence; do not touch the PHY any more */
998 return; 1000 return;
999 } 1001 }
1000 /* Power-down RX and disable GPU PHY should be paired. 1002 /* Power-down RX and disable GPU PHY should be paired.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 2da325ce781b..771e0cf29bba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -65,6 +65,68 @@
65#include "dce/dce_abm.h" 65#include "dce/dce_abm.h"
66#include "dce/dce_dmcu.h" 66#include "dce/dce_dmcu.h"
67 67
68const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
69 .rob_buffer_size_kbytes = 64,
70 .det_buffer_size_kbytes = 164,
71 .dpte_buffer_size_in_pte_reqs = 42,
72 .dpp_output_buffer_pixels = 2560,
73 .opp_output_buffer_lines = 1,
74 .pixel_chunk_size_kbytes = 8,
75 .pte_enable = 1,
76 .pte_chunk_size_kbytes = 2,
77 .meta_chunk_size_kbytes = 2,
78 .writeback_chunk_size_kbytes = 2,
79 .line_buffer_size_bits = 589824,
80 .max_line_buffer_lines = 12,
81 .IsLineBufferBppFixed = 0,
82 .LineBufferFixedBpp = -1,
83 .writeback_luma_buffer_size_kbytes = 12,
84 .writeback_chroma_buffer_size_kbytes = 8,
85 .max_num_dpp = 4,
86 .max_num_wb = 2,
87 .max_dchub_pscl_bw_pix_per_clk = 4,
88 .max_pscl_lb_bw_pix_per_clk = 2,
89 .max_lb_vscl_bw_pix_per_clk = 4,
90 .max_vscl_hscl_bw_pix_per_clk = 4,
91 .max_hscl_ratio = 4,
92 .max_vscl_ratio = 4,
93 .hscl_mults = 4,
94 .vscl_mults = 4,
95 .max_hscl_taps = 8,
96 .max_vscl_taps = 8,
97 .dispclk_ramp_margin_percent = 1,
98 .underscan_factor = 1.10,
99 .min_vblank_lines = 14,
100 .dppclk_delay_subtotal = 90,
101 .dispclk_delay_subtotal = 42,
102 .dcfclk_cstate_latency = 10,
103 .max_inter_dcn_tile_repeaters = 8,
104 .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
105 .bug_forcing_LC_req_same_size_fixed = 0,
106};
107
108const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
109 .sr_exit_time_us = 9.0,
110 .sr_enter_plus_exit_time_us = 11.0,
111 .urgent_latency_us = 4.0,
112 .writeback_latency_us = 12.0,
113 .ideal_dram_bw_after_urgent_percent = 80.0,
114 .max_request_size_bytes = 256,
115 .downspread_percent = 0.5,
116 .dram_page_open_time_ns = 50.0,
117 .dram_rw_turnaround_time_ns = 17.5,
118 .dram_return_buffer_per_channel_bytes = 8192,
119 .round_trip_ping_latency_dcfclk_cycles = 128,
120 .urgent_out_of_order_return_per_channel_bytes = 256,
121 .channel_interleave_bytes = 256,
122 .num_banks = 8,
123 .num_chans = 2,
124 .vmm_page_size_bytes = 4096,
125 .dram_clock_change_latency_us = 17.0,
126 .writeback_dram_clock_change_latency_us = 23.0,
127 .return_bus_width_bytes = 64,
128};
129
68#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL 130#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
69 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f 131 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
70 #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 132 #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
@@ -437,7 +499,7 @@ static const struct dc_debug debug_defaults_drv = {
437 */ 499 */
438 .min_disp_clk_khz = 100000, 500 .min_disp_clk_khz = 100000,
439 501
440 .disable_pplib_clock_request = true, 502 .disable_pplib_clock_request = false,
441 .disable_pplib_wm_range = false, 503 .disable_pplib_wm_range = false,
442 .pplib_wm_report_mode = WM_REPORT_DEFAULT, 504 .pplib_wm_report_mode = WM_REPORT_DEFAULT,
443 .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, 505 .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
@@ -681,6 +743,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
681 hws->masks = &hwseq_mask; 743 hws->masks = &hwseq_mask;
682 hws->wa.DEGVIDCN10_253 = true; 744 hws->wa.DEGVIDCN10_253 = true;
683 hws->wa.false_optc_underflow = true; 745 hws->wa.false_optc_underflow = true;
746 hws->wa.DEGVIDCN10_254 = true;
684 } 747 }
685 return hws; 748 return hws;
686} 749}
@@ -791,8 +854,8 @@ static void destruct(struct dcn10_resource_pool *pool)
791 if (pool->base.dmcu != NULL) 854 if (pool->base.dmcu != NULL)
792 dce_dmcu_destroy(&pool->base.dmcu); 855 dce_dmcu_destroy(&pool->base.dmcu);
793 856
794 if (pool->base.display_clock != NULL) 857 if (pool->base.dccg != NULL)
795 dce_disp_clk_destroy(&pool->base.display_clock); 858 dce_dccg_destroy(&pool->base.dccg);
796 859
797 kfree(pool->base.pp_smu); 860 kfree(pool->base.pp_smu);
798} 861}
@@ -1005,8 +1068,7 @@ static bool construct(
1005 1068
1006 ctx->dc_bios->regs = &bios_regs; 1069 ctx->dc_bios->regs = &bios_regs;
1007 1070
1008 pool->base.res_cap = &res_cap; 1071 pool->base.res_cap = &res_cap;
1009
1010 pool->base.funcs = &dcn10_res_pool_funcs; 1072 pool->base.funcs = &dcn10_res_pool_funcs;
1011 1073
1012 /* 1074 /*
@@ -1072,8 +1134,8 @@ static bool construct(
1072 } 1134 }
1073 } 1135 }
1074 1136
1075 pool->base.display_clock = dce120_disp_clk_create(ctx); 1137 pool->base.dccg = dcn1_dccg_create(ctx);
1076 if (pool->base.display_clock == NULL) { 1138 if (pool->base.dccg == NULL) {
1077 dm_error("DC: failed to create display clock!\n"); 1139 dm_error("DC: failed to create display clock!\n");
1078 BREAK_TO_DEBUGGER(); 1140 BREAK_TO_DEBUGGER();
1079 goto fail; 1141 goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index eac4bfe12257..58ed2055ef9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -40,7 +40,7 @@ enum wm_set_id {
40 WM_B, 40 WM_B,
41 WM_C, 41 WM_C,
42 WM_D, 42 WM_D,
43 WM_COUNT, 43 WM_SET_COUNT,
44}; 44};
45 45
46struct pp_smu_wm_set_range { 46struct pp_smu_wm_set_range {
@@ -53,10 +53,10 @@ struct pp_smu_wm_set_range {
53 53
54struct pp_smu_wm_range_sets { 54struct pp_smu_wm_range_sets {
55 uint32_t num_reader_wm_sets; 55 uint32_t num_reader_wm_sets;
56 struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT]; 56 struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT];
57 57
58 uint32_t num_writer_wm_sets; 58 uint32_t num_writer_wm_sets;
59 struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT]; 59 struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT];
60}; 60};
61 61
62struct pp_smu_display_requirement_rv { 62struct pp_smu_display_requirement_rv {
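Renaming the sentinel from WM_COUNT to WM_SET_COUNT makes the common enum idiom explicit: the final enumerator is not a watermark set itself but the number of sets, used to size the arrays above:

enum wm_set_id { WM_A, WM_B, WM_C, WM_D, WM_SET_COUNT /* sentinel, == 4 */ };

struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT]; /* one slot per set */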
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index ab8c77d4e6df..2b83f922ac02 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -137,7 +137,7 @@ struct dm_pp_clock_range_for_wm_set {
137 enum dm_pp_wm_set_id wm_set_id; 137 enum dm_pp_wm_set_id wm_set_id;
138 uint32_t wm_min_eng_clk_in_khz; 138 uint32_t wm_min_eng_clk_in_khz;
139 uint32_t wm_max_eng_clk_in_khz; 139 uint32_t wm_max_eng_clk_in_khz;
140 uint32_t wm_min_memg_clk_in_khz; 140 uint32_t wm_min_mem_clk_in_khz;
141 uint32_t wm_max_mem_clk_in_khz; 141 uint32_t wm_max_mem_clk_in_khz;
142}; 142};
143 143
@@ -150,7 +150,7 @@ struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
150 enum dm_pp_wm_set_id wm_set_id; 150 enum dm_pp_wm_set_id wm_set_id;
151 uint32_t wm_min_dcfclk_clk_in_khz; 151 uint32_t wm_min_dcfclk_clk_in_khz;
152 uint32_t wm_max_dcfclk_clk_in_khz; 152 uint32_t wm_max_dcfclk_clk_in_khz;
153 uint32_t wm_min_memg_clk_in_khz; 153 uint32_t wm_min_mem_clk_in_khz;
154 uint32_t wm_max_mem_clk_in_khz; 154 uint32_t wm_max_mem_clk_in_khz;
155}; 155};
156 156
@@ -158,7 +158,7 @@ struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
158 enum dm_pp_wm_set_id wm_set_id; 158 enum dm_pp_wm_set_id wm_set_id;
159 uint32_t wm_min_socclk_clk_in_khz; 159 uint32_t wm_min_socclk_clk_in_khz;
160 uint32_t wm_max_socclk_clk_in_khz; 160 uint32_t wm_max_socclk_clk_in_khz;
161 uint32_t wm_min_memg_clk_in_khz; 161 uint32_t wm_min_mem_clk_in_khz;
162 uint32_t wm_max_mem_clk_in_khz; 162 uint32_t wm_max_mem_clk_in_khz;
163}; 163};
164 164
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index f83a608f93e9..d97ca6528f9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -36,11 +36,10 @@ CFLAGS_display_mode_lib.o := $(dml_ccflags)
36CFLAGS_display_pipe_clocks.o := $(dml_ccflags) 36CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
37CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags) 37CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
38CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags) 38CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
39CFLAGS_soc_bounding_box.o := $(dml_ccflags)
40CFLAGS_dml_common_defs.o := $(dml_ccflags) 39CFLAGS_dml_common_defs.o := $(dml_ccflags)
41 40
42DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ 41DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
43 soc_bounding_box.o dml_common_defs.o 42 dml_common_defs.o
44 43
45AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) 44AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
46 45
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index fd9d97aab071..dddeb0d4db8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,67 +26,8 @@
26#include "display_mode_lib.h" 26#include "display_mode_lib.h"
27#include "dc_features.h" 27#include "dc_features.h"
28 28
29static const struct _vcs_dpi_ip_params_st dcn1_0_ip = { 29extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
30 .rob_buffer_size_kbytes = 64, 30extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
31 .det_buffer_size_kbytes = 164,
32 .dpte_buffer_size_in_pte_reqs = 42,
33 .dpp_output_buffer_pixels = 2560,
34 .opp_output_buffer_lines = 1,
35 .pixel_chunk_size_kbytes = 8,
36 .pte_enable = 1,
37 .pte_chunk_size_kbytes = 2,
38 .meta_chunk_size_kbytes = 2,
39 .writeback_chunk_size_kbytes = 2,
40 .line_buffer_size_bits = 589824,
41 .max_line_buffer_lines = 12,
42 .IsLineBufferBppFixed = 0,
43 .LineBufferFixedBpp = -1,
44 .writeback_luma_buffer_size_kbytes = 12,
45 .writeback_chroma_buffer_size_kbytes = 8,
46 .max_num_dpp = 4,
47 .max_num_wb = 2,
48 .max_dchub_pscl_bw_pix_per_clk = 4,
49 .max_pscl_lb_bw_pix_per_clk = 2,
50 .max_lb_vscl_bw_pix_per_clk = 4,
51 .max_vscl_hscl_bw_pix_per_clk = 4,
52 .max_hscl_ratio = 4,
53 .max_vscl_ratio = 4,
54 .hscl_mults = 4,
55 .vscl_mults = 4,
56 .max_hscl_taps = 8,
57 .max_vscl_taps = 8,
58 .dispclk_ramp_margin_percent = 1,
59 .underscan_factor = 1.10,
60 .min_vblank_lines = 14,
61 .dppclk_delay_subtotal = 90,
62 .dispclk_delay_subtotal = 42,
63 .dcfclk_cstate_latency = 10,
64 .max_inter_dcn_tile_repeaters = 8,
65 .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
66 .bug_forcing_LC_req_same_size_fixed = 0,
67};
68
69static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
70 .sr_exit_time_us = 9.0,
71 .sr_enter_plus_exit_time_us = 11.0,
72 .urgent_latency_us = 4.0,
73 .writeback_latency_us = 12.0,
74 .ideal_dram_bw_after_urgent_percent = 80.0,
75 .max_request_size_bytes = 256,
76 .downspread_percent = 0.5,
77 .dram_page_open_time_ns = 50.0,
78 .dram_rw_turnaround_time_ns = 17.5,
79 .dram_return_buffer_per_channel_bytes = 8192,
80 .round_trip_ping_latency_dcfclk_cycles = 128,
81 .urgent_out_of_order_return_per_channel_bytes = 256,
82 .channel_interleave_bytes = 256,
83 .num_banks = 8,
84 .num_chans = 2,
85 .vmm_page_size_bytes = 4096,
86 .dram_clock_change_latency_us = 17.0,
87 .writeback_dram_clock_change_latency_us = 23.0,
88 .return_bus_width_bytes = 64,
89};
90 31
91static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project) 32static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
92{ 33{
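With the tables moved, a single definition lives in dcn10_resource.c and display_mode_lib.c references it, the usual C one-definition pattern:

/* dcn10_resource.c: the only definition */
const struct _vcs_dpi_ip_params_st dcn1_0_ip = { /* ... values above ... */ };

/* display_mode_lib.c: reference the shared table */
extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;

This removes the DML-private static copy, so the resource code and the mode library share one set of IP parameters and SoC bounding-box values.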
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3c2abcb8a1b0..635206248889 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -27,7 +27,6 @@
27 27
28 28
29#include "dml_common_defs.h" 29#include "dml_common_defs.h"
30#include "soc_bounding_box.h"
31#include "dml1_display_rq_dlg_calc.h" 30#include "dml1_display_rq_dlg_calc.h"
32 31
33enum dml_project { 32enum dml_project {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 7fa0375939ae..6943801c5fd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -64,10 +64,9 @@ struct _vcs_dpi_voltage_scaling_st {
64 double dscclk_mhz; 64 double dscclk_mhz;
65 double dcfclk_mhz; 65 double dcfclk_mhz;
66 double socclk_mhz; 66 double socclk_mhz;
67 double dram_speed_mhz; 67 double dram_speed_mts;
68 double fabricclk_mhz; 68 double fabricclk_mhz;
69 double dispclk_mhz; 69 double dispclk_mhz;
70 double dram_bw_per_chan_gbps;
71 double phyclk_mhz; 70 double phyclk_mhz;
72 double dppclk_mhz; 71 double dppclk_mhz;
73}; 72};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
deleted file mode 100644
index 324239c77958..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "soc_bounding_box.h"
26#include "display_mode_lib.h"
27#include "dc_features.h"
28
29#include "dml_inline_defs.h"
30
31/*
32 * NOTE:
33 * This file is gcc-parseable HW gospel, coming straight from HW engineers.
34 *
35 * It doesn't adhere to Linux kernel style and sometimes will do things in odd
36 * ways. Unless there is something clearly wrong with it the code should
37 * remain as-is as it provides us with a guarantee from HW that it is correct.
38 */
39
40void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
41{
42 to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
43 to_box->sr_exit_time_us = from_box->sr_exit_time_us;
44 to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
45 to_box->urgent_latency_us = from_box->urgent_latency_us;
46 to_box->writeback_latency_us = from_box->writeback_latency_us;
47}
48
49voltage_scaling_st dml_socbb_voltage_scaling(
50 const soc_bounding_box_st *soc,
51 enum voltage_state voltage)
52{
53 const voltage_scaling_st *voltage_state;
54 const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
55
56 for (voltage_state = soc->clock_limits;
57 voltage_state < voltage_end && voltage_state->state != voltage;
58 voltage_state++) {
59 }
60
61 if (voltage_state < voltage_end)
62 return *voltage_state;
63 return soc->clock_limits[DC__VOLTAGE_STATES - 1];
64}
65
66double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
67{
68 double return_bw;
69
70 voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
71
72 return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
73 state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
74 * box->ideal_dram_bw_after_urgent_percent / 100.0);
75
76 return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
77
78 return return_bw;
79}
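For reference, the deleted dml_socbb_return_bw_mhz clamps return bandwidth to the minimum of return-bus throughput at dcfclk, urgent-derated DRAM bandwidth, and return-bus throughput at fabric clock. A worked example using the dcn1_0_soc constants above (64-byte return bus, 2 channels, 80% derate) and illustrative clock and per-channel DRAM numbers:

  bus at dcfclk (600 MHz)   : 64 * 600            = 38400 MB/s
  derated DRAM (8 Gbps/chan): 8 * 1000 * 2 * 0.80 = 12800 MB/s
  bus at fclk (600 MHz)     : 64 * 600            = 38400 MB/s
  return_bw = min(38400, 12800, 38400) = 12800 MB/s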
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 4beddca0180c..00d728e629fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -163,7 +163,7 @@ struct resource_pool {
 	unsigned int audio_count;
 	struct audio_support audio_support;
 
-	struct display_clock *display_clock;
+	struct dccg *dccg;
 	struct irq_service *irqs;
 
 	struct abm *abm;
@@ -256,8 +256,7 @@ struct dce_bw_output {
 };
 
 struct dcn_bw_output {
-	struct dc_clocks cur_clk;
-	struct dc_clocks calc_clk;
+	struct dc_clocks clk;
 	struct dcn_watermark_set watermarks;
 };
 
@@ -282,7 +281,7 @@ struct dc_state {
 	struct dcn_bw_internal_vars dcn_bw_vars;
 #endif
 
-	struct display_clock *dis_clk;
+	struct dccg *dis_clk;
 
 	struct kref refcount;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 132d18d4b293..ddbb673caa08 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -625,7 +625,7 @@ bool dcn_validate_bandwidth(
 
 unsigned int dcn_find_dcfclk_suits_all(
 	const struct dc *dc,
-	struct clocks_value *clocks);
+	struct dc_clocks *clocks);
 
 void dcn_bw_update_from_pplib(struct dc *dc);
 void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
index f5f69cd81f6f..3c7ccb68ecdb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
@@ -27,23 +27,7 @@
 #define __DISPLAY_CLOCK_H__
 
 #include "dm_services_types.h"
-
-
-struct clocks_value {
-	int dispclk_in_khz;
-	int max_pixelclk_in_khz;
-	int max_non_dp_phyclk_in_khz;
-	int max_dp_phyclk_in_khz;
-	bool dispclk_notify_pplib_done;
-	bool pixelclk_notify_pplib_done;
-	bool phyclk_notigy_pplib_done;
-	int dcfclock_in_khz;
-	int dppclk_in_khz;
-	int mclk_in_khz;
-	int phyclk_in_khz;
-	int common_vdd_level;
-};
-
+#include "dc.h"
 
 /* Structure containing all state-dependent clocks
  * (dependent on "enum clocks_state") */
@@ -52,34 +36,23 @@ struct state_dependent_clocks {
 	int pixel_clk_khz;
 };
 
-struct display_clock {
+struct dccg {
 	struct dc_context *ctx;
 	const struct display_clock_funcs *funcs;
 
 	enum dm_pp_clocks_state max_clks_state;
 	enum dm_pp_clocks_state cur_min_clks_state;
-	struct clocks_value cur_clocks_value;
+	struct dc_clocks clks;
 };
 
 struct display_clock_funcs {
-	int (*set_clock)(struct display_clock *disp_clk,
+	void (*update_clocks)(struct dccg *dccg,
+			struct dc_clocks *new_clocks,
+			bool safe_to_lower);
+	int (*set_dispclk)(struct dccg *dccg,
 			int requested_clock_khz);
 
-	enum dm_pp_clocks_state (*get_required_clocks_state)(
-			struct display_clock *disp_clk,
-			struct state_dependent_clocks *req_clocks);
-
-	bool (*set_min_clocks_state)(struct display_clock *disp_clk,
-			enum dm_pp_clocks_state dm_pp_clocks_state);
-
-	int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
-
-	bool (*apply_clock_voltage_request)(
-		struct display_clock *disp_clk,
-		enum dm_pp_clock_type clocks_type,
-		int clocks_in_khz,
-		bool pre_mode_set,
-		bool update_dp_phyclk);
+	int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
 };
 
 #endif /* __DISPLAY_CLOCK_H__ */
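The update_clocks()/safe_to_lower pairing replaces the old request/apply hooks with a single compare-and-commit step. The convention (also adopted by set_bandwidth() below) is: raise a clock as soon as a higher value is needed, but only lower it once the display state that required the old value is gone. A minimal sketch of the decision rule such an implementation would apply per clock, with a hypothetical helper name:

/* Sketch: commit a new clock only when it rises, or when lowering is safe. */
static bool should_set_clock(bool safe_to_lower, int new_clk_khz, int cur_clk_khz)
{
	return (safe_to_lower && new_clk_khz < cur_clk_khz) ||
	       new_clk_khz > cur_clk_khz;
}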
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index a71770ed4b9f..2506601120af 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -44,6 +44,7 @@ struct dce_hwseq_wa {
 	bool blnd_crtc_trigger;
 	bool DEGVIDCN10_253;
 	bool false_optc_underflow;
+	bool DEGVIDCN10_254;
 };
 
 struct hwseq_wa_state {
@@ -171,7 +172,7 @@ struct hw_sequencer_funcs {
 	void (*set_bandwidth)(
 			struct dc *dc,
 			struct dc_state *context,
-			bool decrease_allowed);
+			bool safe_to_lower);
 
 	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
 			int vmin, int vmax);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index dcdfa0f01551..604bea01fc13 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -78,7 +78,7 @@ const struct irq_source_info *find_irq_source_info(
 	struct irq_service *irq_service,
 	enum dc_irq_source source)
 {
-	if (source > DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+	if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
 		return NULL;
 
 	return &irq_service->info[source];
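The comparison fix above is a textbook off-by-one: info[] holds DAL_IRQ_SOURCES_NUMBER entries, so the valid indices run from 0 through DAL_IRQ_SOURCES_NUMBER - 1, and the old `>` test still let source == DAL_IRQ_SOURCES_NUMBER read one entry past the end of the array. The general guard for any N-entry table looks like this (names hypothetical):

/* Sketch: reject idx == n as well; only 0..n-1 index an n-entry array. */
static const struct irq_source_info *lookup(const struct irq_source_info *info,
					    int idx, int n)
{
	if (idx < 0 || idx >= n)
		return NULL;
	return &info[idx];
}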
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
index 2941b882b0b6..58bb42ed85ca 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -37,6 +37,10 @@
  * ********************************************************************
  */
 
+#define MAX_CONNECTOR_NUMBER_PER_SLOT	(16)
+#define MAX_BOARD_SLOTS			(4)
+#define INVALID_CONNECTOR_INDEX		((unsigned int)(-1))
+
 /* HPD unit id - HW direct translation */
 enum hpd_source_id {
 	HPD_SOURCEID1 = 0,
@@ -136,5 +140,47 @@ enum sync_source {
 	SYNC_SOURCE_DUAL_GPU_PIN
 };
 
+/* connector sizes in millimeters - from BiosParserTypes.hpp */
+#define CONNECTOR_SIZE_DVI	40
+#define CONNECTOR_SIZE_VGA	32
+#define CONNECTOR_SIZE_HDMI	16
+#define CONNECTOR_SIZE_DP	16
+#define CONNECTOR_SIZE_MINI_DP	9
+#define CONNECTOR_SIZE_UNKNOWN	30
+
+enum connector_layout_type {
+	CONNECTOR_LAYOUT_TYPE_UNKNOWN,
+	CONNECTOR_LAYOUT_TYPE_DVI_D,
+	CONNECTOR_LAYOUT_TYPE_DVI_I,
+	CONNECTOR_LAYOUT_TYPE_VGA,
+	CONNECTOR_LAYOUT_TYPE_HDMI,
+	CONNECTOR_LAYOUT_TYPE_DP,
+	CONNECTOR_LAYOUT_TYPE_MINI_DP,
+};
+struct connector_layout_info {
+	struct graphics_object_id connector_id;
+	enum connector_layout_type connector_type;
+	unsigned int length;
+	unsigned int position; /* offset in mm from right side of the board */
+};
+
+/* length and width in mm */
+struct slot_layout_info {
+	unsigned int length;
+	unsigned int width;
+	unsigned int num_of_connectors;
+	struct connector_layout_info connectors[MAX_CONNECTOR_NUMBER_PER_SLOT];
+};
+
+struct board_layout_info {
+	unsigned int num_of_slots;
 
+	/* indicates valid information in bracket layout structure. */
+	unsigned int is_number_of_slots_valid : 1;
+	unsigned int is_slots_size_valid : 1;
+	unsigned int is_connector_offsets_valid : 1;
+	unsigned int is_connector_lengths_valid : 1;
+
+	struct slot_layout_info slots[MAX_BOARD_SLOTS];
+};
 #endif
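The new layout structures describe physical connector placement for reporting purposes, with per-field validity bits so partially known boards can still be described. A hypothetical usage sketch (all values invented) for a one-slot board carrying a DP and an HDMI connector:

struct board_layout_info layout = {
	.num_of_slots = 1,
	.is_number_of_slots_valid = 1,
	.is_slots_size_valid = 1,
	.is_connector_offsets_valid = 1,
	.is_connector_lengths_valid = 1,
};

layout.slots[0].length = 160;	/* mm */
layout.slots[0].width = 20;	/* mm */
layout.slots[0].num_of_connectors = 2;
layout.slots[0].connectors[0].connector_type = CONNECTOR_LAYOUT_TYPE_DP;
layout.slots[0].connectors[0].length = CONNECTOR_SIZE_DP;
layout.slots[0].connectors[0].position = 10;	/* mm from right edge */
layout.slots[0].connectors[1].connector_type = CONNECTOR_LAYOUT_TYPE_HDMI;
layout.slots[0].connectors[1].length = CONNECTOR_SIZE_HDMI;
layout.slots[0].connectors[1].position = 10 + CONNECTOR_SIZE_DP + 5;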
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index c4197432eb7c..33b3d755fe65 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -197,6 +197,11 @@ enum transmitter_color_depth {
 	TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
 };
 
+enum dp_alt_mode {
+	DP_Alt_mode__Unknown = 0,
+	DP_Alt_mode__Connect,
+	DP_Alt_mode__NoConnect,
+};
 /*
  *****************************************************************************
  * graphics_object_id struct
@@ -287,4 +292,15 @@ static inline enum engine_id dal_graphics_object_id_get_engine_id(
 		return (enum engine_id) id.id;
 	return ENGINE_ID_UNKNOWN;
 }
+
+static inline bool dal_graphics_object_id_equal(
+	struct graphics_object_id id_1,
+	struct graphics_object_id id_2)
+{
+	if ((id_1.id == id_2.id) && (id_1.enum_id == id_2.enum_id) &&
+		(id_1.type == id_2.type)) {
+		return true;
+	}
+	return false;
+}
 #endif
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 98edaefa2b47..ee69c949bfbf 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1723,8 +1723,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 		kvfree(rgb_regamma);
 	} else if (trans == TRANSFER_FUNCTION_HLG ||
 		trans == TRANSFER_FUNCTION_HLG12) {
-		rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
-				       (MAX_HW_POINTS + _EXTRA_POINTS),
+		rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+				       sizeof(*rgb_regamma),
 				       GFP_KERNEL);
 		if (!rgb_regamma)
 			goto rgb_regamma_alloc_fail;
@@ -1802,8 +1802,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 		kvfree(rgb_degamma);
 	} else if (trans == TRANSFER_FUNCTION_HLG ||
 		trans == TRANSFER_FUNCTION_HLG12) {
-		rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
-				       (MAX_HW_POINTS + _EXTRA_POINTS),
+		rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+				       sizeof(*rgb_degamma),
 				       GFP_KERNEL);
 		if (!rgb_degamma)
 			goto rgb_degamma_alloc_fail;
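kvcalloc(n, size, flags) is the overflow-checked spelling of kvzalloc(n * size, flags): if the n * size multiplication would wrap, it returns NULL instead of silently allocating a short buffer. The conversion above is mechanical; the general pattern for any zeroed array allocation is:

/* n elements, zeroed, with the n * sizeof(*buf) multiplication checked. */
buf = kvcalloc(n, sizeof(*buf), GFP_KERNEL);
if (!buf)
	return -ENOMEM;
/* ... use buf[0..n-1] ... */
kvfree(buf);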
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 5eb895fd98bf..9cb9ceb4d74d 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -27,6 +27,7 @@
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1	0x00010000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2	0x00020000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3	0x00040000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4	0x00080000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK	0xFFFF0000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT	16
 
@@ -34,6 +35,7 @@
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1	0x00000001
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2	0x00000002
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3	0x00000004
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4	0x00000008
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK	0x0000FFFF
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT	0
 
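These CAIL words pack the platform-supported link speeds into the upper 16 bits and the ASIC-supported speeds into the lower 16, so each side can be masked independently when capping a requested speed. A small sketch of testing Gen4 support from such a capability word (helper name hypothetical):

/* Sketch: GEN4 lies in the platform half selected by SUPPORT_MASK. */
static bool pcie_gen4_supported(uint32_t caps)
{
	return !!(caps & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
}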
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index b178176b72ac..265621d8945c 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -128,47 +128,57 @@ enum PP_FEATURE_MASK {
 	PP_OVERDRIVE_MASK = 0x4000,
 	PP_GFXOFF_MASK = 0x8000,
 	PP_ACG_MASK = 0x10000,
+	PP_STUTTER_MODE = 0x20000,
 };
 
+/**
+ * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ */
 struct amd_ip_funcs {
-	/* Name of IP block */
+	/** @name: Name of IP block */
 	char *name;
-	/* sets up early driver state (pre sw_init), does not configure hw - Optional */
+	/**
+	 * @early_init:
+	 *
+	 * sets up early driver state (pre sw_init),
+	 * does not configure hw - Optional
+	 */
 	int (*early_init)(void *handle);
-	/* sets up late driver/hw state (post hw_init) - Optional */
+	/** @late_init: sets up late driver/hw state (post hw_init) - Optional */
 	int (*late_init)(void *handle);
-	/* sets up driver state, does not configure hw */
+	/** @sw_init: sets up driver state, does not configure hw */
 	int (*sw_init)(void *handle);
-	/* tears down driver state, does not configure hw */
+	/** @sw_fini: tears down driver state, does not configure hw */
 	int (*sw_fini)(void *handle);
-	/* sets up the hw state */
+	/** @hw_init: sets up the hw state */
 	int (*hw_init)(void *handle);
-	/* tears down the hw state */
+	/** @hw_fini: tears down the hw state */
 	int (*hw_fini)(void *handle);
+	/** @late_fini: final cleanup */
 	void (*late_fini)(void *handle);
-	/* handles IP specific hw/sw changes for suspend */
+	/** @suspend: handles IP specific hw/sw changes for suspend */
 	int (*suspend)(void *handle);
-	/* handles IP specific hw/sw changes for resume */
+	/** @resume: handles IP specific hw/sw changes for resume */
 	int (*resume)(void *handle);
-	/* returns current IP block idle status */
+	/** @is_idle: returns current IP block idle status */
 	bool (*is_idle)(void *handle);
-	/* poll for idle */
+	/** @wait_for_idle: poll for idle */
 	int (*wait_for_idle)(void *handle);
-	/* check soft reset the IP block */
+	/** @check_soft_reset: check soft reset the IP block */
 	bool (*check_soft_reset)(void *handle);
-	/* pre soft reset the IP block */
+	/** @pre_soft_reset: pre soft reset the IP block */
 	int (*pre_soft_reset)(void *handle);
-	/* soft reset the IP block */
+	/** @soft_reset: soft reset the IP block */
 	int (*soft_reset)(void *handle);
-	/* post soft reset the IP block */
+	/** @post_soft_reset: post soft reset the IP block */
 	int (*post_soft_reset)(void *handle);
-	/* enable/disable cg for the IP block */
+	/** @set_clockgating_state: enable/disable cg for the IP block */
 	int (*set_clockgating_state)(void *handle,
 				     enum amd_clockgating_state state);
-	/* enable/disable pg for the IP block */
+	/** @set_powergating_state: enable/disable pg for the IP block */
 	int (*set_powergating_state)(void *handle,
 				     enum amd_powergating_state state);
-	/* get current clockgating status */
+	/** @get_clockgating_state: get current clockgating status */
 	void (*get_clockgating_state)(void *handle, u32 *flags);
 };
 
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 092d800b703a..33b4de4ad66e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
 	uint8_t  acggfxclkspreadpercent;
 	uint16_t acggfxclkspreadfreq;
 
-	uint32_t boardreserved[10];
+	uint8_t  Vr2_I2C_address;
+	uint8_t  padding_vr2[3];
+
+	uint32_t boardreserved[9];
 };
 
 /*
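Note the arithmetic: one uint8_t plus three bytes of explicit padding replace exactly one of the ten reserved uint32_t words, so the size of struct atom_smc_dpm_info_v4_1 and the offsets of every later field are unchanged, and existing firmware images keep parsing. A build-time check in this spirit, purely illustrative and not part of the patch (kernel code would phrase it as a BUILD_BUG_ON):

/* 1 + 3 + 9*4 bytes must equal the 10*4 reserved bytes they replace. */
static_assert(sizeof(uint8_t) + 3 * sizeof(uint8_t) + 9 * sizeof(uint32_t)
	      == 10 * sizeof(uint32_t), "v4_1 table size must not change");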
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 7852952d1fde..1d93a0c574c9 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -23,6 +23,8 @@
 #ifndef _DM_PP_INTERFACE_
 #define _DM_PP_INTERFACE_
 
+#include "dm_services_types.h"
+
 #define PP_MAX_CLOCK_LEVELS 16
 
 enum amd_pp_display_config_type{
@@ -189,39 +191,4 @@ struct pp_display_clock_request {
 	uint32_t clock_freq_in_khz;
 };
 
-#define PP_MAX_WM_SETS 4
-
-enum pp_wm_set_id {
-	DC_WM_SET_A = 0,
-	DC_WM_SET_B,
-	DC_WM_SET_C,
-	DC_WM_SET_D,
-	DC_WM_SET_INVALID = 0xffff,
-};
-
-struct pp_wm_set_with_dmif_clock_range_soc15 {
-	enum pp_wm_set_id wm_set_id;
-	uint32_t wm_min_dcefclk_in_khz;
-	uint32_t wm_max_dcefclk_in_khz;
-	uint32_t wm_min_memclk_in_khz;
-	uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_set_with_mcif_clock_range_soc15 {
-	enum pp_wm_set_id wm_set_id;
-	uint32_t wm_min_socclk_in_khz;
-	uint32_t wm_max_socclk_in_khz;
-	uint32_t wm_min_memclk_in_khz;
-	uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_sets_with_clock_ranges_soc15 {
-	uint32_t num_wm_sets_dmif;
-	uint32_t num_wm_sets_mcif;
-	struct pp_wm_set_with_dmif_clock_range_soc15
-		wm_sets_dmif[PP_MAX_WM_SETS];
-	struct pp_wm_set_with_mcif_clock_range_soc15
-		wm_sets_mcif[PP_MAX_WM_SETS];
-};
-
 #endif /* _DM_PP_INTERFACE_ */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 06f08f34a110..6a41b81c7325 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -192,7 +192,6 @@ struct amd_pp_simple_clock_info;
 struct amd_pp_display_configuration;
 struct amd_pp_clock_info;
 struct pp_display_clock_request;
-struct pp_wm_sets_with_clock_ranges_soc15;
 struct pp_clock_levels_with_voltage;
 struct pp_clock_levels_with_latency;
 struct amd_pp_clocks;
@@ -232,16 +231,19 @@ struct amd_pm_funcs {
 	void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
 	int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
 /* export to amdgpu */
-	void (*powergate_uvd)(void *handle, bool gate);
-	void (*powergate_vce)(void *handle, bool gate);
 	struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
 	int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
 			enum amd_pm_state_type *user_state);
 	int (*load_firmware)(void *handle);
 	int (*wait_for_fw_loading_complete)(void *handle);
+	int (*set_powergating_by_smu)(void *handle,
+				uint32_t block_type, bool gate);
 	int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
 	int (*set_power_limit)(void *handle, uint32_t n);
 	int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
+	int (*get_power_profile_mode)(void *handle, char *buf);
+	int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+	int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
 /* export to DC */
 	u32 (*get_sclk)(void *handle, bool low);
 	u32 (*get_mclk)(void *handle, bool low);
@@ -261,15 +263,12 @@
 				enum amd_pp_clock_type type,
 				struct pp_clock_levels_with_voltage *clocks);
 	int (*set_watermarks_for_clocks_ranges)(void *handle,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+					 void *clock_ranges);
 	int (*display_clock_voltage_request)(void *handle,
 				struct pp_display_clock_request *clock);
 	int (*get_display_mode_validation_clocks)(void *handle,
 				struct amd_pp_simple_clock_info *clocks);
-	int (*get_power_profile_mode)(void *handle, char *buf);
-	int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
-	int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
-	int (*set_mmhub_powergating_by_smu)(void *handle);
+	int (*notify_smu_enable_pwe)(void *handle);
 };
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d567be49c31b..145e5c403bea 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -221,29 +221,7 @@ static int pp_sw_reset(void *handle)
 static int pp_set_powergating_state(void *handle,
 				    enum amd_powergating_state state)
 {
-	struct amdgpu_device *adev = handle;
-	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret;
-
-	if (!hwmgr || !hwmgr->pm_en)
-		return 0;
-
-	if (hwmgr->hwmgr_func->gfx_off_control) {
-		/* Enable/disable GFX off through SMU */
-		ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
-				state == AMD_PG_STATE_GATE);
-		if (ret)
-			pr_err("gfx off control failed!\n");
-	}
-
-	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
-		pr_debug("%s was not implemented.\n", __func__);
-		return 0;
-	}
-
-	/* Enable/disable GFX per cu powergating through SMU */
-	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
-			state == AMD_PG_STATE_GATE);
+	return 0;
 }
 
 static int pp_suspend(void *handle)
@@ -1118,17 +1096,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 }
 
 static int pp_set_watermarks_for_clocks_ranges(void *handle,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+		void *clock_ranges)
 {
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
 	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
-			wm_with_clock_ranges);
+			clock_ranges);
 	mutex_unlock(&hwmgr->smu_lock);
 
 	return ret;
@@ -1168,19 +1146,78 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 	return ret;
 }
 
-static int pp_set_mmhub_powergating_by_smu(void *handle)
+static int pp_dpm_powergate_mmhub(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
 	if (!hwmgr || !hwmgr->pm_en)
 		return -EINVAL;
 
-	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
-	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+}
+
+static int pp_dpm_powergate_gfx(void *handle, bool gate)
+{
+	struct pp_hwmgr *hwmgr = handle;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
+
+	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
+		pr_info("%s was not implemented.\n", __func__);
+		return 0;
+	}
+
+	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+}
+
+static int pp_set_powergating_by_smu(void *handle,
+				uint32_t block_type, bool gate)
+{
+	int ret = 0;
+
+	switch (block_type) {
+	case AMD_IP_BLOCK_TYPE_UVD:
+	case AMD_IP_BLOCK_TYPE_VCN:
+		pp_dpm_powergate_uvd(handle, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_VCE:
+		pp_dpm_powergate_vce(handle, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_GMC:
+		pp_dpm_powergate_mmhub(handle);
+		break;
+	case AMD_IP_BLOCK_TYPE_GFX:
+		ret = pp_dpm_powergate_gfx(handle, gate);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int pp_notify_smu_enable_pwe(void *handle)
+{
+	struct pp_hwmgr *hwmgr = handle;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
+
+	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+		pr_info("%s was not implemented.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hwmgr->smu_lock);
+	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+	mutex_unlock(&hwmgr->smu_lock);
+
+	return 0;
 }
 
 static const struct amd_pm_funcs pp_dpm_funcs = {
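pp_set_powergating_by_smu() is now the single entry point amdgpu uses to gate any IP block through the SMU; the block type selects the legacy per-block helper. A hedged sketch of the caller side, assuming the usual adev->powerplay plumbing (the matching amdgpu wrapper is added elsewhere in this series):

/* Sketch: gate/ungate UVD through the consolidated powerplay hook. */
static int example_gate_uvd(struct amdgpu_device *adev, bool gate)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	if (!funcs || !funcs->set_powergating_by_smu)
		return 0;	/* not supported on this hwmgr */

	return funcs->set_powergating_by_smu(adev->powerplay.pp_handle,
					     AMD_IP_BLOCK_TYPE_UVD, gate);
}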
@@ -1189,8 +1226,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.force_performance_level = pp_dpm_force_performance_level,
 	.get_performance_level = pp_dpm_get_performance_level,
 	.get_current_power_state = pp_dpm_get_current_power_state,
-	.powergate_vce = pp_dpm_powergate_vce,
-	.powergate_uvd = pp_dpm_powergate_uvd,
 	.dispatch_tasks = pp_dpm_dispatch_tasks,
 	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
 	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
@@ -1210,6 +1245,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
 	.switch_power_profile = pp_dpm_switch_power_profile,
 	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
+	.set_powergating_by_smu = pp_set_powergating_by_smu,
 	.get_power_profile_mode = pp_get_power_profile_mode,
 	.set_power_profile_mode = pp_set_power_profile_mode,
 	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
@@ -1227,5 +1263,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
 	.display_clock_voltage_request = pp_display_clock_voltage_request,
 	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
-	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
 };
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index a0bb921fac22..53207e76b0f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 }
 
 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-	struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+	void *clock_ranges)
 {
 	PHM_FUNC_CHECK(hwmgr);
 
@@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 		return -EINVAL;
 
 	return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
-			wm_with_clock_ranges);
+			clock_ranges);
 }
 
 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 7047e29755c3..01dc46dc9c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1544,14 +1544,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
 	switch (hwmgr->chip_id) {
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-		*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
-		*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+		*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
+		*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
 		return;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS12:
-		*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
-		*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+		*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
+		*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
 		return;
 	default:
 		break;
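The old expressions divided the raw little-endian word and then byte-swapped the quotient; on little-endian hosts the two orderings happen to agree, which is why such bugs survive testing and only misbehave on big-endian machines. The rule is to convert to CPU byte order first, then do arithmetic (variable names hypothetical):

__le32 raw = profile->ulMaxVddc;	/* little-endian value from the VBIOS table */
uint32_t val;

val = le32_to_cpu(raw / 4);	/* wrong: arithmetic on the LE bit pattern */
val = le32_to_cpu(raw) / 4;	/* right: swap to CPU order, then scale */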
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 5325661fedff..d27c1c9df286 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
 	return 0;
 }
 
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+			struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+			struct atom_firmware_info_v3_2 *fw_info)
+{
+	uint32_t frequency = 0;
+
+	boot_values->ulRevision = fw_info->firmware_revision;
+	boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+	boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+	boot_values->usVddc = fw_info->bootup_vddc_mv;
+	boot_values->usVddci = fw_info->bootup_vddci_mv;
+	boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+	boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+	boot_values->ucCoolingID = fw_info->coolingsolution_id;
+	boot_values->ulSocClk = 0;
+	boot_values->ulDCEFClk = 0;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+		boot_values->ulSocClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+		boot_values->ulDCEFClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+		boot_values->ulEClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+		boot_values->ulVClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+		boot_values->ulDClk = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+			struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+			struct atom_firmware_info_v3_1 *fw_info)
+{
+	uint32_t frequency = 0;
+
+	boot_values->ulRevision = fw_info->firmware_revision;
+	boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+	boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+	boot_values->usVddc = fw_info->bootup_vddc_mv;
+	boot_values->usVddci = fw_info->bootup_vddci_mv;
+	boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+	boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+	boot_values->ucCoolingID = fw_info->coolingsolution_id;
+	boot_values->ulSocClk = 0;
+	boot_values->ulDCEFClk = 0;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+		boot_values->ulSocClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+		boot_values->ulDCEFClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+		boot_values->ulEClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+		boot_values->ulVClk = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+		boot_values->ulDClk = frequency;
+}
+
 int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 			struct pp_atomfwctrl_bios_boot_up_values *boot_values)
 {
-	struct atom_firmware_info_v3_1 *info = NULL;
+	struct atom_firmware_info_v3_2 *fwinfo_3_2;
+	struct atom_firmware_info_v3_1 *fwinfo_3_1;
+	struct atom_common_table_header *info = NULL;
 	uint16_t ix;
 
 	ix = GetIndexIntoMasterDataTable(firmwareinfo);
-	info = (struct atom_firmware_info_v3_1 *)
+	info = (struct atom_common_table_header *)
 		smu_atom_get_data_table(hwmgr->adev,
 				ix, NULL, NULL, NULL);
 
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 		return -EINVAL;
 	}
 
-	boot_values->ulRevision = info->firmware_revision;
-	boot_values->ulGfxClk = info->bootup_sclk_in10khz;
-	boot_values->ulUClk = info->bootup_mclk_in10khz;
-	boot_values->usVddc = info->bootup_vddc_mv;
-	boot_values->usVddci = info->bootup_vddci_mv;
-	boot_values->usMvddc = info->bootup_mvddc_mv;
-	boot_values->usVddGfx = info->bootup_vddgfx_mv;
-	boot_values->ucCoolingID = info->coolingsolution_id;
-	boot_values->ulSocClk = 0;
-	boot_values->ulDCEFClk = 0;
+	if ((info->format_revision == 3) && (info->content_revision == 2)) {
+		fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+		pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+				boot_values, fwinfo_3_2);
+	} else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+		fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+		pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+				boot_values, fwinfo_3_1);
+	} else {
+		pr_info("Fw info table revision does not match!");
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
 	param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
 	param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
 
+	param->Vr2_I2C_address = info->Vr2_I2C_address;
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe10aa4db5e6..22e21668c93a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
 	uint32_t ulUClk;
 	uint32_t ulSocClk;
 	uint32_t ulDCEFClk;
+	uint32_t ulEClk;
+	uint32_t ulVClk;
+	uint32_t ulDClk;
 	uint16_t usVddc;
 	uint16_t usVddci;
 	uint16_t usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
 	uint8_t acggfxclkspreadenabled;
 	uint8_t acggfxclkspreadpercent;
 	uint16_t acggfxclkspreadfreq;
+
+	uint8_t Vr2_I2C_address;
 };
 
 int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 35bd9870ab10..4e1fd5393845 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -183,10 +183,10 @@ static int get_vddc_lookup_table(
 			ATOM_Tonga_Voltage_Lookup_Record,
 			entries, vddc_lookup_pp_tables, i);
 		record->us_calculated = 0;
-		record->us_vdd = atom_record->usVdd;
-		record->us_cac_low = atom_record->usCACLow;
-		record->us_cac_mid = atom_record->usCACMid;
-		record->us_cac_high = atom_record->usCACHigh;
+		record->us_vdd = le16_to_cpu(atom_record->usVdd);
+		record->us_cac_low = le16_to_cpu(atom_record->usCACLow);
+		record->us_cac_mid = le16_to_cpu(atom_record->usCACMid);
+		record->us_cac_high = le16_to_cpu(atom_record->usCACHigh);
 	}
 
 	*lookup_table = table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index d4bc83e81389..a63e00653324 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -993,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 
 	clocks->num_levels = 0;
 	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
 		clocks->data[i].latency_in_us = latency_required ?
 				smu10_get_mem_latency(hwmgr,
 						pclk_vol_table->entries[i].clk) :
@@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 
 	clocks->num_levels = 0;
 	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
 		clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
 		clocks->num_levels++;
 	}
@@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 }
 
 static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+		void *clock_ranges)
 {
 	struct smu10_hwmgr *data = hwmgr->backend;
+	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
 	Watermarks_t *table = &(data->water_marks_table);
 	int result = 0;
 
@@ -1126,7 +1127,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
 }
 
-static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
 {
 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
 }
@@ -1182,10 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.asic_setup = smu10_setup_asic_task,
 	.power_state_set = smu10_set_power_state_tasks,
 	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
-	.set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+	.powergate_mmhub = smu10_powergate_mmhub,
 	.smus_notify_pwe = smu10_smus_notify_pwe,
 	.gfx_off_control = smu10_gfx_off_control,
 	.display_clock_voltage_request = smu10_display_clock_voltage_request,
+	.powergate_gfx = smu10_gfx_off_control,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
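The recurring `* 10` above is a unit change, not a tuning tweak: pptable/VBIOS clock entries are stored in 10 kHz units, while the pp_clock_levels_* interfaces (and the DC side after this series) expect kHz. Making the conversion a named helper keeps that intent visible; a sketch:

/* BIOS/pptable clocks are in 10 kHz units; the pp/DC interfaces want kHz. */
static inline uint32_t pptable_clk_to_khz(uint32_t clk_10khz)
{
	return clk_10khz * 10;
}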
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 41495621d94a..683b29a99366 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -416,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
  * Powerplay will only control the static per CU Power Gating.
  * Dynamic per CU Power Gating will be done in gfx.
  */
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index be7f66d2b234..fc8f8a6acc72 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -33,6 +33,6 @@ int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
 int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
 int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
 					const uint32_t *msg_id);
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable);
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b89d6fb8559b..077b79938528 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1578,7 +1578,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	data->current_profile_setting.sclk_up_hyst = 0;
 	data->current_profile_setting.sclk_down_hyst = 100;
 	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
-	data->current_profile_setting.bupdate_sclk = 1;
+	data->current_profile_setting.bupdate_mclk = 1;
 	data->current_profile_setting.mclk_up_hyst = 0;
 	data->current_profile_setting.mclk_down_hyst = 100;
 	data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
@@ -3183,7 +3183,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
 			state_entry->ucPCIEGenLow);
 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-			state_entry->ucPCIELaneHigh);
+			state_entry->ucPCIELaneLow);
 
 	performance_level = &(smu7_power_state->performance_levels
 			[smu7_power_state->performance_level_count++]);
@@ -5044,7 +5044,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.get_fan_control_mode = smu7_get_fan_control_mode,
 	.force_clock_level = smu7_force_clock_level,
 	.print_clock_levels = smu7_print_clock_levels,
-	.enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+	.powergate_gfx = smu7_powergate_gfx,
 	.get_sclk_od = smu7_get_sclk_od,
 	.set_sclk_od = smu7_set_sclk_od,
 	.get_mclk_od = smu7_get_mclk_od,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 93a3d022ba47..3effb5583d1f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -652,7 +652,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
 }
 
 int smu_set_watermarks_for_clocks_ranges(void *wt_table,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+		struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
 {
 	uint32_t i;
 	struct watermarks *table = wt_table;
@@ -660,49 +660,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 	if (!table || !wm_with_clock_ranges)
 		return -EINVAL;
 
-	if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+	if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
 		return -EINVAL;
 
-	for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
 		table->WatermarkRow[1][i].MinClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
+			1000);
 		table->WatermarkRow[1][i].MaxClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
 			100);
 		table->WatermarkRow[1][i].MinUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+			1000);
 		table->WatermarkRow[1][i].MaxUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+			1000);
 		table->WatermarkRow[1][i].WmSetting = (uint8_t)
-			wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+			wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
 	}
 
-	for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
 		table->WatermarkRow[0][i].MinClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
+			1000);
 		table->WatermarkRow[0][i].MaxClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
+			1000);
 		table->WatermarkRow[0][i].MinUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+			1000);
 		table->WatermarkRow[0][i].MaxUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+			1000);
 		table->WatermarkRow[0][i].WmSetting = (uint8_t)
-			wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+			wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
 	}
 	return 0;
 }
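The renamed dm_pp fields carry their clock ranges in kHz, while the SMU watermark table stores 16-bit MHz values, hence the cpu_to_le16(... / 1000) pattern throughout. Reduced to one row, and assuming a hypothetical `range` pointer at &wm_with_clock_ranges->wm_dmif_clocks_ranges[i], the conversion is:

/* kHz in, 16-bit little-endian MHz out. */
table->WatermarkRow[1][i].MinClock =
	cpu_to_le16((uint16_t)(range->wm_min_dcfclk_clk_in_khz / 1000));
table->WatermarkRow[1][i].MinUclk =
	cpu_to_le16((uint16_t)(range->wm_min_mem_clk_in_khz / 1000));

(One divisor in the hunk above, on the DMIF max-clock row, remains / 100; whether that is intentional cannot be determined from this patch alone.)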
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index 916cc01e7652..5454289d5226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
 		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
 
 int smu_set_watermarks_for_clocks_ranges(void *wt_table,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+		struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
 
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 3b8d36df52e9..5e771bc119d6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -55,12 +55,6 @@
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 
-#define MEM_FREQ_LOW_LATENCY	25000
-#define MEM_FREQ_HIGH_LATENCY	80000
-#define MEM_LATENCY_HIGH	245
-#define MEM_LATENCY_LOW		35
-#define MEM_LATENCY_ERR		0xFFFF
-
 #define mmDF_CS_AON0_DramBaseAddress0	0x0044
 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX	0
 
@@ -3223,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	/* Find the lowest MCLK frequency that is within
 	 * the tolerable latency defined in DAL
 	 */
-	latency = 0;
+	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
 	for (i = 0; i < data->mclk_latency_table.count; i++) {
 		if ((data->mclk_latency_table.entries[i].latency <= latency) &&
 			(data->mclk_latency_table.entries[i].frequency >=
@@ -4064,28 +4058,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
 			table_info->vdd_dep_on_sclk;
 	uint32_t i;
 
+	clocks->num_levels = 0;
 	for (i = 0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].clk) {
 			clocks->data[clocks->num_levels].clocks_in_khz =
-					dep_table->entries[i].clk;
+					dep_table->entries[i].clk * 10;
 			clocks->num_levels++;
 		}
 	}
 
 }
 
-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
-		uint32_t clock)
-{
-	if (clock >= MEM_FREQ_LOW_LATENCY &&
-			clock < MEM_FREQ_HIGH_LATENCY)
-		return MEM_LATENCY_HIGH;
-	else if (clock >= MEM_FREQ_HIGH_LATENCY)
-		return MEM_LATENCY_LOW;
-	else
-		return MEM_LATENCY_ERR;
-}
-
 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
 		struct pp_clock_levels_with_latency *clocks)
 {
@@ -4094,26 +4077,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
 			table_info->vdd_dep_on_mclk;
 	struct vega10_hwmgr *data = hwmgr->backend;
+	uint32_t j = 0;
 	uint32_t i;
 
-	clocks->num_levels = 0;
-	data->mclk_latency_table.count = 0;
-
 	for (i = 0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].clk) {
-			clocks->data[clocks->num_levels].clocks_in_khz =
-				data->mclk_latency_table.entries
-				[data->mclk_latency_table.count].frequency =
-					dep_table->entries[i].clk;
-			clocks->data[clocks->num_levels].latency_in_us =
-				data->mclk_latency_table.entries
-				[data->mclk_latency_table.count].latency =
-					vega10_get_mem_latency(hwmgr,
-						dep_table->entries[i].clk);
-			clocks->num_levels++;
-			data->mclk_latency_table.count++;
+
+			clocks->data[j].clocks_in_khz =
+				dep_table->entries[i].clk * 10;
+			data->mclk_latency_table.entries[j].frequency =
+				dep_table->entries[i].clk;
+			clocks->data[j].latency_in_us =
+				data->mclk_latency_table.entries[j].latency = 25;
+			j++;
 		}
 	}
+	clocks->num_levels = data->mclk_latency_table.count = j;
 }
 
 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
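Note: the vega10_get_*clocks() hunks in this file multiply the pptable value by 10 to report kHz, which implies the dependency-table entries are stored in 10 kHz units; elsewhere the same values are divided by 100 for MHz. A small sketch of the conversions under that assumption (helper names are hypothetical):

#include <stdint.h>

static inline uint32_t dep_clk_to_khz(uint32_t dep_clk_10khz)
{
	return dep_clk_10khz * 10;   /* 10 kHz units -> kHz */
}

static inline uint32_t dep_clk_to_mhz(uint32_t dep_clk_10khz)
{
	return dep_clk_10khz / 100;  /* 10 kHz units -> MHz */
}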
@@ -4126,7 +4105,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
 	uint32_t i;
 
 	for (i = 0; i < dep_table->count; i++) {
-		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
 		clocks->data[i].latency_in_us = 0;
 		clocks->num_levels++;
 	}
@@ -4142,7 +4121,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
 	uint32_t i;
 
 	for (i = 0; i < dep_table->count; i++) {
-		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
 		clocks->data[i].latency_in_us = 0;
 		clocks->num_levels++;
 	}
@@ -4202,7 +4181,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 	}
 
 	for (i = 0; i < dep_table->count; i++) {
-		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
 		clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
 				entries[dep_table->entries[i].vddInd].us_vdd);
 		clocks->num_levels++;
@@ -4215,9 +4194,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 }
 
 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+		void *clock_range)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
+	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
 	int result = 0;
 
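Note: the signature change above (typed pointer to void *) matches the reworked DC/powerplay interface mentioned in the merge description; each ASIC backend downcasts once at the boundary to the display-manager type it expects. A minimal sketch of the pattern with illustrative types, not the real dm_pp_* definitions:

#include <stddef.h>

struct dm_wm_ranges { int num_wm_sets; };

/* shared callback signature stays opaque... */
typedef int (*set_watermarks_fn)(void *hwmgr, void *clock_ranges);

static int vega_style_set_watermarks(void *hwmgr, void *clock_ranges)
{
	/* ...and the backend casts exactly once at the entry point */
	struct dm_wm_ranges *ranges = clock_ranges;

	(void)hwmgr;
	return ranges->num_wm_sets >= 0 ? 0 : -1;
}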
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e2098824d..57492878874f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 
 	data->registry_data.disallowed_features = 0x0;
 	data->registry_data.od_state_in_dc_support = 0;
+	data->registry_data.thermal_support = 1;
 	data->registry_data.skip_baco_hardware = 0;
 
 	data->registry_data.log_avfs_param = 0;
@@ -453,37 +454,30 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
  */
 static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
 {
-	dpm_state->soft_min_level = 0xff;
-	dpm_state->soft_max_level = 0xff;
-	dpm_state->hard_min_level = 0xff;
-	dpm_state->hard_max_level = 0xff;
+	dpm_state->soft_min_level = 0x0;
+	dpm_state->soft_max_level = 0xffff;
+	dpm_state->hard_min_level = 0x0;
+	dpm_state->hard_max_level = 0xffff;
 }
 
-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
-		PPCLK_e clkID, uint32_t *num_dpm_level)
+static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+		PPCLK_e clk_id, uint32_t *num_of_levels)
 {
-	int result;
-	/*
-	 * SMU expects the Clock ID to be in the top 16 bits.
-	 * Lower 16 bits specify the level however 0xFF is a
-	 * special argument the returns the total number of levels
-	 */
-	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
-		PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
-		"[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
-		return -EINVAL);
-
-	result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+	int ret = 0;
 
-	PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
-		"[GetNumberDPMLevel] Number of DPM levels is greater than limit",
-		return -EINVAL);
+	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_GetDpmFreqByIndex,
+			(clk_id << 16 | 0xFF));
+	PP_ASSERT_WITH_CODE(!ret,
+			"[GetNumOfDpmLevel] failed to get dpm levels!",
+			return ret);
 
-	PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
-		"[GetNumberDPMLevel] Number of CLK Levels is zero!",
-		return -EINVAL);
+	vega12_read_arg_from_smc(hwmgr, num_of_levels);
+	PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+			"[GetNumOfDpmLevel] number of clk levels is invalid!",
+			return -EINVAL);
 
-	return result;
+	return ret;
 }
 
 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
@@ -509,6 +503,31 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
+static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+		struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
+{
+	int ret = 0;
+	uint32_t i, num_of_levels, clk;
+
+	ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
+	PP_ASSERT_WITH_CODE(!ret,
+			"[SetupSingleDpmTable] failed to get clk levels!",
+			return ret);
+
+	dpm_table->count = num_of_levels;
+
+	for (i = 0; i < num_of_levels; i++) {
+		ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
+		PP_ASSERT_WITH_CODE(!ret,
+			"[SetupSingleDpmTable] failed to get clk of specific level!",
+			return ret);
+		dpm_table->dpm_levels[i].value = clk;
+		dpm_table->dpm_levels[i].enabled = true;
+	}
+
+	return ret;
+}
+
 /*
  * This function is to initialize all DPM state tables
  * for SMU based on the dependency table.
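Note: vega12_setup_single_dpm_table() above collapses eight copies of the same query loop. The SMU protocol it wraps, per the comment deleted in the previous hunk: the clock ID rides in the top 16 bits of the message argument, and level index 0xFF asks for the number of levels instead of a frequency. A standalone sketch — smu_msg()/smu_arg() and the opcode value are hypothetical stand-ins for the smum_* helpers:

#include <stdint.h>

extern int smu_msg(uint32_t msg, uint32_t arg);   /* stand-in for smum_send_msg_to_smc_with_parameter() */
extern int smu_arg(uint32_t *out);                /* stand-in for vega12_read_arg_from_smc() */

#define MSG_GET_DPM_FREQ_BY_INDEX 0x12            /* illustrative opcode */
#define NUM_OF_LEVELS_QUERY 0xFF

static int read_dpm_level(uint32_t clk_id, uint32_t index, uint32_t *freq)
{
	/* clock ID in the upper halfword, level index (or 0xFF) in the lower */
	if (smu_msg(MSG_GET_DPM_FREQ_BY_INDEX, (clk_id << 16) | (index & 0xFF)))
		return -1;           /* message rejected by firmware */
	return smu_arg(freq);        /* result read back from the argument register */
}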
@@ -519,224 +538,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
  */
 static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
-	uint32_t num_levels, i, clock;
 
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
-
 	struct vega12_single_dpm_table *dpm_table;
+	int ret = 0;
 
 	memset(&data->dpm_table, 0, sizeof(data->dpm_table));
 
-	/* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+	/* socclk */
 	dpm_table = &(data->dpm_table.soc_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_SOCCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* gfxclk */
 	dpm_table = &(data->dpm_table.gfx_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_GFXCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
-	/* Initialize Mclk DPM table based on allow Mclk values */
-	dpm_table = &(data->dpm_table.mem_table);
 
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_UCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	/* memclk */
+	dpm_table = &(data->dpm_table.mem_table);
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* eclk */
 	dpm_table = &(data->dpm_table.eclk_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_ECLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_VCE].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* vclk */
 	dpm_table = &(data->dpm_table.vclk_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_VCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* dclk */
 	dpm_table = &(data->dpm_table.dclk_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_DCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
-	/* Assume there is no headless Vega12 for now */
+	/* dcefclk */
 	dpm_table = &(data->dpm_table.dcef_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_DCEFCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_DCEFCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* pixclk */
 	dpm_table = &(data->dpm_table.pixel_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_PIXCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_PIXCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
-	}
-
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
+				return ret);
+	} else
+		dpm_table->count = 0;
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* dispclk */
 	dpm_table = &(data->dpm_table.display_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_DISPCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_DISPCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
-	}
-
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
+				return ret);
+	} else
+		dpm_table->count = 0;
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* phyclk */
 	dpm_table = &(data->dpm_table.phy_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_PHYCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_PHYCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
-	}
-
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
+				return ret);
+	} else
+		dpm_table->count = 0;
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
 	/* save a copy of the default DPM table */
@@ -803,6 +734,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
 	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
 	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
 	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
 	smum_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetMinDeepSleepDcefclk,
 			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
@@ -844,6 +778,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
+{
+	struct vega12_hwmgr *data =
+			(struct vega12_hwmgr *)(hwmgr->backend);
+
+	data->uvd_power_gated = true;
+	data->vce_power_gated = true;
+
+	if (data->smu_features[GNLD_DPM_UVD].enabled)
+		data->uvd_power_gated = false;
+
+	if (data->smu_features[GNLD_DPM_VCE].enabled)
+		data->vce_power_gated = false;
+}
+
 static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data =
@@ -862,12 +811,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
 			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
 			data->smu_features[i].enabled = enabled;
 			data->smu_features[i].supported = enabled;
-			PP_ASSERT(
-				!data->smu_features[i].allowed || enabled,
-				"[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
 		}
 	}
 
+	vega12_init_powergate_state(hwmgr);
+
 	return 0;
 }
 
@@ -923,6 +871,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
 	return result;
 }
 
+static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+		PPCLK_e clkid, struct vega12_clock_range *clock)
+{
+	/* AC Max */
+	PP_ASSERT_WITH_CODE(
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+		"[GetClockRanges] Failed to get max ac clock from SMC!",
+		return -EINVAL);
+	vega12_read_arg_from_smc(hwmgr, &(clock->ACMax));
+
+	/* AC Min */
+	PP_ASSERT_WITH_CODE(
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+		"[GetClockRanges] Failed to get min ac clock from SMC!",
+		return -EINVAL);
+	vega12_read_arg_from_smc(hwmgr, &(clock->ACMin));
+
+	/* DC Max */
+	PP_ASSERT_WITH_CODE(
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+		"[GetClockRanges] Failed to get max dc clock from SMC!",
+		return -EINVAL);
+	vega12_read_arg_from_smc(hwmgr, &(clock->DCMax));
+
+	return 0;
+}
+
+static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+{
+	struct vega12_hwmgr *data =
+			(struct vega12_hwmgr *)(hwmgr->backend);
+	uint32_t i;
+
+	for (i = 0; i < PPCLK_COUNT; i++)
+		PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
+					i, &(data->clk_range[i])),
+				"Failed to get clk range from SMC!",
+				return -EINVAL);
+
+	return 0;
+}
+
 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
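Note: the two helpers above query AC min/max and DC max once per clock domain when DPM is enabled and cache them in data->clk_range[]; vega12_get_clock_ranges() further down then becomes a table lookup instead of an SMU round trip per call. A condensed sketch of the consumer side, with an illustrative cache-entry type:

#include <stdint.h>

struct clock_range { uint32_t ACMin, ACMax, DCMax; };  /* mirrors vega12_clock_range */

/* one firmware round-trip per domain at init; afterwards min/max queries
 * are plain array reads */
static uint32_t cached_clock_limit(const struct clock_range *tbl,
		uint32_t clk_id, int want_max)
{
	return want_max ? tbl[clk_id].ACMax : tbl[clk_id].ACMin;
}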
@@ -950,6 +940,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 			"Failed to power control set level!",
 			result = tmp_result);
 
+	result = vega12_get_all_clock_ranges(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to get all clock ranges!",
+			return result);
+
 	result = vega12_odn_initialize_default_settings(hwmgr);
 	PP_ASSERT_WITH_CODE(!result,
 			"Failed to power control set level!",
@@ -978,76 +973,172 @@ static uint32_t vega12_find_lowest_dpm_level(
 			break;
 	}
 
+	if (i >= table->count) {
+		i = 0;
+		table->dpm_levels[i].enabled = true;
+	}
+
 	return i;
 }
 
 static uint32_t vega12_find_highest_dpm_level(
 		struct vega12_single_dpm_table *table)
 {
-	uint32_t i = 0;
+	int32_t i = 0;
+	PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
+			"[FindHighestDPMLevel] DPM Table has too many entries!",
+			return MAX_REGULAR_DPM_NUMBER - 1);
 
-	if (table->count <= MAX_REGULAR_DPM_NUMBER) {
-		for (i = table->count; i > 0; i--) {
-			if (table->dpm_levels[i - 1].enabled)
-				return i - 1;
-		}
-	} else {
-		pr_info("DPM Table Has Too Many Entries!");
-		return MAX_REGULAR_DPM_NUMBER - 1;
+	for (i = table->count - 1; i >= 0; i--) {
+		if (table->dpm_levels[i].enabled)
+			break;
 	}
 
-	return i;
+	if (i < 0) {
+		i = 0;
+		table->dpm_levels[i].enabled = true;
+	}
+
+	return (uint32_t)i;
 }
 
 static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = hwmgr->backend;
-	if (data->smc_state_table.gfx_boot_level !=
-			data->dpm_table.gfx_table.dpm_state.soft_min_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMinByFreq,
-			PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
-		data->dpm_table.gfx_table.dpm_state.soft_min_level =
-			data->smc_state_table.gfx_boot_level;
+	uint32_t min_freq;
+	int ret = 0;
+
+	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+		min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min gfxclk !",
+					return ret);
 	}
 
-	if (data->smc_state_table.mem_boot_level !=
-			data->dpm_table.mem_table.dpm_state.soft_min_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMinByFreq,
-			PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
-		data->dpm_table.mem_table.dpm_state.soft_min_level =
-			data->smc_state_table.mem_boot_level;
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min memclk !",
+					return ret);
+
+		min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetHardMinByFreq,
+					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set hard min memclk !",
+					return ret);
 	}
 
-	return 0;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min vclk!",
+					return ret);
+
+		min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min dclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_VCE].enabled) {
+		min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min eclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+		min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min socclk!",
+					return ret);
+	}
+
+	return ret;
 
 }
 
 static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = hwmgr->backend;
-	if (data->smc_state_table.gfx_max_level !=
-			data->dpm_table.gfx_table.dpm_state.soft_max_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMaxByFreq,
-			/* plus the vale by 1 to align the resolution */
-			PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
-		data->dpm_table.gfx_table.dpm_state.soft_max_level =
-			data->smc_state_table.gfx_max_level;
+	uint32_t max_freq;
+	int ret = 0;
+
+	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+		max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max gfxclk!",
+					return ret);
 	}
 
-	if (data->smc_state_table.mem_max_level !=
-			data->dpm_table.mem_table.dpm_state.soft_max_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMaxByFreq,
-			/* plus the vale by 1 to align the resolution */
-			PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
-		data->dpm_table.mem_table.dpm_state.soft_max_level =
-			data->smc_state_table.mem_max_level;
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max memclk!",
+					return ret);
 	}
 
-	return 0;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max vclk!",
+					return ret);
+
+		max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max dclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_VCE].enabled) {
+		max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max eclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+		max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max socclk!",
+					return ret);
+	}
+
+	return ret;
 }
 
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
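Note: throughout the rewrite above, the dpm_state soft/hard levels now hold frequencies rather than level indices, and every SetSoft{Min,Max}ByFreq / SetHardMinByFreq message packs the clock domain into the upper halfword and the frequency (in the SMU's native units, per the surrounding code) into the lower. A sketch of that encoding:

#include <stdint.h>

static inline uint32_t pack_clk_freq(uint32_t clk_id, uint32_t freq)
{
	/* domain in bits 31:16, frequency clamped to bits 15:0 */
	return (clk_id << 16) | (freq & 0xffff);
}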
@@ -1136,8 +1227,8 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
 
 	*gfx_freq = 0;
 
-	PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
 			"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
 			return -1);
 	PP_ASSERT_WITH_CODE(
@@ -1306,9 +1397,9 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 			(struct vega12_hwmgr *)(hwmgr->backend);
 	struct PP_Clocks min_clocks = {0};
 	struct pp_display_clock_request clock_req;
-	uint32_t clk_request;
 
-	if (hwmgr->display_config->num_display > 1)
+	if ((hwmgr->display_config->num_display > 1) &&
+	     !hwmgr->display_config->multi_monitor_in_sync)
 		vega12_notify_smc_display_change(hwmgr, false);
 	else
 		vega12_notify_smc_display_change(hwmgr, true);
@@ -1333,15 +1424,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 		}
 	}
 
-	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
-		clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
-		PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
-			"[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
-			return -1);
-		data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
-	}
-
 	return 0;
 }
 
@@ -1350,12 +1432,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
 
-	data->smc_state_table.gfx_boot_level =
-	data->smc_state_table.gfx_max_level =
-		vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.mem_boot_level =
-	data->smc_state_table.mem_max_level =
-		vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+	uint32_t soft_level;
+
+	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+	data->dpm_table.gfx_table.dpm_state.soft_min_level =
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+	data->dpm_table.mem_table.dpm_state.soft_min_level =
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+		data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
 			"Failed to upload boot level to highest!",
@@ -1372,13 +1461,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
+	uint32_t soft_level;
+
+	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
 
-	data->smc_state_table.gfx_boot_level =
-	data->smc_state_table.gfx_max_level =
-		vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.mem_boot_level =
-	data->smc_state_table.mem_max_level =
-		vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+	data->dpm_table.gfx_table.dpm_state.soft_min_level =
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+	data->dpm_table.mem_table.dpm_state.soft_min_level =
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+		data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
 			"Failed to upload boot level to highest!",
@@ -1394,17 +1489,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 
 static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
-	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
-	data->smc_state_table.gfx_boot_level =
-		vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.gfx_max_level =
-		vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.mem_boot_level =
-		vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
-	data->smc_state_table.mem_max_level =
-		vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
-
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
 			"Failed to upload DPM Bootup Levels!",
 			return -1);
@@ -1412,22 +1496,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
 			"Failed to upload DPM Max Levels!",
 			return -1);
+
 	return 0;
 }
 
-#if 0
 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
 				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
 {
-	struct phm_ppt_v2_information *table_info =
-			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+	struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+	struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+	struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
 
-	if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
-		table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
-		table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+	*sclk_mask = 0;
+	*mclk_mask = 0;
+	*soc_mask = 0;
+
+	if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+	    mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
+	    soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
 		*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
-		*soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
 		*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+		*soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
 	}
 
 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -1435,13 +1525,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
 		*mclk_mask = 0;
 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
-		*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
-		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
-		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+		*sclk_mask = gfx_dpm_table->count - 1;
+		*mclk_mask = mem_dpm_table->count - 1;
+		*soc_mask = soc_dpm_table->count - 1;
 	}
+
 	return 0;
 }
-#endif
 
 static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
@@ -1465,11 +1555,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 		enum amd_dpm_forced_level level)
 {
 	int ret = 0;
-#if 0
 	uint32_t sclk_mask = 0;
 	uint32_t mclk_mask = 0;
 	uint32_t soc_mask = 0;
-#endif
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1485,27 +1573,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-#if 0
 		ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
 		if (ret)
 			return ret;
-		vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
-		vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
-#endif
+		vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+		vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
 		break;
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
 	}
-#if 0
-	if (!ret) {
-		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-			vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
-		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-			vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
-	}
-#endif
+
 	return ret;
 }
 
@@ -1539,24 +1618,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
 		PPCLK_e clock_select,
 		bool max)
 {
-	int result;
-	*clock = 0;
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 
-	if (max) {
-		PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
-			"[GetClockRanges] Failed to get max clock from SMC!",
-			return -1);
-		result = vega12_read_arg_from_smc(hwmgr, clock);
-	} else {
-		PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
-			"[GetClockRanges] Failed to get min clock from SMC!",
-			return -1);
-		result = vega12_read_arg_from_smc(hwmgr, clock);
-	}
+	if (max)
+		*clock = data->clk_range[clock_select].ACMax;
+	else
+		*clock = data->clk_range[clock_select].ACMin;
 
-	return result;
+	return 0;
 }
 
 static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
@@ -1571,12 +1640,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
 		return -1;
 
 	dpm_table = &(data->dpm_table.gfx_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
 		clocks->data[i].clocks_in_khz =
-			dpm_table->dpm_levels[i].value * 100;
+			dpm_table->dpm_levels[i].value * 1000;
 
 		clocks->data[i].latency_in_us = 0;
 	}
@@ -1603,13 +1672,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
 		return -1;
 
 	dpm_table = &(data->dpm_table.mem_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
-		clocks->data[i].clocks_in_khz =
-			dpm_table->dpm_levels[i].value * 100;
-
+		clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
+		data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
 		clocks->data[i].latency_in_us =
 			data->mclk_latency_table.entries[i].latency =
 				vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -1633,12 +1701,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
 
 
 	dpm_table = &(data->dpm_table.dcef_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
 		clocks->data[i].clocks_in_khz =
-			dpm_table->dpm_levels[i].value * 100;
+			dpm_table->dpm_levels[i].value * 1000;
 
 		clocks->data[i].latency_in_us = 0;
 	}
@@ -1661,12 +1729,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
 
 
 	dpm_table = &(data->dpm_table.soc_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
 		clocks->data[i].clocks_in_khz =
-			dpm_table->dpm_levels[i].value * 100;
+			dpm_table->dpm_levels[i].value * 1000;
 
 		clocks->data[i].latency_in_us = 0;
 	}
@@ -1713,99 +1781,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 }
 
 static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+		void *clock_ranges)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
-	int result = 0;
-	uint32_t i;
+	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
 
 	if (!data->registry_data.disable_water_mark &&
 			data->smu_features[GNLD_DPM_DCEFCLK].supported &&
 			data->smu_features[GNLD_DPM_SOCCLK].supported) {
-		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
-			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
-					wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
-		}
-
-		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
-			table->WatermarkRow[WM_SOCCLK][i].MinClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
-					wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
-		}
+		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
 		data->water_marks_bitmap |= WaterMarksExist;
 		data->water_marks_bitmap &= ~WaterMarksLoaded;
 	}
 
-	return result;
+	return 0;
 }
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
-	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
+	uint32_t soft_min_level, soft_max_level;
+	int ret = 0;
 
 	switch (type) {
 	case PP_SCLK:
-		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
-		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+		soft_min_level = mask ? (ffs(mask) - 1) : 0;
+		soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+		data->dpm_table.gfx_table.dpm_state.soft_min_level =
+			data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+			data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+		ret = vega12_upload_dpm_min_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload boot level to lowest!",
-			return -EINVAL);
+			return ret);
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+		ret = vega12_upload_dpm_max_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload dpm max level to highest!",
-			return -EINVAL);
+			return ret);
 		break;
 
 	case PP_MCLK:
-		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
-		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+		soft_min_level = mask ? (ffs(mask) - 1) : 0;
+		soft_max_level = mask ? (fls(mask) - 1) : 0;
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+		data->dpm_table.mem_table.dpm_state.soft_min_level =
+			data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+			data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+		ret = vega12_upload_dpm_min_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload boot level to lowest!",
-			return -EINVAL);
+			return ret);
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+		ret = vega12_upload_dpm_max_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload dpm max level to highest!",
-			return -EINVAL);
+			return ret);
 
 		break;
 
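Note: the sysfs mask semantics in the hunk above: the lowest set bit of the user-supplied mask becomes the soft-min level index and the highest set bit the soft-max, and those indices are now translated to frequencies before being stored in dpm_state. A portable sketch (fls32 is a stand-in for the kernel-only fls()):

#include <stdint.h>
#include <strings.h>	/* ffs() */

static int fls32(uint32_t v)
{
	int n = 0;
	while (v) { n++; v >>= 1; }
	return n;
}

static void mask_to_levels(uint32_t mask, uint32_t *min_lvl, uint32_t *max_lvl)
{
	*min_lvl = mask ? (uint32_t)(ffs(mask) - 1) : 0;	/* lowest set bit */
	*max_lvl = mask ? (uint32_t)(fls32(mask) - 1) : 0;	/* highest set bit */
}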
@@ -1838,8 +1876,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 				return -1);
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
-				i, clocks.data[i].clocks_in_khz / 100,
-				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
+				i, clocks.data[i].clocks_in_khz / 1000,
+				(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
 		break;
 
 	case PP_MCLK:
@@ -1854,8 +1892,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 				return -1);
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
-				i, clocks.data[i].clocks_in_khz / 100,
-				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
+				i, clocks.data[i].clocks_in_khz / 1000,
+				(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
 		break;
 
 	case PP_PCIE:
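Note: with level values now reported in kHz (see the *1000 hunks earlier in this file), the MHz display divides by 1000 and the "current level" marker compares in MHz as well. A trivial sketch of the convention:

#include <stdio.h>
#include <stdint.h>

static void print_level(int i, uint32_t khz, uint32_t now_mhz)
{
	/* kHz storage, MHz display; '*' marks the currently active level */
	printf("%d: %uMhz %s\n", i, khz / 1000, (khz / 1000 == now_mhz) ? "*" : "");
}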
@@ -1867,6 +1905,205 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 	return size;
 }
 
+static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+	struct vega12_single_dpm_table *dpm_table;
+	bool vblank_too_short = false;
+	bool disable_mclk_switching;
+	uint32_t i, latency;
+
+	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+				  !hwmgr->display_config->multi_monitor_in_sync) ||
+				  vblank_too_short;
+	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+
+	/* gfxclk */
+	dpm_table = &(data->dpm_table.gfx_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	/* memclk */
+	dpm_table = &(data->dpm_table.mem_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	/* honour DAL's UCLK Hardmin */
+	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
+	/* Hardmin is dependent on displayconfig */
+	if (disable_mclk_switching) {
+		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+			if (data->mclk_latency_table.entries[i].latency <= latency) {
+				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
1978 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
1979 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
1980 break;
1981 }
1982 }
1983 }
1984 }
1985
1986 if (hwmgr->display_config->nb_pstate_switch_disable)
1987 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
1988
1989 /* vclk */
1990 dpm_table = &(data->dpm_table.vclk_table);
1991 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
1992 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
1993 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
1994 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
1995
1996 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
1997 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
1998 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
1999 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
2000 }
2001
2002 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2003 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2004 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2005 }
2006 }
2007
2008 /* dclk */
2009 dpm_table = &(data->dpm_table.dclk_table);
2010 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2011 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2012 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2013 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2014
2015 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2016 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
2017 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
2018 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
2019 }
2020
2021 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2022 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2023 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2024 }
2025 }
2026
2027 /* socclk */
2028 dpm_table = &(data->dpm_table.soc_table);
2029 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2030 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2031 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2032 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2033
2034 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2035 if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
2036 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
2037 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
2038 }
2039
2040 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2041 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2042 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2043 }
2044 }
2045
2046 /* eclk */
2047 dpm_table = &(data->dpm_table.eclk_table);
2048 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2049 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2050 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2051 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2052
2053 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2054 if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
2055 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
2056 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
2057 }
2058
2059 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2060 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2061 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2062 }
2063 }
2064
2065 return 0;
2066}
2067
2068static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
2069 struct vega12_single_dpm_table *dpm_table)
2070{
2071 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2072 int ret = 0;
2073
2074 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2075 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
2076 "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
2077 return -EINVAL);
2078 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
2079 "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
2080 return -EINVAL);
2081
2082 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2083 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2084 PPSMC_MSG_SetHardMinByFreq,
2085 (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
2086 "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
2087 return ret);
2088 }
2089
2090 return ret;
2091}
2092
2093static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2094{
2095 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2096 int ret = 0;
2097
2098 smum_send_msg_to_smc_with_parameter(hwmgr,
2099 PPSMC_MSG_NumOfDisplays, 0);
2100
2101 ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
2102 &data->dpm_table.mem_table);
2103
2104 return ret;
2105}
2106
1870static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 2107static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
1871{ 2108{
1872 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); 2109 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
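
One detail of vega12_set_uclk_to_highest_dpm_level() above: PPSMC_MSG_SetHardMinByFreq takes a single 32-bit parameter with the clock ID packed into the upper 16 bits and the target level value in the lower 16. A small sketch of that encoding (the clock ID shown is illustrative; the real PPCLK values come from the SMU interface headers):

    /* Sketch of the SetHardMinByFreq argument packing used above. */
    #include <stdint.h>

    static uint32_t smc_freq_arg(uint16_t clk_id, uint16_t level_value)
    {
        return ((uint32_t)clk_id << 16) | level_value;  /* clock ID | value */
    }

    /* e.g. with a hypothetical clk_id of 5 and a level value of 1000:
     * smc_freq_arg(5, 1000) == 0x000503E8 */
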
@@ -1911,6 +2148,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 
+	if (data->vce_power_gated == bgate)
+		return;
+
 	data->vce_power_gated = bgate;
 	vega12_enable_disable_vce_dpm(hwmgr, !bgate);
 }
@@ -1919,6 +2159,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 
+	if (data->uvd_power_gated == bgate)
+		return;
+
 	data->uvd_power_gated = bgate;
 	vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
 }
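
Both gating hunks add the same idempotence guard: if the requested gate state already matches the cached one, the handler returns early instead of re-sending the UVD/VCE DPM enable/disable to the SMU. The pattern in isolation (a sketch, not driver code):

    /* Skip redundant power-gate transitions; gating a block also
     * disables its DPM, hence the inverted argument. */
    static void power_gate_block(bool *cached_gated, bool bgate,
                                 void (*set_dpm_enabled)(bool enable))
    {
        if (*cached_gated == bgate)
            return;                 /* already in the requested state */
        *cached_gated = bgate;
        set_dpm_enabled(!bgate);
    }
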
@@ -2113,6 +2356,10 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
 	.display_clock_voltage_request = vega12_display_clock_voltage_request,
 	.force_clock_level = vega12_force_clock_level,
 	.print_clock_levels = vega12_print_clock_levels,
+	.apply_clocks_adjust_rules =
+		vega12_apply_clocks_adjust_rules,
+	.pre_display_config_changed =
+		vega12_pre_display_configuration_changed_task,
 	.display_config_changed = vega12_display_configuration_changed_task,
 	.powergate_uvd = vega12_power_gate_uvd,
 	.powergate_vce = vega12_power_gate_vce,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e81ded1ec198..e17237c90eea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
 	uint32_t    mem_clock;
 	uint32_t    soc_clock;
 	uint32_t    dcef_clock;
+	uint32_t    eclock;
+	uint32_t    dclock;
+	uint32_t    vclock;
 };
 
 #define DPMTABLE_OD_UPDATE_SCLK     0x00000001
@@ -301,6 +304,12 @@ struct vega12_odn_fan_table {
 	bool	force_fan_pwm;
 };
 
+struct vega12_clock_range {
+	uint32_t	ACMax;
+	uint32_t	ACMin;
+	uint32_t	DCMax;
+};
+
 struct vega12_hwmgr {
 	struct vega12_dpm_table          dpm_table;
 	struct vega12_dpm_table          golden_dpm_table;
@@ -382,6 +391,8 @@ struct vega12_hwmgr {
 	uint32_t                       smu_version;
 	struct smu_features            smu_features[GNLD_FEATURES_MAX];
 	struct vega12_smc_state_table  smc_state_table;
+
+	struct vega12_clock_range      clk_range[PPCLK_COUNT];
 };
 
 #define VEGA12_DPM2_NEAR_TDP_DEC                      10
@@ -432,6 +443,8 @@ struct vega12_hwmgr {
 #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
 #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
 #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
+#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
+#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
 
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
 
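
The new vega12_clock_range struct and the clk_range[PPCLK_COUNT] array give the hwmgr per-clock AC/DC frequency limits. A hedged sketch of how such ranges could be applied when validating a requested frequency (only the struct fields come from the header above; the helper is invented for illustration):

    #include <stdint.h>

    struct vega12_clock_range {
        uint32_t ACMax;
        uint32_t ACMin;
        uint32_t DCMax;
    };

    /* Clamp a requested frequency into the AC limits for one clock. */
    static uint32_t clamp_to_ac_range(const struct vega12_clock_range *r,
                                      uint32_t freq)
    {
        if (freq < r->ACMin)
            return r->ACMin;
        if (freq > r->ACMax)
            return r->ACMax;
        return freq;
    }
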
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 888ddca902d8..f4f366b26fd1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -224,11 +224,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
 	ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
 	ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
 
-	/* 0xFFFF will disable the ACG feature */
-	if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
-		ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
-		ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
-	}
+	ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a202247c9894..429c9c4322da 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 		enum amd_pp_clock_type type,
 		struct pp_clock_levels_with_voltage *clocks);
 extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+		void *clock_ranges);
 extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 		struct pp_display_clock_request *clock);
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 40c98ca5feb7..b3363f26039a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -293,8 +293,7 @@ struct pp_hwmgr_func {
 	int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr,
 			enum amd_pp_clock_type type,
 			struct pp_clock_levels_with_voltage *clocks);
-	int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr,
-			struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+	int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges);
 	int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
 			struct pp_display_clock_request *clock);
 	int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
@@ -302,7 +301,7 @@ struct pp_hwmgr_func {
 	int (*power_off_asic)(struct pp_hwmgr *hwmgr);
 	int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
 	int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
-	int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+	int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
 	int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
 	int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
 	int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
@@ -327,7 +326,7 @@ struct pp_hwmgr_func {
 			enum PP_OD_DPM_TABLE_COMMAND type,
 			long *input, uint32_t size);
 	int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
-	int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+	int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
 	int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
 };
 
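
The renames above (enable_per_cu_power_gating to powergate_gfx, set_mmhub_powergating_by_smu to powergate_mmhub) name the callbacks after what they do. Callers reach them through the pp_hwmgr_func table, typically with a NULL check since not every ASIC implements every hook; a sketch of the usual dispatch shape (the wrapper name is assumed for illustration):

    static int pp_powergate_mmhub(struct pp_hwmgr *hwmgr)
    {
        if (!hwmgr->hwmgr_func->powergate_mmhub)
            return -EINVAL;         /* hook not implemented on this ASIC */
        return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
    }
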
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index 2f8a3b983cce..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,10 +412,10 @@ typedef struct {
 	QuadraticInt_t    ReservedEquation2;
 	QuadraticInt_t    ReservedEquation3;
 
 	uint16_t     MinVoltageUlvGfx;
 	uint16_t     MinVoltageUlvSoc;
 
 	uint32_t     Reserved[14];
 
 
 
@@ -483,9 +483,9 @@ typedef struct {
 	uint8_t      padding8_4;
 
 
 	uint8_t      PllGfxclkSpreadEnabled;
 	uint8_t      PllGfxclkSpreadPercent;
 	uint16_t     PllGfxclkSpreadFreq;
 
 	uint8_t      UclkSpreadEnabled;
 	uint8_t      UclkSpreadPercent;
@@ -495,11 +495,14 @@ typedef struct {
 	uint8_t      SocclkSpreadPercent;
 	uint16_t     SocclkSpreadFreq;
 
 	uint8_t      AcgGfxclkSpreadEnabled;
 	uint8_t      AcgGfxclkSpreadPercent;
 	uint16_t     AcgGfxclkSpreadFreq;
 
-	uint32_t     BoardReserved[10];
+	uint8_t      Vr2_I2C_address;
+	uint8_t      padding_vr2[3];
+
+	uint32_t     BoardReserved[9];
 
 
 	uint32_t     MmHubPadding[7];
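
The last hunk keeps the driver/SMU shared PPTable layout stable: the new uint8_t Vr2_I2C_address plus three bytes of explicit padding occupy exactly the four bytes freed by shrinking BoardReserved from 10 to 9 dwords, so every later field (including MmHubPadding) keeps its offset. A compile-time check of that arithmetic, as a sketch:

    #include <stdint.h>
    #include <assert.h>

    struct tail_old { uint32_t BoardReserved[10]; };
    struct tail_new {
        uint8_t  Vr2_I2C_address;
        uint8_t  padding_vr2[3];    /* 1 + 3 bytes == one uint32_t slot */
        uint32_t BoardReserved[9];
    };

    static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
                  "SMU-visible structure size must not change");
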