aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlex Deucher <alexander.deucher@amd.com>2017-08-29 10:55:05 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-09-01 15:01:21 -0400
commitcd00a424d26f9b954f5a084b88800e859fc0c52f (patch)
tree87a0f8495f1eb9be98fa4dd6dc97d5b4a1ea1cb9
parent56d11d580974958bd3c7ae4a42368ae22cb50354 (diff)
drm/amd/powerplay: fix sclk setting for profile mode for CZ/ST
Need to select dpm0 to avoid clock fluctuations.

Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c47
1 file changed, 1 insertion(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 9f2c0378c059..b9c61ece6784 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1310,48 +1310,9 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
-{
-	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_SetSclkSoftMin,
-				cz_get_sclk_level(hwmgr,
-				sclk,
-				PPSMC_MSG_SetSclkSoftMin));
-
-	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_SetSclkSoftMax,
-				cz_get_sclk_level(hwmgr,
-				sclk,
-				PPSMC_MSG_SetSclkSoftMax));
-	return 0;
-}
-
-static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
-{
-	struct phm_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.vddc_dependency_on_sclk;
-	int32_t tmp_sclk;
-	int32_t count;
-
-	tmp_sclk = table->entries[table->count-1].clk * 70 / 100;
-
-	for (count = table->count-1; count >= 0; count--) {
-		if (tmp_sclk >= table->entries[count].clk) {
-			tmp_sclk = table->entries[count].clk;
-			*sclk = tmp_sclk;
-			break;
-		}
-	}
-	if (count < 0)
-		*sclk = table->entries[0].clk;
-
-	return 0;
-}
-
 static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 				enum amd_dpm_forced_level level)
 {
-	uint32_t sclk = 0;
 	int ret = 0;
 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
 			AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -1389,6 +1350,7 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 		break;
 	case AMD_DPM_FORCED_LEVEL_LOW:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
 		ret = cz_phm_force_dpm_lowest(hwmgr);
 		if (ret)
 			return ret;
@@ -1400,13 +1362,6 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 			return ret;
 		hwmgr->dpm_level = level;
 		break;
-	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-		ret = cz_get_profiling_clk(hwmgr, &sclk);
-		if (ret)
-			return ret;
-		hwmgr->dpm_level = level;
-		cz_phm_force_dpm_sclk(hwmgr, sclk);
-		break;
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
 		hwmgr->dpm_level = level;
 		break;