aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRex Zhu <Rex.Zhu@amd.com>2017-08-29 04:08:56 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-09-12 14:22:23 -0400
commit9947f7047f2b57735fa5c76d63728642a1850527 (patch)
tree0c3a5d2e2d392d1d40a643b63f8699e3e9396e1d
parent47047263c52779f1f3393c32e3e53661b53a372e (diff)
drm/amd/powerplay: add UMD P-state in powerplay.
This feature is for UMD to run benchmarks in a power state that is as steady as possible: the KMD needs to hold the power state as stable as it can. The KMD now supports four levels: profile_standard, peak, min_sclk, min_mclk. Move the common related code to amd_powerplay.c. Reviewed-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Rex Zhu <Rex.Zhu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c47
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c58
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c53
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h4
5 files changed, 66 insertions, 132 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 94bed3c08161..75c810f93e9e 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -30,7 +30,6 @@
30#include "pp_instance.h" 30#include "pp_instance.h"
31#include "power_state.h" 31#include "power_state.h"
32 32
33
34static inline int pp_check(struct pp_instance *handle) 33static inline int pp_check(struct pp_instance *handle)
35{ 34{
36 if (handle == NULL || handle->pp_valid != PP_VALID) 35 if (handle == NULL || handle->pp_valid != PP_VALID)
@@ -287,6 +286,42 @@ static int pp_dpm_fw_loading_complete(void *handle)
287 return 0; 286 return 0;
288} 287}
289 288
289static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
290 enum amd_dpm_forced_level *level)
291{
292 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
293 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
294 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
295 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
296
297 if (!(hwmgr->dpm_level & profile_mode_mask)) {
298 /* enter umd pstate, save current level, disable gfx cg*/
299 if (*level & profile_mode_mask) {
300 hwmgr->saved_dpm_level = hwmgr->dpm_level;
301 hwmgr->en_umd_pstate = true;
302 cgs_set_clockgating_state(hwmgr->device,
303 AMD_IP_BLOCK_TYPE_GFX,
304 AMD_CG_STATE_UNGATE);
305 cgs_set_powergating_state(hwmgr->device,
306 AMD_IP_BLOCK_TYPE_GFX,
307 AMD_PG_STATE_UNGATE);
308 }
309 } else {
310 /* exit umd pstate, restore level, enable gfx cg*/
311 if (!(*level & profile_mode_mask)) {
312 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
313 *level = hwmgr->saved_dpm_level;
314 hwmgr->en_umd_pstate = false;
315 cgs_set_clockgating_state(hwmgr->device,
316 AMD_IP_BLOCK_TYPE_GFX,
317 AMD_CG_STATE_GATE);
318 cgs_set_powergating_state(hwmgr->device,
319 AMD_IP_BLOCK_TYPE_GFX,
320 AMD_PG_STATE_GATE);
321 }
322 }
323}
324
290static int pp_dpm_force_performance_level(void *handle, 325static int pp_dpm_force_performance_level(void *handle,
291 enum amd_dpm_forced_level level) 326 enum amd_dpm_forced_level level)
292{ 327{
@@ -301,14 +336,22 @@ static int pp_dpm_force_performance_level(void *handle,
301 336
302 hwmgr = pp_handle->hwmgr; 337 hwmgr = pp_handle->hwmgr;
303 338
339 if (level == hwmgr->dpm_level)
340 return 0;
341
304 if (hwmgr->hwmgr_func->force_dpm_level == NULL) { 342 if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
305 pr_info("%s was not implemented.\n", __func__); 343 pr_info("%s was not implemented.\n", __func__);
306 return 0; 344 return 0;
307 } 345 }
308 346
309 mutex_lock(&pp_handle->pp_lock); 347 mutex_lock(&pp_handle->pp_lock);
348 pp_dpm_en_umd_pstate(hwmgr, &level);
349 hwmgr->request_dpm_level = level;
310 hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); 350 hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
311 hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); 351 ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
352 if (!ret)
353 hwmgr->dpm_level = hwmgr->request_dpm_level;
354
312 mutex_unlock(&pp_handle->pp_lock); 355 mutex_unlock(&pp_handle->pp_lock);
313 return 0; 356 return 0;
314} 357}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index b9c61ece6784..a301003f6e2a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1314,57 +1314,21 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1314 enum amd_dpm_forced_level level) 1314 enum amd_dpm_forced_level level)
1315{ 1315{
1316 int ret = 0; 1316 int ret = 0;
1317 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1318 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1319 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1320
1321 if (level == hwmgr->dpm_level)
1322 return ret;
1323
1324 if (!(hwmgr->dpm_level & profile_mode_mask)) {
1325 /* enter profile mode, save current level, disable gfx cg*/
1326 if (level & profile_mode_mask) {
1327 hwmgr->saved_dpm_level = hwmgr->dpm_level;
1328 cgs_set_clockgating_state(hwmgr->device,
1329 AMD_IP_BLOCK_TYPE_GFX,
1330 AMD_CG_STATE_UNGATE);
1331 }
1332 } else {
1333 /* exit profile mode, restore level, enable gfx cg*/
1334 if (!(level & profile_mode_mask)) {
1335 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1336 level = hwmgr->saved_dpm_level;
1337 cgs_set_clockgating_state(hwmgr->device,
1338 AMD_IP_BLOCK_TYPE_GFX,
1339 AMD_CG_STATE_GATE);
1340 }
1341 }
1342 1317
1343 switch (level) { 1318 switch (level) {
1344 case AMD_DPM_FORCED_LEVEL_HIGH: 1319 case AMD_DPM_FORCED_LEVEL_HIGH:
1345 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1320 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1346 ret = cz_phm_force_dpm_highest(hwmgr); 1321 ret = cz_phm_force_dpm_highest(hwmgr);
1347 if (ret)
1348 return ret;
1349 hwmgr->dpm_level = level;
1350 break; 1322 break;
1351 case AMD_DPM_FORCED_LEVEL_LOW: 1323 case AMD_DPM_FORCED_LEVEL_LOW:
1352 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1324 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1353 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1325 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1354 ret = cz_phm_force_dpm_lowest(hwmgr); 1326 ret = cz_phm_force_dpm_lowest(hwmgr);
1355 if (ret)
1356 return ret;
1357 hwmgr->dpm_level = level;
1358 break; 1327 break;
1359 case AMD_DPM_FORCED_LEVEL_AUTO: 1328 case AMD_DPM_FORCED_LEVEL_AUTO:
1360 ret = cz_phm_unforce_dpm_levels(hwmgr); 1329 ret = cz_phm_unforce_dpm_levels(hwmgr);
1361 if (ret)
1362 return ret;
1363 hwmgr->dpm_level = level;
1364 break; 1330 break;
1365 case AMD_DPM_FORCED_LEVEL_MANUAL: 1331 case AMD_DPM_FORCED_LEVEL_MANUAL:
1366 hwmgr->dpm_level = level;
1367 break;
1368 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1332 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1369 default: 1333 default:
1370 break; 1334 break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index eb8a3ff70cf7..dfe06d98304c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2568,51 +2568,16 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2568 uint32_t sclk_mask = 0; 2568 uint32_t sclk_mask = 0;
2569 uint32_t mclk_mask = 0; 2569 uint32_t mclk_mask = 0;
2570 uint32_t pcie_mask = 0; 2570 uint32_t pcie_mask = 0;
2571 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2572 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2573 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2574 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2575
2576 if (level == hwmgr->dpm_level)
2577 return ret;
2578
2579 if (!(hwmgr->dpm_level & profile_mode_mask)) {
2580 /* enter profile mode, save current level, disable gfx cg*/
2581 if (level & profile_mode_mask) {
2582 hwmgr->saved_dpm_level = hwmgr->dpm_level;
2583 cgs_set_clockgating_state(hwmgr->device,
2584 AMD_IP_BLOCK_TYPE_GFX,
2585 AMD_CG_STATE_UNGATE);
2586 }
2587 } else {
2588 /* exit profile mode, restore level, enable gfx cg*/
2589 if (!(level & profile_mode_mask)) {
2590 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2591 level = hwmgr->saved_dpm_level;
2592 cgs_set_clockgating_state(hwmgr->device,
2593 AMD_IP_BLOCK_TYPE_GFX,
2594 AMD_CG_STATE_GATE);
2595 }
2596 }
2597 2571
2598 switch (level) { 2572 switch (level) {
2599 case AMD_DPM_FORCED_LEVEL_HIGH: 2573 case AMD_DPM_FORCED_LEVEL_HIGH:
2600 ret = smu7_force_dpm_highest(hwmgr); 2574 ret = smu7_force_dpm_highest(hwmgr);
2601 if (ret)
2602 return ret;
2603 hwmgr->dpm_level = level;
2604 break; 2575 break;
2605 case AMD_DPM_FORCED_LEVEL_LOW: 2576 case AMD_DPM_FORCED_LEVEL_LOW:
2606 ret = smu7_force_dpm_lowest(hwmgr); 2577 ret = smu7_force_dpm_lowest(hwmgr);
2607 if (ret)
2608 return ret;
2609 hwmgr->dpm_level = level;
2610 break; 2578 break;
2611 case AMD_DPM_FORCED_LEVEL_AUTO: 2579 case AMD_DPM_FORCED_LEVEL_AUTO:
2612 ret = smu7_unforce_dpm_levels(hwmgr); 2580 ret = smu7_unforce_dpm_levels(hwmgr);
2613 if (ret)
2614 return ret;
2615 hwmgr->dpm_level = level;
2616 break; 2581 break;
2617 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 2582 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2618 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 2583 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -2621,26 +2586,23 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2621 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 2586 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2622 if (ret) 2587 if (ret)
2623 return ret; 2588 return ret;
2624 hwmgr->dpm_level = level;
2625 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); 2589 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
2626 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); 2590 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
2627 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); 2591 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
2628
2629 break; 2592 break;
2630 case AMD_DPM_FORCED_LEVEL_MANUAL: 2593 case AMD_DPM_FORCED_LEVEL_MANUAL:
2631 hwmgr->dpm_level = level;
2632 break;
2633 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 2594 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2634 default: 2595 default:
2635 break; 2596 break;
2636 } 2597 }
2637 2598
2638 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2599 if (!ret) {
2639 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); 2600 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2640 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2601 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2641 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); 2602 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2642 2603 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
2643 return 0; 2604 }
2605 return ret;
2644} 2606}
2645 2607
2646static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) 2608static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
@@ -4245,9 +4207,9 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4245{ 4207{
4246 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4208 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4247 4209
4248 if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | 4210 if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
4249 AMD_DPM_FORCED_LEVEL_LOW | 4211 AMD_DPM_FORCED_LEVEL_LOW |
4250 AMD_DPM_FORCED_LEVEL_HIGH)) 4212 AMD_DPM_FORCED_LEVEL_HIGH))
4251 return -EINVAL; 4213 return -EINVAL;
4252 4214
4253 switch (type) { 4215 switch (type) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f8f02e70b8bc..2e776edf9b8d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4306,51 +4306,16 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4306 uint32_t sclk_mask = 0; 4306 uint32_t sclk_mask = 0;
4307 uint32_t mclk_mask = 0; 4307 uint32_t mclk_mask = 0;
4308 uint32_t soc_mask = 0; 4308 uint32_t soc_mask = 0;
4309 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
4310 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
4311 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
4312 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
4313
4314 if (level == hwmgr->dpm_level)
4315 return ret;
4316
4317 if (!(hwmgr->dpm_level & profile_mode_mask)) {
4318 /* enter profile mode, save current level, disable gfx cg*/
4319 if (level & profile_mode_mask) {
4320 hwmgr->saved_dpm_level = hwmgr->dpm_level;
4321 cgs_set_clockgating_state(hwmgr->device,
4322 AMD_IP_BLOCK_TYPE_GFX,
4323 AMD_CG_STATE_UNGATE);
4324 }
4325 } else {
4326 /* exit profile mode, restore level, enable gfx cg*/
4327 if (!(level & profile_mode_mask)) {
4328 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
4329 level = hwmgr->saved_dpm_level;
4330 cgs_set_clockgating_state(hwmgr->device,
4331 AMD_IP_BLOCK_TYPE_GFX,
4332 AMD_CG_STATE_GATE);
4333 }
4334 }
4335 4309
4336 switch (level) { 4310 switch (level) {
4337 case AMD_DPM_FORCED_LEVEL_HIGH: 4311 case AMD_DPM_FORCED_LEVEL_HIGH:
4338 ret = vega10_force_dpm_highest(hwmgr); 4312 ret = vega10_force_dpm_highest(hwmgr);
4339 if (ret)
4340 return ret;
4341 hwmgr->dpm_level = level;
4342 break; 4313 break;
4343 case AMD_DPM_FORCED_LEVEL_LOW: 4314 case AMD_DPM_FORCED_LEVEL_LOW:
4344 ret = vega10_force_dpm_lowest(hwmgr); 4315 ret = vega10_force_dpm_lowest(hwmgr);
4345 if (ret)
4346 return ret;
4347 hwmgr->dpm_level = level;
4348 break; 4316 break;
4349 case AMD_DPM_FORCED_LEVEL_AUTO: 4317 case AMD_DPM_FORCED_LEVEL_AUTO:
4350 ret = vega10_unforce_dpm_levels(hwmgr); 4318 ret = vega10_unforce_dpm_levels(hwmgr);
4351 if (ret)
4352 return ret;
4353 hwmgr->dpm_level = level;
4354 break; 4319 break;
4355 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 4320 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4356 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 4321 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -4359,24 +4324,22 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4359 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); 4324 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4360 if (ret) 4325 if (ret)
4361 return ret; 4326 return ret;
4362 hwmgr->dpm_level = level;
4363 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); 4327 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4364 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); 4328 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4365 break; 4329 break;
4366 case AMD_DPM_FORCED_LEVEL_MANUAL: 4330 case AMD_DPM_FORCED_LEVEL_MANUAL:
4367 hwmgr->dpm_level = level;
4368 break;
4369 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 4331 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
4370 default: 4332 default:
4371 break; 4333 break;
4372 } 4334 }
4373 4335
4374 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 4336 if (!ret) {
4375 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); 4337 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4376 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 4338 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4377 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); 4339 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4378 4340 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4379 return 0; 4341 }
4342 return ret;
4380} 4343}
4381 4344
4382static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) 4345static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
@@ -4624,7 +4587,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4624 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); 4587 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4625 int i; 4588 int i;
4626 4589
4627 if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | 4590 if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
4628 AMD_DPM_FORCED_LEVEL_LOW | 4591 AMD_DPM_FORCED_LEVEL_LOW |
4629 AMD_DPM_FORCED_LEVEL_HIGH)) 4592 AMD_DPM_FORCED_LEVEL_HIGH))
4630 return -EINVAL; 4593 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index c649354f08ca..3bbe7d5cb6de 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -747,6 +747,7 @@ struct pp_hwmgr {
747 747
748 enum amd_dpm_forced_level dpm_level; 748 enum amd_dpm_forced_level dpm_level;
749 enum amd_dpm_forced_level saved_dpm_level; 749 enum amd_dpm_forced_level saved_dpm_level;
750 enum amd_dpm_forced_level request_dpm_level;
750 bool block_hw_access; 751 bool block_hw_access;
751 struct phm_gfx_arbiter gfx_arbiter; 752 struct phm_gfx_arbiter gfx_arbiter;
752 struct phm_acp_arbiter acp_arbiter; 753 struct phm_acp_arbiter acp_arbiter;
@@ -786,12 +787,13 @@ struct pp_hwmgr {
786 struct amd_pp_display_configuration display_config; 787 struct amd_pp_display_configuration display_config;
787 uint32_t feature_mask; 788 uint32_t feature_mask;
788 789
789 /* power profile */ 790 /* UMD Pstate */
790 struct amd_pp_profile gfx_power_profile; 791 struct amd_pp_profile gfx_power_profile;
791 struct amd_pp_profile compute_power_profile; 792 struct amd_pp_profile compute_power_profile;
792 struct amd_pp_profile default_gfx_power_profile; 793 struct amd_pp_profile default_gfx_power_profile;
793 struct amd_pp_profile default_compute_power_profile; 794 struct amd_pp_profile default_compute_power_profile;
794 enum amd_pp_profile_type current_power_profile; 795 enum amd_pp_profile_type current_power_profile;
796 bool en_umd_pstate;
795}; 797};
796 798
797extern int hwmgr_early_init(struct pp_instance *handle); 799extern int hwmgr_early_init(struct pp_instance *handle);