aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd
diff options
context:
space:
mode:
authorLikun Gao <Likun.Gao@amd.com>2019-01-24 06:53:40 -0500
committerAlex Deucher <alexander.deucher@amd.com>2019-03-19 16:04:01 -0400
commitbc0fcffd36baa1cbbf2a6e951e4f1acad3aa8c90 (patch)
treeadb5efa55fc38f2fb84d5692f3411c7e1f2e1879 /drivers/gpu/drm/amd
parent72e91f37a81768c81aa7ac78169f5a3259c621ac (diff)
drm/amd/powerplay: Unify smu handle task function (v2)
Unify power state adjust function into smu_handle_task by the judgment of task_id. Move functions which have no relationship with smu version into the file of amdgpu_smu. Modified the function of smu_display_config_changed into two parts. Unify some similar functions. v2: Correct the operation of upload dpm level when force dpm limit value. Signed-off-by: Likun Gao <Likun.Gao@amd.com> Reviewed-by: Huang Rui <ray.huang@amd.com> Acked-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c46
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c127
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c258
4 files changed, 250 insertions, 212 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b83981284a7c..47d2ba528a0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2608,28 +2608,38 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
2608 amdgpu_fence_wait_empty(ring); 2608 amdgpu_fence_wait_empty(ring);
2609 } 2609 }
2610 2610
2611 if (adev->powerplay.pp_funcs->dispatch_tasks) { 2611 if (is_support_sw_smu(adev)) {
2612 if (!amdgpu_device_has_dc_support(adev)) { 2612 struct smu_context *smu = &adev->smu;
2613 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
2614 mutex_lock(&(smu->mutex));
2615 smu_handle_task(&adev->smu,
2616 smu_dpm->dpm_level,
2617 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
2618 mutex_unlock(&(smu->mutex));
2619 } else {
2620 if (adev->powerplay.pp_funcs->dispatch_tasks) {
2621 if (!amdgpu_device_has_dc_support(adev)) {
2622 mutex_lock(&adev->pm.mutex);
2623 amdgpu_dpm_get_active_displays(adev);
2624 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
2625 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
2626 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
2627 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
2628 if (adev->pm.pm_display_cfg.vrefresh > 120)
2629 adev->pm.pm_display_cfg.min_vblank_time = 0;
2630 if (adev->powerplay.pp_funcs->display_configuration_change)
2631 adev->powerplay.pp_funcs->display_configuration_change(
2632 adev->powerplay.pp_handle,
2633 &adev->pm.pm_display_cfg);
2634 mutex_unlock(&adev->pm.mutex);
2635 }
2636 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
2637 } else {
2613 mutex_lock(&adev->pm.mutex); 2638 mutex_lock(&adev->pm.mutex);
2614 amdgpu_dpm_get_active_displays(adev); 2639 amdgpu_dpm_get_active_displays(adev);
2615 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 2640 amdgpu_dpm_change_power_state_locked(adev);
2616 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
2617 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
2618 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
2619 if (adev->pm.pm_display_cfg.vrefresh > 120)
2620 adev->pm.pm_display_cfg.min_vblank_time = 0;
2621 if (adev->powerplay.pp_funcs->display_configuration_change)
2622 adev->powerplay.pp_funcs->display_configuration_change(
2623 adev->powerplay.pp_handle,
2624 &adev->pm.pm_display_cfg);
2625 mutex_unlock(&adev->pm.mutex); 2641 mutex_unlock(&adev->pm.mutex);
2626 } 2642 }
2627 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
2628 } else {
2629 mutex_lock(&adev->pm.mutex);
2630 amdgpu_dpm_get_active_displays(adev);
2631 amdgpu_dpm_change_power_state_locked(adev);
2632 mutex_unlock(&adev->pm.mutex);
2633 } 2643 }
2634} 2644}
2635 2645
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index d6578be92196..48b346f95cd6 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1041,6 +1041,133 @@ static int smu_enable_umd_pstate(void *handle,
1041 return 0; 1041 return 0;
1042} 1042}
1043 1043
1044int smu_unforce_dpm_levels(struct smu_context *smu)
1045{
1046 int ret = 0;
1047
1048 ret = smu_upload_dpm_level(smu, false);
1049 if (ret) {
1050 pr_err("Failed to upload DPM Bootup Levels!");
1051 return ret;
1052 }
1053
1054 ret = smu_upload_dpm_level(smu, true);
1055 if (ret) {
1056 pr_err("Failed to upload DPM Max Levels!");
1057 return ret;
1058 }
1059
1060 return ret;
1061}
1062
1063int smu_adjust_power_state_dynamic(struct smu_context *smu,
1064 enum amd_dpm_forced_level level,
1065 bool skip_display_settings)
1066{
1067 int ret = 0;
1068 int index = 0;
1069 uint32_t sclk_mask, mclk_mask, soc_mask;
1070 long workload;
1071 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1072
1073 if (!skip_display_settings) {
1074 ret = smu_display_config_changed(smu);
1075 if (ret) {
1076 pr_err("Failed to change display config!");
1077 return ret;
1078 }
1079 }
1080
1081 ret = smu_apply_clocks_adjust_rules(smu);
1082 if (ret) {
1083 pr_err("Failed to apply clocks adjust rules!");
1084 return ret;
1085 }
1086
1087 if (!skip_display_settings) {
1088 ret = smu_notify_smc_dispaly_config(smu);
1089 if (ret) {
1090 pr_err("Failed to notify smc display config!");
1091 return ret;
1092 }
1093 }
1094
1095 if (smu_dpm_ctx->dpm_level != level) {
1096 switch (level) {
1097 case AMD_DPM_FORCED_LEVEL_HIGH:
1098 ret = smu_force_dpm_limit_value(smu, true);
1099 break;
1100 case AMD_DPM_FORCED_LEVEL_LOW:
1101 ret = smu_force_dpm_limit_value(smu, false);
1102 break;
1103
1104 case AMD_DPM_FORCED_LEVEL_AUTO:
1105 ret = smu_unforce_dpm_levels(smu);
1106 break;
1107
1108 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1109 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1110 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1111 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1112 ret = smu_get_profiling_clk_mask(smu, level,
1113 &sclk_mask,
1114 &mclk_mask,
1115 &soc_mask);
1116 if (ret)
1117 return ret;
1118 smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
1119 smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
1120 break;
1121
1122 case AMD_DPM_FORCED_LEVEL_MANUAL:
1123 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1124 default:
1125 break;
1126 }
1127
1128 if (!ret)
1129 smu_dpm_ctx->dpm_level = level;
1130 }
1131
1132 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1133 index = fls(smu->workload_mask);
1134 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1135 workload = smu->workload_setting[index];
1136
1137 if (smu->power_profile_mode != workload)
1138 smu_set_power_profile_mode(smu, &workload, 0);
1139 }
1140
1141 return ret;
1142}
1143
1144int smu_handle_task(struct smu_context *smu,
1145 enum amd_dpm_forced_level level,
1146 enum amd_pp_task task_id)
1147{
1148 int ret = 0;
1149
1150 switch (task_id) {
1151 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1152 ret = smu_pre_display_config_changed(smu);
1153 if (ret)
1154 return ret;
1155 ret = smu_set_cpu_power_state(smu);
1156 if (ret)
1157 return ret;
1158 ret = smu_adjust_power_state_dynamic(smu, level, false);
1159 break;
1160 case AMD_PP_TASK_COMPLETE_INIT:
1161 case AMD_PP_TASK_READJUST_POWER_STATE:
1162 ret = smu_adjust_power_state_dynamic(smu, level, true);
1163 break;
1164 default:
1165 break;
1166 }
1167
1168 return ret;
1169}
1170
1044const struct amd_ip_funcs smu_ip_funcs = { 1171const struct amd_ip_funcs smu_ip_funcs = {
1045 .name = "smu", 1172 .name = "smu",
1046 .early_init = smu_early_init, 1173 .early_init = smu_early_init,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 53ca9530ed1f..db050978020f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -437,6 +437,18 @@ struct pptable_funcs {
437 int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); 437 int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
438 enum amd_dpm_forced_level (*get_performance_level)(struct smu_context *smu); 438 enum amd_dpm_forced_level (*get_performance_level)(struct smu_context *smu);
439 int (*force_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); 439 int (*force_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
440 int (*pre_display_config_changed)(struct smu_context *smu);
441 int (*display_config_changed)(struct smu_context *smu);
442 int (*apply_clocks_adjust_rules)(struct smu_context *smu);
443 int (*notify_smc_dispaly_config)(struct smu_context *smu);
444 int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
445 int (*upload_dpm_level)(struct smu_context *smu, bool max);
446 int (*get_profiling_clk_mask)(struct smu_context *smu,
447 enum amd_dpm_forced_level level,
448 uint32_t *sclk_mask,
449 uint32_t *mclk_mask,
450 uint32_t *soc_mask);
451 int (*set_cpu_power_state)(struct smu_context *smu);
440}; 452};
441 453
442struct smu_funcs 454struct smu_funcs
@@ -628,6 +640,22 @@ struct smu_funcs
628 ((smu)->ppt_funcs->get_performance_level ? (smu)->ppt_funcs->get_performance_level((smu)) : 0) 640 ((smu)->ppt_funcs->get_performance_level ? (smu)->ppt_funcs->get_performance_level((smu)) : 0)
629#define smu_force_performance_level(smu, level) \ 641#define smu_force_performance_level(smu, level) \
630 ((smu)->ppt_funcs->force_performance_level ? (smu)->ppt_funcs->force_performance_level((smu), (level)) : 0) 642 ((smu)->ppt_funcs->force_performance_level ? (smu)->ppt_funcs->force_performance_level((smu), (level)) : 0)
643#define smu_pre_display_config_changed(smu) \
644 ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
645#define smu_display_config_changed(smu) \
646 ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
647#define smu_apply_clocks_adjust_rules(smu) \
648 ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
649#define smu_notify_smc_dispaly_config(smu) \
650 ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
651#define smu_force_dpm_limit_value(smu, highest) \
652 ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
653#define smu_upload_dpm_level(smu, max) \
654 ((smu)->ppt_funcs->upload_dpm_level ? (smu)->ppt_funcs->upload_dpm_level((smu), (max)) : 0)
655#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
656 ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
657#define smu_set_cpu_power_state(smu) \
658 ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
631 659
632#define smu_msg_get_index(smu, msg) \ 660#define smu_msg_get_index(smu, msg) \
633 ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) 661 ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
@@ -699,4 +727,7 @@ extern int smu_display_configuration_change(struct smu_context *smu, const
699extern int smu_get_current_clocks(struct smu_context *smu, 727extern int smu_get_current_clocks(struct smu_context *smu,
700 struct amd_pp_clock_info *clocks); 728 struct amd_pp_clock_info *clocks);
701extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate); 729extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
730extern int smu_handle_task(struct smu_context *smu,
731 enum amd_dpm_forced_level level,
732 enum amd_pp_task task_id);
702#endif 733#endif
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index a90bf77dd9eb..300462aff83a 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -877,71 +877,39 @@ static int vega20_print_clk_levels(struct smu_context *smu,
877 return size; 877 return size;
878} 878}
879 879
880static int vega20_upload_dpm_min_level(struct smu_context *smu) 880static int vega20_upload_dpm_level(struct smu_context *smu, bool max)
881{ 881{
882 struct vega20_dpm_table *dpm_table; 882 struct vega20_dpm_table *dpm_table;
883 struct vega20_single_dpm_table *single_dpm_table; 883 struct vega20_single_dpm_table *single_dpm_table;
884 uint32_t min_freq; 884 uint32_t freq;
885 int ret = 0; 885 int ret = 0;
886 886
887 dpm_table = smu->smu_dpm.dpm_context; 887 dpm_table = smu->smu_dpm.dpm_context;
888 888
889 if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) { 889 if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
890 single_dpm_table = &(dpm_table->gfx_table); 890 single_dpm_table = &(dpm_table->gfx_table);
891 min_freq = single_dpm_table->dpm_state.soft_min_level; 891 freq = max ? single_dpm_table->dpm_state.soft_max_level :
892 single_dpm_table->dpm_state.soft_min_level;
892 ret = smu_send_smc_msg_with_param(smu, 893 ret = smu_send_smc_msg_with_param(smu,
893 SMU_MSG_SetSoftMinByFreq, 894 (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
894 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff)); 895 (PPCLK_GFXCLK << 16) | (freq & 0xffff));
895 if (ret) { 896 if (ret) {
896 pr_err("Failed to set soft min gfxclk !\n"); 897 pr_err("Failed to set soft %s gfxclk !\n",
898 max ? "max" : "min");
897 return ret; 899 return ret;
898 } 900 }
899 } 901 }
900 902
901 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { 903 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
902 single_dpm_table = &(dpm_table->mem_table); 904 single_dpm_table = &(dpm_table->mem_table);
903 min_freq = single_dpm_table->dpm_state.soft_min_level; 905 freq = max ? single_dpm_table->dpm_state.soft_max_level :
906 single_dpm_table->dpm_state.soft_min_level;
904 ret = smu_send_smc_msg_with_param(smu, 907 ret = smu_send_smc_msg_with_param(smu,
905 SMU_MSG_SetSoftMinByFreq, 908 (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
906 (PPCLK_UCLK << 16) | (min_freq & 0xffff)); 909 (PPCLK_UCLK << 16) | (freq & 0xffff));
907 if (ret) { 910 if (ret) {
908 pr_err("Failed to set soft min memclk !\n"); 911 pr_err("Failed to set soft %s memclk !\n",
909 return ret; 912 max ? "max" : "min");
910 }
911 }
912
913 return ret;
914}
915
916static int vega20_upload_dpm_max_level(struct smu_context *smu)
917{
918 struct vega20_dpm_table *dpm_table;
919 struct vega20_single_dpm_table *single_dpm_table;
920 uint32_t max_freq;
921 int ret = 0;
922
923 dpm_table = smu->smu_dpm.dpm_context;
924
925 if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
926 single_dpm_table = &(dpm_table->gfx_table);
927 max_freq = single_dpm_table->dpm_state.soft_max_level;
928 ret = smu_send_smc_msg_with_param(smu,
929 SMU_MSG_SetSoftMaxByFreq,
930 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff));
931 if (ret) {
932 pr_err("Failed to set soft max gfxclk !\n");
933 return ret;
934 }
935 }
936
937 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
938 single_dpm_table = &(dpm_table->mem_table);
939 max_freq = single_dpm_table->dpm_state.soft_max_level;
940 ret = smu_send_smc_msg_with_param(smu,
941 SMU_MSG_SetSoftMaxByFreq,
942 (PPCLK_UCLK << 16) | (max_freq & 0xffff));
943 if (ret) {
944 pr_err("Failed to set soft max memclk !\n");
945 return ret; 913 return ret;
946 } 914 }
947 } 915 }
@@ -986,13 +954,13 @@ static int vega20_force_clk_levels(struct smu_context *smu,
986 single_dpm_table->dpm_state.soft_max_level = 954 single_dpm_table->dpm_state.soft_max_level =
987 single_dpm_table->dpm_levels[soft_max_level].value; 955 single_dpm_table->dpm_levels[soft_max_level].value;
988 956
989 ret = vega20_upload_dpm_min_level(smu); 957 ret = vega20_upload_dpm_level(smu, false);
990 if (ret) { 958 if (ret) {
991 pr_err("Failed to upload boot level to lowest!\n"); 959 pr_err("Failed to upload boot level to lowest!\n");
992 break; 960 break;
993 } 961 }
994 962
995 ret = vega20_upload_dpm_max_level(smu); 963 ret = vega20_upload_dpm_level(smu, true);
996 if (ret) 964 if (ret)
997 pr_err("Failed to upload dpm max level to highest!\n"); 965 pr_err("Failed to upload dpm max level to highest!\n");
998 966
@@ -1013,13 +981,13 @@ static int vega20_force_clk_levels(struct smu_context *smu,
1013 single_dpm_table->dpm_state.soft_max_level = 981 single_dpm_table->dpm_state.soft_max_level =
1014 single_dpm_table->dpm_levels[soft_max_level].value; 982 single_dpm_table->dpm_levels[soft_max_level].value;
1015 983
1016 ret = vega20_upload_dpm_min_level(smu); 984 ret = vega20_upload_dpm_level(smu, false);
1017 if (ret) { 985 if (ret) {
1018 pr_err("Failed to upload boot level to lowest!\n"); 986 pr_err("Failed to upload boot level to lowest!\n");
1019 break; 987 break;
1020 } 988 }
1021 989
1022 ret = vega20_upload_dpm_max_level(smu); 990 ret = vega20_upload_dpm_level(smu, true);
1023 if (ret) 991 if (ret)
1024 pr_err("Failed to upload dpm max level to highest!\n"); 992 pr_err("Failed to upload dpm max level to highest!\n");
1025 993
@@ -1389,11 +1357,26 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
1389 return ret; 1357 return ret;
1390} 1358}
1391 1359
1392static int vega20_display_config_changed(struct smu_context *smu) 1360static int vega20_pre_display_config_changed(struct smu_context *smu)
1393{ 1361{
1394 int ret = 0; 1362 int ret = 0;
1395 struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context; 1363 struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
1396 1364
1365 if (!smu->smu_dpm.dpm_context)
1366 return -EINVAL;
1367
1368 smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
1369 ret = vega20_set_uclk_to_highest_dpm_level(smu,
1370 &dpm_table->mem_table);
1371 if (ret)
1372 pr_err("Failed to set uclk to highest dpm level");
1373 return ret;
1374}
1375
1376static int vega20_display_config_changed(struct smu_context *smu)
1377{
1378 int ret = 0;
1379
1397 if (!smu->funcs) 1380 if (!smu->funcs)
1398 return -EINVAL; 1381 return -EINVAL;
1399 1382
@@ -1402,14 +1385,6 @@ static int vega20_display_config_changed(struct smu_context *smu)
1402 !smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr) 1385 !smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr)
1403 return -EINVAL; 1386 return -EINVAL;
1404 1387
1405 smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
1406 ret = vega20_set_uclk_to_highest_dpm_level(smu,
1407 &dpm_table->mem_table);
1408 if (ret) {
1409 pr_err("Failed to set uclk to highest dpm level");
1410 return ret;
1411 }
1412
1413 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1388 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1414 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1389 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1415 ret = smu->funcs->write_watermarks_table(smu); 1390 ret = smu->funcs->write_watermarks_table(smu);
@@ -1672,85 +1647,42 @@ static uint32_t vega20_find_highest_dpm_level(struct vega20_single_dpm_table *ta
1672 return i; 1647 return i;
1673} 1648}
1674 1649
1675static int vega20_force_dpm_highest(struct smu_context *smu) 1650static int vega20_force_dpm_limit_value(struct smu_context *smu, bool highest)
1676{ 1651{
1677 uint32_t soft_level; 1652 uint32_t soft_level;
1678 int ret = 0; 1653 int ret = 0;
1679 struct vega20_dpm_table *dpm_table = (struct vega20_dpm_table *)smu->smu_dpm.dpm_context; 1654 struct vega20_dpm_table *dpm_table =
1655 (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
1680 1656
1681 soft_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table)); 1657 if (highest)
1658 soft_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table));
1659 else
1660 soft_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
1682 1661
1683 dpm_table->gfx_table.dpm_state.soft_min_level = 1662 dpm_table->gfx_table.dpm_state.soft_min_level =
1684 dpm_table->gfx_table.dpm_state.soft_max_level = 1663 dpm_table->gfx_table.dpm_state.soft_max_level =
1685 dpm_table->gfx_table.dpm_levels[soft_level].value; 1664 dpm_table->gfx_table.dpm_levels[soft_level].value;
1686 1665
1687 soft_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table)); 1666 if (highest)
1667 soft_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table));
1668 else
1669 soft_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
1688 1670
1689 dpm_table->mem_table.dpm_state.soft_min_level = 1671 dpm_table->mem_table.dpm_state.soft_min_level =
1690 dpm_table->mem_table.dpm_state.soft_max_level = 1672 dpm_table->mem_table.dpm_state.soft_max_level =
1691 dpm_table->mem_table.dpm_levels[soft_level].value; 1673 dpm_table->mem_table.dpm_levels[soft_level].value;
1692 1674
1693 ret = vega20_upload_dpm_min_level(smu); 1675 ret = vega20_upload_dpm_level(smu, false);
1694 if (ret) { 1676 if (ret) {
1695 pr_err("Failed to upload boot level to highest!"); 1677 pr_err("Failed to upload boot level to %s!\n",
1678 highest ? "highest" : "lowest");
1696 return ret; 1679 return ret;
1697 } 1680 }
1698 1681
1699 ret = vega20_upload_dpm_max_level(smu); 1682 ret = vega20_upload_dpm_level(smu, true);
1700 if (ret) { 1683 if (ret) {
1701 pr_err("Failed to upload dpm max level to highest!"); 1684 pr_err("Failed to upload dpm max level to %s!\n!",
1702 return ret; 1685 highest ? "highest" : "lowest");
1703 }
1704
1705 return ret;
1706}
1707
1708static int vega20_force_dpm_lowest(struct smu_context *smu)
1709{
1710 uint32_t soft_level;
1711 int ret = 0;
1712 struct vega20_dpm_table *dpm_table = (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
1713
1714 soft_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
1715
1716 dpm_table->gfx_table.dpm_state.soft_min_level =
1717 dpm_table->gfx_table.dpm_state.soft_max_level =
1718 dpm_table->gfx_table.dpm_levels[soft_level].value;
1719
1720 soft_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
1721
1722 dpm_table->mem_table.dpm_state.soft_min_level =
1723 dpm_table->mem_table.dpm_state.soft_max_level =
1724 dpm_table->mem_table.dpm_levels[soft_level].value;
1725
1726 ret = vega20_upload_dpm_min_level(smu);
1727 if (ret) {
1728 pr_err("Failed to upload boot level to lowest!");
1729 return ret;
1730 }
1731
1732 ret = vega20_upload_dpm_max_level(smu);
1733 if (ret) {
1734 pr_err("Failed to upload dpm max level to lowest!");
1735 return ret;
1736 }
1737
1738 return ret;
1739}
1740
1741static int vega20_unforce_dpm_levels(struct smu_context *smu)
1742{
1743 int ret = 0;
1744
1745 ret = vega20_upload_dpm_min_level(smu);
1746 if (ret) {
1747 pr_err("Failed to upload DPM Bootup Levels!");
1748 return ret;
1749 }
1750
1751 ret = vega20_upload_dpm_max_level(smu);
1752 if (ret) {
1753 pr_err("Failed to upload DPM Max Levels!");
1754 return ret; 1686 return ret;
1755 } 1687 }
1756 1688
@@ -1771,78 +1703,6 @@ static enum amd_dpm_forced_level vega20_get_performance_level(struct smu_context
1771 return smu_dpm_ctx->dpm_level; 1703 return smu_dpm_ctx->dpm_level;
1772} 1704}
1773 1705
1774static int vega20_adjust_power_state_dynamic(struct smu_context *smu,
1775 enum amd_dpm_forced_level level)
1776{
1777 int ret = 0;
1778 int index = 0;
1779 uint32_t sclk_mask, mclk_mask, soc_mask;
1780 long workload;
1781 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1782
1783 ret = vega20_display_config_changed(smu);
1784 if (ret) {
1785 pr_err("Failed to change display config!");
1786 return ret;
1787 }
1788 ret = vega20_apply_clocks_adjust_rules(smu);
1789 if (ret) {
1790 pr_err("Failed to apply clocks adjust rules!");
1791 return ret;
1792 }
1793 ret = vega20_notify_smc_dispaly_config(smu);
1794 if (ret) {
1795 pr_err("Failed to notify smc display config!");
1796 return ret;
1797 }
1798
1799 switch (level) {
1800 case AMD_DPM_FORCED_LEVEL_HIGH:
1801 ret = vega20_force_dpm_highest(smu);
1802 break;
1803 case AMD_DPM_FORCED_LEVEL_LOW:
1804 ret = vega20_force_dpm_lowest(smu);
1805 break;
1806
1807 case AMD_DPM_FORCED_LEVEL_AUTO:
1808 ret = vega20_unforce_dpm_levels(smu);
1809 break;
1810
1811 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1812 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1813 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1814 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1815 ret = vega20_get_profiling_clk_mask(smu, level,
1816 &sclk_mask,
1817 &mclk_mask,
1818 &soc_mask);
1819 if (ret)
1820 return ret;
1821 vega20_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
1822 vega20_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
1823 break;
1824
1825 case AMD_DPM_FORCED_LEVEL_MANUAL:
1826 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1827 default:
1828 break;
1829 }
1830
1831 if (!ret)
1832 smu_dpm_ctx->dpm_level = level;
1833
1834 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1835 index = fls(smu->workload_mask);
1836 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1837 workload = smu->workload_setting[index];
1838
1839 if (smu->power_profile_mode != workload)
1840 smu->funcs->set_power_profile_mode(smu, &workload, 0);
1841 }
1842
1843 return ret;
1844}
1845
1846static int 1706static int
1847vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 1707vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1848{ 1708{
@@ -1861,7 +1721,8 @@ vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_leve
1861 mutex_lock(&smu->mutex); 1721 mutex_lock(&smu->mutex);
1862 1722
1863 smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level); 1723 smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
1864 ret = vega20_adjust_power_state_dynamic(smu, level); 1724 ret = smu_handle_task(smu, level,
1725 AMD_PP_TASK_READJUST_POWER_STATE);
1865 1726
1866 mutex_unlock(&smu->mutex); 1727 mutex_unlock(&smu->mutex);
1867 1728
@@ -2009,7 +1870,8 @@ static int vega20_set_od_percentage(struct smu_context *smu,
2009 single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; 1870 single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
2010 } 1871 }
2011 1872
2012 ret = vega20_adjust_power_state_dynamic(smu, smu_dpm->dpm_level); 1873 ret = smu_handle_task(smu, smu_dpm->dpm_level,
1874 AMD_PP_TASK_READJUST_POWER_STATE);
2013 1875
2014set_od_failed: 1876set_od_failed:
2015 mutex_unlock(&(smu->mutex)); 1877 mutex_unlock(&(smu->mutex));
@@ -2239,7 +2101,8 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
2239 2101
2240 if (type == PP_OD_COMMIT_DPM_TABLE) { 2102 if (type == PP_OD_COMMIT_DPM_TABLE) {
2241 mutex_lock(&(smu->mutex)); 2103 mutex_lock(&(smu->mutex));
2242 ret = vega20_adjust_power_state_dynamic(smu, smu_dpm->dpm_level); 2104 ret = smu_handle_task(smu, smu_dpm->dpm_level,
2105 AMD_PP_TASK_READJUST_POWER_STATE);
2243 mutex_unlock(&(smu->mutex)); 2106 mutex_unlock(&(smu->mutex));
2244 } 2107 }
2245 2108
@@ -2268,6 +2131,13 @@ static const struct pptable_funcs vega20_ppt_funcs = {
2268 .update_specified_od8_value = vega20_update_specified_od8_value, 2131 .update_specified_od8_value = vega20_update_specified_od8_value,
2269 .set_od_percentage = vega20_set_od_percentage, 2132 .set_od_percentage = vega20_set_od_percentage,
2270 .od_edit_dpm_table = vega20_odn_edit_dpm_table, 2133 .od_edit_dpm_table = vega20_odn_edit_dpm_table,
2134 .pre_display_config_changed = vega20_pre_display_config_changed,
2135 .display_config_changed = vega20_display_config_changed,
2136 .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
2137 .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
2138 .force_dpm_limit_value = vega20_force_dpm_limit_value,
2139 .upload_dpm_level = vega20_upload_dpm_level,
2140 .get_profiling_clk_mask = vega20_get_profiling_clk_mask,
2271}; 2141};
2272 2142
2273void vega20_set_ppt_funcs(struct smu_context *smu) 2143void vega20_set_ppt_funcs(struct smu_context *smu)