Diffstat (limited to 'drivers/gpu/drm/radeon/ci_dpm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/ci_dpm.c | 752
1 file changed, 670 insertions, 82 deletions
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 11a55e9dad7f..f373a81ba3d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -46,15 +46,15 @@ | |||
46 | static const struct ci_pt_defaults defaults_hawaii_xt = | 46 | static const struct ci_pt_defaults defaults_hawaii_xt = |
47 | { | 47 | { |
48 | 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, | 48 | 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, |
49 | { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, | 49 | { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, |
50 | { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } | 50 | { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static const struct ci_pt_defaults defaults_hawaii_pro = | 53 | static const struct ci_pt_defaults defaults_hawaii_pro = |
54 | { | 54 | { |
55 | 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, | 55 | 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, |
56 | { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, | 56 | { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, |
57 | { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } | 57 | { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static const struct ci_pt_defaults defaults_bonaire_xt = | 60 | static const struct ci_pt_defaults defaults_bonaire_xt = |
@@ -184,6 +184,9 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, | |||
184 | u32 target_tdp); | 184 | u32 target_tdp); |
185 | static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); | 185 | static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); |
186 | 186 | ||
187 | static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, | ||
188 | PPSMC_Msg msg, u32 parameter); | ||
189 | |||
187 | static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) | 190 | static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) |
188 | { | 191 | { |
189 | struct ci_power_info *pi = rdev->pm.dpm.priv; | 192 | struct ci_power_info *pi = rdev->pm.dpm.priv; |
@@ -249,7 +252,10 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev) | |||
249 | 252 | ||
250 | if (pi->caps_power_containment) { | 253 | if (pi->caps_power_containment) { |
251 | pi->caps_cac = true; | 254 | pi->caps_cac = true; |
252 | pi->enable_bapm_feature = true; | 255 | if (rdev->family == CHIP_HAWAII) |
256 | pi->enable_bapm_feature = false; | ||
257 | else | ||
258 | pi->enable_bapm_feature = true; | ||
253 | pi->enable_tdc_limit_feature = true; | 259 | pi->enable_tdc_limit_feature = true; |
254 | pi->enable_pkg_pwr_tracking_feature = true; | 260 | pi->enable_pkg_pwr_tracking_feature = true; |
255 | } | 261 | } |
@@ -352,6 +358,21 @@ static int ci_populate_dw8(struct radeon_device *rdev) | |||
352 | return 0; | 358 | return 0; |
353 | } | 359 | } |
354 | 360 | ||
361 | static int ci_populate_fuzzy_fan(struct radeon_device *rdev) | ||
362 | { | ||
363 | struct ci_power_info *pi = ci_get_pi(rdev); | ||
364 | |||
365 | if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) || | ||
366 | (rdev->pm.dpm.fan.fan_output_sensitivity == 0)) | ||
367 | rdev->pm.dpm.fan.fan_output_sensitivity = | ||
368 | rdev->pm.dpm.fan.default_fan_output_sensitivity; | ||
369 | |||
370 | pi->smc_powertune_table.FuzzyFan_PwmSetDelta = | ||
371 | cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
355 | static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) | 376 | static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) |
356 | { | 377 | { |
357 | struct ci_power_info *pi = ci_get_pi(rdev); | 378 | struct ci_power_info *pi = ci_get_pi(rdev); |
@@ -477,6 +498,9 @@ static int ci_populate_pm_base(struct radeon_device *rdev) | |||
477 | ret = ci_populate_dw8(rdev); | 498 | ret = ci_populate_dw8(rdev); |
478 | if (ret) | 499 | if (ret) |
479 | return ret; | 500 | return ret; |
501 | ret = ci_populate_fuzzy_fan(rdev); | ||
502 | if (ret) | ||
503 | return ret; | ||
480 | ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); | 504 | ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); |
481 | if (ret) | 505 | if (ret) |
482 | return ret; | 506 | return ret; |
@@ -690,6 +714,25 @@ static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable) | |||
690 | return ret; | 714 | return ret; |
691 | } | 715 | } |
692 | 716 | ||
717 | static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev, | ||
718 | bool enable) | ||
719 | { | ||
720 | struct ci_power_info *pi = ci_get_pi(rdev); | ||
721 | PPSMC_Result smc_result = PPSMC_Result_OK; | ||
722 | |||
723 | if (pi->thermal_sclk_dpm_enabled) { | ||
724 | if (enable) | ||
725 | smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM); | ||
726 | else | ||
727 | smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM); | ||
728 | } | ||
729 | |||
730 | if (smc_result == PPSMC_Result_OK) | ||
731 | return 0; | ||
732 | else | ||
733 | return -EINVAL; | ||
734 | } | ||
735 | |||
693 | static int ci_power_control_set_level(struct radeon_device *rdev) | 736 | static int ci_power_control_set_level(struct radeon_device *rdev) |
694 | { | 737 | { |
695 | struct ci_power_info *pi = ci_get_pi(rdev); | 738 | struct ci_power_info *pi = ci_get_pi(rdev); |
@@ -700,13 +743,11 @@ static int ci_power_control_set_level(struct radeon_device *rdev) | |||
700 | int ret = 0; | 743 | int ret = 0; |
701 | bool adjust_polarity = false; /* ??? */ | 744 | bool adjust_polarity = false; /* ??? */ |
702 | 745 | ||
703 | if (pi->caps_power_containment && | 746 | if (pi->caps_power_containment) { |
704 | (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) { | ||
705 | adjust_percent = adjust_polarity ? | 747 | adjust_percent = adjust_polarity ? |
706 | rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); | 748 | rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); |
707 | target_tdp = ((100 + adjust_percent) * | 749 | target_tdp = ((100 + adjust_percent) * |
708 | (s32)cac_tdp_table->configurable_tdp) / 100; | 750 | (s32)cac_tdp_table->configurable_tdp) / 100; |
709 | target_tdp *= 256; | ||
710 | 751 | ||
711 | ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); | 752 | ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); |
712 | } | 753 | } |
@@ -814,7 +855,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, | |||
814 | } | 855 | } |
815 | } | 856 | } |
816 | 857 | ||
817 | static int ci_set_thermal_temperature_range(struct radeon_device *rdev, | 858 | static int ci_thermal_set_temperature_range(struct radeon_device *rdev, |
818 | int min_temp, int max_temp) | 859 | int min_temp, int max_temp) |
819 | { | 860 | { |
820 | int low_temp = 0 * 1000; | 861 | int low_temp = 0 * 1000; |
@@ -850,6 +891,350 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev, | |||
850 | return 0; | 891 | return 0; |
851 | } | 892 | } |
852 | 893 | ||
894 | static int ci_thermal_enable_alert(struct radeon_device *rdev, | ||
895 | bool enable) | ||
896 | { | ||
897 | u32 thermal_int = RREG32_SMC(CG_THERMAL_INT); | ||
898 | PPSMC_Result result; | ||
899 | |||
900 | if (enable) { | ||
901 | thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); | ||
902 | WREG32_SMC(CG_THERMAL_INT, thermal_int); | ||
903 | rdev->irq.dpm_thermal = false; | ||
904 | result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable); | ||
905 | if (result != PPSMC_Result_OK) { | ||
906 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | ||
907 | return -EINVAL; | ||
908 | } | ||
909 | } else { | ||
910 | thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; | ||
911 | WREG32_SMC(CG_THERMAL_INT, thermal_int); | ||
912 | rdev->irq.dpm_thermal = true; | ||
913 | result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable); | ||
914 | if (result != PPSMC_Result_OK) { | ||
915 | DRM_DEBUG_KMS("Could not disable thermal interrupts.\n"); | ||
916 | return -EINVAL; | ||
917 | } | ||
918 | } | ||
919 | |||
920 | return 0; | ||
921 | } | ||
922 | |||
923 | static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode) | ||
924 | { | ||
925 | struct ci_power_info *pi = ci_get_pi(rdev); | ||
926 | u32 tmp; | ||
927 | |||
928 | if (pi->fan_ctrl_is_in_default_mode) { | ||
929 | tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; | ||
930 | pi->fan_ctrl_default_mode = tmp; | ||
931 | tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; | ||
932 | pi->t_min = tmp; | ||
933 | pi->fan_ctrl_is_in_default_mode = false; | ||
934 | } | ||
935 | |||
936 | tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; | ||
937 | tmp |= TMIN(0); | ||
938 | WREG32_SMC(CG_FDO_CTRL2, tmp); | ||
939 | |||
940 | tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; | ||
941 | tmp |= FDO_PWM_MODE(mode); | ||
942 | WREG32_SMC(CG_FDO_CTRL2, tmp); | ||
943 | } | ||
944 | |||
945 | static int ci_thermal_setup_fan_table(struct radeon_device *rdev) | ||
946 | { | ||
947 | struct ci_power_info *pi = ci_get_pi(rdev); | ||
948 | SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; | ||
949 | u32 duty100; | ||
950 | u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; | ||
951 | u16 fdo_min, slope1, slope2; | ||
952 | u32 reference_clock, tmp; | ||
953 | int ret; | ||
954 | u64 tmp64; | ||
955 | |||
956 | if (!pi->fan_table_start) { | ||
957 | rdev->pm.dpm.fan.ucode_fan_control = false; | ||
958 | return 0; | ||
959 | } | ||
960 | |||
961 | duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; | ||
962 | |||
963 | if (duty100 == 0) { | ||
964 | rdev->pm.dpm.fan.ucode_fan_control = false; | ||
965 | return 0; | ||
966 | } | ||
967 | |||
968 | tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100; | ||
969 | do_div(tmp64, 10000); | ||
970 | fdo_min = (u16)tmp64; | ||
971 | |||
972 | t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min; | ||
973 | t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med; | ||
974 | |||
975 | pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min; | ||
976 | pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med; | ||
977 | |||
978 | slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); | ||
979 | slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); | ||
980 | |||
981 | fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100); | ||
982 | fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100); | ||
983 | fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100); | ||
984 | |||
985 | fan_table.Slope1 = cpu_to_be16(slope1); | ||
986 | fan_table.Slope2 = cpu_to_be16(slope2); | ||
987 | |||
988 | fan_table.FdoMin = cpu_to_be16(fdo_min); | ||
989 | |||
990 | fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst); | ||
991 | |||
992 | fan_table.HystUp = cpu_to_be16(1); | ||
993 | |||
994 | fan_table.HystSlope = cpu_to_be16(1); | ||
995 | |||
996 | fan_table.TempRespLim = cpu_to_be16(5); | ||
997 | |||
998 | reference_clock = radeon_get_xclk(rdev); | ||
999 | |||
1000 | fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay * | ||
1001 | reference_clock) / 1600); | ||
1002 | |||
1003 | fan_table.FdoMax = cpu_to_be16((u16)duty100); | ||
1004 | |||
1005 | tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; | ||
1006 | fan_table.TempSrc = (uint8_t)tmp; | ||
1007 | |||
1008 | ret = ci_copy_bytes_to_smc(rdev, | ||
1009 | pi->fan_table_start, | ||
1010 | (u8 *)(&fan_table), | ||
1011 | sizeof(fan_table), | ||
1012 | pi->sram_end); | ||
1013 | |||
1014 | if (ret) { | ||
1015 | DRM_ERROR("Failed to load fan table to the SMC."); | ||
1016 | rdev->pm.dpm.fan.ucode_fan_control = false; | ||
1017 | } | ||
1018 | |||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev) | ||
1023 | { | ||
1024 | struct ci_power_info *pi = ci_get_pi(rdev); | ||
1025 | PPSMC_Result ret; | ||
1026 | |||
1027 | if (pi->caps_od_fuzzy_fan_control_support) { | ||
1028 | ret = ci_send_msg_to_smc_with_parameter(rdev, | ||
1029 | PPSMC_StartFanControl, | ||
1030 | FAN_CONTROL_FUZZY); | ||
1031 | if (ret != PPSMC_Result_OK) | ||
1032 | return -EINVAL; | ||
1033 | ret = ci_send_msg_to_smc_with_parameter(rdev, | ||
1034 | PPSMC_MSG_SetFanPwmMax, | ||
1035 | rdev->pm.dpm.fan.default_max_fan_pwm); | ||
1036 | if (ret != PPSMC_Result_OK) | ||
1037 | return -EINVAL; | ||
1038 | } else { | ||
1039 | ret = ci_send_msg_to_smc_with_parameter(rdev, | ||
1040 | PPSMC_StartFanControl, | ||
1041 | FAN_CONTROL_TABLE); | ||
1042 | if (ret != PPSMC_Result_OK) | ||
1043 | return -EINVAL; | ||
1044 | } | ||
1045 | |||
1046 | return 0; | ||
1047 | } | ||
1048 | |||
1049 | #if 0 | ||
1050 | static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev) | ||
1051 | { | ||
1052 | PPSMC_Result ret; | ||
1053 | |||
1054 | ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl); | ||
1055 | if (ret == PPSMC_Result_OK) | ||
1056 | return 0; | ||
1057 | else | ||
1058 | return -EINVAL; | ||
1059 | } | ||
1060 | |||
1061 | static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, | ||
1062 | u32 *speed) | ||
1063 | { | ||
1064 | u32 duty, duty100; | ||
1065 | u64 tmp64; | ||
1066 | |||
1067 | if (rdev->pm.no_fan) | ||
1068 | return -ENOENT; | ||
1069 | |||
1070 | duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; | ||
1071 | duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; | ||
1072 | |||
1073 | if (duty100 == 0) | ||
1074 | return -EINVAL; | ||
1075 | |||
1076 | tmp64 = (u64)duty * 100; | ||
1077 | do_div(tmp64, duty100); | ||
1078 | *speed = (u32)tmp64; | ||
1079 | |||
1080 | if (*speed > 100) | ||
1081 | *speed = 100; | ||
1082 | |||
1083 | return 0; | ||
1084 | } | ||
1085 | |||
1086 | static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, | ||
1087 | u32 speed) | ||
1088 | { | ||
1089 | u32 tmp; | ||
1090 | u32 duty, duty100; | ||
1091 | u64 tmp64; | ||
1092 | |||
1093 | if (rdev->pm.no_fan) | ||
1094 | return -ENOENT; | ||
1095 | |||
1096 | if (speed > 100) | ||
1097 | return -EINVAL; | ||
1098 | |||
1099 | if (rdev->pm.dpm.fan.ucode_fan_control) | ||
1100 | ci_fan_ctrl_stop_smc_fan_control(rdev); | ||
1101 | |||
1102 | duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; | ||
1103 | |||
1104 | if (duty100 == 0) | ||
1105 | return -EINVAL; | ||
1106 | |||
1107 | tmp64 = (u64)speed * duty100; | ||
1108 | do_div(tmp64, 100); | ||
1109 | duty = (u32)tmp64; | ||
1110 | |||
1111 | tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; | ||
1112 | tmp |= FDO_STATIC_DUTY(duty); | ||
1113 | WREG32_SMC(CG_FDO_CTRL0, tmp); | ||
1114 | |||
1115 | ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); | ||
1116 | |||
1117 | return 0; | ||
1118 | } | ||
1119 | |||
1120 | static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev, | ||
1121 | u32 *speed) | ||
1122 | { | ||
1123 | u32 tach_period; | ||
1124 | u32 xclk = radeon_get_xclk(rdev); | ||
1125 | |||
1126 | if (rdev->pm.no_fan) | ||
1127 | return -ENOENT; | ||
1128 | |||
1129 | if (rdev->pm.fan_pulses_per_revolution == 0) | ||
1130 | return -ENOENT; | ||
1131 | |||
1132 | tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; | ||
1133 | if (tach_period == 0) | ||
1134 | return -ENOENT; | ||
1135 | |||
1136 | *speed = 60 * xclk * 10000 / tach_period; | ||
1137 | |||
1138 | return 0; | ||
1139 | } | ||
1140 | |||
1141 | static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev, | ||
1142 | u32 speed) | ||
1143 | { | ||
1144 | u32 tach_period, tmp; | ||
1145 | u32 xclk = radeon_get_xclk(rdev); | ||
1146 | |||
1147 | if (rdev->pm.no_fan) | ||
1148 | return -ENOENT; | ||
1149 | |||
1150 | if (rdev->pm.fan_pulses_per_revolution == 0) | ||
1151 | return -ENOENT; | ||
1152 | |||
1153 | if ((speed < rdev->pm.fan_min_rpm) || | ||
1154 | (speed > rdev->pm.fan_max_rpm)) | ||
1155 | return -EINVAL; | ||
1156 | |||
1157 | if (rdev->pm.dpm.fan.ucode_fan_control) | ||
1158 | ci_fan_ctrl_stop_smc_fan_control(rdev); | ||
1159 | |||
1160 | tach_period = 60 * xclk * 10000 / (8 * speed); | ||
1161 | tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; | ||
1162 | tmp |= TARGET_PERIOD(tach_period); | ||
1163 | WREG32_SMC(CG_TACH_CTRL, tmp); | ||
1164 | |||
1165 | ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM); | ||
1166 | |||
1167 | return 0; | ||
1168 | } | ||
1169 | #endif | ||
1170 | |||
1171 | static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev) | ||
1172 | { | ||
1173 | struct ci_power_info *pi = ci_get_pi(rdev); | ||
1174 | u32 tmp; | ||
1175 | |||
1176 | if (!pi->fan_ctrl_is_in_default_mode) { | ||
1177 | tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; | ||
1178 | tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode); | ||
1179 | WREG32_SMC(CG_FDO_CTRL2, tmp); | ||
1180 | |||
1181 | tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; | ||
1182 | tmp |= TMIN(pi->t_min); | ||
1183 | WREG32_SMC(CG_FDO_CTRL2, tmp); | ||
1184 | pi->fan_ctrl_is_in_default_mode = true; | ||
1185 | } | ||
1186 | } | ||
1187 | |||
1188 | static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev) | ||
1189 | { | ||
1190 | if (rdev->pm.dpm.fan.ucode_fan_control) { | ||
1191 | ci_fan_ctrl_start_smc_fan_control(rdev); | ||
1192 | ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); | ||
1193 | } | ||
1194 | } | ||
1195 | |||
1196 | static void ci_thermal_initialize(struct radeon_device *rdev) | ||
1197 | { | ||
1198 | u32 tmp; | ||
1199 | |||
1200 | if (rdev->pm.fan_pulses_per_revolution) { | ||
1201 | tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; | ||
1202 | tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1); | ||
1203 | WREG32_SMC(CG_TACH_CTRL, tmp); | ||
1204 | } | ||
1205 | |||
1206 | tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; | ||
1207 | tmp |= TACH_PWM_RESP_RATE(0x28); | ||
1208 | WREG32_SMC(CG_FDO_CTRL2, tmp); | ||
1209 | } | ||
1210 | |||
1211 | static int ci_thermal_start_thermal_controller(struct radeon_device *rdev) | ||
1212 | { | ||
1213 | int ret; | ||
1214 | |||
1215 | ci_thermal_initialize(rdev); | ||
1216 | ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
1217 | if (ret) | ||
1218 | return ret; | ||
1219 | ret = ci_thermal_enable_alert(rdev, true); | ||
1220 | if (ret) | ||
1221 | return ret; | ||
1222 | if (rdev->pm.dpm.fan.ucode_fan_control) { | ||
1223 | ret = ci_thermal_setup_fan_table(rdev); | ||
1224 | if (ret) | ||
1225 | return ret; | ||
1226 | ci_thermal_start_smc_fan_control(rdev); | ||
1227 | } | ||
1228 | |||
1229 | return 0; | ||
1230 | } | ||
1231 | |||
1232 | static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev) | ||
1233 | { | ||
1234 | if (!rdev->pm.no_fan) | ||
1235 | ci_fan_ctrl_set_default_mode(rdev); | ||
1236 | } | ||
1237 | |||
853 | #if 0 | 1238 | #if 0 |
854 | static int ci_read_smc_soft_register(struct radeon_device *rdev, | 1239 | static int ci_read_smc_soft_register(struct radeon_device *rdev, |
855 | u16 reg_offset, u32 *value) | 1240 | u16 reg_offset, u32 *value) |
@@ -1253,7 +1638,7 @@ static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n) | |||
1253 | 1638 | ||
1254 | if (!pi->sclk_dpm_key_disabled) { | 1639 | if (!pi->sclk_dpm_key_disabled) { |
1255 | PPSMC_Result smc_result = | 1640 | PPSMC_Result smc_result = |
1256 | ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n); | 1641 | ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); |
1257 | if (smc_result != PPSMC_Result_OK) | 1642 | if (smc_result != PPSMC_Result_OK) |
1258 | return -EINVAL; | 1643 | return -EINVAL; |
1259 | } | 1644 | } |
@@ -1267,7 +1652,7 @@ static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n) | |||
1267 | 1652 | ||
1268 | if (!pi->mclk_dpm_key_disabled) { | 1653 | if (!pi->mclk_dpm_key_disabled) { |
1269 | PPSMC_Result smc_result = | 1654 | PPSMC_Result smc_result = |
1270 | ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n); | 1655 | ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); |
1271 | if (smc_result != PPSMC_Result_OK) | 1656 | if (smc_result != PPSMC_Result_OK) |
1272 | return -EINVAL; | 1657 | return -EINVAL; |
1273 | } | 1658 | } |
@@ -2042,6 +2427,33 @@ static int ci_force_switch_to_arb_f0(struct radeon_device *rdev) | |||
2042 | return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); | 2427 | return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); |
2043 | } | 2428 | } |
2044 | 2429 | ||
2430 | static void ci_register_patching_mc_arb(struct radeon_device *rdev, | ||
2431 | const u32 engine_clock, | ||
2432 | const u32 memory_clock, | ||
2433 | u32 *dram_timimg2) | ||
2434 | { | ||
2435 | bool patch; | ||
2436 | u32 tmp, tmp2; | ||
2437 | |||
2438 | tmp = RREG32(MC_SEQ_MISC0); | ||
2439 | patch = ((tmp & 0x0000f00) == 0x300) ? true : false; | ||
2440 | |||
2441 | if (patch && | ||
2442 | ((rdev->pdev->device == 0x67B0) || | ||
2443 | (rdev->pdev->device == 0x67B1))) { | ||
2444 | if ((memory_clock > 100000) && (memory_clock <= 125000)) { | ||
2445 | tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; | ||
2446 | *dram_timimg2 &= ~0x00ff0000; | ||
2447 | *dram_timimg2 |= tmp2 << 16; | ||
2448 | } else if ((memory_clock > 125000) && (memory_clock <= 137500)) { | ||
2449 | tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; | ||
2450 | *dram_timimg2 &= ~0x00ff0000; | ||
2451 | *dram_timimg2 |= tmp2 << 16; | ||
2452 | } | ||
2453 | } | ||
2454 | } | ||
2455 | |||
2456 | |||
2045 | static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, | 2457 | static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, |
2046 | u32 sclk, | 2458 | u32 sclk, |
2047 | u32 mclk, | 2459 | u32 mclk, |
@@ -2057,6 +2469,8 @@ static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, | |||
2057 | dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); | 2469 | dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); |
2058 | burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; | 2470 | burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; |
2059 | 2471 | ||
2472 | ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2); | ||
2473 | |||
2060 | arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); | 2474 | arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); |
2061 | arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); | 2475 | arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); |
2062 | arb_regs->McArbBurstTime = (u8)burst_time; | 2476 | arb_regs->McArbBurstTime = (u8)burst_time; |
@@ -2351,10 +2765,10 @@ static int ci_calculate_mclk_params(struct radeon_device *rdev, | |||
2351 | u32 tmp; | 2765 | u32 tmp; |
2352 | u32 reference_clock = rdev->clock.mpll.reference_freq; | 2766 | u32 reference_clock = rdev->clock.mpll.reference_freq; |
2353 | 2767 | ||
2354 | if (pi->mem_gddr5) | 2768 | if (mpll_param.qdr == 1) |
2355 | freq_nom = memory_clock * 4; | 2769 | freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); |
2356 | else | 2770 | else |
2357 | freq_nom = memory_clock * 2; | 2771 | freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); |
2358 | 2772 | ||
2359 | tmp = (freq_nom / reference_clock); | 2773 | tmp = (freq_nom / reference_clock); |
2360 | tmp = tmp * tmp; | 2774 | tmp = tmp * tmp; |
@@ -2434,7 +2848,6 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev, | |||
2434 | &memory_level->MinVddcPhases); | 2848 | &memory_level->MinVddcPhases); |
2435 | 2849 | ||
2436 | memory_level->EnabledForThrottle = 1; | 2850 | memory_level->EnabledForThrottle = 1; |
2437 | memory_level->EnabledForActivity = 1; | ||
2438 | memory_level->UpH = 0; | 2851 | memory_level->UpH = 0; |
2439 | memory_level->DownH = 100; | 2852 | memory_level->DownH = 100; |
2440 | memory_level->VoltageDownH = 0; | 2853 | memory_level->VoltageDownH = 0; |
@@ -2767,7 +3180,6 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev, | |||
2767 | 3180 | ||
2768 | graphic_level->CcPwrDynRm = 0; | 3181 | graphic_level->CcPwrDynRm = 0; |
2769 | graphic_level->CcPwrDynRm1 = 0; | 3182 | graphic_level->CcPwrDynRm1 = 0; |
2770 | graphic_level->EnabledForActivity = 1; | ||
2771 | graphic_level->EnabledForThrottle = 1; | 3183 | graphic_level->EnabledForThrottle = 1; |
2772 | graphic_level->UpH = 0; | 3184 | graphic_level->UpH = 0; |
2773 | graphic_level->DownH = 0; | 3185 | graphic_level->DownH = 0; |
@@ -2816,10 +3228,13 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev) | |||
2816 | &pi->smc_state_table.GraphicsLevel[i]); | 3228 | &pi->smc_state_table.GraphicsLevel[i]); |
2817 | if (ret) | 3229 | if (ret) |
2818 | return ret; | 3230 | return ret; |
3231 | if (i > 1) | ||
3232 | pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; | ||
2819 | if (i == (dpm_table->sclk_table.count - 1)) | 3233 | if (i == (dpm_table->sclk_table.count - 1)) |
2820 | pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = | 3234 | pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = |
2821 | PPSMC_DISPLAY_WATERMARK_HIGH; | 3235 | PPSMC_DISPLAY_WATERMARK_HIGH; |
2822 | } | 3236 | } |
3237 | pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; | ||
2823 | 3238 | ||
2824 | pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; | 3239 | pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; |
2825 | pi->dpm_level_enable_mask.sclk_dpm_enable_mask = | 3240 | pi->dpm_level_enable_mask.sclk_dpm_enable_mask = |
@@ -2863,6 +3278,16 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev) | |||
2863 | return ret; | 3278 | return ret; |
2864 | } | 3279 | } |
2865 | 3280 | ||
3281 | pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; | ||
3282 | |||
3283 | if ((dpm_table->mclk_table.count >= 2) && | ||
3284 | ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) { | ||
3285 | pi->smc_state_table.MemoryLevel[1].MinVddc = | ||
3286 | pi->smc_state_table.MemoryLevel[0].MinVddc; | ||
3287 | pi->smc_state_table.MemoryLevel[1].MinVddcPhases = | ||
3288 | pi->smc_state_table.MemoryLevel[0].MinVddcPhases; | ||
3289 | } | ||
3290 | |||
2866 | pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); | 3291 | pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); |
2867 | 3292 | ||
2868 | pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; | 3293 | pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; |
@@ -2919,9 +3344,14 @@ static int ci_setup_default_pcie_tables(struct radeon_device *rdev) | |||
2919 | &pi->dpm_table.pcie_speed_table, | 3344 | &pi->dpm_table.pcie_speed_table, |
2920 | SMU7_MAX_LEVELS_LINK); | 3345 | SMU7_MAX_LEVELS_LINK); |
2921 | 3346 | ||
2922 | ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, | 3347 | if (rdev->family == CHIP_BONAIRE) |
2923 | pi->pcie_gen_powersaving.min, | 3348 | ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, |
2924 | pi->pcie_lane_powersaving.min); | 3349 | pi->pcie_gen_powersaving.min, |
3350 | pi->pcie_lane_powersaving.max); | ||
3351 | else | ||
3352 | ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, | ||
3353 | pi->pcie_gen_powersaving.min, | ||
3354 | pi->pcie_lane_powersaving.min); | ||
2925 | ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, | 3355 | ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, |
2926 | pi->pcie_gen_performance.min, | 3356 | pi->pcie_gen_performance.min, |
2927 | pi->pcie_lane_performance.min); | 3357 | pi->pcie_lane_performance.min); |
@@ -2988,19 +3418,21 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev) | |||
2988 | allowed_sclk_vddc_table->entries[i].clk)) { | 3418 | allowed_sclk_vddc_table->entries[i].clk)) { |
2989 | pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = | 3419 | pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = |
2990 | allowed_sclk_vddc_table->entries[i].clk; | 3420 | allowed_sclk_vddc_table->entries[i].clk; |
2991 | pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true; | 3421 | pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = |
3422 | (i == 0) ? true : false; | ||
2992 | pi->dpm_table.sclk_table.count++; | 3423 | pi->dpm_table.sclk_table.count++; |
2993 | } | 3424 | } |
2994 | } | 3425 | } |
2995 | 3426 | ||
2996 | pi->dpm_table.mclk_table.count = 0; | 3427 | pi->dpm_table.mclk_table.count = 0; |
2997 | for (i = 0; i < allowed_mclk_table->count; i++) { | 3428 | for (i = 0; i < allowed_mclk_table->count; i++) { |
2998 | if ((i==0) || | 3429 | if ((i == 0) || |
2999 | (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != | 3430 | (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != |
3000 | allowed_mclk_table->entries[i].clk)) { | 3431 | allowed_mclk_table->entries[i].clk)) { |
3001 | pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = | 3432 | pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = |
3002 | allowed_mclk_table->entries[i].clk; | 3433 | allowed_mclk_table->entries[i].clk; |
3003 | pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true; | 3434 | pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = |
3435 | (i == 0) ? true : false; | ||
3004 | pi->dpm_table.mclk_table.count++; | 3436 | pi->dpm_table.mclk_table.count++; |
3005 | } | 3437 | } |
3006 | } | 3438 | } |
@@ -3166,7 +3598,7 @@ static int ci_init_smc_table(struct radeon_device *rdev) | |||
3166 | table->VddcVddciDelta = 4000; | 3598 | table->VddcVddciDelta = 4000; |
3167 | table->PhaseResponseTime = 0; | 3599 | table->PhaseResponseTime = 0; |
3168 | table->MemoryThermThrottleEnable = 1; | 3600 | table->MemoryThermThrottleEnable = 1; |
3169 | table->PCIeBootLinkLevel = 0; | 3601 | table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1; |
3170 | table->PCIeGenInterval = 1; | 3602 | table->PCIeGenInterval = 1; |
3171 | if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) | 3603 | if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) |
3172 | table->SVI2Enable = 1; | 3604 | table->SVI2Enable = 1; |
@@ -3320,6 +3752,8 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev) | |||
3320 | struct ci_power_info *pi = ci_get_pi(rdev); | 3752 | struct ci_power_info *pi = ci_get_pi(rdev); |
3321 | PPSMC_Result result; | 3753 | PPSMC_Result result; |
3322 | 3754 | ||
3755 | ci_apply_disp_minimum_voltage_request(rdev); | ||
3756 | |||
3323 | if (!pi->sclk_dpm_key_disabled) { | 3757 | if (!pi->sclk_dpm_key_disabled) { |
3324 | if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { | 3758 | if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
3325 | result = ci_send_msg_to_smc_with_parameter(rdev, | 3759 | result = ci_send_msg_to_smc_with_parameter(rdev, |
@@ -3339,7 +3773,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev) | |||
3339 | return -EINVAL; | 3773 | return -EINVAL; |
3340 | } | 3774 | } |
3341 | } | 3775 | } |
3342 | 3776 | #if 0 | |
3343 | if (!pi->pcie_dpm_key_disabled) { | 3777 | if (!pi->pcie_dpm_key_disabled) { |
3344 | if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { | 3778 | if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
3345 | result = ci_send_msg_to_smc_with_parameter(rdev, | 3779 | result = ci_send_msg_to_smc_with_parameter(rdev, |
@@ -3349,9 +3783,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev) | |||
3349 | return -EINVAL; | 3783 | return -EINVAL; |
3350 | } | 3784 | } |
3351 | } | 3785 | } |
3352 | 3786 | #endif | |
3353 | ci_apply_disp_minimum_voltage_request(rdev); | ||
3354 | |||
3355 | return 0; | 3787 | return 0; |
3356 | } | 3788 | } |
3357 | 3789 | ||
@@ -3377,7 +3809,7 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev, | |||
3377 | pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; | 3809 | pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
3378 | } else { | 3810 | } else { |
3379 | /* XXX check display min clock requirements */ | 3811 | /* XXX check display min clock requirements */ |
3380 | if (0 != CISLAND_MINIMUM_ENGINE_CLOCK) | 3812 | if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) |
3381 | pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; | 3813 | pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; |
3382 | } | 3814 | } |
3383 | 3815 | ||
@@ -3707,62 +4139,61 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev, | |||
3707 | enum radeon_dpm_forced_level level) | 4139 | enum radeon_dpm_forced_level level) |
3708 | { | 4140 | { |
3709 | struct ci_power_info *pi = ci_get_pi(rdev); | 4141 | struct ci_power_info *pi = ci_get_pi(rdev); |
3710 | PPSMC_Result smc_result; | ||
3711 | u32 tmp, levels, i; | 4142 | u32 tmp, levels, i; |
3712 | int ret; | 4143 | int ret; |
3713 | 4144 | ||
3714 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 4145 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
3715 | if ((!pi->sclk_dpm_key_disabled) && | 4146 | if ((!pi->pcie_dpm_key_disabled) && |
3716 | pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { | 4147 | pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
3717 | levels = 0; | 4148 | levels = 0; |
3718 | tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; | 4149 | tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; |
3719 | while (tmp >>= 1) | 4150 | while (tmp >>= 1) |
3720 | levels++; | 4151 | levels++; |
3721 | if (levels) { | 4152 | if (levels) { |
3722 | ret = ci_dpm_force_state_sclk(rdev, levels); | 4153 | ret = ci_dpm_force_state_pcie(rdev, level); |
3723 | if (ret) | 4154 | if (ret) |
3724 | return ret; | 4155 | return ret; |
3725 | for (i = 0; i < rdev->usec_timeout; i++) { | 4156 | for (i = 0; i < rdev->usec_timeout; i++) { |
3726 | tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & | 4157 | tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & |
3727 | CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; | 4158 | CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; |
3728 | if (tmp == levels) | 4159 | if (tmp == levels) |
3729 | break; | 4160 | break; |
3730 | udelay(1); | 4161 | udelay(1); |
3731 | } | 4162 | } |
3732 | } | 4163 | } |
3733 | } | 4164 | } |
3734 | if ((!pi->mclk_dpm_key_disabled) && | 4165 | if ((!pi->sclk_dpm_key_disabled) && |
3735 | pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { | 4166 | pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
3736 | levels = 0; | 4167 | levels = 0; |
3737 | tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; | 4168 | tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; |
3738 | while (tmp >>= 1) | 4169 | while (tmp >>= 1) |
3739 | levels++; | 4170 | levels++; |
3740 | if (levels) { | 4171 | if (levels) { |
3741 | ret = ci_dpm_force_state_mclk(rdev, levels); | 4172 | ret = ci_dpm_force_state_sclk(rdev, levels); |
3742 | if (ret) | 4173 | if (ret) |
3743 | return ret; | 4174 | return ret; |
3744 | for (i = 0; i < rdev->usec_timeout; i++) { | 4175 | for (i = 0; i < rdev->usec_timeout; i++) { |
3745 | tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & | 4176 | tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & |
3746 | CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; | 4177 | CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; |
3747 | if (tmp == levels) | 4178 | if (tmp == levels) |
3748 | break; | 4179 | break; |
3749 | udelay(1); | 4180 | udelay(1); |
3750 | } | 4181 | } |
3751 | } | 4182 | } |
3752 | } | 4183 | } |
3753 | if ((!pi->pcie_dpm_key_disabled) && | 4184 | if ((!pi->mclk_dpm_key_disabled) && |
3754 | pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { | 4185 | pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { |
3755 | levels = 0; | 4186 | levels = 0; |
3756 | tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; | 4187 | tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; |
3757 | while (tmp >>= 1) | 4188 | while (tmp >>= 1) |
3758 | levels++; | 4189 | levels++; |
3759 | if (levels) { | 4190 | if (levels) { |
3760 | ret = ci_dpm_force_state_pcie(rdev, level); | 4191 | ret = ci_dpm_force_state_mclk(rdev, levels); |
3761 | if (ret) | 4192 | if (ret) |
3762 | return ret; | 4193 | return ret; |
3763 | for (i = 0; i < rdev->usec_timeout; i++) { | 4194 | for (i = 0; i < rdev->usec_timeout; i++) { |
3764 | tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & | 4195 | tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & |
3765 | CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; | 4196 | CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; |
3766 | if (tmp == levels) | 4197 | if (tmp == levels) |
3767 | break; | 4198 | break; |
3768 | udelay(1); | 4199 | udelay(1); |
@@ -3816,21 +4247,17 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev, | |||
3816 | } | 4247 | } |
3817 | } | 4248 | } |
3818 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { | 4249 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
3819 | if (!pi->sclk_dpm_key_disabled) { | ||
3820 | smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel); | ||
3821 | if (smc_result != PPSMC_Result_OK) | ||
3822 | return -EINVAL; | ||
3823 | } | ||
3824 | if (!pi->mclk_dpm_key_disabled) { | ||
3825 | smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel); | ||
3826 | if (smc_result != PPSMC_Result_OK) | ||
3827 | return -EINVAL; | ||
3828 | } | ||
3829 | if (!pi->pcie_dpm_key_disabled) { | 4250 | if (!pi->pcie_dpm_key_disabled) { |
3830 | smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel); | 4251 | PPSMC_Result smc_result; |
4252 | |||
4253 | smc_result = ci_send_msg_to_smc(rdev, | ||
4254 | PPSMC_MSG_PCIeDPM_UnForceLevel); | ||
3831 | if (smc_result != PPSMC_Result_OK) | 4255 | if (smc_result != PPSMC_Result_OK) |
3832 | return -EINVAL; | 4256 | return -EINVAL; |
3833 | } | 4257 | } |
4258 | ret = ci_upload_dpm_level_enable_mask(rdev); | ||
4259 | if (ret) | ||
4260 | return ret; | ||
3834 | } | 4261 | } |
3835 | 4262 | ||
3836 | rdev->pm.dpm.forced_level = level; | 4263 | rdev->pm.dpm.forced_level = level; |
@@ -4036,6 +4463,96 @@ static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, | |||
4036 | return 0; | 4463 | return 0; |
4037 | } | 4464 | } |
4038 | 4465 | ||
4466 | static int ci_register_patching_mc_seq(struct radeon_device *rdev, | ||
4467 | struct ci_mc_reg_table *table) | ||
4468 | { | ||
4469 | u8 i, k; | ||
4470 | u32 tmp; | ||
4471 | bool patch; | ||
4472 | |||
4473 | tmp = RREG32(MC_SEQ_MISC0); | ||
4474 | patch = ((tmp & 0x0000f00) == 0x300) ? true : false; | ||
4475 | |||
4476 | if (patch && | ||
4477 | ((rdev->pdev->device == 0x67B0) || | ||
4478 | (rdev->pdev->device == 0x67B1))) { | ||
4479 | for (i = 0; i < table->last; i++) { | ||
4480 | if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) | ||
4481 | return -EINVAL; | ||
4482 | switch(table->mc_reg_address[i].s1 >> 2) { | ||
4483 | case MC_SEQ_MISC1: | ||
4484 | for (k = 0; k < table->num_entries; k++) { | ||
4485 | if ((table->mc_reg_table_entry[k].mclk_max == 125000) || | ||
4486 | (table->mc_reg_table_entry[k].mclk_max == 137500)) | ||
4487 | table->mc_reg_table_entry[k].mc_data[i] = | ||
4488 | (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) | | ||
4489 | 0x00000007; | ||
4490 | } | ||
4491 | break; | ||
4492 | case MC_SEQ_WR_CTL_D0: | ||
4493 | for (k = 0; k < table->num_entries; k++) { | ||
4494 | if ((table->mc_reg_table_entry[k].mclk_max == 125000) || | ||
4495 | (table->mc_reg_table_entry[k].mclk_max == 137500)) | ||
4496 | table->mc_reg_table_entry[k].mc_data[i] = | ||
4497 | (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | | ||
4498 | 0x0000D0DD; | ||
4499 | } | ||
4500 | break; | ||
4501 | case MC_SEQ_WR_CTL_D1: | ||
4502 | for (k = 0; k < table->num_entries; k++) { | ||
4503 | if ((table->mc_reg_table_entry[k].mclk_max == 125000) || | ||
4504 | (table->mc_reg_table_entry[k].mclk_max == 137500)) | ||
4505 | table->mc_reg_table_entry[k].mc_data[i] = | ||
4506 | (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | | ||
4507 | 0x0000D0DD; | ||
4508 | } | ||
4509 | break; | ||
4510 | case MC_SEQ_WR_CTL_2: | ||
4511 | for (k = 0; k < table->num_entries; k++) { | ||
4512 | if ((table->mc_reg_table_entry[k].mclk_max == 125000) || | ||
4513 | (table->mc_reg_table_entry[k].mclk_max == 137500)) | ||
4514 | table->mc_reg_table_entry[k].mc_data[i] = 0; | ||
4515 | } | ||
4516 | break; | ||
4517 | case MC_SEQ_CAS_TIMING: | ||
4518 | for (k = 0; k < table->num_entries; k++) { | ||
4519 | if (table->mc_reg_table_entry[k].mclk_max == 125000) | ||
4520 | table->mc_reg_table_entry[k].mc_data[i] = | ||
4521 | (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | | ||
4522 | 0x000C0140; | ||
4523 | else if (table->mc_reg_table_entry[k].mclk_max == 137500) | ||
4524 | table->mc_reg_table_entry[k].mc_data[i] = | ||
4525 | (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | | ||
4526 | 0x000C0150; | ||
4527 | } | ||
4528 | break; | ||
4529 | case MC_SEQ_MISC_TIMING: | ||
4530 | for (k = 0; k < table->num_entries; k++) { | ||
4531 | if (table->mc_reg_table_entry[k].mclk_max == 125000) | ||
4532 | table->mc_reg_table_entry[k].mc_data[i] = | ||
4533 | (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | | ||
4534 | 0x00000030; | ||
4535 | else if (table->mc_reg_table_entry[k].mclk_max == 137500) | ||
4536 | table->mc_reg_table_entry[k].mc_data[i] = | ||
4537 | (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | | ||
4538 | 0x00000035; | ||
4539 | } | ||
4540 | break; | ||
4541 | default: | ||
4542 | break; | ||
4543 | } | ||
4544 | } | ||
4545 | |||
4546 | WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); | ||
4547 | tmp = RREG32(MC_SEQ_IO_DEBUG_DATA); | ||
4548 | tmp = (tmp & 0xFFF8FFFF) | (1 << 16); | ||
4549 | WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); | ||
4550 | WREG32(MC_SEQ_IO_DEBUG_DATA, tmp); | ||
4551 | } | ||
4552 | |||
4553 | return 0; | ||
4554 | } | ||
4555 | |||
4039 | static int ci_initialize_mc_reg_table(struct radeon_device *rdev) | 4556 | static int ci_initialize_mc_reg_table(struct radeon_device *rdev) |
4040 | { | 4557 | { |
4041 | struct ci_power_info *pi = ci_get_pi(rdev); | 4558 | struct ci_power_info *pi = ci_get_pi(rdev); |
@@ -4079,6 +4596,10 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev) | |||
4079 | 4596 | ||
4080 | ci_set_s0_mc_reg_index(ci_table); | 4597 | ci_set_s0_mc_reg_index(ci_table); |
4081 | 4598 | ||
4599 | ret = ci_register_patching_mc_seq(rdev, ci_table); | ||
4600 | if (ret) | ||
4601 | goto init_mc_done; | ||
4602 | |||
4082 | ret = ci_set_mc_special_registers(rdev, ci_table); | 4603 | ret = ci_set_mc_special_registers(rdev, ci_table); |
4083 | if (ret) | 4604 | if (ret) |
4084 | goto init_mc_done; | 4605 | goto init_mc_done; |
@@ -4675,36 +5196,51 @@ int ci_dpm_enable(struct radeon_device *rdev) | |||
4675 | return ret; | 5196 | return ret; |
4676 | } | 5197 | } |
4677 | 5198 | ||
5199 | ret = ci_power_control_set_level(rdev); | ||
5200 | if (ret) { | ||
5201 | DRM_ERROR("ci_power_control_set_level failed\n"); | ||
5202 | return ret; | ||
5203 | } | ||
5204 | |||
4678 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 5205 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
4679 | 5206 | ||
5207 | ret = ci_enable_thermal_based_sclk_dpm(rdev, true); | ||
5208 | if (ret) { | ||
5209 | DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n"); | ||
5210 | return ret; | ||
5211 | } | ||
5212 | |||
5213 | ci_thermal_start_thermal_controller(rdev); | ||
5214 | |||
4680 | ci_update_current_ps(rdev, boot_ps); | 5215 | ci_update_current_ps(rdev, boot_ps); |
4681 | 5216 | ||
4682 | return 0; | 5217 | return 0; |
4683 | } | 5218 | } |
4684 | 5219 | ||
4685 | int ci_dpm_late_enable(struct radeon_device *rdev) | 5220 | static int ci_set_temperature_range(struct radeon_device *rdev) |
4686 | { | 5221 | { |
4687 | int ret; | 5222 | int ret; |
4688 | 5223 | ||
4689 | if (rdev->irq.installed && | 5224 | ret = ci_thermal_enable_alert(rdev, false); |
4690 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 5225 | if (ret) |
4691 | #if 0 | 5226 | return ret; |
4692 | PPSMC_Result result; | 5227 | ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
4693 | #endif | 5228 | if (ret) |
4694 | ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 5229 | return ret; |
4695 | if (ret) { | 5230 | ret = ci_thermal_enable_alert(rdev, true); |
4696 | DRM_ERROR("ci_set_thermal_temperature_range failed\n"); | 5231 | if (ret) |
4697 | return ret; | 5232 | return ret; |
4698 | } | ||
4699 | rdev->irq.dpm_thermal = true; | ||
4700 | radeon_irq_set(rdev); | ||
4701 | #if 0 | ||
4702 | result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | ||
4703 | 5233 | ||
4704 | if (result != PPSMC_Result_OK) | 5234 | return ret; |
4705 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | 5235 | } |
4706 | #endif | 5236 | |
4707 | } | 5237 | int ci_dpm_late_enable(struct radeon_device *rdev) |
5238 | { | ||
5239 | int ret; | ||
5240 | |||
5241 | ret = ci_set_temperature_range(rdev); | ||
5242 | if (ret) | ||
5243 | return ret; | ||
4708 | 5244 | ||
4709 | ci_dpm_powergate_uvd(rdev, true); | 5245 | ci_dpm_powergate_uvd(rdev, true); |
4710 | 5246 | ||
@@ -4721,6 +5257,8 @@ void ci_dpm_disable(struct radeon_device *rdev) | |||
4721 | if (!ci_is_smc_running(rdev)) | 5257 | if (!ci_is_smc_running(rdev)) |
4722 | return; | 5258 | return; |
4723 | 5259 | ||
5260 | ci_thermal_stop_thermal_controller(rdev); | ||
5261 | |||
4724 | if (pi->thermal_protection) | 5262 | if (pi->thermal_protection) |
4725 | ci_enable_thermal_protection(rdev, false); | 5263 | ci_enable_thermal_protection(rdev, false); |
4726 | ci_enable_power_containment(rdev, false); | 5264 | ci_enable_power_containment(rdev, false); |
@@ -4729,12 +5267,13 @@ void ci_dpm_disable(struct radeon_device *rdev) | |||
4729 | ci_enable_spread_spectrum(rdev, false); | 5267 | ci_enable_spread_spectrum(rdev, false); |
4730 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); | 5268 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); |
4731 | ci_stop_dpm(rdev); | 5269 | ci_stop_dpm(rdev); |
4732 | ci_enable_ds_master_switch(rdev, true); | 5270 | ci_enable_ds_master_switch(rdev, false); |
4733 | ci_enable_ulv(rdev, false); | 5271 | ci_enable_ulv(rdev, false); |
4734 | ci_clear_vc(rdev); | 5272 | ci_clear_vc(rdev); |
4735 | ci_reset_to_default(rdev); | 5273 | ci_reset_to_default(rdev); |
4736 | ci_dpm_stop_smc(rdev); | 5274 | ci_dpm_stop_smc(rdev); |
4737 | ci_force_switch_to_arb_f0(rdev); | 5275 | ci_force_switch_to_arb_f0(rdev); |
5276 | ci_enable_thermal_based_sclk_dpm(rdev, false); | ||
4738 | 5277 | ||
4739 | ci_update_current_ps(rdev, boot_ps); | 5278 | ci_update_current_ps(rdev, boot_ps); |
4740 | } | 5279 | } |
@@ -4804,11 +5343,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) | |||
4804 | return 0; | 5343 | return 0; |
4805 | } | 5344 | } |
4806 | 5345 | ||
4807 | int ci_dpm_power_control_set_level(struct radeon_device *rdev) | ||
4808 | { | ||
4809 | return ci_power_control_set_level(rdev); | ||
4810 | } | ||
4811 | |||
4812 | void ci_dpm_reset_asic(struct radeon_device *rdev) | 5346 | void ci_dpm_reset_asic(struct radeon_device *rdev) |
4813 | { | 5347 | { |
4814 | ci_set_boot_state(rdev); | 5348 | ci_set_boot_state(rdev); |
@@ -5068,6 +5602,8 @@ void ci_dpm_fini(struct radeon_device *rdev) | |||
5068 | int ci_dpm_init(struct radeon_device *rdev) | 5602 | int ci_dpm_init(struct radeon_device *rdev) |
5069 | { | 5603 | { |
5070 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | 5604 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); |
5605 | SMU7_Discrete_DpmTable *dpm_table; | ||
5606 | struct radeon_gpio_rec gpio; | ||
5071 | u16 data_offset, size; | 5607 | u16 data_offset, size; |
5072 | u8 frev, crev; | 5608 | u8 frev, crev; |
5073 | struct ci_power_info *pi; | 5609 | struct ci_power_info *pi; |
@@ -5137,6 +5673,7 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5137 | pi->sclk_dpm_key_disabled = 0; | 5673 | pi->sclk_dpm_key_disabled = 0; |
5138 | pi->mclk_dpm_key_disabled = 0; | 5674 | pi->mclk_dpm_key_disabled = 0; |
5139 | pi->pcie_dpm_key_disabled = 0; | 5675 | pi->pcie_dpm_key_disabled = 0; |
5676 | pi->thermal_sclk_dpm_enabled = 0; | ||
5140 | 5677 | ||
5141 | /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ | 5678 | /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ |
5142 | if ((rdev->pdev->device == 0x6658) && | 5679 | if ((rdev->pdev->device == 0x6658) && |
@@ -5201,6 +5738,55 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5201 | 5738 | ||
5202 | pi->uvd_enabled = false; | 5739 | pi->uvd_enabled = false; |
5203 | 5740 | ||
5741 | dpm_table = &pi->smc_state_table; | ||
5742 | |||
5743 | gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID); | ||
5744 | if (gpio.valid) { | ||
5745 | dpm_table->VRHotGpio = gpio.shift; | ||
5746 | rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; | ||
5747 | } else { | ||
5748 | dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN; | ||
5749 | rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; | ||
5750 | } | ||
5751 | |||
5752 | gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID); | ||
5753 | if (gpio.valid) { | ||
5754 | dpm_table->AcDcGpio = gpio.shift; | ||
5755 | rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC; | ||
5756 | } else { | ||
5757 | dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN; | ||
5758 | rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC; | ||
5759 | } | ||
5760 | |||
5761 | gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID); | ||
5762 | if (gpio.valid) { | ||
5763 | u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL); | ||
5764 | |||
5765 | switch (gpio.shift) { | ||
5766 | case 0: | ||
5767 | tmp &= ~GNB_SLOW_MODE_MASK; | ||
5768 | tmp |= GNB_SLOW_MODE(1); | ||
5769 | break; | ||
5770 | case 1: | ||
5771 | tmp &= ~GNB_SLOW_MODE_MASK; | ||
5772 | tmp |= GNB_SLOW_MODE(2); | ||
5773 | break; | ||
5774 | case 2: | ||
5775 | tmp |= GNB_SLOW; | ||
5776 | break; | ||
5777 | case 3: | ||
5778 | tmp |= FORCE_NB_PS1; | ||
5779 | break; | ||
5780 | case 4: | ||
5781 | tmp |= DPM_ENABLED; | ||
5782 | break; | ||
5783 | default: | ||
5784 | DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); | ||
5785 | break; | ||
5786 | } | ||
5787 | WREG32_SMC(CNB_PWRMGT_CNTL, tmp); | ||
5788 | } | ||
5789 | |||
5204 | pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; | 5790 | pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; |
5205 | pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; | 5791 | pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; |
5206 | pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; | 5792 | pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; |
@@ -5262,6 +5848,8 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5262 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = | 5848 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = |
5263 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | 5849 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
5264 | 5850 | ||
5851 | pi->fan_ctrl_is_in_default_mode = true; | ||
5852 | |||
5265 | return 0; | 5853 | return 0; |
5266 | } | 5854 | } |
5267 | 5855 | ||