diff options
author | Tom St Denis <tom.stdenis@amd.com> | 2016-09-06 09:45:43 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-09-12 18:12:21 -0400 |
commit | 77d318a6b9f2f8599d913b7b5013cdf0342c71ed (patch) | |
tree | 7c8027aa29a0e90571861880b3f1191187357365 | |
parent | c3d98645854485ca3c07b0e110ec6d61f77558fa (diff) |
drm/amd/amdgpu: Correct whitespace in SI DPM code
Replace 8 spaces with tabs, correct {} braces, etc.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/si_dpm.c | 441 |
1 file changed, 209 insertions(+), 232 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 8d623912891c..8e6bbaf380d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
@@ -84,11 +84,11 @@ union fan_info { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | union pplib_clock_info { | 86 | union pplib_clock_info { |
87 | struct _ATOM_PPLIB_R600_CLOCK_INFO r600; | 87 | struct _ATOM_PPLIB_R600_CLOCK_INFO r600; |
88 | struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; | 88 | struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; |
89 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; | 89 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; |
90 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; | 90 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; |
91 | struct _ATOM_PPLIB_SI_CLOCK_INFO si; | 91 | struct _ATOM_PPLIB_SI_CLOCK_INFO si; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | const u32 r600_utc[R600_PM_NUMBER_OF_TC] = | 94 | const u32 r600_utc[R600_PM_NUMBER_OF_TC] = |
@@ -1852,8 +1852,8 @@ extern u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg); | |||
1852 | 1852 | ||
1853 | static struct si_power_info *si_get_pi(struct amdgpu_device *adev) | 1853 | static struct si_power_info *si_get_pi(struct amdgpu_device *adev) |
1854 | { | 1854 | { |
1855 | struct si_power_info *pi = adev->pm.dpm.priv; | 1855 | struct si_power_info *pi = adev->pm.dpm.priv; |
1856 | return pi; | 1856 | return pi; |
1857 | } | 1857 | } |
1858 | 1858 | ||
1859 | static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, | 1859 | static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, |
@@ -1954,23 +1954,23 @@ static void si_update_dte_from_pl2(struct amdgpu_device *adev, | |||
1954 | 1954 | ||
1955 | struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev) | 1955 | struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev) |
1956 | { | 1956 | { |
1957 | struct rv7xx_power_info *pi = adev->pm.dpm.priv; | 1957 | struct rv7xx_power_info *pi = adev->pm.dpm.priv; |
1958 | 1958 | ||
1959 | return pi; | 1959 | return pi; |
1960 | } | 1960 | } |
1961 | 1961 | ||
1962 | struct ni_power_info *ni_get_pi(struct amdgpu_device *adev) | 1962 | struct ni_power_info *ni_get_pi(struct amdgpu_device *adev) |
1963 | { | 1963 | { |
1964 | struct ni_power_info *pi = adev->pm.dpm.priv; | 1964 | struct ni_power_info *pi = adev->pm.dpm.priv; |
1965 | 1965 | ||
1966 | return pi; | 1966 | return pi; |
1967 | } | 1967 | } |
1968 | 1968 | ||
1969 | struct si_ps *si_get_ps(struct amdgpu_ps *aps) | 1969 | struct si_ps *si_get_ps(struct amdgpu_ps *aps) |
1970 | { | 1970 | { |
1971 | struct si_ps *ps = aps->ps_priv; | 1971 | struct si_ps *ps = aps->ps_priv; |
1972 | 1972 | ||
1973 | return ps; | 1973 | return ps; |
1974 | } | 1974 | } |
1975 | 1975 | ||
1976 | static void si_initialize_powertune_defaults(struct amdgpu_device *adev) | 1976 | static void si_initialize_powertune_defaults(struct amdgpu_device *adev) |
@@ -2147,7 +2147,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev) | |||
2147 | si_pi->enable_dte = false; | 2147 | si_pi->enable_dte = false; |
2148 | 2148 | ||
2149 | if (si_pi->powertune_data->enable_powertune_by_default) { | 2149 | if (si_pi->powertune_data->enable_powertune_by_default) { |
2150 | ni_pi->enable_power_containment= true; | 2150 | ni_pi->enable_power_containment = true; |
2151 | ni_pi->enable_cac = true; | 2151 | ni_pi->enable_cac = true; |
2152 | if (si_pi->dte_data.enable_dte_by_default) { | 2152 | if (si_pi->dte_data.enable_dte_by_default) { |
2153 | si_pi->enable_dte = true; | 2153 | si_pi->enable_dte = true; |
@@ -2426,13 +2426,12 @@ static int si_populate_power_containment_values(struct amdgpu_device *adev, | |||
2426 | 2426 | ||
2427 | if ((max_ps_percent == 0) || | 2427 | if ((max_ps_percent == 0) || |
2428 | (prev_sclk == max_sclk) || | 2428 | (prev_sclk == max_sclk) || |
2429 | disable_uvd_power_tune) { | 2429 | disable_uvd_power_tune) |
2430 | min_sclk = max_sclk; | 2430 | min_sclk = max_sclk; |
2431 | } else if (i == 1) { | 2431 | else if (i == 1) |
2432 | min_sclk = prev_sclk; | 2432 | min_sclk = prev_sclk; |
2433 | } else { | 2433 | else |
2434 | min_sclk = (prev_sclk * (u32)max_ps_percent) / 100; | 2434 | min_sclk = (prev_sclk * (u32)max_ps_percent) / 100; |
2435 | } | ||
2436 | 2435 | ||
2437 | if (min_sclk < state->performance_levels[0].sclk) | 2436 | if (min_sclk < state->performance_levels[0].sclk) |
2438 | min_sclk = state->performance_levels[0].sclk; | 2437 | min_sclk = state->performance_levels[0].sclk; |
@@ -2632,7 +2631,6 @@ static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev, | |||
2632 | u32 i; | 2631 | u32 i; |
2633 | u32 v0_loadline; | 2632 | u32 v0_loadline; |
2634 | 2633 | ||
2635 | |||
2636 | if (table == NULL) | 2634 | if (table == NULL) |
2637 | return -EINVAL; | 2635 | return -EINVAL; |
2638 | 2636 | ||
@@ -3079,14 +3077,14 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev, | |||
3079 | static bool si_dpm_vblank_too_short(struct amdgpu_device *adev) | 3077 | static bool si_dpm_vblank_too_short(struct amdgpu_device *adev) |
3080 | { | 3078 | { |
3081 | 3079 | ||
3082 | u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); | 3080 | u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); |
3083 | /* we never hit the non-gddr5 limit so disable it */ | 3081 | /* we never hit the non-gddr5 limit so disable it */ |
3084 | u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; | 3082 | u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; |
3085 | 3083 | ||
3086 | if (vblank_time < switch_limit) | 3084 | if (vblank_time < switch_limit) |
3087 | return true; | 3085 | return true; |
3088 | else | 3086 | else |
3089 | return false; | 3087 | return false; |
3090 | 3088 | ||
3091 | } | 3089 | } |
3092 | 3090 | ||
@@ -3099,47 +3097,47 @@ static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, | |||
3099 | u32 mc_cg_config; | 3097 | u32 mc_cg_config; |
3100 | 3098 | ||
3101 | switch (arb_freq_src) { | 3099 | switch (arb_freq_src) { |
3102 | case MC_CG_ARB_FREQ_F0: | 3100 | case MC_CG_ARB_FREQ_F0: |
3103 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); | 3101 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); |
3104 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); | 3102 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); |
3105 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; | 3103 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; |
3106 | break; | 3104 | break; |
3107 | case MC_CG_ARB_FREQ_F1: | 3105 | case MC_CG_ARB_FREQ_F1: |
3108 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); | 3106 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); |
3109 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); | 3107 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); |
3110 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; | 3108 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; |
3111 | break; | 3109 | break; |
3112 | case MC_CG_ARB_FREQ_F2: | 3110 | case MC_CG_ARB_FREQ_F2: |
3113 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); | 3111 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); |
3114 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); | 3112 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); |
3115 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; | 3113 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; |
3116 | break; | 3114 | break; |
3117 | case MC_CG_ARB_FREQ_F3: | 3115 | case MC_CG_ARB_FREQ_F3: |
3118 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); | 3116 | mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); |
3119 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); | 3117 | mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); |
3120 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; | 3118 | burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; |
3121 | break; | 3119 | break; |
3122 | default: | 3120 | default: |
3123 | return -EINVAL; | 3121 | return -EINVAL; |
3124 | } | 3122 | } |
3125 | 3123 | ||
3126 | switch (arb_freq_dest) { | 3124 | switch (arb_freq_dest) { |
3127 | case MC_CG_ARB_FREQ_F0: | 3125 | case MC_CG_ARB_FREQ_F0: |
3128 | WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); | 3126 | WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); |
3129 | WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); | 3127 | WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); |
3130 | WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); | 3128 | WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); |
3131 | break; | 3129 | break; |
3132 | case MC_CG_ARB_FREQ_F1: | 3130 | case MC_CG_ARB_FREQ_F1: |
3133 | WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); | 3131 | WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); |
3134 | WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); | 3132 | WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); |
3135 | WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); | 3133 | WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); |
3136 | break; | 3134 | break; |
3137 | case MC_CG_ARB_FREQ_F2: | 3135 | case MC_CG_ARB_FREQ_F2: |
3138 | WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); | 3136 | WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); |
3139 | WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); | 3137 | WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); |
3140 | WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); | 3138 | WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); |
3141 | break; | 3139 | break; |
3142 | case MC_CG_ARB_FREQ_F3: | 3140 | case MC_CG_ARB_FREQ_F3: |
3143 | WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); | 3141 | WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); |
3144 | WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); | 3142 | WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); |
3145 | WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); | 3143 | WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); |
@@ -3158,9 +3156,9 @@ static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, | |||
3158 | static void ni_update_current_ps(struct amdgpu_device *adev, | 3156 | static void ni_update_current_ps(struct amdgpu_device *adev, |
3159 | struct amdgpu_ps *rps) | 3157 | struct amdgpu_ps *rps) |
3160 | { | 3158 | { |
3161 | struct si_ps *new_ps = si_get_ps(rps); | 3159 | struct si_ps *new_ps = si_get_ps(rps); |
3162 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); | 3160 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); |
3163 | struct ni_power_info *ni_pi = ni_get_pi(adev); | 3161 | struct ni_power_info *ni_pi = ni_get_pi(adev); |
3164 | 3162 | ||
3165 | eg_pi->current_rps = *rps; | 3163 | eg_pi->current_rps = *rps; |
3166 | ni_pi->current_ps = *new_ps; | 3164 | ni_pi->current_ps = *new_ps; |
@@ -3170,9 +3168,9 @@ static void ni_update_current_ps(struct amdgpu_device *adev, | |||
3170 | static void ni_update_requested_ps(struct amdgpu_device *adev, | 3168 | static void ni_update_requested_ps(struct amdgpu_device *adev, |
3171 | struct amdgpu_ps *rps) | 3169 | struct amdgpu_ps *rps) |
3172 | { | 3170 | { |
3173 | struct si_ps *new_ps = si_get_ps(rps); | 3171 | struct si_ps *new_ps = si_get_ps(rps); |
3174 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); | 3172 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); |
3175 | struct ni_power_info *ni_pi = ni_get_pi(adev); | 3173 | struct ni_power_info *ni_pi = ni_get_pi(adev); |
3176 | 3174 | ||
3177 | eg_pi->requested_rps = *rps; | 3175 | eg_pi->requested_rps = *rps; |
3178 | ni_pi->requested_ps = *new_ps; | 3176 | ni_pi->requested_ps = *new_ps; |
@@ -3183,8 +3181,8 @@ static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, | |||
3183 | struct amdgpu_ps *new_ps, | 3181 | struct amdgpu_ps *new_ps, |
3184 | struct amdgpu_ps *old_ps) | 3182 | struct amdgpu_ps *old_ps) |
3185 | { | 3183 | { |
3186 | struct si_ps *new_state = si_get_ps(new_ps); | 3184 | struct si_ps *new_state = si_get_ps(new_ps); |
3187 | struct si_ps *current_state = si_get_ps(old_ps); | 3185 | struct si_ps *current_state = si_get_ps(old_ps); |
3188 | 3186 | ||
3189 | if ((new_ps->vclk == old_ps->vclk) && | 3187 | if ((new_ps->vclk == old_ps->vclk) && |
3190 | (new_ps->dclk == old_ps->dclk)) | 3188 | (new_ps->dclk == old_ps->dclk)) |
@@ -3201,8 +3199,8 @@ static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev, | |||
3201 | struct amdgpu_ps *new_ps, | 3199 | struct amdgpu_ps *new_ps, |
3202 | struct amdgpu_ps *old_ps) | 3200 | struct amdgpu_ps *old_ps) |
3203 | { | 3201 | { |
3204 | struct si_ps *new_state = si_get_ps(new_ps); | 3202 | struct si_ps *new_state = si_get_ps(new_ps); |
3205 | struct si_ps *current_state = si_get_ps(old_ps); | 3203 | struct si_ps *current_state = si_get_ps(old_ps); |
3206 | 3204 | ||
3207 | if ((new_ps->vclk == old_ps->vclk) && | 3205 | if ((new_ps->vclk == old_ps->vclk) && |
3208 | (new_ps->dclk == old_ps->dclk)) | 3206 | (new_ps->dclk == old_ps->dclk)) |
@@ -3217,134 +3215,133 @@ static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev, | |||
3217 | 3215 | ||
3218 | static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) | 3216 | static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) |
3219 | { | 3217 | { |
3220 | unsigned int i; | 3218 | unsigned int i; |
3221 | 3219 | ||
3222 | for (i = 0; i < table->count; i++) { | 3220 | for (i = 0; i < table->count; i++) |
3223 | if (voltage <= table->entries[i].value) | 3221 | if (voltage <= table->entries[i].value) |
3224 | return table->entries[i].value; | 3222 | return table->entries[i].value; |
3225 | } | ||
3226 | 3223 | ||
3227 | return table->entries[table->count - 1].value; | 3224 | return table->entries[table->count - 1].value; |
3228 | } | 3225 | } |
3229 | 3226 | ||
3230 | static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks, | 3227 | static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks, |
3231 | u32 max_clock, u32 requested_clock) | 3228 | u32 max_clock, u32 requested_clock) |
3232 | { | 3229 | { |
3233 | unsigned int i; | 3230 | unsigned int i; |
3234 | 3231 | ||
3235 | if ((clocks == NULL) || (clocks->count == 0)) | 3232 | if ((clocks == NULL) || (clocks->count == 0)) |
3236 | return (requested_clock < max_clock) ? requested_clock : max_clock; | 3233 | return (requested_clock < max_clock) ? requested_clock : max_clock; |
3237 | 3234 | ||
3238 | for (i = 0; i < clocks->count; i++) { | 3235 | for (i = 0; i < clocks->count; i++) { |
3239 | if (clocks->values[i] >= requested_clock) | 3236 | if (clocks->values[i] >= requested_clock) |
3240 | return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock; | 3237 | return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock; |
3241 | } | 3238 | } |
3242 | 3239 | ||
3243 | return (clocks->values[clocks->count - 1] < max_clock) ? | 3240 | return (clocks->values[clocks->count - 1] < max_clock) ? |
3244 | clocks->values[clocks->count - 1] : max_clock; | 3241 | clocks->values[clocks->count - 1] : max_clock; |
3245 | } | 3242 | } |
3246 | 3243 | ||
3247 | static u32 btc_get_valid_mclk(struct amdgpu_device *adev, | 3244 | static u32 btc_get_valid_mclk(struct amdgpu_device *adev, |
3248 | u32 max_mclk, u32 requested_mclk) | 3245 | u32 max_mclk, u32 requested_mclk) |
3249 | { | 3246 | { |
3250 | return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values, | 3247 | return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values, |
3251 | max_mclk, requested_mclk); | 3248 | max_mclk, requested_mclk); |
3252 | } | 3249 | } |
3253 | 3250 | ||
3254 | static u32 btc_get_valid_sclk(struct amdgpu_device *adev, | 3251 | static u32 btc_get_valid_sclk(struct amdgpu_device *adev, |
3255 | u32 max_sclk, u32 requested_sclk) | 3252 | u32 max_sclk, u32 requested_sclk) |
3256 | { | 3253 | { |
3257 | return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values, | 3254 | return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values, |
3258 | max_sclk, requested_sclk); | 3255 | max_sclk, requested_sclk); |
3259 | } | 3256 | } |
3260 | 3257 | ||
3261 | void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table, | 3258 | void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table, |
3262 | u32 *max_clock) | 3259 | u32 *max_clock) |
3263 | { | 3260 | { |
3264 | u32 i, clock = 0; | 3261 | u32 i, clock = 0; |
3265 | 3262 | ||
3266 | if ((table == NULL) || (table->count == 0)) { | 3263 | if ((table == NULL) || (table->count == 0)) { |
3267 | *max_clock = clock; | 3264 | *max_clock = clock; |
3268 | return; | 3265 | return; |
3269 | } | 3266 | } |
3270 | 3267 | ||
3271 | for (i = 0; i < table->count; i++) { | 3268 | for (i = 0; i < table->count; i++) { |
3272 | if (clock < table->entries[i].clk) | 3269 | if (clock < table->entries[i].clk) |
3273 | clock = table->entries[i].clk; | 3270 | clock = table->entries[i].clk; |
3274 | } | 3271 | } |
3275 | *max_clock = clock; | 3272 | *max_clock = clock; |
3276 | } | 3273 | } |
3277 | 3274 | ||
3278 | static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table, | 3275 | static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table, |
3279 | u32 clock, u16 max_voltage, u16 *voltage) | 3276 | u32 clock, u16 max_voltage, u16 *voltage) |
3280 | { | 3277 | { |
3281 | u32 i; | 3278 | u32 i; |
3282 | 3279 | ||
3283 | if ((table == NULL) || (table->count == 0)) | 3280 | if ((table == NULL) || (table->count == 0)) |
3284 | return; | 3281 | return; |
3285 | 3282 | ||
3286 | for (i= 0; i < table->count; i++) { | 3283 | for (i= 0; i < table->count; i++) { |
3287 | if (clock <= table->entries[i].clk) { | 3284 | if (clock <= table->entries[i].clk) { |
3288 | if (*voltage < table->entries[i].v) | 3285 | if (*voltage < table->entries[i].v) |
3289 | *voltage = (u16)((table->entries[i].v < max_voltage) ? | 3286 | *voltage = (u16)((table->entries[i].v < max_voltage) ? |
3290 | table->entries[i].v : max_voltage); | 3287 | table->entries[i].v : max_voltage); |
3291 | return; | 3288 | return; |
3292 | } | 3289 | } |
3293 | } | 3290 | } |
3294 | 3291 | ||
3295 | *voltage = (*voltage > max_voltage) ? *voltage : max_voltage; | 3292 | *voltage = (*voltage > max_voltage) ? *voltage : max_voltage; |
3296 | } | 3293 | } |
3297 | 3294 | ||
3298 | static void btc_adjust_clock_combinations(struct amdgpu_device *adev, | 3295 | static void btc_adjust_clock_combinations(struct amdgpu_device *adev, |
3299 | const struct amdgpu_clock_and_voltage_limits *max_limits, | 3296 | const struct amdgpu_clock_and_voltage_limits *max_limits, |
3300 | struct rv7xx_pl *pl) | 3297 | struct rv7xx_pl *pl) |
3301 | { | 3298 | { |
3302 | 3299 | ||
3303 | if ((pl->mclk == 0) || (pl->sclk == 0)) | 3300 | if ((pl->mclk == 0) || (pl->sclk == 0)) |
3304 | return; | 3301 | return; |
3305 | 3302 | ||
3306 | if (pl->mclk == pl->sclk) | 3303 | if (pl->mclk == pl->sclk) |
3307 | return; | 3304 | return; |
3308 | 3305 | ||
3309 | if (pl->mclk > pl->sclk) { | 3306 | if (pl->mclk > pl->sclk) { |
3310 | if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio) | 3307 | if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio) |
3311 | pl->sclk = btc_get_valid_sclk(adev, | 3308 | pl->sclk = btc_get_valid_sclk(adev, |
3312 | max_limits->sclk, | 3309 | max_limits->sclk, |
3313 | (pl->mclk + | 3310 | (pl->mclk + |
3314 | (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) / | 3311 | (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) / |
3315 | adev->pm.dpm.dyn_state.mclk_sclk_ratio); | 3312 | adev->pm.dpm.dyn_state.mclk_sclk_ratio); |
3316 | } else { | 3313 | } else { |
3317 | if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta) | 3314 | if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta) |
3318 | pl->mclk = btc_get_valid_mclk(adev, | 3315 | pl->mclk = btc_get_valid_mclk(adev, |
3319 | max_limits->mclk, | 3316 | max_limits->mclk, |
3320 | pl->sclk - | 3317 | pl->sclk - |
3321 | adev->pm.dpm.dyn_state.sclk_mclk_delta); | 3318 | adev->pm.dpm.dyn_state.sclk_mclk_delta); |
3322 | } | 3319 | } |
3323 | } | 3320 | } |
3324 | 3321 | ||
3325 | static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, | 3322 | static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, |
3326 | u16 max_vddc, u16 max_vddci, | 3323 | u16 max_vddc, u16 max_vddci, |
3327 | u16 *vddc, u16 *vddci) | 3324 | u16 *vddc, u16 *vddci) |
3328 | { | 3325 | { |
3329 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); | 3326 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); |
3330 | u16 new_voltage; | 3327 | u16 new_voltage; |
3331 | 3328 | ||
3332 | if ((0 == *vddc) || (0 == *vddci)) | 3329 | if ((0 == *vddc) || (0 == *vddci)) |
3333 | return; | 3330 | return; |
3334 | 3331 | ||
3335 | if (*vddc > *vddci) { | 3332 | if (*vddc > *vddci) { |
3336 | if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { | 3333 | if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { |
3337 | new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table, | 3334 | new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table, |
3338 | (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta)); | 3335 | (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta)); |
3339 | *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci; | 3336 | *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci; |
3340 | } | 3337 | } |
3341 | } else { | 3338 | } else { |
3342 | if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { | 3339 | if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { |
3343 | new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table, | 3340 | new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table, |
3344 | (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta)); | 3341 | (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta)); |
3345 | *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc; | 3342 | *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc; |
3346 | } | 3343 | } |
3347 | } | 3344 | } |
3348 | } | 3345 | } |
3349 | 3346 | ||
3350 | static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, | 3347 | static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, |
@@ -3626,9 +3623,9 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
3626 | } | 3623 | } |
3627 | } | 3624 | } |
3628 | 3625 | ||
3629 | for (i = 0; i < ps->performance_level_count; i++) | 3626 | for (i = 0; i < ps->performance_level_count; i++) |
3630 | btc_adjust_clock_combinations(adev, max_limits, | 3627 | btc_adjust_clock_combinations(adev, max_limits, |
3631 | &ps->performance_levels[i]); | 3628 | &ps->performance_levels[i]); |
3632 | 3629 | ||
3633 | for (i = 0; i < ps->performance_level_count; i++) { | 3630 | for (i = 0; i < ps->performance_level_count; i++) { |
3634 | if (ps->performance_levels[i].vddc < min_vce_voltage) | 3631 | if (ps->performance_levels[i].vddc < min_vce_voltage) |
@@ -3767,7 +3764,7 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) | |||
3767 | case 0: | 3764 | case 0: |
3768 | default: | 3765 | default: |
3769 | want_thermal_protection = false; | 3766 | want_thermal_protection = false; |
3770 | break; | 3767 | break; |
3771 | case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): | 3768 | case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): |
3772 | want_thermal_protection = true; | 3769 | want_thermal_protection = true; |
3773 | dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; | 3770 | dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; |
@@ -3969,7 +3966,7 @@ static int si_process_firmware_header(struct amdgpu_device *adev) | |||
3969 | if (ret) | 3966 | if (ret) |
3970 | return ret; | 3967 | return ret; |
3971 | 3968 | ||
3972 | si_pi->state_table_start = tmp; | 3969 | si_pi->state_table_start = tmp; |
3973 | 3970 | ||
3974 | ret = si_read_smc_sram_dword(adev, | 3971 | ret = si_read_smc_sram_dword(adev, |
3975 | SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + | 3972 | SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + |
@@ -4128,7 +4125,7 @@ static void si_program_response_times(struct amdgpu_device *adev) | |||
4128 | si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); | 4125 | si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); |
4129 | 4126 | ||
4130 | voltage_response_time = (u32)adev->pm.dpm.voltage_response_time; | 4127 | voltage_response_time = (u32)adev->pm.dpm.voltage_response_time; |
4131 | backbias_response_time = (u32)adev->pm.dpm.backbias_response_time; | 4128 | backbias_response_time = (u32)adev->pm.dpm.backbias_response_time; |
4132 | 4129 | ||
4133 | if (voltage_response_time == 0) | 4130 | if (voltage_response_time == 0) |
4134 | voltage_response_time = 1000; | 4131 | voltage_response_time = 1000; |
@@ -4367,14 +4364,11 @@ static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk) | |||
4367 | static int si_upload_firmware(struct amdgpu_device *adev) | 4364 | static int si_upload_firmware(struct amdgpu_device *adev) |
4368 | { | 4365 | { |
4369 | struct si_power_info *si_pi = si_get_pi(adev); | 4366 | struct si_power_info *si_pi = si_get_pi(adev); |
4370 | int ret; | ||
4371 | 4367 | ||
4372 | si_reset_smc(adev); | 4368 | si_reset_smc(adev); |
4373 | si_stop_smc_clock(adev); | 4369 | si_stop_smc_clock(adev); |
4374 | 4370 | ||
4375 | ret = si_load_smc_ucode(adev, si_pi->sram_end); | 4371 | return si_load_smc_ucode(adev, si_pi->sram_end); |
4376 | |||
4377 | return ret; | ||
4378 | } | 4372 | } |
4379 | 4373 | ||
4380 | static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev, | 4374 | static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev, |
@@ -4790,7 +4784,7 @@ static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, | |||
4790 | 4784 | ||
4791 | amdgpu_atombios_set_engine_dram_timings(adev, | 4785 | amdgpu_atombios_set_engine_dram_timings(adev, |
4792 | pl->sclk, | 4786 | pl->sclk, |
4793 | pl->mclk); | 4787 | pl->mclk); |
4794 | 4788 | ||
4795 | dram_timing = RREG32(MC_ARB_DRAM_TIMING); | 4789 | dram_timing = RREG32(MC_ARB_DRAM_TIMING); |
4796 | dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); | 4790 | dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); |
@@ -4825,7 +4819,7 @@ static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev, | |||
4825 | si_pi->sram_end); | 4819 | si_pi->sram_end); |
4826 | if (ret) | 4820 | if (ret) |
4827 | break; | 4821 | break; |
4828 | } | 4822 | } |
4829 | 4823 | ||
4830 | return ret; | 4824 | return ret; |
4831 | } | 4825 | } |
@@ -4938,9 +4932,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev, | |||
4938 | 4932 | ||
4939 | reg = CG_R(0xffff) | CG_L(0); | 4933 | reg = CG_R(0xffff) | CG_L(0); |
4940 | table->initialState.levels[0].aT = cpu_to_be32(reg); | 4934 | table->initialState.levels[0].aT = cpu_to_be32(reg); |
4941 | |||
4942 | table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); | 4935 | table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); |
4943 | |||
4944 | table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; | 4936 | table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; |
4945 | 4937 | ||
4946 | if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { | 4938 | if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { |
@@ -5302,9 +5294,9 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev, | |||
5302 | spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; | 5294 | spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; |
5303 | spll_func_cntl_2 |= SCLK_MUX_SEL(2); | 5295 | spll_func_cntl_2 |= SCLK_MUX_SEL(2); |
5304 | 5296 | ||
5305 | spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; | 5297 | spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; |
5306 | spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); | 5298 | spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); |
5307 | spll_func_cntl_3 |= SPLL_DITHEN; | 5299 | spll_func_cntl_3 |= SPLL_DITHEN; |
5308 | 5300 | ||
5309 | if (pi->sclk_ss) { | 5301 | if (pi->sclk_ss) { |
5310 | struct amdgpu_atom_ss ss; | 5302 | struct amdgpu_atom_ss ss; |
@@ -5411,15 +5403,15 @@ static int si_populate_mclk_value(struct amdgpu_device *adev, | |||
5411 | tmp = freq_nom / reference_clock; | 5403 | tmp = freq_nom / reference_clock; |
5412 | tmp = tmp * tmp; | 5404 | tmp = tmp * tmp; |
5413 | if (amdgpu_atombios_get_asic_ss_info(adev, &ss, | 5405 | if (amdgpu_atombios_get_asic_ss_info(adev, &ss, |
5414 | ASIC_INTERNAL_MEMORY_SS, freq_nom)) { | 5406 | ASIC_INTERNAL_MEMORY_SS, freq_nom)) { |
5415 | u32 clks = reference_clock * 5 / ss.rate; | 5407 | u32 clks = reference_clock * 5 / ss.rate; |
5416 | u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); | 5408 | u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); |
5417 | 5409 | ||
5418 | mpll_ss1 &= ~CLKV_MASK; | 5410 | mpll_ss1 &= ~CLKV_MASK; |
5419 | mpll_ss1 |= CLKV(clkv); | 5411 | mpll_ss1 |= CLKV(clkv); |
5420 | 5412 | ||
5421 | mpll_ss2 &= ~CLKS_MASK; | 5413 | mpll_ss2 &= ~CLKS_MASK; |
5422 | mpll_ss2 |= CLKS(clks); | 5414 | mpll_ss2 |= CLKS(clks); |
5423 | } | 5415 | } |
5424 | } | 5416 | } |
5425 | 5417 | ||
@@ -5746,7 +5738,7 @@ static int si_convert_power_state_to_smc(struct amdgpu_device *adev, | |||
5746 | ni_pi->enable_power_containment = false; | 5738 | ni_pi->enable_power_containment = false; |
5747 | 5739 | ||
5748 | ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state); | 5740 | ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state); |
5749 | if (ret) | 5741 | if (ret) |
5750 | ni_pi->enable_sq_ramping = false; | 5742 | ni_pi->enable_sq_ramping = false; |
5751 | 5743 | ||
5752 | return si_populate_smc_t(adev, amdgpu_state, smc_state); | 5744 | return si_populate_smc_t(adev, amdgpu_state, smc_state); |
@@ -5771,10 +5763,8 @@ static int si_upload_sw_state(struct amdgpu_device *adev, | |||
5771 | if (ret) | 5763 | if (ret) |
5772 | return ret; | 5764 | return ret; |
5773 | 5765 | ||
5774 | ret = si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, | 5766 | return si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, |
5775 | state_size, si_pi->sram_end); | 5767 | state_size, si_pi->sram_end); |
5776 | |||
5777 | return ret; | ||
5778 | } | 5768 | } |
5779 | 5769 | ||
5780 | static int si_upload_ulv_state(struct amdgpu_device *adev) | 5770 | static int si_upload_ulv_state(struct amdgpu_device *adev) |
@@ -5915,46 +5905,46 @@ static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) | |||
5915 | case MC_SEQ_RAS_TIMING: | 5905 | case MC_SEQ_RAS_TIMING: |
5916 | *out_reg = MC_SEQ_RAS_TIMING_LP; | 5906 | *out_reg = MC_SEQ_RAS_TIMING_LP; |
5917 | break; | 5907 | break; |
5918 | case MC_SEQ_CAS_TIMING: | 5908 | case MC_SEQ_CAS_TIMING: |
5919 | *out_reg = MC_SEQ_CAS_TIMING_LP; | 5909 | *out_reg = MC_SEQ_CAS_TIMING_LP; |
5920 | break; | 5910 | break; |
5921 | case MC_SEQ_MISC_TIMING: | 5911 | case MC_SEQ_MISC_TIMING: |
5922 | *out_reg = MC_SEQ_MISC_TIMING_LP; | 5912 | *out_reg = MC_SEQ_MISC_TIMING_LP; |
5923 | break; | 5913 | break; |
5924 | case MC_SEQ_MISC_TIMING2: | 5914 | case MC_SEQ_MISC_TIMING2: |
5925 | *out_reg = MC_SEQ_MISC_TIMING2_LP; | 5915 | *out_reg = MC_SEQ_MISC_TIMING2_LP; |
5926 | break; | 5916 | break; |
5927 | case MC_SEQ_RD_CTL_D0: | 5917 | case MC_SEQ_RD_CTL_D0: |
5928 | *out_reg = MC_SEQ_RD_CTL_D0_LP; | 5918 | *out_reg = MC_SEQ_RD_CTL_D0_LP; |
5929 | break; | 5919 | break; |
5930 | case MC_SEQ_RD_CTL_D1: | 5920 | case MC_SEQ_RD_CTL_D1: |
5931 | *out_reg = MC_SEQ_RD_CTL_D1_LP; | 5921 | *out_reg = MC_SEQ_RD_CTL_D1_LP; |
5932 | break; | 5922 | break; |
5933 | case MC_SEQ_WR_CTL_D0: | 5923 | case MC_SEQ_WR_CTL_D0: |
5934 | *out_reg = MC_SEQ_WR_CTL_D0_LP; | 5924 | *out_reg = MC_SEQ_WR_CTL_D0_LP; |
5935 | break; | 5925 | break; |
5936 | case MC_SEQ_WR_CTL_D1: | 5926 | case MC_SEQ_WR_CTL_D1: |
5937 | *out_reg = MC_SEQ_WR_CTL_D1_LP; | 5927 | *out_reg = MC_SEQ_WR_CTL_D1_LP; |
5938 | break; | 5928 | break; |
5939 | case MC_PMG_CMD_EMRS: | 5929 | case MC_PMG_CMD_EMRS: |
5940 | *out_reg = MC_SEQ_PMG_CMD_EMRS_LP; | 5930 | *out_reg = MC_SEQ_PMG_CMD_EMRS_LP; |
5941 | break; | 5931 | break; |
5942 | case MC_PMG_CMD_MRS: | 5932 | case MC_PMG_CMD_MRS: |
5943 | *out_reg = MC_SEQ_PMG_CMD_MRS_LP; | 5933 | *out_reg = MC_SEQ_PMG_CMD_MRS_LP; |
5944 | break; | 5934 | break; |
5945 | case MC_PMG_CMD_MRS1: | 5935 | case MC_PMG_CMD_MRS1: |
5946 | *out_reg = MC_SEQ_PMG_CMD_MRS1_LP; | 5936 | *out_reg = MC_SEQ_PMG_CMD_MRS1_LP; |
5947 | break; | 5937 | break; |
5948 | case MC_SEQ_PMG_TIMING: | 5938 | case MC_SEQ_PMG_TIMING: |
5949 | *out_reg = MC_SEQ_PMG_TIMING_LP; | 5939 | *out_reg = MC_SEQ_PMG_TIMING_LP; |
5950 | break; | 5940 | break; |
5951 | case MC_PMG_CMD_MRS2: | 5941 | case MC_PMG_CMD_MRS2: |
5952 | *out_reg = MC_SEQ_PMG_CMD_MRS2_LP; | 5942 | *out_reg = MC_SEQ_PMG_CMD_MRS2_LP; |
5953 | break; | 5943 | break; |
5954 | case MC_SEQ_WR_CTL_2: | 5944 | case MC_SEQ_WR_CTL_2: |
5955 | *out_reg = MC_SEQ_WR_CTL_2_LP; | 5945 | *out_reg = MC_SEQ_WR_CTL_2_LP; |
5956 | break; | 5946 | break; |
5957 | default: | 5947 | default: |
5958 | result = false; | 5948 | result = false; |
5959 | break; | 5949 | break; |
5960 | } | 5950 | } |
@@ -6041,19 +6031,19 @@ static int si_initialize_mc_reg_table(struct amdgpu_device *adev) | |||
6041 | WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); | 6031 | WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); |
6042 | WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); | 6032 | WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); |
6043 | 6033 | ||
6044 | ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); | 6034 | ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); |
6045 | if (ret) | 6035 | if (ret) |
6046 | goto init_mc_done; | 6036 | goto init_mc_done; |
6047 | 6037 | ||
6048 | ret = si_copy_vbios_mc_reg_table(table, si_table); | 6038 | ret = si_copy_vbios_mc_reg_table(table, si_table); |
6049 | if (ret) | 6039 | if (ret) |
6050 | goto init_mc_done; | 6040 | goto init_mc_done; |
6051 | 6041 | ||
6052 | si_set_s0_mc_reg_index(si_table); | 6042 | si_set_s0_mc_reg_index(si_table); |
6053 | 6043 | ||
6054 | ret = si_set_mc_special_registers(adev, si_table); | 6044 | ret = si_set_mc_special_registers(adev, si_table); |
6055 | if (ret) | 6045 | if (ret) |
6056 | goto init_mc_done; | 6046 | goto init_mc_done; |
6057 | 6047 | ||
6058 | si_set_valid_flag(si_table); | 6048 | si_set_valid_flag(si_table); |
6059 | 6049 | ||
@@ -6122,7 +6112,7 @@ static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, | |||
6122 | struct amdgpu_ps *amdgpu_state, | 6112 | struct amdgpu_ps *amdgpu_state, |
6123 | SMC_SIslands_MCRegisters *mc_reg_table) | 6113 | SMC_SIslands_MCRegisters *mc_reg_table) |
6124 | { | 6114 | { |
6125 | struct si_ps *state = si_get_ps(amdgpu_state); | 6115 | struct si_ps *state = si_get_ps(amdgpu_state); |
6126 | int i; | 6116 | int i; |
6127 | 6117 | ||
6128 | for (i = 0; i < state->performance_level_count; i++) { | 6118 | for (i = 0; i < state->performance_level_count; i++) { |
@@ -6173,7 +6163,7 @@ static int si_populate_mc_reg_table(struct amdgpu_device *adev, | |||
6173 | static int si_upload_mc_reg_table(struct amdgpu_device *adev, | 6163 | static int si_upload_mc_reg_table(struct amdgpu_device *adev, |
6174 | struct amdgpu_ps *amdgpu_new_state) | 6164 | struct amdgpu_ps *amdgpu_new_state) |
6175 | { | 6165 | { |
6176 | struct si_ps *new_state = si_get_ps(amdgpu_new_state); | 6166 | struct si_ps *new_state = si_get_ps(amdgpu_new_state); |
6177 | struct si_power_info *si_pi = si_get_pi(adev); | 6167 | struct si_power_info *si_pi = si_get_pi(adev); |
6178 | u32 address = si_pi->mc_reg_table_start + | 6168 | u32 address = si_pi->mc_reg_table_start + |
6179 | offsetof(SMC_SIslands_MCRegisters, | 6169 | offsetof(SMC_SIslands_MCRegisters, |
@@ -6184,26 +6174,24 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev, | |||
6184 | 6174 | ||
6185 | si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table); | 6175 | si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table); |
6186 | 6176 | ||
6187 | |||
6188 | return si_copy_bytes_to_smc(adev, address, | 6177 | return si_copy_bytes_to_smc(adev, address, |
6189 | (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT], | 6178 | (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT], |
6190 | sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count, | 6179 | sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count, |
6191 | si_pi->sram_end); | 6180 | si_pi->sram_end); |
6192 | |||
6193 | } | 6181 | } |
6194 | 6182 | ||
6195 | static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) | 6183 | static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) |
6196 | { | 6184 | { |
6197 | if (enable) | 6185 | if (enable) |
6198 | WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); | 6186 | WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); |
6199 | else | 6187 | else |
6200 | WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); | 6188 | WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); |
6201 | } | 6189 | } |
6202 | 6190 | ||
6203 | static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, | 6191 | static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, |
6204 | struct amdgpu_ps *amdgpu_state) | 6192 | struct amdgpu_ps *amdgpu_state) |
6205 | { | 6193 | { |
6206 | struct si_ps *state = si_get_ps(amdgpu_state); | 6194 | struct si_ps *state = si_get_ps(amdgpu_state); |
6207 | int i; | 6195 | int i; |
6208 | u16 pcie_speed, max_speed = 0; | 6196 | u16 pcie_speed, max_speed = 0; |
6209 | 6197 | ||
@@ -6525,25 +6513,17 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev) | |||
6525 | fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); | 6513 | fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); |
6526 | fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); | 6514 | fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); |
6527 | fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); | 6515 | fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); |
6528 | |||
6529 | fan_table.slope1 = cpu_to_be16(slope1); | 6516 | fan_table.slope1 = cpu_to_be16(slope1); |
6530 | fan_table.slope2 = cpu_to_be16(slope2); | 6517 | fan_table.slope2 = cpu_to_be16(slope2); |
6531 | |||
6532 | fan_table.fdo_min = cpu_to_be16(fdo_min); | 6518 | fan_table.fdo_min = cpu_to_be16(fdo_min); |
6533 | |||
6534 | fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst); | 6519 | fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst); |
6535 | |||
6536 | fan_table.hys_up = cpu_to_be16(1); | 6520 | fan_table.hys_up = cpu_to_be16(1); |
6537 | |||
6538 | fan_table.hys_slope = cpu_to_be16(1); | 6521 | fan_table.hys_slope = cpu_to_be16(1); |
6539 | |||
6540 | fan_table.temp_resp_lim = cpu_to_be16(5); | 6522 | fan_table.temp_resp_lim = cpu_to_be16(5); |
6541 | |||
6542 | reference_clock = amdgpu_asic_get_xclk(adev); | 6523 | reference_clock = amdgpu_asic_get_xclk(adev); |
6543 | 6524 | ||
6544 | fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * | 6525 | fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * |
6545 | reference_clock) / 1600); | 6526 | reference_clock) / 1600); |
6546 | |||
6547 | fan_table.fdo_max = cpu_to_be16((u16)duty100); | 6527 | fan_table.fdo_max = cpu_to_be16((u16)duty100); |
6548 | 6528 | ||
6549 | tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; | 6529 | tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; |
@@ -6916,9 +6896,7 @@ static int si_dpm_enable(struct amdgpu_device *adev) | |||
6916 | si_start_dpm(adev); | 6896 | si_start_dpm(adev); |
6917 | 6897 | ||
6918 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 6898 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
6919 | |||
6920 | si_thermal_start_thermal_controller(adev); | 6899 | si_thermal_start_thermal_controller(adev); |
6921 | |||
6922 | ni_update_current_ps(adev, boot_ps); | 6900 | ni_update_current_ps(adev, boot_ps); |
6923 | 6901 | ||
6924 | return 0; | 6902 | return 0; |
@@ -6972,7 +6950,6 @@ static int si_dpm_pre_set_power_state(struct amdgpu_device *adev) | |||
6972 | struct amdgpu_ps *new_ps = &requested_ps; | 6950 | struct amdgpu_ps *new_ps = &requested_ps; |
6973 | 6951 | ||
6974 | ni_update_requested_ps(adev, new_ps); | 6952 | ni_update_requested_ps(adev, new_ps); |
6975 | |||
6976 | si_apply_state_adjust_rules(adev, &eg_pi->requested_rps); | 6953 | si_apply_state_adjust_rules(adev, &eg_pi->requested_rps); |
6977 | 6954 | ||
6978 | return 0; | 6955 | return 0; |
@@ -7232,8 +7209,8 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev, | |||
7232 | } | 7209 | } |
7233 | 7210 | ||
7234 | union pplib_power_state { | 7211 | union pplib_power_state { |
7235 | struct _ATOM_PPLIB_STATE v1; | 7212 | struct _ATOM_PPLIB_STATE v1; |
7236 | struct _ATOM_PPLIB_STATE_V2 v2; | 7213 | struct _ATOM_PPLIB_STATE_V2 v2; |
7237 | }; | 7214 | }; |
7238 | 7215 | ||
7239 | static int si_parse_power_table(struct amdgpu_device *adev) | 7216 | static int si_parse_power_table(struct amdgpu_device *adev) |
@@ -7248,7 +7225,7 @@ static int si_parse_power_table(struct amdgpu_device *adev) | |||
7248 | struct _NonClockInfoArray *non_clock_info_array; | 7225 | struct _NonClockInfoArray *non_clock_info_array; |
7249 | union power_info *power_info; | 7226 | union power_info *power_info; |
7250 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | 7227 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
7251 | u16 data_offset; | 7228 | u16 data_offset; |
7252 | u8 frev, crev; | 7229 | u8 frev, crev; |
7253 | u8 *power_state_offset; | 7230 | u8 *power_state_offset; |
7254 | struct si_ps *ps; | 7231 | struct si_ps *ps; |
@@ -7896,46 +7873,46 @@ static int si_dpm_get_temp(struct amdgpu_device *adev) | |||
7896 | 7873 | ||
7897 | static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low) | 7874 | static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low) |
7898 | { | 7875 | { |
7899 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); | 7876 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); |
7900 | struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); | 7877 | struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); |
7901 | 7878 | ||
7902 | if (low) | 7879 | if (low) |
7903 | return requested_state->performance_levels[0].sclk; | 7880 | return requested_state->performance_levels[0].sclk; |
7904 | else | 7881 | else |
7905 | return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; | 7882 | return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; |
7906 | } | 7883 | } |
7907 | 7884 | ||
7908 | static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low) | 7885 | static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low) |
7909 | { | 7886 | { |
7910 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); | 7887 | struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); |
7911 | struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); | 7888 | struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); |
7912 | 7889 | ||
7913 | if (low) | 7890 | if (low) |
7914 | return requested_state->performance_levels[0].mclk; | 7891 | return requested_state->performance_levels[0].mclk; |
7915 | else | 7892 | else |
7916 | return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; | 7893 | return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; |
7917 | } | 7894 | } |
7918 | 7895 | ||
7919 | static void si_dpm_print_power_state(struct amdgpu_device *adev, | 7896 | static void si_dpm_print_power_state(struct amdgpu_device *adev, |
7920 | struct amdgpu_ps *rps) | 7897 | struct amdgpu_ps *rps) |
7921 | { | 7898 | { |
7922 | struct si_ps *ps = si_get_ps(rps); | 7899 | struct si_ps *ps = si_get_ps(rps); |
7923 | struct rv7xx_pl *pl; | 7900 | struct rv7xx_pl *pl; |
7924 | int i; | 7901 | int i; |
7925 | 7902 | ||
7926 | amdgpu_dpm_print_class_info(rps->class, rps->class2); | 7903 | amdgpu_dpm_print_class_info(rps->class, rps->class2); |
7927 | amdgpu_dpm_print_cap_info(rps->caps); | 7904 | amdgpu_dpm_print_cap_info(rps->caps); |
7928 | DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | 7905 | DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); |
7929 | for (i = 0; i < ps->performance_level_count; i++) { | 7906 | for (i = 0; i < ps->performance_level_count; i++) { |
7930 | pl = &ps->performance_levels[i]; | 7907 | pl = &ps->performance_levels[i]; |
7931 | if (adev->asic_type >= CHIP_TAHITI) | 7908 | if (adev->asic_type >= CHIP_TAHITI) |
7932 | DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", | 7909 | DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", |
7933 | i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); | 7910 | i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); |
7934 | else | 7911 | else |
7935 | DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", | 7912 | DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", |
7936 | i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); | 7913 | i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); |
7937 | } | 7914 | } |
7938 | amdgpu_dpm_print_ps_status(adev, rps); | 7915 | amdgpu_dpm_print_ps_status(adev, rps); |
7939 | } | 7916 | } |
7940 | 7917 | ||
7941 | static int si_dpm_early_init(void *handle) | 7918 | static int si_dpm_early_init(void *handle) |