aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorRex Zhu <Rex.Zhu@amd.com>2016-08-19 09:00:27 -0400
committerAlex Deucher <alexander.deucher@amd.com>2016-09-19 13:22:17 -0400
commite805ed83ba1ca0961d19496c944faed27aef82a3 (patch)
tree4c12571a1618c79ee95cce4c3a0e22e29168fb18 /drivers/gpu
parentab4f06d3adcc5165b13ed2e657050fd1808f319b (diff)
drm/amd/powerplay: delete useless files.
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com> Reviewed-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c121
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h35
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h105
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c5601
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h350
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c610
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h81
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c687
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h38
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h41
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c5666
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h424
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c490
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h60
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c595
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h58
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c444
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h40
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c5290
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h354
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c988
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h81
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c716
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c350
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h107
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c6371
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h402
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c495
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h80
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c590
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h61
36 files changed, 0 insertions, 31672 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
deleted file mode 100644
index 5afe82068b29..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "fiji_clockpowergating.h"
26#include "fiji_ppsmc.h"
27#include "fiji_hwmgr.h"
28
29int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
30{
31 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
32
33 data->uvd_power_gated = false;
34 data->vce_power_gated = false;
35 data->samu_power_gated = false;
36 data->acp_power_gated = false;
37
38 return 0;
39}
40
41int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
42{
43 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
44
45 if (data->uvd_power_gated == bgate)
46 return 0;
47
48 data->uvd_power_gated = bgate;
49
50 if (bgate) {
51 cgs_set_clockgating_state(hwmgr->device,
52 AMD_IP_BLOCK_TYPE_UVD,
53 AMD_CG_STATE_GATE);
54 fiji_update_uvd_dpm(hwmgr, true);
55 } else {
56 fiji_update_uvd_dpm(hwmgr, false);
57 cgs_set_clockgating_state(hwmgr->device,
58 AMD_IP_BLOCK_TYPE_UVD,
59 AMD_CG_STATE_UNGATE);
60 }
61
62 return 0;
63}
64
65int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
66{
67 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
68 struct phm_set_power_state_input states;
69 const struct pp_power_state *pcurrent;
70 struct pp_power_state *requested;
71
72 if (data->vce_power_gated == bgate)
73 return 0;
74
75 data->vce_power_gated = bgate;
76
77 pcurrent = hwmgr->current_ps;
78 requested = hwmgr->request_ps;
79
80 states.pcurrent_state = &(pcurrent->hardware);
81 states.pnew_state = &(requested->hardware);
82
83 fiji_update_vce_dpm(hwmgr, &states);
84 fiji_enable_disable_vce_dpm(hwmgr, !bgate);
85
86 return 0;
87}
88
89int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
90{
91 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
92
93 if (data->samu_power_gated == bgate)
94 return 0;
95
96 data->samu_power_gated = bgate;
97
98 if (bgate)
99 fiji_update_samu_dpm(hwmgr, true);
100 else
101 fiji_update_samu_dpm(hwmgr, false);
102
103 return 0;
104}
105
106int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
107{
108 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
109
110 if (data->acp_power_gated == bgate)
111 return 0;
112
113 data->acp_power_gated = bgate;
114
115 if (bgate)
116 fiji_update_acp_dpm(hwmgr, true);
117 else
118 fiji_update_acp_dpm(hwmgr, false);
119
120 return 0;
121}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
deleted file mode 100644
index 33af5f511ab8..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _FIJI_CLOCK_POWER_GATING_H_
25#define _FIJI_CLOCK_POWER_GATING_H_
26
27#include "fiji_hwmgr.h"
28#include "pp_asicblocks.h"
29
30extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
31extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
32extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
33extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
34extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
35#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
deleted file mode 100644
index 32d43e8fecb2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef FIJI_DYN_DEFAULTS_H
25#define FIJI_DYN_DEFAULTS_H
26
27/** \file
28* Volcanic Islands Dynamic default parameters.
29*/
30
31enum FIJIdpm_TrendDetection
32{
33 FIJIAdpm_TrendDetection_AUTO,
34 FIJIAdpm_TrendDetection_UP,
35 FIJIAdpm_TrendDetection_DOWN
36};
37typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
38
39/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */
40
41/* Bit vector representing same fields as hardware register. */
42#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ????
43 * HDP_busy
44 * IH_busy
45 * UVD_busy
46 * VCE_busy
47 * ACP_busy
48 * SAMU_busy
49 * SDMA enabled */
50#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ????
51 * SH_Gfx_busy
52 * RB_Gfx_busy
53 * VCE_busy */
54
55#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility.
56 * FE_Gfx_busy
57 * RB_Gfx_busy
58 * ACP_busy */
59
60#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility.
61 * FE_Gfx_busy
62 * SH_Gfx_busy
63 * UVD_busy */
64
65#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy
66 * VCE_busy
67 * ACP_busy
68 * SAMU_busy */
69
70#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */
71#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */
72#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */
73
74
75/* thermal protection counter (units). */
76#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
77
78/* static screen threshold unit */
79#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0
80
81/* static screen threshold */
82#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8
83
84/* gfx idle clock stop threshold */
85#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
86
87/* Fixed reference divider to use when building baby stepping tables. */
88#define PPFIJI_REFERENCEDIVIDER_DFLT 4
89
90/* ULV voltage change delay time
91 * Used to be delay_vreg in N.I. split for S.I.
92 * Using N.I. delay_vreg value as default
93 * ReferenceClock = 2700
94 * VoltageResponseTime = 1000
95 * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
96 */
97#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687
98
99#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035
100#define PPFIJI_CGULVCONTROL_DFLT 0x00007450
101#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/
102#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */
103
104#endif
105
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
deleted file mode 100644
index 0d4c99b9e3f9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ /dev/null
@@ -1,5601 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27
28#include "hwmgr.h"
29#include "fiji_smumgr.h"
30#include "atombios.h"
31#include "hardwaremanager.h"
32#include "ppatomctrl.h"
33#include "atombios.h"
34#include "cgs_common.h"
35#include "fiji_dyn_defaults.h"
36#include "fiji_powertune.h"
37#include "smu73.h"
38#include "smu/smu_7_1_3_d.h"
39#include "smu/smu_7_1_3_sh_mask.h"
40#include "gmc/gmc_8_1_d.h"
41#include "gmc/gmc_8_1_sh_mask.h"
42#include "bif/bif_5_0_d.h"
43#include "bif/bif_5_0_sh_mask.h"
44#include "dce/dce_10_0_d.h"
45#include "dce/dce_10_0_sh_mask.h"
46#include "pppcielanes.h"
47#include "fiji_hwmgr.h"
48#include "process_pptables_v1_0.h"
49#include "pptable_v1_0.h"
50#include "pp_debug.h"
51#include "pp_acpi.h"
52#include "amd_pcie_helpers.h"
53#include "cgs_linux.h"
54#include "ppinterrupt.h"
55
56#include "fiji_clockpowergating.h"
57#include "fiji_thermal.h"
58
59#define VOLTAGE_SCALE 4
60#define SMC_RAM_END 0x40000
61#define VDDC_VDDCI_DELTA 300
62
63#define MC_SEQ_MISC0_GDDR5_SHIFT 28
64#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
65#define MC_SEQ_MISC0_GDDR5_VALUE 5
66
67#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */
68#define MC_CG_ARB_FREQ_F1 0x0b
69#define MC_CG_ARB_FREQ_F2 0x0c
70#define MC_CG_ARB_FREQ_F3 0x0d
71
72/* From smc_reg.h */
73#define SMC_CG_IND_START 0xc0030000
74#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */
75
76#define VOLTAGE_SCALE 4
77#define VOLTAGE_VID_OFFSET_SCALE1 625
78#define VOLTAGE_VID_OFFSET_SCALE2 100
79
80#define VDDC_VDDCI_DELTA 300
81
82#define ixSWRST_COMMAND_1 0x1400103
83#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000
84
85/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
86enum DPM_EVENT_SRC {
87 DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
88 DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
89 DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
90 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
91 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
92};
93
94
95/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
96 * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
97 */
98static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
99{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
100
101/* [FF, SS] type, [] 4 voltage ranges, and
102 * [Floor Freq, Boundary Freq, VID min , VID max]
103 */
104static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
105{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
106 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
107
108/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
109 * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
110 */
111static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
112{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
113
114static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
115
116static struct fiji_power_state *cast_phw_fiji_power_state(
117 struct pp_hw_power_state *hw_ps)
118{
119 PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
120 "Invalid Powerstate Type!",
121 return NULL;);
122
123 return (struct fiji_power_state *)hw_ps;
124}
125
126static const struct
127fiji_power_state *cast_const_phw_fiji_power_state(
128 const struct pp_hw_power_state *hw_ps)
129{
130 PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
131 "Invalid Powerstate Type!",
132 return NULL;);
133
134 return (const struct fiji_power_state *)hw_ps;
135}
136
137static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
138{
139 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
140 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
141 ? true : false;
142}
143
144static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
145{
146 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
147 struct fiji_ulv_parm *ulv = &data->ulv;
148
149 ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
150 data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
151 data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
152 data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
153 data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
154 data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
155 data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
156 data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
157 data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;
158
159 data->static_screen_threshold_unit =
160 PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
161 data->static_screen_threshold =
162 PPFIJI_STATICSCREENTHRESHOLD_DFLT;
163
164 /* Unset ABM cap as it moved to DAL.
165 * Add PHM_PlatformCaps_NonABMSupportInPPLib
166 * for re-direct ABM related request to DAL
167 */
168 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
169 PHM_PlatformCaps_ABM);
170 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
171 PHM_PlatformCaps_NonABMSupportInPPLib);
172
173 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
174 PHM_PlatformCaps_DynamicACTiming);
175
176 fiji_initialize_power_tune_defaults(hwmgr);
177
178 data->mclk_stutter_mode_threshold = 60000;
179 data->pcie_gen_performance.max = PP_PCIEGen1;
180 data->pcie_gen_performance.min = PP_PCIEGen3;
181 data->pcie_gen_power_saving.max = PP_PCIEGen1;
182 data->pcie_gen_power_saving.min = PP_PCIEGen3;
183 data->pcie_lane_performance.max = 0;
184 data->pcie_lane_performance.min = 16;
185 data->pcie_lane_power_saving.max = 0;
186 data->pcie_lane_power_saving.min = 16;
187
188 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
189 PHM_PlatformCaps_DynamicUVDState);
190}
191
192static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
193 phm_ppt_v1_voltage_lookup_table *lookup_table,
194 uint16_t virtual_voltage_id, int32_t *sclk)
195{
196 uint8_t entryId;
197 uint8_t voltageId;
198 struct phm_ppt_v1_information *table_info =
199 (struct phm_ppt_v1_information *)(hwmgr->pptable);
200
201 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
202
203 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
204 for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
205 voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
206 if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
207 break;
208 }
209
210 PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
211 "Can't find requested voltage id in vdd_dep_on_sclk table!",
212 return -EINVAL;
213 );
214
215 *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
216
217 return 0;
218}
219
220/**
221* Get Leakage VDDC based on leakage ID.
222*
223* @param hwmgr the address of the powerplay hardware manager.
224* @return always 0
225*/
226static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
227{
228 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
229 uint16_t vv_id;
230 uint16_t vddc = 0;
231 uint16_t evv_default = 1150;
232 uint16_t i, j;
233 uint32_t sclk = 0;
234 struct phm_ppt_v1_information *table_info =
235 (struct phm_ppt_v1_information *)hwmgr->pptable;
236 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
237 table_info->vdd_dep_on_sclk;
238 int result;
239
240 for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
241 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
242 if (!fiji_get_sclk_for_voltage_evv(hwmgr,
243 table_info->vddc_lookup_table, vv_id, &sclk)) {
244 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
245 PHM_PlatformCaps_ClockStretcher)) {
246 for (j = 1; j < sclk_table->count; j++) {
247 if (sclk_table->entries[j].clk == sclk &&
248 sclk_table->entries[j].cks_enable == 0) {
249 sclk += 5000;
250 break;
251 }
252 }
253 }
254
255 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
256 PHM_PlatformCaps_EnableDriverEVV))
257 result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
258 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
259 else
260 result = -EINVAL;
261
262 if (result)
263 result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
264 VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc);
265
266 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
267 PP_ASSERT_WITH_CODE((vddc < 2000),
268 "Invalid VDDC value, greater than 2v!", result = -EINVAL;);
269
270 if (result)
271 /* 1.15V is the default safe value for Fiji */
272 vddc = evv_default;
273
274 /* the voltage should not be zero nor equal to leakage ID */
275 if (vddc != 0 && vddc != vv_id) {
276 data->vddc_leakage.actual_voltage
277 [data->vddc_leakage.count] = vddc;
278 data->vddc_leakage.leakage_id
279 [data->vddc_leakage.count] = vv_id;
280 data->vddc_leakage.count++;
281 }
282 }
283 }
284 return 0;
285}
286
287/**
288 * Change virtual leakage voltage to actual value.
289 *
290 * @param hwmgr the address of the powerplay hardware manager.
291 * @param pointer to changing voltage
292 * @param pointer to leakage table
293 */
294static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
295 uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
296{
297 uint32_t index;
298
299 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
300 for (index = 0; index < leakage_table->count; index++) {
301 /* if this voltage matches a leakage voltage ID */
302 /* patch with actual leakage voltage */
303 if (leakage_table->leakage_id[index] == *voltage) {
304 *voltage = leakage_table->actual_voltage[index];
305 break;
306 }
307 }
308
309 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
310 printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
311}
312
313/**
314* Patch voltage lookup table by EVV leakages.
315*
316* @param hwmgr the address of the powerplay hardware manager.
317* @param pointer to voltage lookup table
318* @param pointer to leakage table
319* @return always 0
320*/
321static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
322 phm_ppt_v1_voltage_lookup_table *lookup_table,
323 struct fiji_leakage_voltage *leakage_table)
324{
325 uint32_t i;
326
327 for (i = 0; i < lookup_table->count; i++)
328 fiji_patch_with_vdd_leakage(hwmgr,
329 &lookup_table->entries[i].us_vdd, leakage_table);
330
331 return 0;
332}
333
334static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
335 struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
336 uint16_t *vddc)
337{
338 struct phm_ppt_v1_information *table_info =
339 (struct phm_ppt_v1_information *)(hwmgr->pptable);
340 fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
341 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
342 table_info->max_clock_voltage_on_dc.vddc;
343 return 0;
344}
345
346static int fiji_patch_voltage_dependency_tables_with_lookup_table(
347 struct pp_hwmgr *hwmgr)
348{
349 uint8_t entryId;
350 uint8_t voltageId;
351 struct phm_ppt_v1_information *table_info =
352 (struct phm_ppt_v1_information *)(hwmgr->pptable);
353
354 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
355 table_info->vdd_dep_on_sclk;
356 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
357 table_info->vdd_dep_on_mclk;
358 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
359 table_info->mm_dep_table;
360
361 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
362 voltageId = sclk_table->entries[entryId].vddInd;
363 sclk_table->entries[entryId].vddc =
364 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
365 }
366
367 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
368 voltageId = mclk_table->entries[entryId].vddInd;
369 mclk_table->entries[entryId].vddc =
370 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
371 }
372
373 for (entryId = 0; entryId < mm_table->count; ++entryId) {
374 voltageId = mm_table->entries[entryId].vddcInd;
375 mm_table->entries[entryId].vddc =
376 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
377 }
378
379 return 0;
380
381}
382
383static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
384{
385 /* Need to determine if we need calculated voltage. */
386 return 0;
387}
388
389static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
390{
391 /* Need to determine if we need calculated voltage from mm table. */
392 return 0;
393}
394
395static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
396 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
397{
398 uint32_t table_size, i, j;
399 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
400 table_size = lookup_table->count;
401
402 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
403 "Lookup table is empty", return -EINVAL);
404
405 /* Sorting voltages */
406 for (i = 0; i < table_size - 1; i++) {
407 for (j = i + 1; j > 0; j--) {
408 if (lookup_table->entries[j].us_vdd <
409 lookup_table->entries[j - 1].us_vdd) {
410 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
411 lookup_table->entries[j - 1] = lookup_table->entries[j];
412 lookup_table->entries[j] = tmp_voltage_lookup_record;
413 }
414 }
415 }
416
417 return 0;
418}
419
420static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
421{
422 int result = 0;
423 int tmp_result;
424 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
425 struct phm_ppt_v1_information *table_info =
426 (struct phm_ppt_v1_information *)(hwmgr->pptable);
427
428 tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
429 table_info->vddc_lookup_table, &(data->vddc_leakage));
430 if (tmp_result)
431 result = tmp_result;
432
433 tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
434 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
435 if (tmp_result)
436 result = tmp_result;
437
438 tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
439 if (tmp_result)
440 result = tmp_result;
441
442 tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
443 if (tmp_result)
444 result = tmp_result;
445
446 tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
447 if (tmp_result)
448 result = tmp_result;
449
450 tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
451 if(tmp_result)
452 result = tmp_result;
453
454 return result;
455}
456
457static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
458{
459 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
460 struct phm_ppt_v1_information *table_info =
461 (struct phm_ppt_v1_information *)(hwmgr->pptable);
462
463 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
464 table_info->vdd_dep_on_sclk;
465 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
466 table_info->vdd_dep_on_mclk;
467
468 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
469 "VDD dependency on SCLK table is missing. \
470 This table is mandatory", return -EINVAL);
471 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
472 "VDD dependency on SCLK table has to have is missing. \
473 This table is mandatory", return -EINVAL);
474
475 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
476 "VDD dependency on MCLK table is missing. \
477 This table is mandatory", return -EINVAL);
478 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
479 "VDD dependency on MCLK table has to have is missing. \
480 This table is mandatory", return -EINVAL);
481
482 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
483 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->
484 entries[allowed_sclk_vdd_table->count - 1].vddc;
485
486 table_info->max_clock_voltage_on_ac.sclk =
487 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
488 table_info->max_clock_voltage_on_ac.mclk =
489 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
490 table_info->max_clock_voltage_on_ac.vddc =
491 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
492 table_info->max_clock_voltage_on_ac.vddci =
493 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
494
495 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
496 table_info->max_clock_voltage_on_ac.sclk;
497 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
498 table_info->max_clock_voltage_on_ac.mclk;
499 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
500 table_info->max_clock_voltage_on_ac.vddc;
501 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
502 table_info->max_clock_voltage_on_ac.vddci;
503
504 return 0;
505}
506
507static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
508{
509 uint32_t speedCntl = 0;
510
511 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
512 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
513 ixPCIE_LC_SPEED_CNTL);
514 return((uint16_t)PHM_GET_FIELD(speedCntl,
515 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
516}
517
518static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
519{
520 uint32_t link_width;
521
522 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
523 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
524 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
525
526 PP_ASSERT_WITH_CODE((7 >= link_width),
527 "Invalid PCIe lane width!", return 0);
528
529 return decode_pcie_lane_width(link_width);
530}
531
532/** Patch the Boot State to match VBIOS boot clocks and voltage.
533*
534* @param hwmgr Pointer to the hardware manager.
535* @param pPowerState The address of the PowerState instance being created.
536*
537*/
538static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
539 struct pp_hw_power_state *hw_ps)
540{
541 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
542 struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
543 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
544 uint16_t size;
545 uint8_t frev, crev;
546 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
547
548 /* First retrieve the Boot clocks and VDDC from the firmware info table.
549 * We assume here that fw_info is unchanged if this call fails.
550 */
551 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
552 hwmgr->device, index,
553 &size, &frev, &crev);
554 if (!fw_info)
555 /* During a test, there is no firmware info table. */
556 return 0;
557
558 /* Patch the state. */
559 data->vbios_boot_state.sclk_bootup_value =
560 le32_to_cpu(fw_info->ulDefaultEngineClock);
561 data->vbios_boot_state.mclk_bootup_value =
562 le32_to_cpu(fw_info->ulDefaultMemoryClock);
563 data->vbios_boot_state.mvdd_bootup_value =
564 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
565 data->vbios_boot_state.vddc_bootup_value =
566 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
567 data->vbios_boot_state.vddci_bootup_value =
568 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
569 data->vbios_boot_state.pcie_gen_bootup_value =
570 fiji_get_current_pcie_speed(hwmgr);
571 data->vbios_boot_state.pcie_lane_bootup_value =
572 (uint16_t)fiji_get_current_pcie_lane_number(hwmgr);
573
574 /* set boot power state */
575 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
576 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
577 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
578 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
579
580 return 0;
581}
582
/* Tear down the Fiji backend; delegates to the common PHM backend teardown
 * (which owns freeing hwmgr->backend).
 */
static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
587
588static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
589{
590 struct fiji_hwmgr *data;
591 uint32_t i;
592 struct phm_ppt_v1_information *table_info =
593 (struct phm_ppt_v1_information *)(hwmgr->pptable);
594 bool stay_in_boot;
595 int result;
596
597 data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
598 if (data == NULL)
599 return -ENOMEM;
600
601 hwmgr->backend = data;
602
603 data->dll_default_on = false;
604 data->sram_end = SMC_RAM_END;
605
606 for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
607 data->activity_target[i] = FIJI_AT_DFLT;
608
609 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
610
611 data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
612 data->mclk_dpm0_activity_target = 0xa;
613
614 data->sclk_dpm_key_disabled = 0;
615 data->mclk_dpm_key_disabled = 0;
616 data->pcie_dpm_key_disabled = 0;
617
618 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
619 PHM_PlatformCaps_UnTabledHardwareInterface);
620 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
621 PHM_PlatformCaps_TablelessHardwareInterface);
622
623 data->gpio_debug = 0;
624
625 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
626 PHM_PlatformCaps_DynamicPatchPowerState);
627
628 /* need to set voltage control types before EVV patching */
629 data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
630 data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
631 data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
632
633 data->force_pcie_gen = PP_PCIEGenInvalid;
634
635 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
636 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
637 data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
638
639 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
640 PHM_PlatformCaps_EnableMVDDControl))
641 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
642 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
643 data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
644
645 if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
646 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
647 PHM_PlatformCaps_EnableMVDDControl);
648
649 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
650 PHM_PlatformCaps_ControlVDDCI)) {
651 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
652 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
653 data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
654 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
655 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
656 data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
657 }
658
659 if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
660 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
661 PHM_PlatformCaps_ControlVDDCI);
662
663 if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
664 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
665 PHM_PlatformCaps_ClockStretcher);
666
667 fiji_init_dpm_defaults(hwmgr);
668
669 /* Get leakage voltage based on leakage ID. */
670 fiji_get_evv_voltages(hwmgr);
671
672 /* Patch our voltage dependency table with actual leakage voltage
673 * We need to perform leakage translation before it's used by other functions
674 */
675 fiji_complete_dependency_tables(hwmgr);
676
677 /* Parse pptable data read from VBIOS */
678 fiji_set_private_data_based_on_pptable(hwmgr);
679
680 /* ULV Support */
681 data->ulv.ulv_supported = true; /* ULV feature is enabled by default */
682
683 /* Initalize Dynamic State Adjustment Rule Settings */
684 result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
685
686 if (!result) {
687 data->uvd_enabled = false;
688 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
689 PHM_PlatformCaps_EnableSMU7ThermalManagement);
690 data->vddc_phase_shed_control = false;
691 }
692
693 stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
694 PHM_PlatformCaps_StayInBootState);
695
696 if (0 == result) {
697 struct cgs_system_info sys_info = {0};
698
699 data->is_tlu_enabled = false;
700 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
701 FIJI_MAX_HARDWARE_POWERLEVELS;
702 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
703 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
704
705 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
706 PHM_PlatformCaps_FanSpeedInTableIsRPM);
707
708 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
709 hwmgr->thermal_controller.
710 advanceFanControlParameters.ucFanControlMode) {
711 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
712 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
713 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
714 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
715 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
716 table_info->cac_dtp_table->usOperatingTempMinLimit;
717 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
718 table_info->cac_dtp_table->usOperatingTempMaxLimit;
719 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
720 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
721 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
722 table_info->cac_dtp_table->usOperatingTempStep;
723 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
724 table_info->cac_dtp_table->usTargetOperatingTemp;
725
726 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
727 PHM_PlatformCaps_ODFuzzyFanControlSupport);
728 }
729
730 sys_info.size = sizeof(struct cgs_system_info);
731 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
732 result = cgs_query_system_info(hwmgr->device, &sys_info);
733 if (result)
734 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
735 else
736 data->pcie_gen_cap = (uint32_t)sys_info.value;
737 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
738 data->pcie_spc_cap = 20;
739 sys_info.size = sizeof(struct cgs_system_info);
740 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
741 result = cgs_query_system_info(hwmgr->device, &sys_info);
742 if (result)
743 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
744 else
745 data->pcie_lane_cap = (uint32_t)sys_info.value;
746 } else {
747 /* Ignore return value in here, we are cleaning up a mess. */
748 fiji_hwmgr_backend_fini(hwmgr);
749 }
750
751 return 0;
752}
753
754/**
755 * Read clock related registers.
756 *
757 * @param hwmgr the address of the powerplay hardware manager.
758 * @return always 0
759 */
760static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
761{
762 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
763
764 data->clock_registers.vCG_SPLL_FUNC_CNTL =
765 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
766 ixCG_SPLL_FUNC_CNTL);
767 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
768 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
769 ixCG_SPLL_FUNC_CNTL_2);
770 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
771 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
772 ixCG_SPLL_FUNC_CNTL_3);
773 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
774 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
775 ixCG_SPLL_FUNC_CNTL_4);
776 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
777 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
778 ixCG_SPLL_SPREAD_SPECTRUM);
779 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
780 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
781 ixCG_SPLL_SPREAD_SPECTRUM_2);
782
783 return 0;
784}
785
786/**
787 * Find out if memory is GDDR5.
788 *
789 * @param hwmgr the address of the powerplay hardware manager.
790 * @return always 0
791 */
792static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
793{
794 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
795 uint32_t temp;
796
797 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
798
799 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
800 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
801 MC_SEQ_MISC0_GDDR5_SHIFT));
802
803 return 0;
804}
805
806/**
807 * Enables Dynamic Power Management by SMC
808 *
809 * @param hwmgr the address of the powerplay hardware manager.
810 * @return always 0
811 */
812static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
813{
814 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
815 GENERAL_PWRMGT, STATIC_PM_EN, 1);
816
817 return 0;
818}
819
820/**
821 * Initialize PowerGating States for different engines
822 *
823 * @param hwmgr the address of the powerplay hardware manager.
824 * @return always 0
825 */
826static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
827{
828 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
829
830 data->uvd_power_gated = false;
831 data->vce_power_gated = false;
832 data->samu_power_gated = false;
833 data->acp_power_gated = false;
834 data->pg_acp_init = true;
835
836 return 0;
837}
838
/* Start with a zero low-sclk interrupt threshold (presumably: interrupt
 * disabled until a level is programmed -- confirm against SMC usage).
 */
static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
846
/**
 * Run the one-time ASIC setup steps (clock snapshot, memory type,
 * ACPI PM enable, power-gate state, MC ucode version, sclk threshold).
 *
 * Each step is attempted even if an earlier one failed; the first
 * failing step's code is what gets returned.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, otherwise the last recorded failure code.
 */
static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int rc, result = 0;

	rc = fiji_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((rc == 0),
			"Failed to read clock registers!", result = rc);

	rc = fiji_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((rc == 0),
			"Failed to get memory type!", result = rc);

	rc = fiji_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((rc == 0),
			"Failed to enable ACPI power management!", result = rc);

	rc = fiji_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((rc == 0),
			"Failed to init power gate state!", result = rc);

	rc = tonga_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((rc == 0),
			"Failed to get MC microcode version!", result = rc);

	rc = fiji_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((rc == 0),
			"Failed to init sclk threshold!", result = rc);

	return result;
}
877
878/**
879* Checks if we want to support voltage control
880*
881* @param hwmgr the address of the powerplay hardware manager.
882*/
883static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
884{
885 const struct fiji_hwmgr *data =
886 (const struct fiji_hwmgr *)(hwmgr->backend);
887
888 return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
889}
890
891/**
892* Enable voltage control
893*
894* @param hwmgr the address of the powerplay hardware manager.
895* @return always 0
896*/
897static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
898{
899 /* enable voltage control */
900 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
901 GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
902
903 return 0;
904}
905
906/**
907* Remove repeated voltage values and create table with unique values.
908*
909* @param hwmgr the address of the powerplay hardware manager.
910* @param vol_table the pointer to changing voltage table
911* @return 0 in success
912*/
913
914static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
915 struct pp_atomctrl_voltage_table *vol_table)
916{
917 uint32_t i, j;
918 uint16_t vvalue;
919 bool found = false;
920 struct pp_atomctrl_voltage_table *table;
921
922 PP_ASSERT_WITH_CODE((NULL != vol_table),
923 "Voltage Table empty.", return -EINVAL);
924 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
925 GFP_KERNEL);
926
927 if (NULL == table)
928 return -ENOMEM;
929
930 table->mask_low = vol_table->mask_low;
931 table->phase_delay = vol_table->phase_delay;
932
933 for (i = 0; i < vol_table->count; i++) {
934 vvalue = vol_table->entries[i].value;
935 found = false;
936
937 for (j = 0; j < table->count; j++) {
938 if (vvalue == table->entries[j].value) {
939 found = true;
940 break;
941 }
942 }
943
944 if (!found) {
945 table->entries[table->count].value = vvalue;
946 table->entries[table->count].smio_low =
947 vol_table->entries[i].smio_low;
948 table->count++;
949 }
950 }
951
952 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
953 kfree(table);
954
955 return 0;
956}
957
958static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
959 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
960{
961 uint32_t i;
962 int result;
963 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
964 struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
965
966 PP_ASSERT_WITH_CODE((0 != dep_table->count),
967 "Voltage Dependency Table empty.", return -EINVAL);
968
969 vol_table->mask_low = 0;
970 vol_table->phase_delay = 0;
971 vol_table->count = dep_table->count;
972
973 for (i = 0; i < dep_table->count; i++) {
974 vol_table->entries[i].value = dep_table->entries[i].mvdd;
975 vol_table->entries[i].smio_low = 0;
976 }
977
978 result = fiji_trim_voltage_table(hwmgr, vol_table);
979 PP_ASSERT_WITH_CODE((0 == result),
980 "Failed to trim MVDD table.", return result);
981
982 return 0;
983}
984
985static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
986 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
987{
988 uint32_t i;
989 int result;
990 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
991 struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
992
993 PP_ASSERT_WITH_CODE((0 != dep_table->count),
994 "Voltage Dependency Table empty.", return -EINVAL);
995
996 vol_table->mask_low = 0;
997 vol_table->phase_delay = 0;
998 vol_table->count = dep_table->count;
999
1000 for (i = 0; i < dep_table->count; i++) {
1001 vol_table->entries[i].value = dep_table->entries[i].vddci;
1002 vol_table->entries[i].smio_low = 0;
1003 }
1004
1005 result = fiji_trim_voltage_table(hwmgr, vol_table);
1006 PP_ASSERT_WITH_CODE((0 == result),
1007 "Failed to trim VDDCI table.", return result);
1008
1009 return 0;
1010}
1011
1012static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1013 phm_ppt_v1_voltage_lookup_table *lookup_table)
1014{
1015 int i = 0;
1016 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1017 struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
1018
1019 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
1020 "Voltage Lookup Table empty.", return -EINVAL);
1021
1022 vol_table->mask_low = 0;
1023 vol_table->phase_delay = 0;
1024
1025 vol_table->count = lookup_table->count;
1026
1027 for (i = 0; i < vol_table->count; i++) {
1028 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
1029 vol_table->entries[i].smio_low = 0;
1030 }
1031
1032 return 0;
1033}
1034
1035/* ---- Voltage Tables ----
1036 * If the voltage table would be bigger than
1037 * what will fit into the state table on
1038 * the SMC keep only the higher entries.
1039 */
1040static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
1041 uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
1042{
1043 unsigned int i, diff;
1044
1045 if (vol_table->count <= max_vol_steps)
1046 return;
1047
1048 diff = vol_table->count - max_vol_steps;
1049
1050 for (i = 0; i < max_vol_steps; i++)
1051 vol_table->entries[i] = vol_table->entries[i + diff];
1052
1053 vol_table->count = max_vol_steps;
1054
1055 return;
1056}
1057
1058/**
1059* Create Voltage Tables.
1060*
1061* @param hwmgr the address of the powerplay hardware manager.
1062* @return always 0
1063*/
1064static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1065{
1066 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1067 struct phm_ppt_v1_information *table_info =
1068 (struct phm_ppt_v1_information *)hwmgr->pptable;
1069 int result;
1070
1071 if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1072 result = atomctrl_get_voltage_table_v3(hwmgr,
1073 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
1074 &(data->mvdd_voltage_table));
1075 PP_ASSERT_WITH_CODE((0 == result),
1076 "Failed to retrieve MVDD table.",
1077 return result);
1078 } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1079 result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
1080 table_info->vdd_dep_on_mclk);
1081 PP_ASSERT_WITH_CODE((0 == result),
1082 "Failed to retrieve SVI2 MVDD table from dependancy table.",
1083 return result;);
1084 }
1085
1086 if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1087 result = atomctrl_get_voltage_table_v3(hwmgr,
1088 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
1089 &(data->vddci_voltage_table));
1090 PP_ASSERT_WITH_CODE((0 == result),
1091 "Failed to retrieve VDDCI table.",
1092 return result);
1093 } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1094 result = fiji_get_svi2_vddci_voltage_table(hwmgr,
1095 table_info->vdd_dep_on_mclk);
1096 PP_ASSERT_WITH_CODE((0 == result),
1097 "Failed to retrieve SVI2 VDDCI table from dependancy table.",
1098 return result);
1099 }
1100
1101 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1102 result = fiji_get_svi2_vdd_voltage_table(hwmgr,
1103 table_info->vddc_lookup_table);
1104 PP_ASSERT_WITH_CODE((0 == result),
1105 "Failed to retrieve SVI2 VDDC table from lookup table.",
1106 return result);
1107 }
1108
1109 PP_ASSERT_WITH_CODE(
1110 (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
1111 "Too many voltage values for VDDC. Trimming to fit state table.",
1112 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1113 SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
1114
1115 PP_ASSERT_WITH_CODE(
1116 (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
1117 "Too many voltage values for VDDCI. Trimming to fit state table.",
1118 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1119 SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
1120
1121 PP_ASSERT_WITH_CODE(
1122 (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
1123 "Too many voltage values for MVDD. Trimming to fit state table.",
1124 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1125 SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
1126
1127 return 0;
1128}
1129
1130static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
1131{
1132 /* Program additional LP registers
1133 * that are no longer programmed by VBIOS
1134 */
1135 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
1136 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
1137 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
1138 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
1139 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
1140 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
1141 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
1142 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
1143 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
1144 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
1145 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
1146 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
1147 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
1148 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
1149
1150 return 0;
1151}
1152
1153/**
1154* Programs static screed detection parameters
1155*
1156* @param hwmgr the address of the powerplay hardware manager.
1157* @return always 0
1158*/
1159static int fiji_program_static_screen_threshold_parameters(
1160 struct pp_hwmgr *hwmgr)
1161{
1162 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1163
1164 /* Set static screen threshold unit */
1165 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1166 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
1167 data->static_screen_threshold_unit);
1168 /* Set static screen threshold */
1169 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1170 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
1171 data->static_screen_threshold);
1172
1173 return 0;
1174}
1175
1176/**
1177* Setup display gap for glitch free memory clock switching.
1178*
1179* @param hwmgr the address of the powerplay hardware manager.
1180* @return always 0
1181*/
1182static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
1183{
1184 uint32_t displayGap =
1185 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1186 ixCG_DISPLAY_GAP_CNTL);
1187
1188 displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
1189 DISP_GAP, DISPLAY_GAP_IGNORE);
1190
1191 displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
1192 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
1193
1194 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1195 ixCG_DISPLAY_GAP_CNTL, displayGap);
1196
1197 return 0;
1198}
1199
1200/**
1201* Programs activity state transition voting clients
1202*
1203* @param hwmgr the address of the powerplay hardware manager.
1204* @return always 0
1205*/
1206static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
1207{
1208 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1209
1210 /* Clear reset for voting clients before enabling DPM */
1211 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1212 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
1213 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1214 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
1215
1216 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1217 ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
1218 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1219 ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
1220 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1221 ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
1222 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1223 ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
1224 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1225 ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
1226 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1227 ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
1228 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1229 ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
1230 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1231 ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
1232
1233 return 0;
1234}
1235
1236static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
1237{
1238 /* Reset voting clients before disabling DPM */
1239 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1240 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
1241 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1242 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
1243
1244 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1245 ixCG_FREQ_TRAN_VOTING_0, 0);
1246 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1247 ixCG_FREQ_TRAN_VOTING_1, 0);
1248 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1249 ixCG_FREQ_TRAN_VOTING_2, 0);
1250 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1251 ixCG_FREQ_TRAN_VOTING_3, 0);
1252 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1253 ixCG_FREQ_TRAN_VOTING_4, 0);
1254 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1255 ixCG_FREQ_TRAN_VOTING_5, 0);
1256 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1257 ixCG_FREQ_TRAN_VOTING_6, 0);
1258 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1259 ixCG_FREQ_TRAN_VOTING_7, 0);
1260
1261 return 0;
1262}
1263
1264/**
1265* Get the location of various tables inside the FW image.
1266*
1267* @param hwmgr the address of the powerplay hardware manager.
1268* @return always 0
1269*/
1270static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
1271{
1272 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1273 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
1274 uint32_t tmp;
1275 int result;
1276 bool error = false;
1277
1278 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1279 SMU7_FIRMWARE_HEADER_LOCATION +
1280 offsetof(SMU73_Firmware_Header, DpmTable),
1281 &tmp, data->sram_end);
1282
1283 if (0 == result)
1284 data->dpm_table_start = tmp;
1285
1286 error |= (0 != result);
1287
1288 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1289 SMU7_FIRMWARE_HEADER_LOCATION +
1290 offsetof(SMU73_Firmware_Header, SoftRegisters),
1291 &tmp, data->sram_end);
1292
1293 if (!result) {
1294 data->soft_regs_start = tmp;
1295 smu_data->soft_regs_start = tmp;
1296 }
1297
1298 error |= (0 != result);
1299
1300 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1301 SMU7_FIRMWARE_HEADER_LOCATION +
1302 offsetof(SMU73_Firmware_Header, mcRegisterTable),
1303 &tmp, data->sram_end);
1304
1305 if (!result)
1306 data->mc_reg_table_start = tmp;
1307
1308 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1309 SMU7_FIRMWARE_HEADER_LOCATION +
1310 offsetof(SMU73_Firmware_Header, FanTable),
1311 &tmp, data->sram_end);
1312
1313 if (!result)
1314 data->fan_table_start = tmp;
1315
1316 error |= (0 != result);
1317
1318 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1319 SMU7_FIRMWARE_HEADER_LOCATION +
1320 offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
1321 &tmp, data->sram_end);
1322
1323 if (!result)
1324 data->arb_table_start = tmp;
1325
1326 error |= (0 != result);
1327
1328 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1329 SMU7_FIRMWARE_HEADER_LOCATION +
1330 offsetof(SMU73_Firmware_Header, Version),
1331 &tmp, data->sram_end);
1332
1333 if (!result)
1334 hwmgr->microcode_version_info.SMC = tmp;
1335
1336 error |= (0 != result);
1337
1338 return error ? -1 : 0;
1339}
1340
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 *
 * Returns 0 on success, -EINVAL if either set is not F0/F1.
 */
static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Read the source set's timing and burst-time values. */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Write them into the destination set. */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	/* NOTE(review): OR-ing bits 3:0 into MC_CG_CONFIG before switching
	 * the active set -- presumably enables arbiter clock gating for all
	 * request types; confirm against the register spec.
	 */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	/* Make arb_dest the active set. */
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
1389
1390/**
1391* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
1392*
1393* @param hwmgr the address of the powerplay hardware manager.
1394* @return if success then 0;
1395*/
1396static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
1397{
1398 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
1399}
1400
1401/**
1402* Initial switch from ARB F0->F1
1403*
1404* @param hwmgr the address of the powerplay hardware manager.
1405* @return always 0
1406* This function is to be called from the SetPowerState table.
1407*/
1408static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
1409{
1410 return fiji_copy_and_switch_arb_sets(hwmgr,
1411 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1412}
1413
1414static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
1415{
1416 uint32_t tmp;
1417
1418 tmp = (cgs_read_ind_register(hwmgr->device,
1419 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
1420 0x0000ff00) >> 8;
1421
1422 if (tmp == MC_CG_ARB_FREQ_F0)
1423 return 0;
1424
1425 return fiji_copy_and_switch_arb_sets(hwmgr,
1426 tmp, MC_CG_ARB_FREQ_F0);
1427}
1428
1429static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
1430 struct fiji_single_dpm_table *dpm_table, uint32_t count)
1431{
1432 int i;
1433 PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
1434 "Fatal error, can not set up single DPM table entries "
1435 "to exceed max number!",);
1436
1437 dpm_table->count = count;
1438 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
1439 dpm_table->dpm_levels[i].enabled = false;
1440
1441 return 0;
1442}
1443
1444static void fiji_setup_pcie_table_entry(
1445 struct fiji_single_dpm_table *dpm_table,
1446 uint32_t index, uint32_t pcie_gen,
1447 uint32_t pcie_lanes)
1448{
1449 dpm_table->dpm_levels[index].value = pcie_gen;
1450 dpm_table->dpm_levels[index].param1 = pcie_lanes;
1451 dpm_table->dpm_levels[index].enabled = true;
1452}
1453
1454static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1455{
1456 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1457 struct phm_ppt_v1_information *table_info =
1458 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1459 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
1460 uint32_t i, max_entry;
1461
1462 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
1463 data->use_pcie_power_saving_levels), "No pcie performance levels!",
1464 return -EINVAL);
1465
1466 if (data->use_pcie_performance_levels &&
1467 !data->use_pcie_power_saving_levels) {
1468 data->pcie_gen_power_saving = data->pcie_gen_performance;
1469 data->pcie_lane_power_saving = data->pcie_lane_performance;
1470 } else if (!data->use_pcie_performance_levels &&
1471 data->use_pcie_power_saving_levels) {
1472 data->pcie_gen_performance = data->pcie_gen_power_saving;
1473 data->pcie_lane_performance = data->pcie_lane_power_saving;
1474 }
1475
1476 fiji_reset_single_dpm_table(hwmgr,
1477 &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);
1478
1479 if (pcie_table != NULL) {
1480 /* max_entry is used to make sure we reserve one PCIE level
1481 * for boot level (fix for A+A PSPP issue).
1482 * If PCIE table from PPTable have ULV entry + 8 entries,
1483 * then ignore the last entry.*/
1484 max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
1485 SMU73_MAX_LEVELS_LINK : pcie_table->count;
1486 for (i = 1; i < max_entry; i++) {
1487 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
1488 get_pcie_gen_support(data->pcie_gen_cap,
1489 pcie_table->entries[i].gen_speed),
1490 get_pcie_lane_support(data->pcie_lane_cap,
1491 pcie_table->entries[i].lane_width));
1492 }
1493 data->dpm_table.pcie_speed_table.count = max_entry - 1;
1494 } else {
1495 /* Hardcode Pcie Table */
1496 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
1497 get_pcie_gen_support(data->pcie_gen_cap,
1498 PP_Min_PCIEGen),
1499 get_pcie_lane_support(data->pcie_lane_cap,
1500 PP_Max_PCIELane));
1501 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
1502 get_pcie_gen_support(data->pcie_gen_cap,
1503 PP_Min_PCIEGen),
1504 get_pcie_lane_support(data->pcie_lane_cap,
1505 PP_Max_PCIELane));
1506 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
1507 get_pcie_gen_support(data->pcie_gen_cap,
1508 PP_Max_PCIEGen),
1509 get_pcie_lane_support(data->pcie_lane_cap,
1510 PP_Max_PCIELane));
1511 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
1512 get_pcie_gen_support(data->pcie_gen_cap,
1513 PP_Max_PCIEGen),
1514 get_pcie_lane_support(data->pcie_lane_cap,
1515 PP_Max_PCIELane));
1516 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
1517 get_pcie_gen_support(data->pcie_gen_cap,
1518 PP_Max_PCIEGen),
1519 get_pcie_lane_support(data->pcie_lane_cap,
1520 PP_Max_PCIELane));
1521 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
1522 get_pcie_gen_support(data->pcie_gen_cap,
1523 PP_Max_PCIEGen),
1524 get_pcie_lane_support(data->pcie_lane_cap,
1525 PP_Max_PCIELane));
1526
1527 data->dpm_table.pcie_speed_table.count = 6;
1528 }
1529 /* Populate last level for boot PCIE level, but do not increment count. */
1530 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
1531 data->dpm_table.pcie_speed_table.count,
1532 get_pcie_gen_support(data->pcie_gen_cap,
1533 PP_Min_PCIEGen),
1534 get_pcie_lane_support(data->pcie_lane_cap,
1535 PP_Max_PCIELane));
1536
1537 return 0;
1538}
1539
1540/*
1541 * This function is to initalize all DPM state tables
1542 * for SMU7 based on the dependency table.
1543 * Dynamic state patching function will then trim these
1544 * state tables to the allowed range based
1545 * on the power policy or external client requests,
1546 * such as UVD request, etc.
1547 */
1548static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1549{
1550 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1551 struct phm_ppt_v1_information *table_info =
1552 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1553 uint32_t i;
1554
1555 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
1556 table_info->vdd_dep_on_sclk;
1557 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1558 table_info->vdd_dep_on_mclk;
1559
1560 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
1561 "SCLK dependency table is missing. This table is mandatory",
1562 return -EINVAL);
1563 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
1564 "SCLK dependency table has to have is missing. "
1565 "This table is mandatory",
1566 return -EINVAL);
1567
1568 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
1569 "MCLK dependency table is missing. This table is mandatory",
1570 return -EINVAL);
1571 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1572 "MCLK dependency table has to have is missing. "
1573 "This table is mandatory",
1574 return -EINVAL);
1575
1576 /* clear the state table to reset everything to default */
1577 fiji_reset_single_dpm_table(hwmgr,
1578 &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
1579 fiji_reset_single_dpm_table(hwmgr,
1580 &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
1581
1582 /* Initialize Sclk DPM table based on allow Sclk values */
1583 data->dpm_table.sclk_table.count = 0;
1584 for (i = 0; i < dep_sclk_table->count; i++) {
1585 if (i == 0 || data->dpm_table.sclk_table.dpm_levels
1586 [data->dpm_table.sclk_table.count - 1].value !=
1587 dep_sclk_table->entries[i].clk) {
1588 data->dpm_table.sclk_table.dpm_levels
1589 [data->dpm_table.sclk_table.count].value =
1590 dep_sclk_table->entries[i].clk;
1591 data->dpm_table.sclk_table.dpm_levels
1592 [data->dpm_table.sclk_table.count].enabled =
1593 (i == 0) ? true : false;
1594 data->dpm_table.sclk_table.count++;
1595 }
1596 }
1597
1598 /* Initialize Mclk DPM table based on allow Mclk values */
1599 data->dpm_table.mclk_table.count = 0;
1600 for (i=0; i<dep_mclk_table->count; i++) {
1601 if ( i==0 || data->dpm_table.mclk_table.dpm_levels
1602 [data->dpm_table.mclk_table.count - 1].value !=
1603 dep_mclk_table->entries[i].clk) {
1604 data->dpm_table.mclk_table.dpm_levels
1605 [data->dpm_table.mclk_table.count].value =
1606 dep_mclk_table->entries[i].clk;
1607 data->dpm_table.mclk_table.dpm_levels
1608 [data->dpm_table.mclk_table.count].enabled =
1609 (i == 0) ? true : false;
1610 data->dpm_table.mclk_table.count++;
1611 }
1612 }
1613
1614 /* setup PCIE gen speed levels */
1615 fiji_setup_default_pcie_table(hwmgr);
1616
1617 /* save a copy of the default DPM table */
1618 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1619 sizeof(struct fiji_dpm_table));
1620
1621 return 0;
1622}
1623
1624/**
1625 * @brief PhwFiji_GetVoltageOrder
1626 * Returns index of requested voltage record in lookup(table)
1627 * @param lookup_table - lookup list to search in
1628 * @param voltage - voltage to look for
1629 * @return 0 on success
1630 */
1631static uint8_t fiji_get_voltage_index(
1632 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
1633{
1634 uint8_t count = (uint8_t) (lookup_table->count);
1635 uint8_t i;
1636
1637 PP_ASSERT_WITH_CODE((NULL != lookup_table),
1638 "Lookup Table empty.", return 0);
1639 PP_ASSERT_WITH_CODE((0 != count),
1640 "Lookup Table empty.", return 0);
1641
1642 for (i = 0; i < lookup_table->count; i++) {
1643 /* find first voltage equal or bigger than requested */
1644 if (lookup_table->entries[i].us_vdd >= voltage)
1645 return i;
1646 }
1647 /* voltage is bigger than max voltage in the table */
1648 return i - 1;
1649}
1650
1651/**
1652* Preparation of vddc and vddgfx CAC tables for SMC.
1653*
1654* @param hwmgr the address of the hardware manager
1655* @param table the SMC DPM table structure to be populated
1656* @return always 0
1657*/
1658static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
1659 struct SMU73_Discrete_DpmTable *table)
1660{
1661 uint32_t count;
1662 uint8_t index;
1663 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1664 struct phm_ppt_v1_information *table_info =
1665 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1666 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
1667 table_info->vddc_lookup_table;
1668 /* tables is already swapped, so in order to use the value from it,
1669 * we need to swap it back.
1670 * We are populating vddc CAC data to BapmVddc table
1671 * in split and merged mode
1672 */
1673 for( count = 0; count<lookup_table->count; count++) {
1674 index = fiji_get_voltage_index(lookup_table,
1675 data->vddc_voltage_table.entries[count].value);
1676 table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
1677 (lookup_table->entries[index].us_cac_low *
1678 VOLTAGE_SCALE)) / 25);
1679 table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
1680 (lookup_table->entries[index].us_cac_high *
1681 VOLTAGE_SCALE)) / 25);
1682 }
1683
1684 return 0;
1685}
1686
1687/**
1688* Preparation of voltage tables for SMC.
1689*
1690* @param hwmgr the address of the hardware manager
1691* @param table the SMC DPM table structure to be populated
1692* @return always 0
1693*/
1694
1695static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
1696 struct SMU73_Discrete_DpmTable *table)
1697{
1698 int result;
1699
1700 result = fiji_populate_cac_table(hwmgr, table);
1701 PP_ASSERT_WITH_CODE(0 == result,
1702 "can not populate CAC voltage tables to SMC",
1703 return -EINVAL);
1704
1705 return 0;
1706}
1707
1708static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
1709 struct SMU73_Discrete_Ulv *state)
1710{
1711 int result = 0;
1712 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1713 struct phm_ppt_v1_information *table_info =
1714 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1715
1716 state->CcPwrDynRm = 0;
1717 state->CcPwrDynRm1 = 0;
1718
1719 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
1720 state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset *
1721 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 );
1722
1723 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
1724
1725 if (!result) {
1726 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
1727 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
1728 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
1729 }
1730 return result;
1731}
1732
1733static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
1734 struct SMU73_Discrete_DpmTable *table)
1735{
1736 return fiji_populate_ulv_level(hwmgr, &table->Ulv);
1737}
1738
1739static int32_t fiji_get_dpm_level_enable_mask_value(
1740 struct fiji_single_dpm_table* dpm_table)
1741{
1742 int32_t i;
1743 int32_t mask = 0;
1744
1745 for (i = dpm_table->count; i > 0; i--) {
1746 mask = mask << 1;
1747 if (dpm_table->dpm_levels[i - 1].enabled)
1748 mask |= 0x1;
1749 else
1750 mask &= 0xFFFFFFFE;
1751 }
1752 return mask;
1753}
1754
1755static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
1756 struct SMU73_Discrete_DpmTable *table)
1757{
1758 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1759 struct fiji_dpm_table *dpm_table = &data->dpm_table;
1760 int i;
1761
1762 /* Index (dpm_table->pcie_speed_table.count)
1763 * is reserved for PCIE boot level. */
1764 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
1765 table->LinkLevel[i].PcieGenSpeed =
1766 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
1767 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
1768 dpm_table->pcie_speed_table.dpm_levels[i].param1);
1769 table->LinkLevel[i].EnabledForActivity = 1;
1770 table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
1771 table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
1772 table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
1773 }
1774
1775 data->smc_state_table.LinkLevelCount =
1776 (uint8_t)dpm_table->pcie_speed_table.count;
1777 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
1778 fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
1779
1780 return 0;
1781}
1782
/**
* Calculates the SCLK dividers using the provided engine clock
*
* @param hwmgr the address of the hardware manager
* @param clock the engine clock to use to populate the structure
* @param sclk the SMC SCLK structure to be populated
* @return 0 on success, the atomctrl error code when the VBIOS
*         divider query fails
*/
static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
{
	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	/* Start from the SPLL register values cached at init time. */
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	/* NOTE(review): spll_func_cntl is modified here but never stored
	 * into *sclk below (only CNTL_3/CNTL_4 are) - confirm intended. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ssInfo;

		uint32_t vco_freq = clock * dividers.uc_pll_post_div;
		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ssInfo)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 *
			 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
			 */
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ssInfo.speed_spectrum_rate);
			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			/* program the computed spread-spectrum step and delta */
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	/* Hand the computed register images and divider back to the caller. */
	sclk->SclkFrequency        = clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}
1870
1871static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
1872{
1873 uint32_t i;
1874 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1875 struct pp_atomctrl_voltage_table *vddci_table =
1876 &(data->vddci_voltage_table);
1877
1878 for (i = 0; i < vddci_table->count; i++) {
1879 if (vddci_table->entries[i].value >= vddci)
1880 return vddci_table->entries[i].value;
1881 }
1882
1883 PP_ASSERT_WITH_CODE(false,
1884 "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
1885 return vddci_table->entries[i-1].value);
1886}
1887
1888static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
1889 struct phm_ppt_v1_clock_voltage_dependency_table* dep_table,
1890 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1891{
1892 uint32_t i;
1893 uint16_t vddci;
1894 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1895
1896 *voltage = *mvdd = 0;
1897
1898 /* clock - voltage dependency table is empty table */
1899 if (dep_table->count == 0)
1900 return -EINVAL;
1901
1902 for (i = 0; i < dep_table->count; i++) {
1903 /* find first sclk bigger than request */
1904 if (dep_table->entries[i].clk >= clock) {
1905 *voltage |= (dep_table->entries[i].vddc *
1906 VOLTAGE_SCALE) << VDDC_SHIFT;
1907 if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
1908 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1909 VOLTAGE_SCALE) << VDDCI_SHIFT;
1910 else if (dep_table->entries[i].vddci)
1911 *voltage |= (dep_table->entries[i].vddci *
1912 VOLTAGE_SCALE) << VDDCI_SHIFT;
1913 else {
1914 vddci = fiji_find_closest_vddci(hwmgr,
1915 (dep_table->entries[i].vddc -
1916 (uint16_t)data->vddc_vddci_delta));
1917 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1918 }
1919
1920 if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1921 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
1922 VOLTAGE_SCALE;
1923 else if (dep_table->entries[i].mvdd)
1924 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
1925 VOLTAGE_SCALE;
1926
1927 *voltage |= 1 << PHASES_SHIFT;
1928 return 0;
1929 }
1930 }
1931
1932 /* sclk is bigger than max sclk in the dependence table */
1933 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1934
1935 if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
1936 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1937 VOLTAGE_SCALE) << VDDCI_SHIFT;
1938 else if (dep_table->entries[i-1].vddci) {
1939 vddci = fiji_find_closest_vddci(hwmgr,
1940 (dep_table->entries[i].vddc -
1941 (uint16_t)data->vddc_vddci_delta));
1942 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1943 }
1944
1945 if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1946 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
1947 else if (dep_table->entries[i].mvdd)
1948 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
1949
1950 return 0;
1951}
1952
1953static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
1954 uint32_t clock_insr)
1955{
1956 uint8_t i;
1957 uint32_t temp;
1958 uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
1959
1960 PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
1961 for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1962 temp = clock >> i;
1963
1964 if (temp >= min || i == 0)
1965 break;
1966 }
1967 return i;
1968}
1969/**
1970* Populates single SMC SCLK structure using the provided engine clock
1971*
1972* @param hwmgr the address of the hardware manager
1973* @param clock the engine clock to use to populate the structure
1974* @param sclk the SMC SCLK structure to be populated
1975*/
1976
1977static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
1978 uint32_t clock, uint16_t sclk_al_threshold,
1979 struct SMU73_Discrete_GraphicsLevel *level)
1980{
1981 int result;
1982 /* PP_Clocks minClocks; */
1983 uint32_t threshold, mvdd;
1984 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1985 struct phm_ppt_v1_information *table_info =
1986 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1987
1988 result = fiji_calculate_sclk_params(hwmgr, clock, level);
1989
1990 /* populate graphics levels */
1991 result = fiji_get_dependency_volt_by_clk(hwmgr,
1992 table_info->vdd_dep_on_sclk, clock,
1993 &level->MinVoltage, &mvdd);
1994 PP_ASSERT_WITH_CODE((0 == result),
1995 "can not find VDDC voltage value for "
1996 "VDDC engine clock dependency table",
1997 return result);
1998
1999 level->SclkFrequency = clock;
2000 level->ActivityLevel = sclk_al_threshold;
2001 level->CcPwrDynRm = 0;
2002 level->CcPwrDynRm1 = 0;
2003 level->EnabledForActivity = 0;
2004 level->EnabledForThrottle = 1;
2005 level->UpHyst = 10;
2006 level->DownHyst = 0;
2007 level->VoltageDownHyst = 0;
2008 level->PowerThrottle = 0;
2009
2010 threshold = clock * data->fast_watermark_threshold / 100;
2011
2012
2013 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
2014
2015 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
2016 level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
2017 hwmgr->display_config.min_core_set_clock_in_sr);
2018
2019
2020 /* Default to slow, highest DPM level will be
2021 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
2022 */
2023 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2024
2025 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
2026 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
2027 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
2028 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
2029 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
2030 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
2031 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
2032 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
2033 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
2034
2035 return 0;
2036}
/**
* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
*
* @param hwmgr the address of the hardware manager
* @return 0 on success, an error code from level population or the
*         SMC copy otherwise
*/
static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_dpm_table *dpm_table = &data->dpm_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
	int result = 0;
	/* SMC-side destination address/size of the graphics level array */
	uint32_t array = data->dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
			SMU73_MAX_LEVELS_GRAPHICS;
	struct SMU73_Discrete_GraphicsLevel *levels =
			data->smc_state_table.GraphicsLevel;
	uint32_t i, max_entry;
	/* (sic: "hightest" is the original spelling, kept as-is) */
	uint8_t hightest_pcie_level_enabled = 0,
			lowest_pcie_level_enabled = 0,
			mid_pcie_level_enabled = 0,
			count = 0;

	/* Build one SMC graphics level per trimmed SCLK DPM level. */
	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = fiji_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				(uint16_t)data->activity_target[i],
				&levels[i]);
		if (result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			levels[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now.*/
	levels[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	data->smc_state_table.GraphicsDpmLevelCount =
			(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	if (pcie_table != NULL) {
		/* PPTable PCIE table present: clamp each graphics level's
		 * PCIE DPM level to the last PCIE entry. */
		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
				"There must be 1 or more PCIE levels defined in PPTable.",
				return -EINVAL);
		max_entry = pcie_entry_cnt - 1;
		for (i = 0; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel =
					(uint8_t) ((i < max_entry)? i : max_entry);
	} else {
		/* No PPTable PCIE table: derive highest/lowest/mid enabled
		 * PCIE levels from the pcie_dpm_enable_mask bits. */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (hightest_pcie_level_enabled + 1))) != 0 ))
			hightest_pcie_level_enabled++;

		/* find the lowest set bit = lowest enabled PCIE level */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << lowest_pcie_level_enabled)) == 0 ))
			lowest_pcie_level_enabled++;

		/* skip disabled levels between the lowest and the highest */
		while ((count < hightest_pcie_level_enabled) &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0 ))
			count++;

		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1+ count) <
				hightest_pcie_level_enabled?
				(lowest_pcie_level_enabled + 1 + count) :
				hightest_pcie_level_enabled;

		/* set pcieDpmLevel to hightest_pcie_level_enabled */
		for(i = 2; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled */
		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled */
		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change */
	result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
			(uint32_t)array_size, data->sram_end);

	return result;
}
2133
/**
 * MCLK Frequency Ratio
 * SEQ_CG_RESP  Bit[31:24] - 0x0
 * Bit[27:24] - DDR3 Frequency ratio
 *   0x0 <= 100MHz,       450 < 0x8 <= 500MHz
 *   100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
 *   150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
 *   200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
 *   250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
 *   300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
 *   350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
 *   400 < 0x7 <= 450MHz, 800 < 0xF
 */
static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
{
	uint8_t ratio;

	/* The bands are uniform: ratio r covers clocks in
	 * (10000 + (r-1)*5000, 10000 + r*5000] (units of 10 kHz). */
	for (ratio = 0; ratio < 0xf; ratio++)
		if (mem_clock <= 10000u + 5000u * ratio)
			return ratio;

	/* mem_clock > 800MHz */
	return 0xf;
}
2167
2168/**
2169* Populates the SMC MCLK structure using the provided memory clock
2170*
2171* @param hwmgr the address of the hardware manager
2172* @param clock the memory clock to use to populate the structure
2173* @param sclk the SMC SCLK structure to be populated
2174*/
2175static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
2176 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
2177{
2178 struct pp_atomctrl_memory_clock_param mem_param;
2179 int result;
2180
2181 result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
2182 PP_ASSERT_WITH_CODE((0 == result),
2183 "Failed to get Memory PLL Dividers.",);
2184
2185 /* Save the result data to outpupt memory level structure */
2186 mclk->MclkFrequency = clock;
2187 mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
2188 mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
2189
2190 return result;
2191}
2192
2193static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
2194 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
2195{
2196 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2197 struct phm_ppt_v1_information *table_info =
2198 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2199 int result = 0;
2200
2201 if (table_info->vdd_dep_on_mclk) {
2202 result = fiji_get_dependency_volt_by_clk(hwmgr,
2203 table_info->vdd_dep_on_mclk, clock,
2204 &mem_level->MinVoltage, &mem_level->MinMvdd);
2205 PP_ASSERT_WITH_CODE((0 == result),
2206 "can not find MinVddc voltage value from memory "
2207 "VDDC voltage dependency table", return result);
2208 }
2209
2210 mem_level->EnabledForThrottle = 1;
2211 mem_level->EnabledForActivity = 0;
2212 mem_level->UpHyst = 0;
2213 mem_level->DownHyst = 100;
2214 mem_level->VoltageDownHyst = 0;
2215 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
2216 mem_level->StutterEnable = false;
2217
2218 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2219
2220 /* enable stutter mode if all the follow condition applied
2221 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
2222 * &(data->DisplayTiming.numExistingDisplays));
2223 */
2224 data->display_timing.num_existing_displays = 1;
2225
2226 if ((data->mclk_stutter_mode_threshold) &&
2227 (clock <= data->mclk_stutter_mode_threshold) &&
2228 (!data->is_uvd_enabled) &&
2229 (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
2230 STUTTER_ENABLE) & 0x1))
2231 mem_level->StutterEnable = true;
2232
2233 result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
2234 if (!result) {
2235 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
2236 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
2237 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
2238 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
2239 }
2240 return result;
2241}
2242
2243/**
2244* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
2245*
2246* @param hwmgr the address of the hardware manager
2247*/
2248static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
2249{
2250 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2251 struct fiji_dpm_table *dpm_table = &data->dpm_table;
2252 int result;
2253 /* populate MCLK dpm table to SMU7 */
2254 uint32_t array = data->dpm_table_start +
2255 offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
2256 uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
2257 SMU73_MAX_LEVELS_MEMORY;
2258 struct SMU73_Discrete_MemoryLevel *levels =
2259 data->smc_state_table.MemoryLevel;
2260 uint32_t i;
2261
2262 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2263 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
2264 "can not populate memory level as memory clock is zero",
2265 return -EINVAL);
2266 result = fiji_populate_single_memory_level(hwmgr,
2267 dpm_table->mclk_table.dpm_levels[i].value,
2268 &levels[i]);
2269 if (result)
2270 return result;
2271 }
2272
2273 /* Only enable level 0 for now. */
2274 levels[0].EnabledForActivity = 1;
2275
2276 /* in order to prevent MC activity from stutter mode to push DPM up.
2277 * the UVD change complements this by putting the MCLK in
2278 * a higher state by default such that we are not effected by
2279 * up threshold or and MCLK DPM latency.
2280 */
2281 levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
2282 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
2283
2284 data->smc_state_table.MemoryDpmLevelCount =
2285 (uint8_t)dpm_table->mclk_table.count;
2286 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
2287 fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2288 /* set highest level watermark to high */
2289 levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
2290 PPSMC_DISPLAY_WATERMARK_HIGH;
2291
2292 /* level count will send to smc once at init smc table and never change */
2293 result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
2294 (uint32_t)array_size, data->sram_end);
2295
2296 return result;
2297}
2298
/**
* Populates the SMC MVDD structure using the provided memory clock.
*
* @param hwmgr the address of the hardware manager
* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
* @param smio_pat the SMIO pattern whose Voltage field is filled with the
*        selected MVDD value (only Voltage is written here)
* @return 0 on success; -EINVAL if MVDD is not under driver control or if
*         mclk is above every entry in the MCLK dependency table
*/
static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
		uint32_t mclk, SMIO_Pattern *smio_pat)
{
	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i = 0;

	if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find mvdd value which clock is more than request */
		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
				/* NOTE(review): index i is applied to the parallel
				 * mvdd_voltage_table — assumes both tables have the
				 * same ordering/length; appears to be a pptable
				 * invariant, confirm against table construction. */
				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}
		/* i == count means the loop never matched: mclk is out of range */
		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
				"MVDD Voltage is outside the supported range.",
				return -EINVAL);
	} else
		return -EINVAL;

	return 0;
}
2330
/**
* Populates the SMC ACPI (lowest power) level for both SCLK and MCLK.
*
* Fills table->ACPILevel from either DPM level 0 or the VBIOS boot values,
* programs shadow copies of the SPLL control registers so the engine PLL is
* powered down and held in reset while in the ACPI state, then fills
* table->MemoryACPILevel analogously for memory.
*
* @param hwmgr the address of the hardware manager
* @param table the SMC DPM table structure to be populated
* @return 0 on success, otherwise the first failing sub-call's error code
*/
static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct pp_atomctrl_clock_dividers_vi dividers;
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;

	/* the ACPI level is never a DC (battery) state */
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (!data->sclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0,
		 * already converted to SMC_UL */
		table->ACPILevel.SclkFrequency =
				data->dpm_table.sclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_sclk,
				table->ACPILevel.SclkFrequency,
				&table->ACPILevel.MinVoltage, &mvdd);
		/* note: failure is reported but not fatal here */
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDC voltage value "
				"in Clock Dependency Table",);
	} else {
		/* SCLK DPM disabled: fall back to the VBIOS boot-up values */
		table->ACPILevel.SclkFrequency =
				data->vbios_boot_state.sclk_bootup_value;
		table->ACPILevel.MinVoltage =
				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
	}

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
			table->ACPILevel.SclkFrequency, &dividers);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* power the SPLL down and hold it in reset while in ACPI;
	 * mux SCLK away from the SPLL output (SCLK_MUX_SEL = 4) */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
			SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* byte-swap every 32-bit field the SMC firmware will read */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	if (!data->mclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
		table->MemoryACPILevel.MclkFrequency =
				data->dpm_table.mclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk,
				table->MemoryACPILevel.MclkFrequency,
				&table->MemoryACPILevel.MinVoltage, &mvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDCI voltage value "
				"in Clock Dependency Table",);
	} else {
		table->MemoryACPILevel.MclkFrequency =
				data->vbios_boot_state.mclk_bootup_value;
		table->MemoryACPILevel.MinVoltage =
				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
	}

	/* MVDD: use the boot value when MVDD or MCLK DPM is off,
	 * otherwise look it up from the DPM0 memory clock */
	us_mvdd = 0;
	if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!fiji_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
2451
2452static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
2453 SMU73_Discrete_DpmTable *table)
2454{
2455 int result = -EINVAL;
2456 uint8_t count;
2457 struct pp_atomctrl_clock_dividers_vi dividers;
2458 struct phm_ppt_v1_information *table_info =
2459 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2460 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2461 table_info->mm_dep_table;
2462 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2463
2464 table->VceLevelCount = (uint8_t)(mm_table->count);
2465 table->VceBootLevel = 0;
2466
2467 for(count = 0; count < table->VceLevelCount; count++) {
2468 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
2469 table->VceLevel[count].MinVoltage = 0;
2470 table->VceLevel[count].MinVoltage |=
2471 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
2472 table->VceLevel[count].MinVoltage |=
2473 ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
2474 VOLTAGE_SCALE) << VDDCI_SHIFT;
2475 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2476
2477 /*retrieve divider value for VBIOS */
2478 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2479 table->VceLevel[count].Frequency, &dividers);
2480 PP_ASSERT_WITH_CODE((0 == result),
2481 "can not find divide id for VCE engine clock",
2482 return result);
2483
2484 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
2485
2486 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
2487 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
2488 }
2489 return result;
2490}
2491
2492static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
2493 SMU73_Discrete_DpmTable *table)
2494{
2495 int result = -EINVAL;
2496 uint8_t count;
2497 struct pp_atomctrl_clock_dividers_vi dividers;
2498 struct phm_ppt_v1_information *table_info =
2499 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2500 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2501 table_info->mm_dep_table;
2502 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2503
2504 table->AcpLevelCount = (uint8_t)(mm_table->count);
2505 table->AcpBootLevel = 0;
2506
2507 for (count = 0; count < table->AcpLevelCount; count++) {
2508 table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
2509 table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2510 VOLTAGE_SCALE) << VDDC_SHIFT;
2511 table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
2512 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
2513 table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2514
2515 /* retrieve divider value for VBIOS */
2516 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2517 table->AcpLevel[count].Frequency, &dividers);
2518 PP_ASSERT_WITH_CODE((0 == result),
2519 "can not find divide id for engine clock", return result);
2520
2521 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
2522
2523 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
2524 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
2525 }
2526 return result;
2527}
2528
2529static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
2530 SMU73_Discrete_DpmTable *table)
2531{
2532 int result = -EINVAL;
2533 uint8_t count;
2534 struct pp_atomctrl_clock_dividers_vi dividers;
2535 struct phm_ppt_v1_information *table_info =
2536 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2537 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2538 table_info->mm_dep_table;
2539 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2540
2541 table->SamuBootLevel = 0;
2542 table->SamuLevelCount = (uint8_t)(mm_table->count);
2543
2544 for (count = 0; count < table->SamuLevelCount; count++) {
2545 /* not sure whether we need evclk or not */
2546 table->SamuLevel[count].MinVoltage = 0;
2547 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
2548 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2549 VOLTAGE_SCALE) << VDDC_SHIFT;
2550 table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
2551 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
2552 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2553
2554 /* retrieve divider value for VBIOS */
2555 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2556 table->SamuLevel[count].Frequency, &dividers);
2557 PP_ASSERT_WITH_CODE((0 == result),
2558 "can not find divide id for samu clock", return result);
2559
2560 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
2561
2562 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
2563 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
2564 }
2565 return result;
2566}
2567
/**
* Populates one ARB DRAM timing table entry for an (SCLK, MCLK) pair.
*
* Asks the VBIOS to program DRAM timings for the given clocks, then reads
* the resulting MC_ARB_DRAM_TIMING/TIMING2/BURST_TIME registers back and
* packs them into the SMC's byte-swapped ARB entry format.
*
* @param hwmgr the address of the hardware manager
* @param eng_clock the engine clock of the pair
* @param mem_clock the memory clock of the pair
* @param arb_regs the ARB table entry to fill in
* @return 0 on success, the VBIOS call's error code otherwise
*/
static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
		int32_t eng_clock, int32_t mem_clock,
		struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	uint32_t dram_timing;
	uint32_t dram_timing2;
	uint32_t burstTime;
	ULONG state, trrds, trrdl;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
			eng_clock, mem_clock);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* read back what the VBIOS just programmed */
	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);

	/* extract the state-0 burst/row-to-row timing fields */
	state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
	trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
	trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);

	/* NOTE(review): 'state' is extracted but never used, and
	 * McArbBurstTime is the truncated raw register rather than the
	 * STATE0 field — presumably matches SMU expectations; verify
	 * against the SMU73 firmware interface. */
	arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;
	arb_regs->TRRDS = (uint8_t)trrds;
	arb_regs->TRRDL = (uint8_t)trrdl;

	return 0;
}
2599
2600static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
2601{
2602 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2603 struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
2604 uint32_t i, j;
2605 int result = 0;
2606
2607 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
2608 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
2609 result = fiji_populate_memory_timing_parameters(hwmgr,
2610 data->dpm_table.sclk_table.dpm_levels[i].value,
2611 data->dpm_table.mclk_table.dpm_levels[j].value,
2612 &arb_regs.entries[i][j]);
2613 if (result)
2614 break;
2615 }
2616 }
2617
2618 if (!result)
2619 result = fiji_copy_bytes_to_smc(
2620 hwmgr->smumgr,
2621 data->arb_table_start,
2622 (uint8_t *)&arb_regs,
2623 sizeof(SMU73_Discrete_MCArbDramTimingTable),
2624 data->sram_end);
2625 return result;
2626}
2627
/**
* Populates the SMC UVD level table from the multimedia dependency table.
*
* Each UVD level carries two clocks (VCLK and DCLK), so two divider
* lookups are performed per level.
*
* @param hwmgr the address of the hardware manager
* @param table the SMC DPM table structure to be populated
* @return 0 on success; -EINVAL if the table is empty, or the divider
*         lookup error for the failing level
*/
static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	table->UvdLevelCount = (uint8_t)(mm_table->count);
	table->UvdBootLevel = 0;

	for (count = 0; count < table->UvdLevelCount; count++) {
		/* pack VDDC, derived VDDCI and a single phase into MinVoltage */
		table->UvdLevel[count].MinVoltage = 0;
		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
				VOLTAGE_SCALE) << VDDC_SHIFT;
		table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
				data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].VclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Vclk clock", return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Dclk clock", return result);

		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);

	}
	return result;
}
2675
2676static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
2677 uint32_t value, uint32_t *boot_level)
2678{
2679 int result = -EINVAL;
2680 uint32_t i;
2681
2682 for (i = 0; i < table->count; i++) {
2683 if (value == table->dpm_levels[i].value) {
2684 *boot_level = i;
2685 result = 0;
2686 }
2687 }
2688 return result;
2689}
2690
/**
* Populates the SMC boot level indices and boot voltages.
*
* @param hwmgr the address of the hardware manager
* @param table the SMC DPM table structure to be populated
* @return always 0
*/
static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	/* NOTE(review): both fiji_find_boot_level() results are discarded
	 * and 0 is returned regardless, so a boot clock missing from the
	 * DPM table silently falls back to level 0 set above — presumably
	 * intentional (failing here would abort SMC init); confirm. */
	result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));

	result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(table->MemoryBootLevel));

	table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
			VOLTAGE_SCALE;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
			VOLTAGE_SCALE;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
			VOLTAGE_SCALE;

	/* boot voltages are 16-bit fields in the SMC table */
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);

	return 0;
}
2722
2723static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
2724{
2725 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2726 struct phm_ppt_v1_information *table_info =
2727 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2728 uint8_t count, level;
2729
2730 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
2731 for (level = 0; level < count; level++) {
2732 if(table_info->vdd_dep_on_sclk->entries[level].clk >=
2733 data->vbios_boot_state.sclk_bootup_value) {
2734 data->smc_state_table.GraphicsBootLevel = level;
2735 break;
2736 }
2737 }
2738
2739 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
2740 for (level = 0; level < count; level++) {
2741 if(table_info->vdd_dep_on_mclk->entries[level].clk >=
2742 data->vbios_boot_state.mclk_bootup_value) {
2743 data->smc_state_table.MemoryBootLevel = level;
2744 break;
2745 }
2746 }
2747
2748 return 0;
2749}
2750
/**
* Populates the clock stretcher (CKS) data and lookup tables in the SMC
* state table and programs the PWR_CKS_ENABLE/PWR_CKS_CNTL registers.
*
* @param hwmgr the address of the hardware manager
* @return 0 on success, -EINVAL if the pptable's stretch amount is
*         unsupported (in which case the ClockStretcher cap is cleared)
*/
static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
			volt_with_cks, value;
	uint16_t clock_freq_u16;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
			volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (146 * 4));
	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (148 * 4));
	/* RO value lives in the top byte of the first fuse word,
	 * the fuse revision in the low nibble of the second */
	efuse &= 0xFF000000;
	efuse = efuse >> 24;
	efuse2 &= 0xF;

	/* linearly map the 8-bit fuse value onto the RO range for
	 * this fuse revision */
	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* type 0 = fast-fast silicon, type 1 = slow-slow */
	if (ro >= 1660)
		type = 0;
	else
		type = 1;

	/* Populate Stretch amount */
	data->smc_state_table.ClockStretcherAmount = stretch_amount;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* empirical voltage model with and without clock stretching;
		 * coefficients are silicon characterization constants */
		volt_without_cks = (uint32_t)((14041 *
			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
		volt_with_cks = (uint32_t)((13946 *
			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
		/* note: when the model says CKS saves nothing, the previous
		 * level's offset is carried over (volt_offset not reset) */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* pulse masterReset with staticEnable set, stretching disabled */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			STRETCH_ENABLE, 0x0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			staticEnable, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x0);

	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL);
	/* clear the CKS control fields we may set below */
	value &= 0xFFC2FF87;
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][0];
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][1];
	/* top graphics level's SCLK in MHz (stored byte-swapped, in 10KHz) */
	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
			GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
			SclkFrequency) / 100);
	if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
			clock_freq_u16 &&
	    fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
			clock_freq_u16) {
		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
		value |= (fiji_clock_stretch_amount_conversion
				[fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
				[stretch_amount]) << 3;
	}
	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].minFreq);
	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].maxFreq);
	/* setting packs refsel (bits 0-6) and the low-freq flag (bit 7) */
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
			fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
			(fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL, value);

	/* Populate DDT Lookup Table */
	for (i = 0; i < 4; i++) {
		/* Assign the minimum and maximum VID stored
		 * in the last row of Clock Stretcher Voltage Table.
		 */
		data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].minVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
		data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].maxVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
		/* Loop through each SCLK and check the frequency
		 * to see if it lies within the frequency for clock stretcher.
		 */
		for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
			cks_setting = 0;
			clock_freq = PP_SMC_TO_HOST_UL(
					data->smc_state_table.GraphicsLevel[j].SclkFrequency);
			/* Check the allowed frequency against the sclk level[j].
			 * Sclk's endianness has already been converted,
			 * and it's in 10Khz unit,
			 * as opposed to Data table, which is in Mhz unit.
			 */
			if (clock_freq >=
					(fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
				cks_setting |= 0x2;
				if (clock_freq <
						(fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
					cks_setting |= 0x1;
			}
			/* two bits per graphics level */
			data->smc_state_table.ClockStretcherDataTable.
					ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
		}
		CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
				ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].setting);
	}

	/* clear bit 0 of PWR_CKS_CNTL (disable) before SMC takes over */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
2907
2908/**
2909* Populates the SMC VRConfig field in DPM table.
2910*
2911* @param hwmgr the address of the hardware manager
2912* @param table the SMC DPM table structure to be populated
2913* @return always 0
2914*/
2915static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
2916 struct SMU73_Discrete_DpmTable *table)
2917{
2918 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2919 uint16_t config;
2920
2921 config = VR_MERGED_WITH_VDDC;
2922 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
2923
2924 /* Set Vddc Voltage Controller */
2925 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
2926 config = VR_SVI2_PLANE_1;
2927 table->VRConfig |= config;
2928 } else {
2929 PP_ASSERT_WITH_CODE(false,
2930 "VDDC should be on SVI2 control in merged mode!",);
2931 }
2932 /* Set Vddci Voltage Controller */
2933 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
2934 config = VR_SVI2_PLANE_2; /* only in merged mode */
2935 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2936 } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
2937 config = VR_SMIO_PATTERN_1;
2938 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2939 } else {
2940 config = VR_STATIC_VOLTAGE;
2941 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2942 }
2943 /* Set Mvdd Voltage Controller */
2944 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
2945 config = VR_SVI2_PLANE_2;
2946 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2947 } else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
2948 config = VR_SMIO_PATTERN_2;
2949 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2950 } else {
2951 config = VR_STATIC_VOLTAGE;
2952 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2953 }
2954
2955 return 0;
2956}
2957
/**
* Initializes the SMC table and uploads it
*
* Sets up the default DPM tables, populates every sub-table (voltage,
* link, graphics, memory, ACPI, multimedia, ARB, boot, BAPM, clock
* stretcher, VR config, GPIO assignments), byte-swaps the scalar fields,
* and uploads the whole DPM table to SMC memory.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, otherwise the first failing step's error code
*/
static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
	const struct fiji_ulv_parm *ulv = &(data->ulv);
	uint8_t i;
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;

	result = fiji_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
		fiji_populate_smc_voltage_tables(hwmgr, table);

	table->SystemFlags = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
		result = fiji_populate_ulv_state(hwmgr, table);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to initialize ULV state!", return result);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	result = fiji_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Link Level!", return result);

	result = fiji_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Graphics Level!", return result);

	result = fiji_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Memory Level!", return result);

	result = fiji_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ACPI Level!", return result);

	result = fiji_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize VCE Level!", return result);

	result = fiji_populate_smc_acp_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ACP Level!", return result);

	result = fiji_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize SAMU Level!", return result);

	/* Since only the initial state is completely set up at this point
	 * (the other states are just copies of the boot state) we only
	 * need to populate the ARB settings for the initial state.
	 */
	result = fiji_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to Write ARB settings for the initial state.", return result);

	result = fiji_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize UVD Level!", return result);

	result = fiji_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot Level!", return result);

	result = fiji_populate_smc_initailial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot State!", return result);

	result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate BAPM Parameters!", return result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ClockStretcher)) {
		result = fiji_populate_clock_stretcher_data_table(hwmgr);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to populate Clock Stretcher Data Table!",
				return result);
	}

	/* fixed SMC sampling/control intervals and thermal limits */
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	/* temperatures in SMC Q8.8 fixed-point format */
	table->TemperatureLimitHigh =
			table_info->cac_dtp_table->usTargetOperatingTemp *
			FIJI_Q88_FORMAT_CONVERSION_UNIT;
	table->TemperatureLimitLow =
			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
			FIJI_Q88_FORMAT_CONVERSION_UNIT;
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
	table->PCIeGenInterval = 1;
	table->VRConfig = 0;

	result = fiji_populate_vr_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate VRConfig setting!", return result);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* VRHot GPIO: enable the cap only when the VBIOS assigns a pin */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	} else {
		table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	/* AC/DC switch GPIO, same pattern as above */
	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
			&gpio_pin)) {
		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	} else {
		table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	}

	/* Thermal Output GPIO */
	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
			&gpio_pin)) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ThermalOutGPIO);

		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;

		/* For porlarity read GPIOPAD_A with assigned Gpio pin
		 * since VBIOS will program this register to set 'inactive state',
		 * driver can then determine 'active state' from this and
		 * program SMU with correct polarity
		 */
		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
				(1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;

		/* if required, combine VRHot/PCC with thermal out GPIO */
		if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot) &&
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_CombinePCCWithThermalSignal))
			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ThermalOutGPIO);
		table->ThermOutGpio = 17;
		table->ThermOutPolarity = 1;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
	}

	/* byte-swap the SMIO patterns and remaining scalar fields */
	for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);

	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
	/* NOTE(review): the upload size excludes 3 trailing SMU73_PIDController
	 * blocks — presumably those are programmed separately; confirm against
	 * the SMU73_Discrete_DpmTable layout. */
	result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
			data->dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, SystemFlags),
			(uint8_t *)&(table->SystemFlags),
			sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
			data->sram_end);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload dpm data to SMC memory!", return result);

	return 0;
}
3168
3169/**
3170* Initialize the ARB DRAM timing table's index field.
3171*
3172* @param hwmgr the address of the powerplay hardware manager.
3173* @return always 0
3174*/
3175static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
3176{
3177 const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3178 uint32_t tmp;
3179 int result;
3180
3181 /* This is a read-modify-write on the first byte of the ARB table.
3182 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
3183 * is the field 'current'.
3184 * This solution is ugly, but we never write the whole table only
3185 * individual fields in it.
3186 * In reality this field should not be in that structure
3187 * but in a soft register.
3188 */
3189 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
3190 data->arb_table_start, &tmp, data->sram_end);
3191
3192 if (result)
3193 return result;
3194
3195 tmp &= 0x00FFFFFF;
3196 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
3197
3198 return fiji_write_smc_sram_dword(hwmgr->smumgr,
3199 data->arb_table_start, tmp, data->sram_end);
3200}
3201
3202static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
3203{
3204 if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3205 PHM_PlatformCaps_RegulatorHot))
3206 return smum_send_msg_to_smc(hwmgr->smumgr,
3207 PPSMC_MSG_EnableVRHotGPIOInterrupt);
3208
3209 return 0;
3210}
3211
/* Let the hardware manage the engine clock again: clear SCLK_PWRMGT_OFF
 * in SCLK_PWRMGT_CNTL so SCLK power management is no longer forced off.
 * Always returns 0.
 */
static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
3218
3219static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
3220{
3221 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3222 struct fiji_ulv_parm *ulv = &(data->ulv);
3223
3224 if (ulv->ulv_supported)
3225 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
3226
3227 return 0;
3228}
3229
3230static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
3231{
3232 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3233 struct fiji_ulv_parm *ulv = &(data->ulv);
3234
3235 if (ulv->ulv_supported)
3236 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
3237
3238 return 0;
3239}
3240
3241static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3242{
3243 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3244 PHM_PlatformCaps_SclkDeepSleep)) {
3245 if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
3246 PP_ASSERT_WITH_CODE(false,
3247 "Attempt to enable Master Deep Sleep switch failed!",
3248 return -1);
3249 } else {
3250 if (smum_send_msg_to_smc(hwmgr->smumgr,
3251 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
3252 PP_ASSERT_WITH_CODE(false,
3253 "Attempt to disable Master Deep Sleep switch failed!",
3254 return -1);
3255 }
3256 }
3257
3258 return 0;
3259}
3260
3261static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3262{
3263 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3264 PHM_PlatformCaps_SclkDeepSleep)) {
3265 if (smum_send_msg_to_smc(hwmgr->smumgr,
3266 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
3267 PP_ASSERT_WITH_CODE(false,
3268 "Attempt to disable Master Deep Sleep switch failed!",
3269 return -1);
3270 }
3271 }
3272
3273 return 0;
3274}
3275
/* Enable SCLK and MCLK DPM in the SMC.
 *
 * For MCLK, before PPSMC_MSG_MCLKDPM_Enable is sent, the LCAC MC/CPL
 * registers are armed: first count how many MCD tiles actually have
 * channels enabled (via MC_SEQ_RESERVE_0_S per tile), program thresholds
 * and enable bits, wait, then set the signal-ID bits.  Finally the
 * CAC_EN bit in MC_SEQ_CNTL is set for every MCD tile.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success; -1 if an SMC message fails, -EINVAL if no MCQ
 *         is enabled.
 */
static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t val, val0, val2;
	uint32_t i, cpl_cntl, cpl_threshold, mc_threshold;

	/* enable SCLK dpm */
	if(!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -1);

	/* enable MCLK dpm */
	if(0 == data->mclk_dpm_key_disabled) {
		cpl_threshold = 0;
		mc_threshold = 0;

		/* Read per MCD tile (0 - 7) */
		for (i = 0; i < 8; i++) {
			PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
			val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
			if (0xf0000000 != val) {
				/* count number of MCQ that has channel(s) enabled */
				cpl_threshold++;
				/* only harvest 3 or full 4 supported */
				mc_threshold = val ? 3 : 4;
			}
		}
		PP_ASSERT_WITH_CODE(0 != cpl_threshold,
				"Number of MCQ is zero!", return -EINVAL;);

		/* Build the per-MC threshold/enable word and the CPL control
		 * word (block id 8) from the counts gathered above.
		 */
		mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
				LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
				LCAC_MC0_CNTL__MC0_ENABLE_MASK;
		cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
				LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
				LCAC_CPL_CNTL__CPL_ENABLE_MASK;
		cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC0_CNTL, mc_threshold);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC1_CNTL, mc_threshold);
		/* NOTE(review): MC2-MC7 are only programmed when ALL eight
		 * tiles are populated (cpl_threshold == 8); for partially
		 * harvested parts only MC0/MC1 get thresholds — confirm this
		 * is intended.
		 */
		if (8 == cpl_threshold) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC2_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC3_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC4_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC5_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC6_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC7_CNTL, mc_threshold);
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_CPL_CNTL, cpl_cntl);

		udelay(5);

		/* Second pass: re-write the same registers with the
		 * signal-ID bit added on top of the threshold/enable bits.
		 */
		mc_threshold = mc_threshold |
				(1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
		cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC0_CNTL, mc_threshold);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC1_CNTL, mc_threshold);
		if (8 == cpl_threshold) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC2_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC3_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC4_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC5_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC6_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC7_CNTL, mc_threshold);
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_CPL_CNTL, cpl_cntl);

		/* Program CAC_EN per MCD (0-7) Tile */
		val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
		val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MC_RD_ENABLE_MASK);

		for (i = 0; i < 8; i++) {
			/* Enable MCD i Tile read & write */
			val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
					(1 << i));
			cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
			/* Enable CAC_ON MCD i Tile */
			val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
			val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
			cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
		}
		/* Set MC_CONFIG_MCD back to its default setting val0 */
		cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -1);
	}
	return 0;
}
3395
3396static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
3397{
3398 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3399
3400 /*enable general power management */
3401 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3402 GLOBAL_PWRMGT_EN, 1);
3403 /* enable sclk deep sleep */
3404 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
3405 DYNAMIC_PM_EN, 1);
3406 /* prepare for PCIE DPM */
3407 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3408 data->soft_regs_start + offsetof(SMU73_SoftRegisters,
3409 VoltageChangeTimeout), 0x1000);
3410 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
3411 SWRST_COMMAND_1, RESETLC, 0x0);
3412
3413 PP_ASSERT_WITH_CODE(
3414 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
3415 PPSMC_MSG_Voltage_Cntl_Enable)),
3416 "Failed to enable voltage DPM during DPM Start Function!",
3417 return -1);
3418
3419 if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
3420 printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
3421 return -1;
3422 }
3423
3424 /* enable PCIE dpm */
3425 if(!data->pcie_dpm_key_disabled) {
3426 PP_ASSERT_WITH_CODE(
3427 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
3428 PPSMC_MSG_PCIeDPM_Enable)),
3429 "Failed to enable pcie DPM during DPM Start Function!",
3430 return -1);
3431 }
3432
3433 return 0;
3434}
3435
3436static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3437{
3438 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3439
3440 /* disable SCLK dpm */
3441 if (!data->sclk_dpm_key_disabled)
3442 PP_ASSERT_WITH_CODE(
3443 (smum_send_msg_to_smc(hwmgr->smumgr,
3444 PPSMC_MSG_DPM_Disable) == 0),
3445 "Failed to disable SCLK DPM!",
3446 return -1);
3447
3448 /* disable MCLK dpm */
3449 if (!data->mclk_dpm_key_disabled) {
3450 PP_ASSERT_WITH_CODE(
3451 (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3452 PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
3453 "Failed to force MCLK DPM0!",
3454 return -1);
3455
3456 PP_ASSERT_WITH_CODE(
3457 (smum_send_msg_to_smc(hwmgr->smumgr,
3458 PPSMC_MSG_MCLKDPM_Disable) == 0),
3459 "Failed to disable MCLK DPM!",
3460 return -1);
3461 }
3462
3463 return 0;
3464}
3465
3466static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
3467{
3468 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3469
3470 /* disable general power management */
3471 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3472 GLOBAL_PWRMGT_EN, 0);
3473 /* disable sclk deep sleep */
3474 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
3475 DYNAMIC_PM_EN, 0);
3476
3477 /* disable PCIE dpm */
3478 if (!data->pcie_dpm_key_disabled) {
3479 PP_ASSERT_WITH_CODE(
3480 (smum_send_msg_to_smc(hwmgr->smumgr,
3481 PPSMC_MSG_PCIeDPM_Disable) == 0),
3482 "Failed to disable pcie DPM during DPM Stop Function!",
3483 return -1);
3484 }
3485
3486 if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
3487 printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
3488 return -1;
3489 }
3490
3491 PP_ASSERT_WITH_CODE(
3492 (smum_send_msg_to_smc(hwmgr->smumgr,
3493 PPSMC_MSG_Voltage_Cntl_Disable) == 0),
3494 "Failed to disable voltage DPM during DPM Stop Function!",
3495 return -1);
3496
3497 return 0;
3498}
3499
3500static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
3501 uint32_t sources)
3502{
3503 bool protection;
3504 enum DPM_EVENT_SRC src;
3505
3506 switch (sources) {
3507 default:
3508 printk(KERN_ERR "Unknown throttling event sources.");
3509 /* fall through */
3510 case 0:
3511 protection = false;
3512 /* src is unused */
3513 break;
3514 case (1 << PHM_AutoThrottleSource_Thermal):
3515 protection = true;
3516 src = DPM_EVENT_SRC_DIGITAL;
3517 break;
3518 case (1 << PHM_AutoThrottleSource_External):
3519 protection = true;
3520 src = DPM_EVENT_SRC_EXTERNAL;
3521 break;
3522 case (1 << PHM_AutoThrottleSource_External) |
3523 (1 << PHM_AutoThrottleSource_Thermal):
3524 protection = true;
3525 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
3526 break;
3527 }
3528 /* Order matters - don't enable thermal protection for the wrong source. */
3529 if (protection) {
3530 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
3531 DPM_EVENT_SRC, src);
3532 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3533 THERMAL_PROTECTION_DIS,
3534 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3535 PHM_PlatformCaps_ThermalController));
3536 } else
3537 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3538 THERMAL_PROTECTION_DIS, 1);
3539}
3540
3541static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3542 PHM_AutoThrottleSource source)
3543{
3544 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3545
3546 if (!(data->active_auto_throttle_sources & (1 << source))) {
3547 data->active_auto_throttle_sources |= 1 << source;
3548 fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3549 }
3550 return 0;
3551}
3552
/* Convenience wrapper: activate the thermal auto-throttle source. */
static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
3557
3558static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3559 PHM_AutoThrottleSource source)
3560{
3561 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3562
3563 if (data->active_auto_throttle_sources & (1 << source)) {
3564 data->active_auto_throttle_sources &= ~(1 << source);
3565 fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3566 }
3567 return 0;
3568}
3569
/* Convenience wrapper: deactivate the thermal auto-throttle source. */
static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
3574
3575static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
3576{
3577 int tmp_result, result = 0;
3578
3579 tmp_result = (!fiji_is_dpm_running(hwmgr))? 0 : -1;
3580 PP_ASSERT_WITH_CODE(result == 0,
3581 "DPM is already running right now, no need to enable DPM!",
3582 return 0);
3583
3584 if (fiji_voltage_control(hwmgr)) {
3585 tmp_result = fiji_enable_voltage_control(hwmgr);
3586 PP_ASSERT_WITH_CODE(tmp_result == 0,
3587 "Failed to enable voltage control!",
3588 result = tmp_result);
3589 }
3590
3591 if (fiji_voltage_control(hwmgr)) {
3592 tmp_result = fiji_construct_voltage_tables(hwmgr);
3593 PP_ASSERT_WITH_CODE((0 == tmp_result),
3594 "Failed to contruct voltage tables!",
3595 result = tmp_result);
3596 }
3597
3598 tmp_result = fiji_initialize_mc_reg_table(hwmgr);
3599 PP_ASSERT_WITH_CODE((0 == tmp_result),
3600 "Failed to initialize MC reg table!", result = tmp_result);
3601
3602 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3603 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
3604 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3605 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
3606
3607 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3608 PHM_PlatformCaps_ThermalController))
3609 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3610 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
3611
3612 tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
3613 PP_ASSERT_WITH_CODE((0 == tmp_result),
3614 "Failed to program static screen threshold parameters!",
3615 result = tmp_result);
3616
3617 tmp_result = fiji_enable_display_gap(hwmgr);
3618 PP_ASSERT_WITH_CODE((0 == tmp_result),
3619 "Failed to enable display gap!", result = tmp_result);
3620
3621 tmp_result = fiji_program_voting_clients(hwmgr);
3622 PP_ASSERT_WITH_CODE((0 == tmp_result),
3623 "Failed to program voting clients!", result = tmp_result);
3624
3625 tmp_result = fiji_process_firmware_header(hwmgr);
3626 PP_ASSERT_WITH_CODE((0 == tmp_result),
3627 "Failed to process firmware header!", result = tmp_result);
3628
3629 tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
3630 PP_ASSERT_WITH_CODE((0 == tmp_result),
3631 "Failed to initialize switch from ArbF0 to F1!",
3632 result = tmp_result);
3633
3634 tmp_result = fiji_init_smc_table(hwmgr);
3635 PP_ASSERT_WITH_CODE((0 == tmp_result),
3636 "Failed to initialize SMC table!", result = tmp_result);
3637
3638 tmp_result = fiji_init_arb_table_index(hwmgr);
3639 PP_ASSERT_WITH_CODE((0 == tmp_result),
3640 "Failed to initialize ARB table index!", result = tmp_result);
3641
3642 tmp_result = fiji_populate_pm_fuses(hwmgr);
3643 PP_ASSERT_WITH_CODE((0 == tmp_result),
3644 "Failed to populate PM fuses!", result = tmp_result);
3645
3646 tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
3647 PP_ASSERT_WITH_CODE((0 == tmp_result),
3648 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
3649
3650 tmp_result = tonga_notify_smc_display_change(hwmgr, false);
3651 PP_ASSERT_WITH_CODE((0 == tmp_result),
3652 "Failed to notify no display!", result = tmp_result);
3653
3654 tmp_result = fiji_enable_sclk_control(hwmgr);
3655 PP_ASSERT_WITH_CODE((0 == tmp_result),
3656 "Failed to enable SCLK control!", result = tmp_result);
3657
3658 tmp_result = fiji_enable_ulv(hwmgr);
3659 PP_ASSERT_WITH_CODE((0 == tmp_result),
3660 "Failed to enable ULV!", result = tmp_result);
3661
3662 tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
3663 PP_ASSERT_WITH_CODE((0 == tmp_result),
3664 "Failed to enable deep sleep master switch!", result = tmp_result);
3665
3666 tmp_result = fiji_start_dpm(hwmgr);
3667 PP_ASSERT_WITH_CODE((0 == tmp_result),
3668 "Failed to start DPM!", result = tmp_result);
3669
3670 tmp_result = fiji_enable_smc_cac(hwmgr);
3671 PP_ASSERT_WITH_CODE((0 == tmp_result),
3672 "Failed to enable SMC CAC!", result = tmp_result);
3673
3674 tmp_result = fiji_enable_power_containment(hwmgr);
3675 PP_ASSERT_WITH_CODE((0 == tmp_result),
3676 "Failed to enable power containment!", result = tmp_result);
3677
3678 tmp_result = fiji_power_control_set_level(hwmgr);
3679 PP_ASSERT_WITH_CODE((0 == tmp_result),
3680 "Failed to power control set level!", result = tmp_result);
3681
3682 tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
3683 PP_ASSERT_WITH_CODE((0 == tmp_result),
3684 "Failed to enable thermal auto throttle!", result = tmp_result);
3685
3686 return result;
3687}
3688
3689static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3690{
3691 int tmp_result, result = 0;
3692
3693 tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
3694 PP_ASSERT_WITH_CODE(tmp_result == 0,
3695 "DPM is not running right now, no need to disable DPM!",
3696 return 0);
3697
3698 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3699 PHM_PlatformCaps_ThermalController))
3700 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3701 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
3702
3703 tmp_result = fiji_disable_power_containment(hwmgr);
3704 PP_ASSERT_WITH_CODE((tmp_result == 0),
3705 "Failed to disable power containment!", result = tmp_result);
3706
3707 tmp_result = fiji_disable_smc_cac(hwmgr);
3708 PP_ASSERT_WITH_CODE((tmp_result == 0),
3709 "Failed to disable SMC CAC!", result = tmp_result);
3710
3711 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3712 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
3713 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3714 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
3715
3716 tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
3717 PP_ASSERT_WITH_CODE((tmp_result == 0),
3718 "Failed to disable thermal auto throttle!", result = tmp_result);
3719
3720 tmp_result = fiji_stop_dpm(hwmgr);
3721 PP_ASSERT_WITH_CODE((tmp_result == 0),
3722 "Failed to stop DPM!", result = tmp_result);
3723
3724 tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
3725 PP_ASSERT_WITH_CODE((tmp_result == 0),
3726 "Failed to disable deep sleep master switch!", result = tmp_result);
3727
3728 tmp_result = fiji_disable_ulv(hwmgr);
3729 PP_ASSERT_WITH_CODE((tmp_result == 0),
3730 "Failed to disable ULV!", result = tmp_result);
3731
3732 tmp_result = fiji_clear_voting_clients(hwmgr);
3733 PP_ASSERT_WITH_CODE((tmp_result == 0),
3734 "Failed to clear voting clients!", result = tmp_result);
3735
3736 tmp_result = fiji_reset_to_default(hwmgr);
3737 PP_ASSERT_WITH_CODE((tmp_result == 0),
3738 "Failed to reset to default!", result = tmp_result);
3739
3740 tmp_result = fiji_force_switch_to_arbf0(hwmgr);
3741 PP_ASSERT_WITH_CODE((tmp_result == 0),
3742 "Failed to force to switch arbf0!", result = tmp_result);
3743
3744 return result;
3745}
3746
3747static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
3748{
3749 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3750 uint32_t level, tmp;
3751
3752 if (!data->sclk_dpm_key_disabled) {
3753 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3754 level = 0;
3755 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3756 while (tmp >>= 1)
3757 level++;
3758 if (level)
3759 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3760 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3761 (1 << level));
3762 }
3763 }
3764
3765 if (!data->mclk_dpm_key_disabled) {
3766 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3767 level = 0;
3768 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3769 while (tmp >>= 1)
3770 level++;
3771 if (level)
3772 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3773 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3774 (1 << level));
3775 }
3776 }
3777
3778 if (!data->pcie_dpm_key_disabled) {
3779 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3780 level = 0;
3781 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3782 while (tmp >>= 1)
3783 level++;
3784 if (level)
3785 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3786 PPSMC_MSG_PCIeDPM_ForceLevel,
3787 (1 << level));
3788 }
3789 }
3790 return 0;
3791}
3792
3793static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
3794{
3795 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3796
3797 phm_apply_dal_min_voltage_request(hwmgr);
3798
3799 if (!data->sclk_dpm_key_disabled) {
3800 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3801 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3802 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3803 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3804 }
3805 return 0;
3806}
3807
3808static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3809{
3810 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3811
3812 if (!fiji_is_dpm_running(hwmgr))
3813 return -EINVAL;
3814
3815 if (!data->pcie_dpm_key_disabled) {
3816 smum_send_msg_to_smc(hwmgr->smumgr,
3817 PPSMC_MSG_PCIeDPM_UnForceLevel);
3818 }
3819
3820 return fiji_upload_dpmlevel_enable_mask(hwmgr);
3821}
3822
/* Return the index of the lowest set bit in @mask.
 * NOTE: @mask must be non-zero; a zero mask makes the loop run past
 * bit 31 (callers always check the mask first).  @hwmgr is unused but
 * kept for signature compatibility.
 */
static uint32_t fiji_get_lowest_enabled_level(
		struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level;

	for (level = 0; (mask & (1 << level)) == 0; level++)
		;

	return level;
}
3833
3834static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3835{
3836 struct fiji_hwmgr *data =
3837 (struct fiji_hwmgr *)(hwmgr->backend);
3838 uint32_t level;
3839
3840 if (!data->sclk_dpm_key_disabled)
3841 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3842 level = fiji_get_lowest_enabled_level(hwmgr,
3843 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3844 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3845 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3846 (1 << level));
3847
3848 }
3849
3850 if (!data->mclk_dpm_key_disabled) {
3851 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3852 level = fiji_get_lowest_enabled_level(hwmgr,
3853 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3854 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3855 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3856 (1 << level));
3857 }
3858 }
3859
3860 if (!data->pcie_dpm_key_disabled) {
3861 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3862 level = fiji_get_lowest_enabled_level(hwmgr,
3863 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3864 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3865 PPSMC_MSG_PCIeDPM_ForceLevel,
3866 (1 << level));
3867 }
3868 }
3869
3870 return 0;
3871
3872}
3873static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
3874 enum amd_dpm_forced_level level)
3875{
3876 int ret = 0;
3877
3878 switch (level) {
3879 case AMD_DPM_FORCED_LEVEL_HIGH:
3880 ret = fiji_force_dpm_highest(hwmgr);
3881 if (ret)
3882 return ret;
3883 break;
3884 case AMD_DPM_FORCED_LEVEL_LOW:
3885 ret = fiji_force_dpm_lowest(hwmgr);
3886 if (ret)
3887 return ret;
3888 break;
3889 case AMD_DPM_FORCED_LEVEL_AUTO:
3890 ret = fiji_unforce_dpm_levels(hwmgr);
3891 if (ret)
3892 return ret;
3893 break;
3894 default:
3895 break;
3896 }
3897
3898 hwmgr->dpm_level = level;
3899
3900 return ret;
3901}
3902
/* Report the size of the Fiji-specific hardware power state so the core
 * can allocate pp_power_state objects with enough room for it.
 */
static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct fiji_power_state);
}
3907
3908static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3909 void *state, struct pp_power_state *power_state,
3910 void *pp_table, uint32_t classification_flag)
3911{
3912 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3913 struct fiji_power_state *fiji_power_state =
3914 (struct fiji_power_state *)(&(power_state->hardware));
3915 struct fiji_performance_level *performance_level;
3916 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3917 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3918 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3919 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
3920 (ATOM_Tonga_SCLK_Dependency_Table *)
3921 (((unsigned long)powerplay_table) +
3922 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3923 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3924 (ATOM_Tonga_MCLK_Dependency_Table *)
3925 (((unsigned long)powerplay_table) +
3926 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3927
3928 /* The following fields are not initialized here: id orderedList allStatesList */
3929 power_state->classification.ui_label =
3930 (le16_to_cpu(state_entry->usClassification) &
3931 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3932 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3933 power_state->classification.flags = classification_flag;
3934 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3935
3936 power_state->classification.temporary_state = false;
3937 power_state->classification.to_be_deleted = false;
3938
3939 power_state->validation.disallowOnDC =
3940 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3941 ATOM_Tonga_DISALLOW_ON_DC));
3942
3943 power_state->pcie.lanes = 0;
3944
3945 power_state->display.disableFrameModulation = false;
3946 power_state->display.limitRefreshrate = false;
3947 power_state->display.enableVariBright =
3948 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3949 ATOM_Tonga_ENABLE_VARIBRIGHT));
3950
3951 power_state->validation.supportedPowerLevels = 0;
3952 power_state->uvd_clocks.VCLK = 0;
3953 power_state->uvd_clocks.DCLK = 0;
3954 power_state->temperatures.min = 0;
3955 power_state->temperatures.max = 0;
3956
3957 performance_level = &(fiji_power_state->performance_levels
3958 [fiji_power_state->performance_level_count++]);
3959
3960 PP_ASSERT_WITH_CODE(
3961 (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
3962 "Performance levels exceeds SMC limit!",
3963 return -1);
3964
3965 PP_ASSERT_WITH_CODE(
3966 (fiji_power_state->performance_level_count <=
3967 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3968 "Performance levels exceeds Driver limit!",
3969 return -1);
3970
3971 /* Performance levels are arranged from low to high. */
3972 performance_level->memory_clock = mclk_dep_table->entries
3973 [state_entry->ucMemoryClockIndexLow].ulMclk;
3974 performance_level->engine_clock = sclk_dep_table->entries
3975 [state_entry->ucEngineClockIndexLow].ulSclk;
3976 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3977 state_entry->ucPCIEGenLow);
3978 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3979 state_entry->ucPCIELaneHigh);
3980
3981 performance_level = &(fiji_power_state->performance_levels
3982 [fiji_power_state->performance_level_count++]);
3983 performance_level->memory_clock = mclk_dep_table->entries
3984 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3985 performance_level->engine_clock = sclk_dep_table->entries
3986 [state_entry->ucEngineClockIndexHigh].ulSclk;
3987 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3988 state_entry->ucPCIEGenHigh);
3989 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3990 state_entry->ucPCIELaneHigh);
3991
3992 return 0;
3993}
3994
/* Fill in power state @entry_index from the v1.0 powerplay table.
 *
 * Delegates the per-entry parsing to
 * fiji_get_pp_table_entry_callback_func(), then performs sanity checks
 * against the VBIOS boot state, flags DC compatibility, records the
 * ACPI state's PCIe gen, and accumulates the min/max PCIe gen/lane
 * ranges for Performance and Battery UI states.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param entry_index index of the powerplay table entry to load.
 * @param state destination power state.
 * @return always 0 (parse errors only suppress the PCIe range update).
 */
static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct fiji_power_state *ps;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct fiji_power_state *)(&state->hardware);

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			fiji_get_pp_table_entry_callback_func);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the PCIe gen of the ACPI state for later transitions. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	/* Widen the tracked PCIe gen/lane ranges with this state's levels,
	 * separately for Performance and Battery (power-saving) states.
	 */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
4098
4099static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
4100 struct pp_power_state *request_ps,
4101 const struct pp_power_state *current_ps)
4102{
4103 struct fiji_power_state *fiji_ps =
4104 cast_phw_fiji_power_state(&request_ps->hardware);
4105 uint32_t sclk;
4106 uint32_t mclk;
4107 struct PP_Clocks minimum_clocks = {0};
4108 bool disable_mclk_switching;
4109 bool disable_mclk_switching_for_frame_lock;
4110 struct cgs_display_info info = {0};
4111 const struct phm_clock_and_voltage_limits *max_limits;
4112 uint32_t i;
4113 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4114 struct phm_ppt_v1_information *table_info =
4115 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4116 int32_t count;
4117 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
4118
4119 data->battery_state = (PP_StateUILabel_Battery ==
4120 request_ps->classification.ui_label);
4121
4122 PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
4123 "VI should always have 2 performance levels",);
4124
4125 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
4126 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
4127 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
4128
4129 /* Cap clock DPM tables at DC MAX if it is in DC. */
4130 if (PP_PowerSource_DC == hwmgr->power_source) {
4131 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4132 if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
4133 fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
4134 if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
4135 fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
4136 }
4137 }
4138
4139 fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
4140 fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
4141
4142 fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
4143
4144 cgs_get_active_displays_info(hwmgr->device, &info);
4145
4146 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
4147
4148 /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
4149
4150 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4151 PHM_PlatformCaps_StablePState)) {
4152 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
4153 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
4154
4155 for (count = table_info->vdd_dep_on_sclk->count - 1;
4156 count >= 0; count--) {
4157 if (stable_pstate_sclk >=
4158 table_info->vdd_dep_on_sclk->entries[count].clk) {
4159 stable_pstate_sclk =
4160 table_info->vdd_dep_on_sclk->entries[count].clk;
4161 break;
4162 }
4163 }
4164
4165 if (count < 0)
4166 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
4167
4168 stable_pstate_mclk = max_limits->mclk;
4169
4170 minimum_clocks.engineClock = stable_pstate_sclk;
4171 minimum_clocks.memoryClock = stable_pstate_mclk;
4172 }
4173
4174 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
4175 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
4176
4177 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
4178 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
4179
4180 fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
4181
4182 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
4183 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
4184 hwmgr->platform_descriptor.overdriveLimit.engineClock),
4185 "Overdrive sclk exceeds limit",
4186 hwmgr->gfx_arbiter.sclk_over_drive =
4187 hwmgr->platform_descriptor.overdriveLimit.engineClock);
4188
4189 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
4190 fiji_ps->performance_levels[1].engine_clock =
4191 hwmgr->gfx_arbiter.sclk_over_drive;
4192 }
4193
4194 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
4195 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
4196 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
4197 "Overdrive mclk exceeds limit",
4198 hwmgr->gfx_arbiter.mclk_over_drive =
4199 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
4200
4201 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
4202 fiji_ps->performance_levels[1].memory_clock =
4203 hwmgr->gfx_arbiter.mclk_over_drive;
4204 }
4205
4206 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
4207 hwmgr->platform_descriptor.platformCaps,
4208 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
4209
4210 disable_mclk_switching = (1 < info.display_count) ||
4211 disable_mclk_switching_for_frame_lock;
4212
4213 sclk = fiji_ps->performance_levels[0].engine_clock;
4214 mclk = fiji_ps->performance_levels[0].memory_clock;
4215
4216 if (disable_mclk_switching)
4217 mclk = fiji_ps->performance_levels
4218 [fiji_ps->performance_level_count - 1].memory_clock;
4219
4220 if (sclk < minimum_clocks.engineClock)
4221 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
4222 max_limits->sclk : minimum_clocks.engineClock;
4223
4224 if (mclk < minimum_clocks.memoryClock)
4225 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
4226 max_limits->mclk : minimum_clocks.memoryClock;
4227
4228 fiji_ps->performance_levels[0].engine_clock = sclk;
4229 fiji_ps->performance_levels[0].memory_clock = mclk;
4230
4231 fiji_ps->performance_levels[1].engine_clock =
4232 (fiji_ps->performance_levels[1].engine_clock >=
4233 fiji_ps->performance_levels[0].engine_clock) ?
4234 fiji_ps->performance_levels[1].engine_clock :
4235 fiji_ps->performance_levels[0].engine_clock;
4236
4237 if (disable_mclk_switching) {
4238 if (mclk < fiji_ps->performance_levels[1].memory_clock)
4239 mclk = fiji_ps->performance_levels[1].memory_clock;
4240
4241 fiji_ps->performance_levels[0].memory_clock = mclk;
4242 fiji_ps->performance_levels[1].memory_clock = mclk;
4243 } else {
4244 if (fiji_ps->performance_levels[1].memory_clock <
4245 fiji_ps->performance_levels[0].memory_clock)
4246 fiji_ps->performance_levels[1].memory_clock =
4247 fiji_ps->performance_levels[0].memory_clock;
4248 }
4249
4250 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4251 PHM_PlatformCaps_StablePState)) {
4252 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4253 fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
4254 fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
4255 fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
4256 fiji_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
4257 }
4258 }
4259
4260 return 0;
4261}
4262
4263static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
4264{
4265 const struct phm_set_power_state_input *states =
4266 (const struct phm_set_power_state_input *)input;
4267 const struct fiji_power_state *fiji_ps =
4268 cast_const_phw_fiji_power_state(states->pnew_state);
4269 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4270 struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4271 uint32_t sclk = fiji_ps->performance_levels
4272 [fiji_ps->performance_level_count - 1].engine_clock;
4273 struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4274 uint32_t mclk = fiji_ps->performance_levels
4275 [fiji_ps->performance_level_count - 1].memory_clock;
4276 uint32_t i;
4277 struct cgs_display_info info = {0};
4278
4279 data->need_update_smu7_dpm_table = 0;
4280
4281 for (i = 0; i < sclk_table->count; i++) {
4282 if (sclk == sclk_table->dpm_levels[i].value)
4283 break;
4284 }
4285
4286 if (i >= sclk_table->count)
4287 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4288 else {
4289 if(data->display_timing.min_clock_in_sr !=
4290 hwmgr->display_config.min_core_set_clock_in_sr)
4291 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4292 }
4293
4294 for (i = 0; i < mclk_table->count; i++) {
4295 if (mclk == mclk_table->dpm_levels[i].value)
4296 break;
4297 }
4298
4299 if (i >= mclk_table->count)
4300 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4301
4302 cgs_get_active_displays_info(hwmgr->device, &info);
4303
4304 if (data->display_timing.num_existing_displays != info.display_count)
4305 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4306
4307 return 0;
4308}
4309
4310static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4311 const struct fiji_power_state *fiji_ps)
4312{
4313 uint32_t i;
4314 uint32_t sclk, max_sclk = 0;
4315 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4316 struct fiji_dpm_table *dpm_table = &data->dpm_table;
4317
4318 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4319 sclk = fiji_ps->performance_levels[i].engine_clock;
4320 if (max_sclk < sclk)
4321 max_sclk = sclk;
4322 }
4323
4324 for (i = 0; i < dpm_table->sclk_table.count; i++) {
4325 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4326 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4327 dpm_table->pcie_speed_table.dpm_levels
4328 [dpm_table->pcie_speed_table.count - 1].value :
4329 dpm_table->pcie_speed_table.dpm_levels[i].value);
4330 }
4331
4332 return 0;
4333}
4334
/* Before switching power states, ask ACPI to raise the PCIe link speed
 * when the new state needs a faster link than the current one.  A
 * downshift is only flagged (pspp_notify_required) and handled after
 * the state change.  On failure, force_pcie_gen records a fallback gen
 * for the next attempt.
 */
static int fiji_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	const struct fiji_power_state *fiji_nps =
			cast_const_phw_fiji_power_state(states->pnew_state);
	const struct fiji_power_state *fiji_cps =
			cast_const_phw_fiji_power_state(states->pcurrent_state);

	uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
	uint16_t current_link_speed;

	/* A previously forced gen overrides the current state's speed. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch(target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 request failed: remember Gen2 as fallback. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through - try a Gen2 request next */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request failed, read back HW speed */
		default:
			data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		/* Slower link wanted: notify after the state change. */
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
4378
4379static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4380{
4381 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4382
4383 if (0 == data->need_update_smu7_dpm_table)
4384 return 0;
4385
4386 if ((0 == data->sclk_dpm_key_disabled) &&
4387 (data->need_update_smu7_dpm_table &
4388 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4389 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4390 "Trying to freeze SCLK DPM when DPM is disabled",
4391 );
4392 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4393 PPSMC_MSG_SCLKDPM_FreezeLevel),
4394 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4395 return -1);
4396 }
4397
4398 if ((0 == data->mclk_dpm_key_disabled) &&
4399 (data->need_update_smu7_dpm_table &
4400 DPMTABLE_OD_UPDATE_MCLK)) {
4401 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4402 "Trying to freeze MCLK DPM when DPM is disabled",
4403 );
4404 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4405 PPSMC_MSG_MCLKDPM_FreezeLevel),
4406 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4407 return -1);
4408 }
4409
4410 return 0;
4411}
4412
/* Write the new state's top SCLK/MCLK into the DPM tables and, when an
 * update is pending, re-upload the graphics/memory level tables to the
 * SMU.  With OD6+ overdrive, intermediate levels are rescaled
 * proportionally against the golden (default) table so the whole curve
 * follows the overdriven top level.
 */
static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct fiji_power_state *fiji_ps =
			cast_const_phw_fiji_power_state(states->pnew_state);
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	/* Highest performance level supplies the target clocks. */
	uint32_t sclk = fiji_ps->performance_levels
			[fiji_ps->performance_level_count - 1].engine_clock;
	uint32_t mclk = fiji_ps->performance_levels
			[fiji_ps->performance_level_count - 1].memory_clock;
	struct fiji_dpm_table *dpm_table = &data->dpm_table;

	struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		/* Top level takes the overdriven sclk directly. */
		dpm_table->sclk_table.dpm_levels
		[dpm_table->sclk_table.count - 1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport)) {
		/* Need to do calculation based on the golden DPM table
		 * as the Heatmap GPU Clock axis is also based on the default values
		 */
			PP_ASSERT_WITH_CODE(
				(golden_dpm_table->sclk_table.dpm_levels
						[golden_dpm_table->sclk_table.count - 1].value != 0),
				"Divide by 0!",
				return -1);
			/* Rescale intermediate levels only; levels 0 and 1
			 * (and the already-set top level) are untouched.
			 */
			dpm_count = dpm_table->sclk_table.count < 2 ?
					0 : dpm_table->sclk_table.count - 2;
			for (i = dpm_count; i > 1; i--) {
				if (sclk > golden_dpm_table->sclk_table.dpm_levels
						[golden_dpm_table->sclk_table.count-1].value) {
					/* Percentage above the default top sclk. */
					clock_percent =
						((sclk - golden_dpm_table->sclk_table.dpm_levels
							[golden_dpm_table->sclk_table.count-1].value) * 100) /
						golden_dpm_table->sclk_table.dpm_levels
							[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value +
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
								clock_percent)/100;

				/* NOTE(review): indexes the golden table with
				 * dpm_table's count — presumably both tables
				 * have the same count; verify, else this
				 * should be golden_dpm_table->sclk_table.count.
				 */
				} else if (golden_dpm_table->sclk_table.dpm_levels
						[dpm_table->sclk_table.count-1].value > sclk) {
					clock_percent =
						((golden_dpm_table->sclk_table.dpm_levels
						[golden_dpm_table->sclk_table.count - 1].value - sclk) *
						100) /
						golden_dpm_table->sclk_table.dpm_levels
							[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value -
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
								clock_percent) / 100;
				} else
					/* Equal to default: restore golden value. */
					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		dpm_table->mclk_table.dpm_levels
			[dpm_table->mclk_table.count - 1].value = mclk;
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
					(golden_dpm_table->mclk_table.dpm_levels
						[golden_dpm_table->mclk_table.count-1].value != 0),
					"Divide by 0!",
					return -1);
			dpm_count = dpm_table->mclk_table.count < 2 ?
					0 : dpm_table->mclk_table.count - 2;
			for (i = dpm_count; i > 1; i--) {
				if (mclk > golden_dpm_table->mclk_table.dpm_levels
						[golden_dpm_table->mclk_table.count-1].value) {
					clock_percent = ((mclk -
							golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value) * 100) /
							golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value +
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;

				/* NOTE(review): same golden/dpm_table count
				 * mix as the sclk branch above — confirm the
				 * counts always match.
				 */
				} else if (golden_dpm_table->mclk_table.dpm_levels
						[dpm_table->mclk_table.count-1].value > mclk) {
					clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
							golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value -
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = fiji_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/*populate MCLK dpm table to SMU7 */
		result = fiji_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}
4553
4554static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4555 struct fiji_single_dpm_table * dpm_table,
4556 uint32_t low_limit, uint32_t high_limit)
4557{
4558 uint32_t i;
4559
4560 for (i = 0; i < dpm_table->count; i++) {
4561 if ((dpm_table->dpm_levels[i].value < low_limit) ||
4562 (dpm_table->dpm_levels[i].value > high_limit))
4563 dpm_table->dpm_levels[i].enabled = false;
4564 else
4565 dpm_table->dpm_levels[i].enabled = true;
4566 }
4567 return 0;
4568}
4569
4570static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
4571 const struct fiji_power_state *fiji_ps)
4572{
4573 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4574 uint32_t high_limit_count;
4575
4576 PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
4577 "power state did not have any performance level",
4578 return -1);
4579
4580 high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
4581
4582 fiji_trim_single_dpm_states(hwmgr,
4583 &(data->dpm_table.sclk_table),
4584 fiji_ps->performance_levels[0].engine_clock,
4585 fiji_ps->performance_levels[high_limit_count].engine_clock);
4586
4587 fiji_trim_single_dpm_states(hwmgr,
4588 &(data->dpm_table.mclk_table),
4589 fiji_ps->performance_levels[0].memory_clock,
4590 fiji_ps->performance_levels[high_limit_count].memory_clock);
4591
4592 return 0;
4593}
4594
4595static int fiji_generate_dpm_level_enable_mask(
4596 struct pp_hwmgr *hwmgr, const void *input)
4597{
4598 int result;
4599 const struct phm_set_power_state_input *states =
4600 (const struct phm_set_power_state_input *)input;
4601 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4602 const struct fiji_power_state *fiji_ps =
4603 cast_const_phw_fiji_power_state(states->pnew_state);
4604
4605 result = fiji_trim_dpm_states(hwmgr, fiji_ps);
4606 if (result)
4607 return result;
4608
4609 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4610 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4611 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4612 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4613 data->last_mclk_dpm_enable_mask =
4614 data->dpm_level_enable_mask.mclk_dpm_enable_mask;
4615
4616 if (data->uvd_enabled) {
4617 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4618 data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4619 }
4620
4621 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4622 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4623
4624 return 0;
4625}
4626
4627static int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4628{
4629 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
4630 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
4631 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
4632}
4633
4634int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
4635{
4636 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4637 PPSMC_MSG_VCEDPM_Enable :
4638 PPSMC_MSG_VCEDPM_Disable);
4639}
4640
4641static int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
4642{
4643 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4644 PPSMC_MSG_SAMUDPM_Enable :
4645 PPSMC_MSG_SAMUDPM_Disable);
4646}
4647
4648static int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
4649{
4650 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4651 PPSMC_MSG_ACPDPM_Enable :
4652 PPSMC_MSG_ACPDPM_Disable);
4653}
4654
/* Program the SMU UVD boot level and toggle UVD DPM.
 * @bgate: true when UVD is being gated; DPM is then disabled and the
 *         boot level is left untouched.
 * Returns the SMC message result from fiji_enable_disable_uvd_dpm().
 */
int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot from the highest entry of the MM dependency table. */
		data->smc_state_table.UvdBootLevel = 0;
		if (table_info->mm_dep_table->count > 0)
			data->smc_state_table.UvdBootLevel =
					(uint8_t) (table_info->mm_dep_table->count - 1);
		/* UvdBootLevel occupies the top byte of its 32-bit word in
		 * SMC SRAM: round the offset down to a dword boundary, then
		 * read-modify-write just that byte.
		 */
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0x00FFFFFF;
		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Restrict the enabled-level mask to the boot level when UVD
		 * DPM is not supported or a stable P-state is requested.
		 */
		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDPM) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_UVDDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
	}

	return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
}
4689
4690int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
4691{
4692 const struct phm_set_power_state_input *states =
4693 (const struct phm_set_power_state_input *)input;
4694 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4695 const struct fiji_power_state *fiji_nps =
4696 cast_const_phw_fiji_power_state(states->pnew_state);
4697 const struct fiji_power_state *fiji_cps =
4698 cast_const_phw_fiji_power_state(states->pcurrent_state);
4699
4700 uint32_t mm_boot_level_offset, mm_boot_level_value;
4701 struct phm_ppt_v1_information *table_info =
4702 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4703
4704 if (fiji_nps->vce_clks.evclk >0 &&
4705 (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
4706 data->smc_state_table.VceBootLevel =
4707 (uint8_t) (table_info->mm_dep_table->count - 1);
4708
4709 mm_boot_level_offset = data->dpm_table_start +
4710 offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
4711 mm_boot_level_offset /= 4;
4712 mm_boot_level_offset *= 4;
4713 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
4714 CGS_IND_REG__SMC, mm_boot_level_offset);
4715 mm_boot_level_value &= 0xFF00FFFF;
4716 mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
4717 cgs_write_ind_register(hwmgr->device,
4718 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4719
4720 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4721 PHM_PlatformCaps_StablePState)) {
4722 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4723 PPSMC_MSG_VCEDPM_SetEnabledMask,
4724 (uint32_t)1 << data->smc_state_table.VceBootLevel);
4725
4726 fiji_enable_disable_vce_dpm(hwmgr, true);
4727 } else if (fiji_nps->vce_clks.evclk == 0 &&
4728 fiji_cps != NULL &&
4729 fiji_cps->vce_clks.evclk > 0)
4730 fiji_enable_disable_vce_dpm(hwmgr, false);
4731 }
4732
4733 return 0;
4734}
4735
4736int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
4737{
4738 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4739 uint32_t mm_boot_level_offset, mm_boot_level_value;
4740 struct phm_ppt_v1_information *table_info =
4741 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4742
4743 if (!bgate) {
4744 data->smc_state_table.SamuBootLevel =
4745 (uint8_t) (table_info->mm_dep_table->count - 1);
4746 mm_boot_level_offset = data->dpm_table_start +
4747 offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
4748 mm_boot_level_offset /= 4;
4749 mm_boot_level_offset *= 4;
4750 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
4751 CGS_IND_REG__SMC, mm_boot_level_offset);
4752 mm_boot_level_value &= 0xFFFFFF00;
4753 mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
4754 cgs_write_ind_register(hwmgr->device,
4755 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4756
4757 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4758 PHM_PlatformCaps_StablePState))
4759 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4760 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4761 (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
4762 }
4763
4764 return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
4765}
4766
4767int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
4768{
4769 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4770 uint32_t mm_boot_level_offset, mm_boot_level_value;
4771 struct phm_ppt_v1_information *table_info =
4772 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4773
4774 if (!bgate) {
4775 data->smc_state_table.AcpBootLevel =
4776 (uint8_t) (table_info->mm_dep_table->count - 1);
4777 mm_boot_level_offset = data->dpm_table_start +
4778 offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
4779 mm_boot_level_offset /= 4;
4780 mm_boot_level_offset *= 4;
4781 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
4782 CGS_IND_REG__SMC, mm_boot_level_offset);
4783 mm_boot_level_value &= 0xFFFF00FF;
4784 mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
4785 cgs_write_ind_register(hwmgr->device,
4786 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4787
4788 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4789 PHM_PlatformCaps_StablePState))
4790 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4791 PPSMC_MSG_ACPDPM_SetEnabledMask,
4792 (uint32_t)(1 << data->smc_state_table.AcpBootLevel));
4793 }
4794
4795 return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
4796}
4797
4798static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
4799{
4800 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4801
4802 int result = 0;
4803 uint32_t low_sclk_interrupt_threshold = 0;
4804
4805 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4806 PHM_PlatformCaps_SclkThrottleLowNotification)
4807 && (hwmgr->gfx_arbiter.sclk_threshold !=
4808 data->low_sclk_interrupt_threshold)) {
4809 data->low_sclk_interrupt_threshold =
4810 hwmgr->gfx_arbiter.sclk_threshold;
4811 low_sclk_interrupt_threshold =
4812 data->low_sclk_interrupt_threshold;
4813
4814 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
4815
4816 result = fiji_copy_bytes_to_smc(
4817 hwmgr->smumgr,
4818 data->dpm_table_start +
4819 offsetof(SMU73_Discrete_DpmTable,
4820 LowSclkInterruptThreshold),
4821 (uint8_t *)&low_sclk_interrupt_threshold,
4822 sizeof(uint32_t),
4823 data->sram_end);
4824 }
4825
4826 return result;
4827}
4828
4829static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
4830{
4831 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4832
4833 if (data->need_update_smu7_dpm_table &
4834 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4835 return fiji_program_memory_timing_parameters(hwmgr);
4836
4837 return 0;
4838}
4839
4840static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4841{
4842 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4843
4844 if (0 == data->need_update_smu7_dpm_table)
4845 return 0;
4846
4847 if ((0 == data->sclk_dpm_key_disabled) &&
4848 (data->need_update_smu7_dpm_table &
4849 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4850
4851 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4852 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4853 );
4854 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4855 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4856 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4857 return -1);
4858 }
4859
4860 if ((0 == data->mclk_dpm_key_disabled) &&
4861 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4862
4863 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4864 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4865 );
4866 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4867 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4868 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4869 return -1);
4870 }
4871
4872 data->need_update_smu7_dpm_table = 0;
4873
4874 return 0;
4875}
4876
/* Look up the voltage based on DAL's requested level and send the
 * requested VDDC voltage to SMC.
 * Currently an intentional no-op placeholder.
 */
static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
{
}
4884
4885static int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
4886{
4887 int result;
4888 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4889
4890 /* Apply minimum voltage based on DAL's request level */
4891 fiji_apply_dal_minimum_voltage_request(hwmgr);
4892
4893 if (0 == data->sclk_dpm_key_disabled) {
4894 /* Checking if DPM is running. If we discover hang because of this,
4895 * we should skip this message.
4896 */
4897 if (!fiji_is_dpm_running(hwmgr))
4898 printk(KERN_ERR "[ powerplay ] "
4899 "Trying to set Enable Mask when DPM is disabled \n");
4900
4901 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4902 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4903 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4904 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4905 PP_ASSERT_WITH_CODE((0 == result),
4906 "Set Sclk Dpm enable Mask failed", return -1);
4907 }
4908 }
4909
4910 if (0 == data->mclk_dpm_key_disabled) {
4911 /* Checking if DPM is running. If we discover hang because of this,
4912 * we should skip this message.
4913 */
4914 if (!fiji_is_dpm_running(hwmgr))
4915 printk(KERN_ERR "[ powerplay ]"
4916 " Trying to set Enable Mask when DPM is disabled \n");
4917
4918 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4919 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4920 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4921 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
4922 PP_ASSERT_WITH_CODE((0 == result),
4923 "Set Mclk Dpm enable Mask failed", return -1);
4924 }
4925 }
4926
4927 return 0;
4928}
4929
4930static int fiji_notify_link_speed_change_after_state_change(
4931 struct pp_hwmgr *hwmgr, const void *input)
4932{
4933 const struct phm_set_power_state_input *states =
4934 (const struct phm_set_power_state_input *)input;
4935 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4936 const struct fiji_power_state *fiji_ps =
4937 cast_const_phw_fiji_power_state(states->pnew_state);
4938 uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
4939 uint8_t request;
4940
4941 if (data->pspp_notify_required) {
4942 if (target_link_speed == PP_PCIEGen3)
4943 request = PCIE_PERF_REQ_GEN3;
4944 else if (target_link_speed == PP_PCIEGen2)
4945 request = PCIE_PERF_REQ_GEN2;
4946 else
4947 request = PCIE_PERF_REQ_GEN1;
4948
4949 if(request == PCIE_PERF_REQ_GEN1 &&
4950 fiji_get_current_pcie_speed(hwmgr) > 0)
4951 return 0;
4952
4953 if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
4954 if (PP_PCIEGen2 == target_link_speed)
4955 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
4956 else
4957 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
4958 }
4959 }
4960
4961 return 0;
4962}
4963
/* Apply a new power state to the hardware.
 *
 * The sequence is order-sensitive: clocks are located in the DPM table,
 * a link-speed upgrade is requested first (if the platform supports
 * PCIe performance requests), SCLK/MCLK DPM is frozen while the new
 * levels are uploaded, and only then unfrozen and the enable masks
 * re-sent.  The link-speed *notification* runs last, after the state
 * is live.
 *
 * Each step's failure is recorded into 'result' via PP_ASSERT_WITH_CODE
 * but the sequence deliberately continues, so later cleanup steps
 * (unfreeze, mask upload) still run.  Returns 0 or the last failing
 * step's error code.
 */
static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;

	tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* Request a faster link before raising clocks. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			fiji_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = fiji_update_vce_dpm(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update VCE DPM!",
			result = tmp_result);

	tmp_result = fiji_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = fiji_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program memory timing parameters!",
			result = tmp_result);

	tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* Notify (possibly downgrade) link speed once the state is live. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			fiji_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}

	return result;
}
5033
5034static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
5035{
5036 struct pp_power_state *ps;
5037 struct fiji_power_state *fiji_ps;
5038
5039 if (hwmgr == NULL)
5040 return -EINVAL;
5041
5042 ps = hwmgr->request_ps;
5043
5044 if (ps == NULL)
5045 return -EINVAL;
5046
5047 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5048
5049 if (low)
5050 return fiji_ps->performance_levels[0].engine_clock;
5051 else
5052 return fiji_ps->performance_levels
5053 [fiji_ps->performance_level_count-1].engine_clock;
5054}
5055
5056static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
5057{
5058 struct pp_power_state *ps;
5059 struct fiji_power_state *fiji_ps;
5060
5061 if (hwmgr == NULL)
5062 return -EINVAL;
5063
5064 ps = hwmgr->request_ps;
5065
5066 if (ps == NULL)
5067 return -EINVAL;
5068
5069 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5070
5071 if (low)
5072 return fiji_ps->performance_levels[0].memory_clock;
5073 else
5074 return fiji_ps->performance_levels
5075 [fiji_ps->performance_level_count-1].memory_clock;
5076}
5077
/* Dump the current engine/memory clocks, GPU load and UVD/VCE
 * power-gate state into a debugfs seq_file.
 *
 * NOTE(review): "perforce" is a historical typo in the pp_hwmgr_func
 * vtable member name; kept for interface compatibility.
 */
static void fiji_print_current_perforce_level(
		struct pp_hwmgr *hwmgr, struct seq_file *m)
{
	uint32_t sclk, mclk, activity_percent = 0;
	uint32_t offset;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/* The SMC returns each frequency in ARG_0 after the query message;
	 * values are divided by 100 for MHz below, i.e. 10 kHz units. */
	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);

	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);

	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n",
			mclk / 100, sclk / 100);

	/* Firmware reports activity in 8.8 fixed point; +0x80 rounds to
	 * nearest before the fractional bits are shifted away. */
	offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	activity_percent += 0x80;
	activity_percent >>= 8;

	/* Clamp: rounding can nudge the value past 100%. */
	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);

	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");

	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
}
5106
/* Program the display-gap registers and the SMC soft registers that
 * tell firmware how much time per frame is safe for memory retraining.
 *
 * With at least one active display the gap is taken at vblank or the
 * watermark; with none it is ignored entirely.  The pre-VBI window is
 * the frame time minus a 200 us guard band and the vblank time.
 */
static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, (num_active_displays > 0)?
			DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	/* Assume 60 Hz when the mode does not report a refresh rate. */
	if (refresh_rate == 0)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* 200 us guard band before VBI; convert the window to reference
	 * clock ticks (ref_clock presumably in 10 kHz units — verify). */
	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	/* PreVBlankGap is fixed at 100 (0x64). */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			offsetof(SMU73_SoftRegisters, VBlankTimeout),
			(frame_time_in_us - pre_vbi_time_in_us));

	/* Single-display case shares Tonga's SMC notification. */
	if (num_active_displays == 1)
		tonga_notify_smc_display_change(hwmgr, true);

	return 0;
}
5160
/* Display-configuration-change hook: on Fiji only the display gap
 * programming needs to be redone. */
static int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return fiji_program_display_gap(hwmgr);
}
5165
5166static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
5167 uint16_t us_max_fan_pwm)
5168{
5169 hwmgr->thermal_controller.
5170 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
5171
5172 if (phm_is_hw_access_blocked(hwmgr))
5173 return 0;
5174
5175 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5176 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
5177}
5178
5179static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
5180 uint16_t us_max_fan_rpm)
5181{
5182 hwmgr->thermal_controller.
5183 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
5184
5185 if (phm_is_hw_access_blocked(hwmgr))
5186 return 0;
5187
5188 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5189 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
5190}
5191
5192static int fiji_dpm_set_interrupt_state(void *private_data,
5193 unsigned src_id, unsigned type,
5194 int enabled)
5195{
5196 uint32_t cg_thermal_int;
5197 struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
5198
5199 if (hwmgr == NULL)
5200 return -EINVAL;
5201
5202 switch (type) {
5203 case AMD_THERMAL_IRQ_LOW_TO_HIGH:
5204 if (enabled) {
5205 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5206 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5207 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5208 cgs_write_ind_register(hwmgr->device,
5209 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5210 } else {
5211 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5212 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5213 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5214 cgs_write_ind_register(hwmgr->device,
5215 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5216 }
5217 break;
5218
5219 case AMD_THERMAL_IRQ_HIGH_TO_LOW:
5220 if (enabled) {
5221 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5222 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5223 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
5224 cgs_write_ind_register(hwmgr->device,
5225 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5226 } else {
5227 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5228 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5229 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
5230 cgs_write_ind_register(hwmgr->device,
5231 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5232 }
5233 break;
5234 default:
5235 break;
5236 }
5237 return 0;
5238}
5239
5240static int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
5241 const void *thermal_interrupt_info)
5242{
5243 int result;
5244 const struct pp_interrupt_registration_info *info =
5245 (const struct pp_interrupt_registration_info *)
5246 thermal_interrupt_info;
5247
5248 if (info == NULL)
5249 return -EINVAL;
5250
5251 result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
5252 fiji_dpm_set_interrupt_state,
5253 info->call_back, info->context);
5254
5255 if (result)
5256 return -EINVAL;
5257
5258 result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
5259 fiji_dpm_set_interrupt_state,
5260 info->call_back, info->context);
5261
5262 if (result)
5263 return -EINVAL;
5264
5265 return 0;
5266}
5267
5268static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5269{
5270 if (mode) {
5271 /* stop auto-manage */
5272 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5273 PHM_PlatformCaps_MicrocodeFanControl))
5274 fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
5275 fiji_fan_ctrl_set_static_mode(hwmgr, mode);
5276 } else
5277 /* restart auto-manage */
5278 fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
5279
5280 return 0;
5281}
5282
5283static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5284{
5285 if (hwmgr->fan_ctrl_is_in_default_mode)
5286 return hwmgr->fan_ctrl_default_mode;
5287 else
5288 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
5289 CG_FDO_CTRL2, FDO_PWM_MODE);
5290}
5291
5292static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
5293 enum pp_clock_type type, uint32_t mask)
5294{
5295 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5296
5297 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
5298 return -EINVAL;
5299
5300 switch (type) {
5301 case PP_SCLK:
5302 if (!data->sclk_dpm_key_disabled)
5303 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5304 PPSMC_MSG_SCLKDPM_SetEnabledMask,
5305 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
5306 break;
5307
5308 case PP_MCLK:
5309 if (!data->mclk_dpm_key_disabled)
5310 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5311 PPSMC_MSG_MCLKDPM_SetEnabledMask,
5312 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
5313 break;
5314
5315 case PP_PCIE:
5316 {
5317 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
5318 uint32_t level = 0;
5319
5320 while (tmp >>= 1)
5321 level++;
5322
5323 if (!data->pcie_dpm_key_disabled)
5324 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5325 PPSMC_MSG_PCIeDPM_ForceLevel,
5326 level);
5327 break;
5328 }
5329 default:
5330 break;
5331 }
5332
5333 return 0;
5334}
5335
5336static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
5337 enum pp_clock_type type, char *buf)
5338{
5339 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5340 struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5341 struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5342 struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
5343 int i, now, size = 0;
5344 uint32_t clock, pcie_speed;
5345
5346 switch (type) {
5347 case PP_SCLK:
5348 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
5349 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
5350
5351 for (i = 0; i < sclk_table->count; i++) {
5352 if (clock > sclk_table->dpm_levels[i].value)
5353 continue;
5354 break;
5355 }
5356 now = i;
5357
5358 for (i = 0; i < sclk_table->count; i++)
5359 size += sprintf(buf + size, "%d: %uMhz %s\n",
5360 i, sclk_table->dpm_levels[i].value / 100,
5361 (i == now) ? "*" : "");
5362 break;
5363 case PP_MCLK:
5364 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
5365 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
5366
5367 for (i = 0; i < mclk_table->count; i++) {
5368 if (clock > mclk_table->dpm_levels[i].value)
5369 continue;
5370 break;
5371 }
5372 now = i;
5373
5374 for (i = 0; i < mclk_table->count; i++)
5375 size += sprintf(buf + size, "%d: %uMhz %s\n",
5376 i, mclk_table->dpm_levels[i].value / 100,
5377 (i == now) ? "*" : "");
5378 break;
5379 case PP_PCIE:
5380 pcie_speed = fiji_get_current_pcie_speed(hwmgr);
5381 for (i = 0; i < pcie_table->count; i++) {
5382 if (pcie_speed != pcie_table->dpm_levels[i].value)
5383 continue;
5384 break;
5385 }
5386 now = i;
5387
5388 for (i = 0; i < pcie_table->count; i++)
5389 size += sprintf(buf + size, "%d: %s %s\n", i,
5390 (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
5391 (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
5392 (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
5393 (i == now) ? "*" : "");
5394 break;
5395 default:
5396 break;
5397 }
5398 return size;
5399}
5400
5401static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
5402 const struct fiji_performance_level *pl2)
5403{
5404 return ((pl1->memory_clock == pl2->memory_clock) &&
5405 (pl1->engine_clock == pl2->engine_clock) &&
5406 (pl1->pcie_gen == pl2->pcie_gen) &&
5407 (pl1->pcie_lane == pl2->pcie_lane));
5408}
5409
5410static int
5411fiji_check_states_equal(struct pp_hwmgr *hwmgr,
5412 const struct pp_hw_power_state *pstate1,
5413 const struct pp_hw_power_state *pstate2, bool *equal)
5414{
5415 const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
5416 const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
5417 int i;
5418
5419 if (equal == NULL || psa == NULL || psb == NULL)
5420 return -EINVAL;
5421
5422 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
5423 if (psa->performance_level_count != psb->performance_level_count) {
5424 *equal = false;
5425 return 0;
5426 }
5427
5428 for (i = 0; i < psa->performance_level_count; i++) {
5429 if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
5430 /* If we have found even one performance level pair that is different the states are different. */
5431 *equal = false;
5432 return 0;
5433 }
5434 }
5435
5436 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
5437 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
5438 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
5439 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
5440 *equal &= (psa->acp_clk == psb->acp_clk);
5441
5442 return 0;
5443}
5444
5445static bool
5446fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
5447{
5448 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5449 bool is_update_required = false;
5450 struct cgs_display_info info = {0,0,NULL};
5451
5452 cgs_get_active_displays_info(hwmgr->device, &info);
5453
5454 if (data->display_timing.num_existing_displays != info.display_count)
5455 is_update_required = true;
5456
5457 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
5458 if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
5459 is_update_required = true;
5460 }
5461
5462 return is_update_required;
5463}
5464
5465static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
5466{
5467 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5468 struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5469 struct fiji_single_dpm_table *golden_sclk_table =
5470 &(data->golden_dpm_table.sclk_table);
5471 int value;
5472
5473 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5474 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5475 100 /
5476 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5477
5478 return value;
5479}
5480
5481static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5482{
5483 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5484 struct fiji_single_dpm_table *golden_sclk_table =
5485 &(data->golden_dpm_table.sclk_table);
5486 struct pp_power_state *ps;
5487 struct fiji_power_state *fiji_ps;
5488
5489 if (value > 20)
5490 value = 20;
5491
5492 ps = hwmgr->request_ps;
5493
5494 if (ps == NULL)
5495 return -EINVAL;
5496
5497 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5498
5499 fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
5500 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5501 value / 100 +
5502 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5503
5504 return 0;
5505}
5506
5507static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
5508{
5509 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5510 struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5511 struct fiji_single_dpm_table *golden_mclk_table =
5512 &(data->golden_dpm_table.mclk_table);
5513 int value;
5514
5515 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5516 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5517 100 /
5518 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5519
5520 return value;
5521}
5522
5523static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5524{
5525 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5526 struct fiji_single_dpm_table *golden_mclk_table =
5527 &(data->golden_dpm_table.mclk_table);
5528 struct pp_power_state *ps;
5529 struct fiji_power_state *fiji_ps;
5530
5531 if (value > 20)
5532 value = 20;
5533
5534 ps = hwmgr->request_ps;
5535
5536 if (ps == NULL)
5537 return -EINVAL;
5538
5539 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5540
5541 fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
5542 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5543 value / 100 +
5544 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5545
5546 return 0;
5547}
5548
5549static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
5550 .backend_init = &fiji_hwmgr_backend_init,
5551 .backend_fini = &fiji_hwmgr_backend_fini,
5552 .asic_setup = &fiji_setup_asic_task,
5553 .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
5554 .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
5555 .force_dpm_level = &fiji_dpm_force_dpm_level,
5556 .get_num_of_pp_table_entries = &get_number_of_powerplay_table_entries_v1_0,
5557 .get_power_state_size = &fiji_get_power_state_size,
5558 .get_pp_table_entry = &fiji_get_pp_table_entry,
5559 .patch_boot_state = &fiji_patch_boot_state,
5560 .apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
5561 .power_state_set = &fiji_set_power_state_tasks,
5562 .get_sclk = &fiji_dpm_get_sclk,
5563 .get_mclk = &fiji_dpm_get_mclk,
5564 .print_current_perforce_level = &fiji_print_current_perforce_level,
5565 .powergate_uvd = &fiji_phm_powergate_uvd,
5566 .powergate_vce = &fiji_phm_powergate_vce,
5567 .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
5568 .notify_smc_display_config_after_ps_adjustment =
5569 &tonga_notify_smc_display_config_after_ps_adjustment,
5570 .display_config_changed = &fiji_display_configuration_changed_task,
5571 .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
5572 .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
5573 .get_temperature = fiji_thermal_get_temperature,
5574 .stop_thermal_controller = fiji_thermal_stop_thermal_controller,
5575 .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
5576 .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
5577 .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
5578 .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
5579 .get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
5580 .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
5581 .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
5582 .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
5583 .set_fan_control_mode = fiji_set_fan_control_mode,
5584 .get_fan_control_mode = fiji_get_fan_control_mode,
5585 .check_states_equal = fiji_check_states_equal,
5586 .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
5587 .force_clock_level = fiji_force_clock_level,
5588 .print_clock_levels = fiji_print_clock_levels,
5589 .get_sclk_od = fiji_get_sclk_od,
5590 .set_sclk_od = fiji_set_sclk_od,
5591 .get_mclk_od = fiji_get_mclk_od,
5592 .set_mclk_od = fiji_set_mclk_od,
5593};
5594
/* Entry point from the powerplay core for Fiji: install the hwmgr
 * vtable and the v1.0 powerplay-table parser, then set up the thermal
 * controller defaults.  Always returns 0. */
int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_v1_0_funcs;
	pp_fiji_thermal_initialize(hwmgr);
	return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
deleted file mode 100644
index bf67c2a92c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ /dev/null
@@ -1,350 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _FIJI_HWMGR_H_
25#define _FIJI_HWMGR_H_
26
27#include "hwmgr.h"
28#include "smu73.h"
29#include "smu73_discrete.h"
30#include "ppatomctrl.h"
31#include "fiji_ppsmc.h"
32#include "pp_endian.h"
33
/* A Fiji power state carries at most two hardware performance levels. */
#define FIJI_MAX_HARDWARE_POWERLEVELS	2
/* presumably the default activity target — TODO confirm */
#define FIJI_AT_DFLT	30

/* How a voltage rail is controlled. */
#define FIJI_VOLTAGE_CONTROL_NONE                   0x0
#define FIJI_VOLTAGE_CONTROL_BY_GPIO                0x1
#define FIJI_VOLTAGE_CONTROL_BY_SVID2               0x2
#define FIJI_VOLTAGE_CONTROL_MERGED                 0x3

/* Flags in fiji_hwmgr::need_update_smu7_dpm_table marking which DPM
 * tables must be re-uploaded (OD_* = overdrive-induced). */
#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
#define DPMTABLE_UPDATE_SCLK        0x00000004
#define DPMTABLE_UPDATE_MCLK        0x00000008

/* One hardware performance level: clocks plus PCIe link parameters.
 * Clock values are presumably in 10 kHz units (the debugfs code divides
 * by 100 for MHz) — verify. */
struct fiji_performance_level {
	uint32_t  memory_clock;
	uint32_t  engine_clock;
	uint16_t  pcie_gen;
	uint16_t  pcie_lane;
};

/* UVD video/decode clock pair requested by a power state. */
struct fiji_uvd_clocks {
	uint32_t  vclk;
	uint32_t  dclk;
};

/* VCE encode clock pair requested by a power state. */
struct fiji_vce_clocks {
	uint32_t  evclk;
	uint32_t  ecclk;
};

/* Fiji-private view of a pp_hw_power_state. */
struct fiji_power_state {
	uint32_t                  magic;
	struct fiji_uvd_clocks    uvd_clks;
	struct fiji_vce_clocks    vce_clks;
	uint32_t                  sam_clk;
	uint32_t                  acp_clk;
	uint16_t                  performance_level_count;
	bool                      dc_compatible;
	uint32_t                  sclk_threshold;
	struct fiji_performance_level  performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
};

/* One entry of a DPM level table. */
struct fiji_dpm_level {
	bool	enabled;
	uint32_t	value;      /* level value, e.g. a clock or PCIe speed */
	uint32_t	param1;
};

#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define FIJI_MINIMUM_ENGINE_CLOCK 2500
85
/* A single DPM level table (sclk, mclk, pcie, voltage rails, ...). */
struct fiji_single_dpm_table {
	uint32_t		count;
	struct fiji_dpm_level	dpm_levels[MAX_REGULAR_DPM_NUMBER];
};

/* Collection of all DPM tables used by the Fiji hwmgr. */
struct fiji_dpm_table {
	struct fiji_single_dpm_table  sclk_table;
	struct fiji_single_dpm_table  mclk_table;
	struct fiji_single_dpm_table  pcie_speed_table;
	struct fiji_single_dpm_table  vddc_table;
	struct fiji_single_dpm_table  vddci_table;
	struct fiji_single_dpm_table  mvdd_table;
};

/* Saved copies of the clock-control registers ('v' prefix = value). */
struct fiji_clock_registers {
	uint32_t  vCG_SPLL_FUNC_CNTL;
	uint32_t  vCG_SPLL_FUNC_CNTL_2;
	uint32_t  vCG_SPLL_FUNC_CNTL_3;
	uint32_t  vCG_SPLL_FUNC_CNTL_4;
	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t  vDLL_CNTL;
	uint32_t  vMCLK_PWRMGT_CNTL;
	uint32_t  vMPLL_AD_FUNC_CNTL;
	uint32_t  vMPLL_DQ_FUNC_CNTL;
	uint32_t  vMPLL_FUNC_CNTL;
	uint32_t  vMPLL_FUNC_CNTL_1;
	uint32_t  vMPLL_FUNC_CNTL_2;
	uint32_t  vMPLL_SS1;
	uint32_t  vMPLL_SS2;
};

struct fiji_voltage_smio_registers {
	uint32_t vS0_VID_LOWER_SMIO_CNTL;
};

/* Leakage-voltage translation table: virtual leakage ids mapped to the
 * actual voltage to program. */
#define FIJI_MAX_LEAKAGE_COUNT  8
struct fiji_leakage_voltage {
	uint16_t  count;
	uint16_t  leakage_id[FIJI_MAX_LEAKAGE_COUNT];
	uint16_t  actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
};

/* Clock/voltage/link values the VBIOS booted with. */
struct fiji_vbios_boot_state {
	uint16_t    mvdd_bootup_value;
	uint16_t    vddc_bootup_value;
	uint16_t    vddci_bootup_value;
	uint32_t    sclk_bootup_value;
	uint32_t    mclk_bootup_value;
	uint16_t    pcie_gen_bootup_value;
	uint16_t    pcie_lane_bootup_value;
};

/* BACO (bus-active, chip-off) bookkeeping. */
struct fiji_bacos {
	uint32_t                       best_match;
	uint32_t                       baco_flags;
	struct fiji_performance_level  performance_level;
};

/* Ultra Low Voltage parameter structure */
struct fiji_ulv_parm {
	bool                           ulv_supported;
	uint32_t                       cg_ulv_parameter;
	uint32_t                       ulv_volt_change_delay;
	struct fiji_performance_level  ulv_power_level;
};

/* Display-derived constraints cached between config changes. */
struct fiji_display_timing {
	uint32_t  min_clock_in_sr;          /* min core clock in self-refresh */
	uint32_t  num_existing_displays;
};

/* Per-domain DPM level enable bitmasks (bit i = level i enabled). */
struct fiji_dpmlevel_enable_mask {
	uint32_t  uvd_dpm_enable_mask;
	uint32_t  vce_dpm_enable_mask;
	uint32_t  acp_dpm_enable_mask;
	uint32_t  samu_dpm_enable_mask;
	uint32_t  sclk_dpm_enable_mask;
	uint32_t  mclk_dpm_enable_mask;
	uint32_t  pcie_dpm_enable_mask;
};

struct fiji_pcie_perf_range {
	uint16_t  max;
	uint16_t  min;
};
172
/* Fiji-private hwmgr backend state, hung off pp_hwmgr::backend and
 * recovered everywhere via a cast.  Groups DPM tables, cached register
 * state, SMC firmware table locations and feature bookkeeping. */
struct fiji_hwmgr {
	struct fiji_dpm_table			dpm_table;
	struct fiji_dpm_table			golden_dpm_table;   /* stock copy, baseline for overdrive */

	uint32_t						voting_rights_clients0;
	uint32_t						voting_rights_clients1;
	uint32_t						voting_rights_clients2;
	uint32_t						voting_rights_clients3;
	uint32_t						voting_rights_clients4;
	uint32_t						voting_rights_clients5;
	uint32_t						voting_rights_clients6;
	uint32_t						voting_rights_clients7;
	uint32_t						static_screen_threshold_unit;
	uint32_t						static_screen_threshold;
	uint32_t						voltage_control;
	uint32_t						vddc_vddci_delta;

	uint32_t						active_auto_throttle_sources;

	struct fiji_clock_registers            clock_registers;
	struct fiji_voltage_smio_registers      voltage_smio_registers;

	bool                           is_memory_gddr5;
	uint16_t                       acpi_vddc;
	bool                           pspp_notify_required;   /* PCIe PSPP request pending after state change */
	uint16_t                       force_pcie_gen;
	uint16_t                       acpi_pcie_gen;
	uint32_t                       pcie_gen_cap;
	uint32_t                       pcie_lane_cap;
	uint32_t                       pcie_spc_cap;
	struct fiji_leakage_voltage          vddc_leakage;
	/* NOTE(review): capital-V name is inconsistent with the rest of the
	 * struct; kept as-is since other files reference it. */
	struct fiji_leakage_voltage          Vddci_leakage;

	uint32_t                       mvdd_control;
	uint32_t                       vddc_mask_low;
	uint32_t                       mvdd_mask_low;
	uint16_t                      max_vddc_in_pptable;
	uint16_t                      min_vddc_in_pptable;
	uint16_t                      max_vddci_in_pptable;
	uint16_t                      min_vddci_in_pptable;
	uint32_t                       mclk_strobe_mode_threshold;
	uint32_t                       mclk_stutter_mode_threshold;
	uint32_t                       mclk_edc_enable_threshold;
	uint32_t                       mclk_edcwr_enable_threshold;
	bool                           is_uvd_enabled;
	struct fiji_vbios_boot_state        vbios_boot_state;

	bool                           battery_state;
	bool                           is_tlu_enabled;

	/* ---- SMC SRAM Address of firmware header tables ---- */
	uint32_t                       sram_end;
	uint32_t                       dpm_table_start;
	uint32_t                       soft_regs_start;
	uint32_t                       mc_reg_table_start;
	uint32_t                       fan_table_start;
	uint32_t                       arb_table_start;
	struct SMU73_Discrete_DpmTable       smc_state_table;
	struct SMU73_Discrete_Ulv            ulv_setting;

	/* ---- Stuff originally coming from Evergreen ---- */
	uint32_t                     vddci_control;
	struct pp_atomctrl_voltage_table     vddc_voltage_table;
	struct pp_atomctrl_voltage_table     vddci_voltage_table;
	struct pp_atomctrl_voltage_table     mvdd_voltage_table;

	uint32_t                       mgcg_cgtt_local2;
	uint32_t                       mgcg_cgtt_local3;
	uint32_t                       gpio_debug;
	uint32_t                       mc_micro_code_feature;
	uint32_t                       highest_mclk;
	uint16_t                      acpi_vddci;
	uint8_t                       mvdd_high_index;
	uint8_t                       mvdd_low_index;
	bool                           dll_default_on;
	bool                           performance_request_registered;

	/* ---- Low Power Features ---- */
	struct fiji_bacos                    bacos;
	struct fiji_ulv_parm                 ulv;

	/* ---- CAC Stuff ---- */
	uint32_t                       cac_table_start;
	bool                           cac_configuration_required;
	bool                           driver_calculate_cac_leakage;
	bool                           cac_enabled;

	/* ---- DPM2 Parameters ---- */
	uint32_t                       power_containment_features;
	bool                           enable_dte_feature;
	bool                           enable_tdc_limit_feature;
	bool                           enable_pkg_pwr_tracking_feature;
	bool                           disable_uvd_power_tune_feature;
	const struct fiji_pt_defaults       *power_tune_defaults;
	struct SMU73_Discrete_PmFuses  power_tune_table;
	uint32_t                       dte_tj_offset;
	uint32_t                       fast_watermark_threshold;

	/* ---- Phase Shedding ---- */
	bool                           vddc_phase_shed_control;

	/* ---- DI/DT ---- */
	struct fiji_display_timing        display_timing;

	/* ---- Thermal Temperature Setting ---- */
	struct fiji_dpmlevel_enable_mask     dpm_level_enable_mask;
	uint32_t                       need_update_smu7_dpm_table;   /* DPMTABLE_* flags */
	uint32_t                       sclk_dpm_key_disabled;
	uint32_t                       mclk_dpm_key_disabled;
	uint32_t                       pcie_dpm_key_disabled;
	uint32_t                       min_engine_clocks;
	struct fiji_pcie_perf_range       pcie_gen_performance;
	struct fiji_pcie_perf_range       pcie_lane_performance;
	struct fiji_pcie_perf_range       pcie_gen_power_saving;
	struct fiji_pcie_perf_range       pcie_lane_power_saving;
	bool                           use_pcie_performance_levels;
	bool                           use_pcie_power_saving_levels;
	uint32_t                       activity_target[SMU73_MAX_LEVELS_GRAPHICS];
	uint32_t                       mclk_activity_target;
	uint32_t                       mclk_dpm0_activity_target;
	uint32_t                       low_sclk_interrupt_threshold;
	uint32_t                       last_mclk_dpm_enable_mask;
	bool                           uvd_enabled;

	/* ---- Power Gating States ---- */
	bool                           uvd_power_gated;
	bool                           vce_power_gated;
	bool                           samu_power_gated;
	bool                           acp_power_gated;
	bool                           pg_acp_init;
	bool                           frtc_enabled;
	bool                           frtc_status_changed;
};
306
/* To convert to Q8.8 format for firmware */
#define FIJI_Q88_FORMAT_CONVERSION_UNIT             256

/* I2C line selectors used when talking to the fan/thermal controller;
 * values are the hardware line ids. */
enum Fiji_I2CLineID {
	Fiji_I2CLineID_DDC1 = 0x90,
	Fiji_I2CLineID_DDC2 = 0x91,
	Fiji_I2CLineID_DDC3 = 0x92,
	Fiji_I2CLineID_DDC4 = 0x93,
	Fiji_I2CLineID_DDC5 = 0x94,
	Fiji_I2CLineID_DDC6 = 0x95,
	Fiji_I2CLineID_SCLSDA = 0x96,
	Fiji_I2CLineID_DDCVGA = 0x97
};

/* GPIO pin assignments for the I2C lines above. */
#define Fiji_I2C_DDC1DATA          0
#define Fiji_I2C_DDC1CLK           1
#define Fiji_I2C_DDC2DATA          2
#define Fiji_I2C_DDC2CLK           3
#define Fiji_I2C_DDC3DATA          4
#define Fiji_I2C_DDC3CLK           5
#define Fiji_I2C_SDA               40
#define Fiji_I2C_SCL               41
#define Fiji_I2C_DDC4DATA          65
#define Fiji_I2C_DDC4CLK           66
#define Fiji_I2C_DDC5DATA          0x48
#define Fiji_I2C_DDC5CLK           0x49
#define Fiji_I2C_DDC6DATA          0x4a
#define Fiji_I2C_DDC6CLK           0x4b
#define Fiji_I2C_DDCVGADATA        0x4c
#define Fiji_I2C_DDCVGACLK         0x4d

#define FIJI_UNUSED_GPIO_PIN       0x7F

/* Helpers shared with the Tonga hwmgr, plus Fiji DPM entry points used
 * by the clock/power-gating code. */
extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
349
350#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
deleted file mode 100644
index f5992ea0c56f..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ /dev/null
@@ -1,610 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "smumgr.h"
26#include "fiji_hwmgr.h"
27#include "fiji_powertune.h"
28#include "fiji_smumgr.h"
29#include "smu73_discrete.h"
30#include "pp_debug.h"
31
32#define VOLTAGE_SCALE 4
33#define POWERTUNE_DEFAULT_SET_MAX 1
34
35const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
36 /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
37 {1, 0xF, 0xFD,
38 /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
39 0x19, 5, 45}
40};
41
42void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
43{
44 struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
45 struct phm_ppt_v1_information *table_info =
46 (struct phm_ppt_v1_information *)(hwmgr->pptable);
47 uint32_t tmp = 0;
48
49 if(table_info &&
50 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
51 table_info->cac_dtp_table->usPowerTuneDataSetID)
52 fiji_hwmgr->power_tune_defaults =
53 &fiji_power_tune_data_set_array
54 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
55 else
56 fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
57
58 /* Assume disabled */
59 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
60 PHM_PlatformCaps_CAC);
61 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
62 PHM_PlatformCaps_SQRamping);
63 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
64 PHM_PlatformCaps_DBRamping);
65 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
66 PHM_PlatformCaps_TDRamping);
67 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
68 PHM_PlatformCaps_TCPRamping);
69
70 fiji_hwmgr->dte_tj_offset = tmp;
71
72 if (!tmp) {
73 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
74 PHM_PlatformCaps_CAC);
75
76 fiji_hwmgr->fast_watermark_threshold = 100;
77
78 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
79 PHM_PlatformCaps_PowerContainment)) {
80 tmp = 1;
81 fiji_hwmgr->enable_dte_feature = tmp ? false : true;
82 fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
83 fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
84 }
85 }
86}
87
88/* PPGen has the gain setting generated in x * 100 unit
89 * This function is to convert the unit to x * 4096(0x1000) unit.
90 * This is the unit expected by SMC firmware
91 */
92static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
93{
94 uint32_t tmp;
95 tmp = raw_setting * 4096 / 100;
96 return (uint16_t)tmp;
97}
98
99static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
100{
101 switch (line) {
102 case Fiji_I2CLineID_DDC1 :
103 *scl = Fiji_I2C_DDC1CLK;
104 *sda = Fiji_I2C_DDC1DATA;
105 break;
106 case Fiji_I2CLineID_DDC2 :
107 *scl = Fiji_I2C_DDC2CLK;
108 *sda = Fiji_I2C_DDC2DATA;
109 break;
110 case Fiji_I2CLineID_DDC3 :
111 *scl = Fiji_I2C_DDC3CLK;
112 *sda = Fiji_I2C_DDC3DATA;
113 break;
114 case Fiji_I2CLineID_DDC4 :
115 *scl = Fiji_I2C_DDC4CLK;
116 *sda = Fiji_I2C_DDC4DATA;
117 break;
118 case Fiji_I2CLineID_DDC5 :
119 *scl = Fiji_I2C_DDC5CLK;
120 *sda = Fiji_I2C_DDC5DATA;
121 break;
122 case Fiji_I2CLineID_DDC6 :
123 *scl = Fiji_I2C_DDC6CLK;
124 *sda = Fiji_I2C_DDC6DATA;
125 break;
126 case Fiji_I2CLineID_SCLSDA :
127 *scl = Fiji_I2C_SCL;
128 *sda = Fiji_I2C_SDA;
129 break;
130 case Fiji_I2CLineID_DDCVGA :
131 *scl = Fiji_I2C_DDCVGACLK;
132 *sda = Fiji_I2C_DDCVGADATA;
133 break;
134 default:
135 *scl = 0;
136 *sda = 0;
137 break;
138 }
139}
140
141int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
142{
143 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
144 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
145 SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
146 struct phm_ppt_v1_information *table_info =
147 (struct phm_ppt_v1_information *)(hwmgr->pptable);
148 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
149 struct pp_advance_fan_control_parameters *fan_table=
150 &hwmgr->thermal_controller.advanceFanControlParameters;
151 uint8_t uc_scl, uc_sda;
152
153 /* TDP number of fraction bits are changed from 8 to 7 for Fiji
154 * as requested by SMC team
155 */
156 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
157 (uint16_t)(cac_dtp_table->usTDP * 128));
158 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
159 (uint16_t)(cac_dtp_table->usTDP * 128));
160
161 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
162 "Target Operating Temp is out of Range!",);
163
164 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
165 dpm_table->GpuTjHyst = 8;
166
167 dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
168
169 /* The following are for new Fiji Multi-input fan/thermal control */
170 dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
171 cac_dtp_table->usTargetOperatingTemp * 256);
172 dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
173 cac_dtp_table->usTemperatureLimitHotspot * 256);
174 dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
175 cac_dtp_table->usTemperatureLimitLiquid1 * 256);
176 dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
177 cac_dtp_table->usTemperatureLimitLiquid2 * 256);
178 dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
179 cac_dtp_table->usTemperatureLimitVrVddc * 256);
180 dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
181 cac_dtp_table->usTemperatureLimitVrMvdd * 256);
182 dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
183 cac_dtp_table->usTemperatureLimitPlx * 256);
184
185 dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
186 scale_fan_gain_settings(fan_table->usFanGainEdge));
187 dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
188 scale_fan_gain_settings(fan_table->usFanGainHotspot));
189 dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
190 scale_fan_gain_settings(fan_table->usFanGainLiquid));
191 dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
192 scale_fan_gain_settings(fan_table->usFanGainVrVddc));
193 dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
194 scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
195 dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
196 scale_fan_gain_settings(fan_table->usFanGainPlx));
197 dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
198 scale_fan_gain_settings(fan_table->usFanGainHbm));
199
200 dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
201 dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
202 dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
203 dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
204
205 get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
206 dpm_table->Liquid_I2C_LineSCL = uc_scl;
207 dpm_table->Liquid_I2C_LineSDA = uc_sda;
208
209 get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
210 dpm_table->Vr_I2C_LineSCL = uc_scl;
211 dpm_table->Vr_I2C_LineSDA = uc_sda;
212
213 get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
214 dpm_table->Plx_I2C_LineSCL = uc_scl;
215 dpm_table->Plx_I2C_LineSDA = uc_sda;
216
217 return 0;
218}
219
220static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
221{
222 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
223 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
224
225 data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
226 data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
227 data->power_tune_table.SviLoadLineTrimVddC = 3;
228 data->power_tune_table.SviLoadLineOffsetVddC = 0;
229
230 return 0;
231}
232
233static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
234{
235 uint16_t tdc_limit;
236 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
237 struct phm_ppt_v1_information *table_info =
238 (struct phm_ppt_v1_information *)(hwmgr->pptable);
239 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
240
241 /* TDC number of fraction bits are changed from 8 to 7
242 * for Fiji as requested by SMC team
243 */
244 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
245 data->power_tune_table.TDC_VDDC_PkgLimit =
246 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
247 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
248 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
249 data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
250
251 return 0;
252}
253
254static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
255{
256 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
257 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
258 uint32_t temp;
259
260 if (fiji_read_smc_sram_dword(hwmgr->smumgr,
261 fuse_table_offset +
262 offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
263 (uint32_t *)&temp, data->sram_end))
264 PP_ASSERT_WITH_CODE(false,
265 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
266 return -EINVAL);
267 else {
268 data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
269 data->power_tune_table.LPMLTemperatureMin =
270 (uint8_t)((temp >> 16) & 0xff);
271 data->power_tune_table.LPMLTemperatureMax =
272 (uint8_t)((temp >> 8) & 0xff);
273 data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
274 }
275 return 0;
276}
277
278static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
279{
280 int i;
281 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
282
283 /* Currently not used. Set all to zero. */
284 for (i = 0; i < 16; i++)
285 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
286
287 return 0;
288}
289
290static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
291{
292 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
293
294 if( (hwmgr->thermal_controller.advanceFanControlParameters.
295 usFanOutputSensitivity & (1 << 15)) ||
296 0 == hwmgr->thermal_controller.advanceFanControlParameters.
297 usFanOutputSensitivity )
298 hwmgr->thermal_controller.advanceFanControlParameters.
299 usFanOutputSensitivity = hwmgr->thermal_controller.
300 advanceFanControlParameters.usDefaultFanOutputSensitivity;
301
302 data->power_tune_table.FuzzyFan_PwmSetDelta =
303 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
304 advanceFanControlParameters.usFanOutputSensitivity);
305 return 0;
306}
307
308static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
309{
310 int i;
311 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
312
313 /* Currently not used. Set all to zero. */
314 for (i = 0; i < 16; i++)
315 data->power_tune_table.GnbLPML[i] = 0;
316
317 return 0;
318}
319
320static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
321{
322 /* int i, min, max;
323 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
324 uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
325 uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
326
327 min = max = pHiVID[0];
328 for (i = 0; i < 8; i++) {
329 if (0 != pHiVID[i]) {
330 if (min > pHiVID[i])
331 min = pHiVID[i];
332 if (max < pHiVID[i])
333 max = pHiVID[i];
334 }
335
336 if (0 != pLoVID[i]) {
337 if (min > pLoVID[i])
338 min = pLoVID[i];
339 if (max < pLoVID[i])
340 max = pLoVID[i];
341 }
342 }
343
344 PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
345 data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
346 data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
347*/
348 return 0;
349}
350
351static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
352{
353 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
354 struct phm_ppt_v1_information *table_info =
355 (struct phm_ppt_v1_information *)(hwmgr->pptable);
356 uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
357 uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
358 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
359
360 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
361 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
362
363 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
364 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
365 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
366 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
367
368 return 0;
369}
370
371int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
372{
373 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
374 uint32_t pm_fuse_table_offset;
375
376 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
377 PHM_PlatformCaps_PowerContainment)) {
378 if (fiji_read_smc_sram_dword(hwmgr->smumgr,
379 SMU7_FIRMWARE_HEADER_LOCATION +
380 offsetof(SMU73_Firmware_Header, PmFuseTable),
381 &pm_fuse_table_offset, data->sram_end))
382 PP_ASSERT_WITH_CODE(false,
383 "Attempt to get pm_fuse_table_offset Failed!",
384 return -EINVAL);
385
386 /* DW6 */
387 if (fiji_populate_svi_load_line(hwmgr))
388 PP_ASSERT_WITH_CODE(false,
389 "Attempt to populate SviLoadLine Failed!",
390 return -EINVAL);
391 /* DW7 */
392 if (fiji_populate_tdc_limit(hwmgr))
393 PP_ASSERT_WITH_CODE(false,
394 "Attempt to populate TDCLimit Failed!", return -EINVAL);
395 /* DW8 */
396 if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
397 PP_ASSERT_WITH_CODE(false,
398 "Attempt to populate TdcWaterfallCtl, "
399 "LPMLTemperature Min and Max Failed!",
400 return -EINVAL);
401
402 /* DW9-DW12 */
403 if (0 != fiji_populate_temperature_scaler(hwmgr))
404 PP_ASSERT_WITH_CODE(false,
405 "Attempt to populate LPMLTemperatureScaler Failed!",
406 return -EINVAL);
407
408 /* DW13-DW14 */
409 if(fiji_populate_fuzzy_fan(hwmgr))
410 PP_ASSERT_WITH_CODE(false,
411 "Attempt to populate Fuzzy Fan Control parameters Failed!",
412 return -EINVAL);
413
414 /* DW15-DW18 */
415 if (fiji_populate_gnb_lpml(hwmgr))
416 PP_ASSERT_WITH_CODE(false,
417 "Attempt to populate GnbLPML Failed!",
418 return -EINVAL);
419
420 /* DW19 */
421 if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
422 PP_ASSERT_WITH_CODE(false,
423 "Attempt to populate GnbLPML Min and Max Vid Failed!",
424 return -EINVAL);
425
426 /* DW20 */
427 if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
428 PP_ASSERT_WITH_CODE(false,
429 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
430 "Sidd Failed!", return -EINVAL);
431
432 if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
433 (uint8_t *)&data->power_tune_table,
434 sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
435 PP_ASSERT_WITH_CODE(false,
436 "Attempt to download PmFuseTable Failed!",
437 return -EINVAL);
438 }
439 return 0;
440}
441
442int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
443{
444 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
445 int result = 0;
446
447 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
448 PHM_PlatformCaps_CAC)) {
449 int smc_result;
450 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
451 (uint16_t)(PPSMC_MSG_EnableCac));
452 PP_ASSERT_WITH_CODE((0 == smc_result),
453 "Failed to enable CAC in SMC.", result = -1);
454
455 data->cac_enabled = (0 == smc_result) ? true : false;
456 }
457 return result;
458}
459
460int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
461{
462 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
463 int result = 0;
464
465 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
466 PHM_PlatformCaps_CAC) && data->cac_enabled) {
467 int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
468 (uint16_t)(PPSMC_MSG_DisableCac));
469 PP_ASSERT_WITH_CODE((smc_result == 0),
470 "Failed to disable CAC in SMC.", result = -1);
471
472 data->cac_enabled = false;
473 }
474 return result;
475}
476
477int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
478{
479 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
480
481 if(data->power_containment_features &
482 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
483 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
484 PPSMC_MSG_PkgPwrSetLimit, n);
485 return 0;
486}
487
488static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
489{
490 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
491 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
492}
493
494int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
495{
496 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
497 struct phm_ppt_v1_information *table_info =
498 (struct phm_ppt_v1_information *)(hwmgr->pptable);
499 int smc_result;
500 int result = 0;
501
502 data->power_containment_features = 0;
503 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
504 PHM_PlatformCaps_PowerContainment)) {
505 if (data->enable_dte_feature) {
506 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
507 (uint16_t)(PPSMC_MSG_EnableDTE));
508 PP_ASSERT_WITH_CODE((0 == smc_result),
509 "Failed to enable DTE in SMC.", result = -1;);
510 if (0 == smc_result)
511 data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
512 }
513
514 if (data->enable_tdc_limit_feature) {
515 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
516 (uint16_t)(PPSMC_MSG_TDCLimitEnable));
517 PP_ASSERT_WITH_CODE((0 == smc_result),
518 "Failed to enable TDCLimit in SMC.", result = -1;);
519 if (0 == smc_result)
520 data->power_containment_features |=
521 POWERCONTAINMENT_FEATURE_TDCLimit;
522 }
523
524 if (data->enable_pkg_pwr_tracking_feature) {
525 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
526 (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
527 PP_ASSERT_WITH_CODE((0 == smc_result),
528 "Failed to enable PkgPwrTracking in SMC.", result = -1;);
529 if (0 == smc_result) {
530 struct phm_cac_tdp_table *cac_table =
531 table_info->cac_dtp_table;
532 uint32_t default_limit =
533 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
534
535 data->power_containment_features |=
536 POWERCONTAINMENT_FEATURE_PkgPwrLimit;
537
538 if (fiji_set_power_limit(hwmgr, default_limit))
539 printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
540 }
541 }
542 }
543 return result;
544}
545
546int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
547{
548 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
549 int result = 0;
550
551 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
552 PHM_PlatformCaps_PowerContainment) &&
553 data->power_containment_features) {
554 int smc_result;
555
556 if (data->power_containment_features &
557 POWERCONTAINMENT_FEATURE_TDCLimit) {
558 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
559 (uint16_t)(PPSMC_MSG_TDCLimitDisable));
560 PP_ASSERT_WITH_CODE((smc_result == 0),
561 "Failed to disable TDCLimit in SMC.",
562 result = smc_result);
563 }
564
565 if (data->power_containment_features &
566 POWERCONTAINMENT_FEATURE_DTE) {
567 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
568 (uint16_t)(PPSMC_MSG_DisableDTE));
569 PP_ASSERT_WITH_CODE((smc_result == 0),
570 "Failed to disable DTE in SMC.",
571 result = smc_result);
572 }
573
574 if (data->power_containment_features &
575 POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
576 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
577 (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
578 PP_ASSERT_WITH_CODE((smc_result == 0),
579 "Failed to disable PkgPwrTracking in SMC.",
580 result = smc_result);
581 }
582 data->power_containment_features = 0;
583 }
584
585 return result;
586}
587
588int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
589{
590 struct phm_ppt_v1_information *table_info =
591 (struct phm_ppt_v1_information *)(hwmgr->pptable);
592 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
593 int adjust_percent, target_tdp;
594 int result = 0;
595
596 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
597 PHM_PlatformCaps_PowerContainment)) {
598 /* adjustment percentage has already been validated */
599 adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
600 hwmgr->platform_descriptor.TDPAdjustment :
601 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
602 /* SMC requested that target_tdp to be 7 bit fraction in DPM table
603 * but message to be 8 bit fraction for messages
604 */
605 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
606 result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
607 }
608
609 return result;
610}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
deleted file mode 100644
index fec772421733..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef FIJI_POWERTUNE_H
24#define FIJI_POWERTUNE_H
25
26enum fiji_pt_config_reg_type {
27 FIJI_CONFIGREG_MMR = 0,
28 FIJI_CONFIGREG_SMC_IND,
29 FIJI_CONFIGREG_DIDT_IND,
30 FIJI_CONFIGREG_CACHE,
31 FIJI_CONFIGREG_MAX
32};
33
34/* PowerContainment Features */
35#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
36#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
37#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
38
39#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
40#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
41#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
42#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
43#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
44#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
45#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
46#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
47#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
48#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
49#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
50#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
51
52struct fiji_pt_config_reg {
53 uint32_t offset;
54 uint32_t mask;
55 uint32_t shift;
56 uint32_t value;
57 enum fiji_pt_config_reg_type type;
58};
59
60struct fiji_pt_defaults
61{
62 uint8_t SviLoadLineEn;
63 uint8_t SviLoadLineVddC;
64 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
65 uint8_t TDC_MAWt;
66 uint8_t TdcWaterfallCtl;
67 uint8_t DTEAmbientTempBase;
68};
69
70void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
71int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
72int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
73int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
74int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
75int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
76int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
77int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
78int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
79
80#endif /* FIJI_POWERTUNE_H */
81
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
deleted file mode 100644
index 7f431e762262..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ /dev/null
@@ -1,687 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <asm/div64.h>
24#include "fiji_thermal.h"
25#include "fiji_hwmgr.h"
26#include "fiji_smumgr.h"
27#include "fiji_ppsmc.h"
28#include "smu/smu_7_1_3_d.h"
29#include "smu/smu_7_1_3_sh_mask.h"
30
31int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
32 struct phm_fan_speed_info *fan_speed_info)
33{
34
35 if (hwmgr->thermal_controller.fanInfo.bNoFan)
36 return 0;
37
38 fan_speed_info->supports_percent_read = true;
39 fan_speed_info->supports_percent_write = true;
40 fan_speed_info->min_percent = 0;
41 fan_speed_info->max_percent = 100;
42
43 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
44 PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
45 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
46 fan_speed_info->supports_rpm_read = true;
47 fan_speed_info->supports_rpm_write = true;
48 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
49 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
50 } else {
51 fan_speed_info->min_rpm = 0;
52 fan_speed_info->max_rpm = 0;
53 }
54
55 return 0;
56}
57
58int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
59 uint32_t *speed)
60{
61 uint32_t duty100;
62 uint32_t duty;
63 uint64_t tmp64;
64
65 if (hwmgr->thermal_controller.fanInfo.bNoFan)
66 return 0;
67
68 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
69 CG_FDO_CTRL1, FMAX_DUTY100);
70 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
71 CG_THERMAL_STATUS, FDO_PWM_DUTY);
72
73 if (duty100 == 0)
74 return -EINVAL;
75
76
77 tmp64 = (uint64_t)duty * 100;
78 do_div(tmp64, duty100);
79 *speed = (uint32_t)tmp64;
80
81 if (*speed > 100)
82 *speed = 100;
83
84 return 0;
85}
86
87int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
88{
89 uint32_t tach_period;
90 uint32_t crystal_clock_freq;
91
92 if (hwmgr->thermal_controller.fanInfo.bNoFan ||
93 (hwmgr->thermal_controller.fanInfo.
94 ucTachometerPulsesPerRevolution == 0))
95 return 0;
96
97 tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
98 CG_TACH_STATUS, TACH_PERIOD);
99
100 if (tach_period == 0)
101 return -EINVAL;
102
103 crystal_clock_freq = tonga_get_xclk(hwmgr);
104
105 *speed = 60 * crystal_clock_freq * 10000/ tach_period;
106
107 return 0;
108}
109
110/**
111* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
112* @param hwmgr the address of the powerplay hardware manager.
113* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
114* @exception Should always succeed.
115*/
116int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
117{
118
119 if (hwmgr->fan_ctrl_is_in_default_mode) {
120 hwmgr->fan_ctrl_default_mode =
121 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
122 CG_FDO_CTRL2, FDO_PWM_MODE);
123 hwmgr->tmin =
124 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
125 CG_FDO_CTRL2, TMIN);
126 hwmgr->fan_ctrl_is_in_default_mode = false;
127 }
128
129 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
130 CG_FDO_CTRL2, TMIN, 0);
131 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
132 CG_FDO_CTRL2, FDO_PWM_MODE, mode);
133
134 return 0;
135}
136
137/**
138* Reset Fan Speed Control to default mode.
139* @param hwmgr the address of the powerplay hardware manager.
140* @exception Should always succeed.
141*/
142int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
143{
144 if (!hwmgr->fan_ctrl_is_in_default_mode) {
145 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
146 CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
147 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
148 CG_FDO_CTRL2, TMIN, hwmgr->tmin);
149 hwmgr->fan_ctrl_is_in_default_mode = true;
150 }
151
152 return 0;
153}
154
155static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
156{
157 int result;
158
159 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
160 PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
161 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
162 result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
163
164 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
165 PHM_PlatformCaps_FanSpeedInTableIsRPM))
166 hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
167 hwmgr->thermal_controller.
168 advanceFanControlParameters.usMaxFanRPM);
169 else
170 hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
171 hwmgr->thermal_controller.
172 advanceFanControlParameters.usMaxFanPWM);
173
174 } else {
175 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
176 result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
177 }
178
179 if (!result && hwmgr->thermal_controller.
180 advanceFanControlParameters.ucTargetTemperature)
181 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
182 PPSMC_MSG_SetFanTemperatureTarget,
183 hwmgr->thermal_controller.
184 advanceFanControlParameters.ucTargetTemperature);
185
186 return result;
187}
188
189
190int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
191{
192 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
193}
194
195/**
196* Set Fan Speed in percent.
197* @param hwmgr the address of the powerplay hardware manager.
198* @param speed is the percentage value (0% - 100%) to be set.
199* @exception Fails is the 100% setting appears to be 0.
200*/
201int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
202 uint32_t speed)
203{
204 uint32_t duty100;
205 uint32_t duty;
206 uint64_t tmp64;
207
208 if (hwmgr->thermal_controller.fanInfo.bNoFan)
209 return 0;
210
211 if (speed > 100)
212 speed = 100;
213
214 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_MicrocodeFanControl))
216 fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
217
218 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
219 CG_FDO_CTRL1, FMAX_DUTY100);
220
221 if (duty100 == 0)
222 return -EINVAL;
223
224 tmp64 = (uint64_t)speed * duty100;
225 do_div(tmp64, 100);
226 duty = (uint32_t)tmp64;
227
228 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
229 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
230
231 return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
232}
233
234/**
235* Reset Fan Speed to default.
236* @param hwmgr the address of the powerplay hardware manager.
237* @exception Always succeeds.
238*/
239int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
240{
241 int result;
242
243 if (hwmgr->thermal_controller.fanInfo.bNoFan)
244 return 0;
245
246 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_MicrocodeFanControl)) {
248 result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
249 if (!result)
250 result = fiji_fan_ctrl_start_smc_fan_control(hwmgr);
251 } else
252 result = fiji_fan_ctrl_set_default_mode(hwmgr);
253
254 return result;
255}
256
257/**
258* Set Fan Speed in RPM.
259* @param hwmgr the address of the powerplay hardware manager.
260* @param speed is the percentage value (min - max) to be set.
261* @exception Fails is the speed not lie between min and max.
262*/
263int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
264{
265 uint32_t tach_period;
266 uint32_t crystal_clock_freq;
267
268 if (hwmgr->thermal_controller.fanInfo.bNoFan ||
269 (hwmgr->thermal_controller.fanInfo.
270 ucTachometerPulsesPerRevolution == 0) ||
271 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
272 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
273 return 0;
274
275 crystal_clock_freq = tonga_get_xclk(hwmgr);
276
277 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
278
279 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
280 CG_TACH_STATUS, TACH_PERIOD, tach_period);
281
282 return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
283}
284
285/**
286* Reads the remote temperature from the SIslands thermal controller.
287*
288* @param hwmgr The address of the hardware manager.
289*/
290int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
291{
292 int temp;
293
294 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
295 CG_MULT_THERMAL_STATUS, CTF_TEMP);
296
297 /* Bit 9 means the reading is lower than the lowest usable value. */
298 if (temp & 0x200)
299 temp = FIJI_THERMAL_MAXIMUM_TEMP_READING;
300 else
301 temp = temp & 0x1ff;
302
303 temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
304
305 return temp;
306}
307
308/**
309* Set the requested temperature range for high and low alert signals
310*
311* @param hwmgr The address of the hardware manager.
312* @param range Temperature range to be programmed for high and low alert signals
313* @exception PP_Result_BadInput if the input data is not valid.
314*/
315static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
316 uint32_t low_temp, uint32_t high_temp)
317{
318 uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP *
319 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
320 uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP *
321 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
322
323 if (low < low_temp)
324 low = low_temp;
325 if (high > high_temp)
326 high = high_temp;
327
328 if (low > high)
329 return -EINVAL;
330
331 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
332 CG_THERMAL_INT, DIG_THERM_INTH,
333 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
334 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
335 CG_THERMAL_INT, DIG_THERM_INTL,
336 (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
337 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
338 CG_THERMAL_CTRL, DIG_THERM_DPM,
339 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
340
341 return 0;
342}
343
/**
 * Programs thermal controller one-time setting registers
 *
 * @param hwmgr The address of the hardware manager.
 */
static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
{
	/* Program edges-per-revolution so RPM can be derived from the
	 * tachometer, but only if the fan reports pulses at all. */
	if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				CG_TACH_CTRL, EDGE_PER_REV,
				hwmgr->thermal_controller.fanInfo.
				ucTachometerPulsesPerRevolution - 1);

	/* 0x28 is the PWM response rate to tachometer input, carried over
	 * from the reference implementation — TODO confirm against SMC docs. */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);

	return 0;
}
362
/**
 * Enable high and low thermal alerts on the Fiji thermal controller.
 * (Old comment referred to RV770 — a copy/paste leftover.)
 *
 * @param hwmgr The address of the hardware manager.
 */
static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
	uint32_t alert;

	/* Clear the mask bits: unmasked == alert enabled. */
	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_THERMAL_INT, THERM_INT_MASK);
	alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_THERMAL_INT, THERM_INT_MASK, alert);

	/* send message to SMU to enable internal thermal interrupts */
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
}
381
/**
 * Disable high and low thermal alerts on the Fiji thermal controller.
 * (Old comment referred to RV770 — a copy/paste leftover.)
 * @param hwmgr The address of the hardware manager.
 */
static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
	uint32_t alert;

	/* Set the mask bits: masked == alert disabled. */
	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_THERMAL_INT, THERM_INT_MASK);
	alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_THERMAL_INT, THERM_INT_MASK, alert);

	/* send message to SMU to disable internal thermal interrupts */
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
}
399
400/**
401* Uninitialize the thermal controller.
402* Currently just disables alerts.
403* @param hwmgr The address of the hardware manager.
404*/
405int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
406{
407 int result = fiji_thermal_disable_alert(hwmgr);
408
409 if (hwmgr->thermal_controller.fanInfo.bNoFan)
410 fiji_fan_ctrl_set_default_mode(hwmgr);
411
412 return result;
413}
414
/**
 * Set up the fan table to control the fan using the SMC.
 * Builds an SMU73_Discrete_FanTable from the pptable fan parameters and
 * uploads it to SMC SRAM.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param pInput the pointer to input data (unused)
 * @param pOutput the pointer to output data (unused)
 * @param pStorage the pointer to temporary storage (unused)
 * @param Result the last failure code (unused)
 * @return always 0; on any failure the MicrocodeFanControl cap is
 *         unset instead of propagating an error, so the table walk
 *         continues without SMC fan control.
 */
static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	/* No fan-table location in SMC SRAM: nothing to upload. */
	if (data->fan_table_start == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_FDO_CTRL1, FMAX_DUTY100);

	if (duty100 == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Minimum duty, scaled from 0.01% units into the duty100 range. */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
			usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/* NOTE(review): assumes usTMed > usTMin and usTHigh > usTMed;
	 * equal trip points would divide by zero in the slope math below
	 * — TODO confirm the pptable guarantees strictly increasing
	 * temperature trip points. */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* PWM-per-degree slopes; the +50 / /100 pair rounds to nearest. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Trip temperatures: rounded to whole degrees, stored big-endian
	 * as the SMC expects. */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->
			thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = tonga_get_xclk(hwmgr);

	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
			thermal_controller.advanceFanControlParameters.ulCycleDelay *
			reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	/* Keep whatever temperature source the controller already uses. */
	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
			hwmgr->device, CGS_IND_REG__SMC,
			CG_MULT_THERMAL_CTRL, TEMP_SEL);

	res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
			data->sram_end);

	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ucMinimumPWMLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanMinPwm,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucMinimumPWMLimit);

	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanSclkTarget,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);

	/* On any failure the SMC cannot be trusted to run the fan:
	 * disable microcode fan control but still report success so the
	 * rest of the table walk proceeds. */
	if (res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);

	return 0;
}
526
527/**
528* Start the fan control on the SMC.
529* @param hwmgr the address of the powerplay hardware manager.
530* @param pInput the pointer to input data
531* @param pOutput the pointer to output data
532* @param pStorage the pointer to temporary storage
533* @param Result the last failure code
534* @return result from set temperature range routine
535*/
536static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
537 void *input, void *output, void *storage, int result)
538{
539/* If the fantable setup has failed we could have disabled
540 * PHM_PlatformCaps_MicrocodeFanControl even after
541 * this function was included in the table.
542 * Make sure that we still think controlling the fan is OK.
543*/
544 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
545 PHM_PlatformCaps_MicrocodeFanControl)) {
546 fiji_fan_ctrl_start_smc_fan_control(hwmgr);
547 fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
548 }
549
550 return 0;
551}
552
553/**
554* Set temperature range for high and low alerts
555* @param hwmgr the address of the powerplay hardware manager.
556* @param pInput the pointer to input data
557* @param pOutput the pointer to output data
558* @param pStorage the pointer to temporary storage
559* @param Result the last failure code
560* @return result from set temperature range routine
561*/
562int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
563 void *input, void *output, void *storage, int result)
564{
565 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
566
567 if (range == NULL)
568 return -EINVAL;
569
570 return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max);
571}
572
/**
 * Programs one-time setting registers.
 * Thin table-walk wrapper around fiji_thermal_initialize(); the
 * input/output/storage/result arguments are unused.
 * @param hwmgr the address of the powerplay hardware manager.
 * @return result from initialize thermal controller routine
 */
int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	return fiji_thermal_initialize(hwmgr);
}
587
/**
 * Enable high and low alerts.
 * Thin table-walk wrapper around fiji_thermal_enable_alert(); the
 * input/output/storage/result arguments are unused.
 * @param hwmgr the address of the powerplay hardware manager.
 * @return result from enable alert routine
 */
int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	return fiji_thermal_enable_alert(hwmgr);
}
602
/**
 * Disable high and low alerts.
 * Thin table-walk wrapper around fiji_thermal_disable_alert(); the
 * input/output/storage/result arguments are unused.
 * @param hwmgr the address of the powerplay hardware manager.
 * @return result from disable alert routine
 */
static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	return fiji_thermal_disable_alert(hwmgr);
}
617
/* Ordered steps to bring the thermal controller up.  Each entry is a
 * {storage, callback} pair; storage is unused here and the list is
 * NULL-terminated. */
static const struct phm_master_table_item
fiji_thermal_start_thermal_controller_master_list[] = {
	{NULL, tf_fiji_thermal_initialize},
	{NULL, tf_fiji_thermal_set_temperature_range},
	{NULL, tf_fiji_thermal_enable_alert},
/* We should restrict performance levels to low before we halt the SMC.
 * On the other hand we are still in boot state when we do this
 * so it would be pointless.
 * If this assumption changes we have to revisit this table.
 */
	{NULL, tf_fiji_thermal_setup_fan_table},
	{NULL, tf_fiji_thermal_start_smc_fan_control},
	{NULL, NULL}
};

static const struct phm_master_table_header
fiji_thermal_start_thermal_controller_master = {
	0,
	PHM_MasterTableFlag_None,
	fiji_thermal_start_thermal_controller_master_list
};
639
/* Reprogramming the alert range: disable alerts first so no spurious
 * interrupt fires while the thresholds are being rewritten, then
 * re-enable them. */
static const struct phm_master_table_item
fiji_thermal_set_temperature_range_master_list[] = {
	{NULL, tf_fiji_thermal_disable_alert},
	{NULL, tf_fiji_thermal_set_temperature_range},
	{NULL, tf_fiji_thermal_enable_alert},
	{NULL, NULL}
};

static const struct phm_master_table_header
fiji_thermal_set_temperature_range_master = {
	0,
	PHM_MasterTableFlag_None,
	fiji_thermal_set_temperature_range_master_list
};
654
/* If the board has a fan, return fan control to the default (hardware)
 * mode before tearing the controller down. */
int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
		fiji_fan_ctrl_set_default_mode(hwmgr);
	return 0;
}
661
662/**
663* Initializes the thermal controller related functions in the Hardware Manager structure.
664* @param hwmgr The address of the hardware manager.
665* @exception Any error code from the low-level communication.
666*/
667int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
668{
669 int result;
670
671 result = phm_construct_table(hwmgr,
672 &fiji_thermal_set_temperature_range_master,
673 &(hwmgr->set_temperature_range));
674
675 if (!result) {
676 result = phm_construct_table(hwmgr,
677 &fiji_thermal_start_thermal_controller_master,
678 &(hwmgr->start_thermal_controller));
679 if (result)
680 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
681 }
682
683 if (!result)
684 hwmgr->fan_ctrl_is_in_default_mode = true;
685 return result;
686}
687
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
deleted file mode 100644
index 8621493b8574..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef FIJI_THERMAL_H
#define FIJI_THERMAL_H

#include "hwmgr.h"

/* Bits within CG_THERMAL_INT.THERM_INT_MASK for the two alert sources. */
#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1
#define FIJI_THERMAL_LOW_ALERT_MASK 0x2

/* Limits of the raw CTF_TEMP reading. */
#define FIJI_THERMAL_MINIMUM_TEMP_READING -256
#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255

/* Programmable alert window, in degrees Celsius. */
#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0
#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255

/* Fan-duty-output (FDO) PWM mode values. */
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5


/* phm table-walk entry points (input/output/storage/result follow the
 * phm_master_table_item callback signature). */
extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);

/* Thermal and fan-control API used by the Fiji hwmgr. */
extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);

#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
deleted file mode 100644
index 47949f5cd073..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
26#include "hwmgr.h"
27#include "iceland_clockpowergating.h"
28#include "ppsmc.h"
29#include "iceland_hwmgr.h"
30
/* Iceland has no multimedia (UVD/VCE) hardware block, so powering UVD
 * down is a no-op that always succeeds. */
int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
	/* iceland does not have MM hardware block */
	return 0;
}
36
/* No-op counterpart of iceland_phm_powerdown_uvd(); always succeeds. */
static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
	/* iceland does not have MM hardware block */
	return 0;
}
42
/* VCE power-down is a no-op on Iceland (no MM block); always succeeds. */
static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
	/* iceland does not have MM hardware block */
	return 0;
}
48
/* VCE power-up is a no-op on Iceland (no MM block); always succeeds. */
static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr)
{
	/* iceland does not have MM hardware block */
	return 0;
}
54
55int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum
56 PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
57{
58 int ret = 0;
59
60 switch (block) {
61 case PHM_AsicBlock_UVD_MVC:
62 case PHM_AsicBlock_UVD:
63 case PHM_AsicBlock_UVD_HD:
64 case PHM_AsicBlock_UVD_SD:
65 if (gating == PHM_ClockGateSetting_StaticOff)
66 ret = iceland_phm_powerdown_uvd(hwmgr);
67 else
68 ret = iceland_phm_powerup_uvd(hwmgr);
69 break;
70 case PHM_AsicBlock_GFX:
71 default:
72 break;
73 }
74
75 return ret;
76}
77
/* Force UVD and VCE out of power gating and record the ungated state
 * in the backend flags. */
int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);

	data->uvd_power_gated = false;
	data->vce_power_gated = false;

	iceland_phm_powerup_uvd(hwmgr);
	iceland_phm_powerup_vce(hwmgr);

	return 0;
}
90
/* Power gate or ungate UVD.  Note the ordering: DPM is updated before
 * powering down but after powering up, so DPM never operates on a
 * powered-down block. */
int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	if (bgate) {
		iceland_update_uvd_dpm(hwmgr, true);
		iceland_phm_powerdown_uvd(hwmgr);
	} else {
		iceland_phm_powerup_uvd(hwmgr);
		iceland_update_uvd_dpm(hwmgr, false);
	}

	return 0;
}
103
104int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
105{
106 if (bgate)
107 return iceland_phm_powerdown_vce(hwmgr);
108 else
109 return iceland_phm_powerup_vce(hwmgr);
110
111 return 0;
112}
113
/* Clock-gating message updates are a no-op on Iceland: there is no MM
 * hardware block to gate.  @msg_id is intentionally ignored. */
int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
		const uint32_t *msg_id)
{
	/* iceland does not have MM hardware block */
	return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
deleted file mode 100644
index ff5ef00c7c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
#ifndef _ICELAND_CLOCK_POWER_GATING_H_
#define _ICELAND_CLOCK_POWER_GATING_H_

#include "iceland_hwmgr.h"
#include "pp_asicblocks.h"

/* Clock/power-gating entry points for the Iceland hwmgr.  Iceland has
 * no UVD/VCE (MM) hardware block, so several of these are no-ops. */
extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
deleted file mode 100644
index a7b4bc6caea2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
+++ /dev/null
@@ -1,41 +0,0 @@
#ifndef ICELAND_DYN_DEFAULTS_H
#define ICELAND_DYN_DEFAULTS_H

/* Direction hint for DPM activity-trend detection. */
enum ICELANDdpm_TrendDetection
{
	ICELANDdpm_TrendDetection_AUTO,
	ICELANDdpm_TrendDetection_UP,
	ICELANDdpm_TrendDetection_DOWN
};
typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection;


/* Default voting-rights-clients masks — presumably bitmasks of the
 * display/engine clients allowed to vote on clock requests; values
 * inherited from the reference implementation — TODO confirm against
 * the SMC documentation. */
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000


/* Remaining defaults: thermal-protect counter, static-screen detection
 * thresholds, ULV (ultra-low-voltage) parameters and target activity
 * percentages.  Magic values carried over unchanged from the reference
 * implementation. */
#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200

#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0

#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8

#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200

#define PPICELAND_REFERENCEDIVIDER_DFLT 4

#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687

#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035
#define PPICELAND_CGULVCONTROL_DFLT 0x00007450
#define PPICELAND_TARGETACTIVITY_DFLT 30
#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10

#endif
41
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
deleted file mode 100644
index 50aa23f15540..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ /dev/null
@@ -1,5666 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <linux/fb.h>
28#include "linux/delay.h"
29#include "pp_acpi.h"
30#include "hwmgr.h"
31#include <atombios.h>
32#include "iceland_hwmgr.h"
33#include "pptable.h"
34#include "processpptables.h"
35#include "pp_debug.h"
36#include "ppsmc.h"
37#include "cgs_common.h"
38#include "pppcielanes.h"
39#include "iceland_dyn_defaults.h"
40#include "smumgr.h"
41#include "iceland_smumgr.h"
42#include "iceland_clockpowergating.h"
43#include "iceland_thermal.h"
44#include "iceland_powertune.h"
45
46#include "gmc/gmc_8_1_d.h"
47#include "gmc/gmc_8_1_sh_mask.h"
48
49#include "bif/bif_5_0_d.h"
50#include "bif/bif_5_0_sh_mask.h"
51
52#include "smu/smu_7_1_1_d.h"
53#include "smu/smu_7_1_1_sh_mask.h"
54
55#include "cgs_linux.h"
56#include "eventmgr.h"
57#include "amd_pcie_helpers.h"
58
/* MC arbiter register-set selectors used when copying/switching ARB sets. */
#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

/* MC sequencer command codes (DRAM config and YCLK suspend/resume). */
#define MC_CG_SEQ_DRAMCONF_S0	0x05
#define MC_CG_SEQ_DRAMCONF_S1	0x06
#define MC_CG_SEQ_YCLK_SUSPEND	0x04
#define MC_CG_SEQ_YCLK_RESUME	0x0a

/* PCIe reference bus clock in 10 kHz units and derived timer clock. */
#define PCIE_BUS_CLK	10000
#define TCLK		(PCIE_BUS_CLK / 10)

/* SMC address-space bounds. */
#define SMC_RAM_END		0x40000
#define SMC_CG_IND_START	0xc0030000
#define SMC_CG_IND_END		0xc0040000 /* First byte after SMC_CG_IND*/

/* Voltage scaling factors for SMC-facing voltage encodings. */
#define VOLTAGE_SCALE			4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

/* Magic value identifying Volcanic Islands power states. */
const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic);

/* GDDR5 detection: field position and expected value in MC_SEQ_MISC0. */
#define MC_SEQ_MISC0_GDDR5_SHIFT	28
#define MC_SEQ_MISC0_GDDR5_MASK		0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE	5

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,		/* Internal analog trip point */
	DPM_EVENT_SRC_EXTERNAL = 1,		/* External (GPIO 17) signal */
	DPM_EVENT_SRC_DIGITAL = 2,		/* Internal digital trip point (DIG_THERM_DPM) */
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,	/* Internal analog or external */
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4	/* Internal digital or external */
};
94
/* Snapshot all clock-related PLL registers into the hwmgr backend so they
 * can later be handed to the SMC as the boot/default clock configuration. */
static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

	/* SCLK PLL (SPLL) registers live in SMC indirect space. */
	data->clock_registers.vCG_SPLL_FUNC_CNTL =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	/* Memory PLL (MPLL) and MCLK power-management registers are direct MMIO. */
	data->clock_registers.vDLL_CNTL =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1 =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2 =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);

	return 0;
}
132
133/**
134 * Find out if memory is GDDR5.
135 *
136 * @param hwmgr the address of the powerplay hardware manager.
137 * @return always 0
138 */
139int iceland_get_memory_type(struct pp_hwmgr *hwmgr)
140{
141 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
142 uint32_t temp;
143
144 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
145
146 data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
147 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
148 MC_SEQ_MISC0_GDDR5_SHIFT));
149
150 return 0;
151}
152
153int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
154{
155 /* iceland does not have MM hardware blocks */
156 return 0;
157}
158
159/**
160 * Enables Dynamic Power Management by SMC
161 *
162 * @param hwmgr the address of the powerplay hardware manager.
163 * @return always 0
164 */
165int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
166{
167 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
168
169 return 0;
170}
171
172/**
173 * Find the MC microcode version and store it in the HwMgr struct
174 *
175 * @param hwmgr the address of the powerplay hardware manager.
176 * @return always 0
177 */
178int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
179{
180 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
181
182 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
183
184 return 0;
185}
186
187static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr)
188{
189 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
190
191 data->low_sclk_interrupt_threshold = 0;
192
193 return 0;
194}
195
196
/* One-time ASIC setup: snapshot clock registers, detect memory type,
 * enable ACPI PM, cache MC ucode version and reset the SCLK threshold.
 * Runs every step even if an earlier one fails; returns the last error. */
static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int rc, ret = 0;

	rc = iceland_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to read clock registers!", ret = rc);

	rc = iceland_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to get memory type!", ret = rc);

	rc = iceland_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to enable ACPI power management!", ret = rc);

	rc = iceland_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to get MC microcode version!", ret = rc);

	rc = iceland_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to init sclk threshold!", ret = rc);

	return ret;
}
223
224static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr)
225{
226 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
227
228 return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control;
229}
230
231/*
232 * -------------- Voltage Tables ----------------------
233 * If the voltage table would be bigger than what will fit into the
234 * state table on the SMC keep only the higher entries.
235 */
236
237static void iceland_trim_voltage_table_to_fit_state_table(
238 struct pp_hwmgr *hwmgr,
239 uint32_t max_voltage_steps,
240 pp_atomctrl_voltage_table *voltage_table)
241{
242 unsigned int i, diff;
243
244 if (voltage_table->count <= max_voltage_steps) {
245 return;
246 }
247
248 diff = voltage_table->count - max_voltage_steps;
249
250 for (i = 0; i < max_voltage_steps; i++) {
251 voltage_table->entries[i] = voltage_table->entries[i + diff];
252 }
253
254 voltage_table->count = max_voltage_steps;
255
256 return;
257}
258
259/**
260 * Enable voltage control
261 *
262 * @param hwmgr the address of the powerplay hardware manager.
263 * @return always 0
264 */
265int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr)
266{
267 /* enable voltage control */
268 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
269
270 return 0;
271}
272
273static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr,
274 struct phm_clock_voltage_dependency_table *voltage_dependency_table,
275 pp_atomctrl_voltage_table *voltage_table)
276{
277 uint32_t i;
278
279 PP_ASSERT_WITH_CODE((NULL != voltage_table),
280 "Voltage Dependency Table empty.", return -EINVAL;);
281
282 voltage_table->mask_low = 0;
283 voltage_table->phase_delay = 0;
284 voltage_table->count = voltage_dependency_table->count;
285
286 for (i = 0; i < voltage_dependency_table->count; i++) {
287 voltage_table->entries[i].value =
288 voltage_dependency_table->entries[i].v;
289 voltage_table->entries[i].smio_low = 0;
290 }
291
292 return 0;
293}
294
/**
 * Create Voltage Tables.
 *
 * Builds the VDDC, VDDCI and MVDD voltage tables from either the ATOM
 * GPIO lookup tables or the SVI2 clock/voltage dependency tables,
 * depending on the configured control method, then trims each table to
 * the SMC's state-table capacity.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, or the error from the failed table retrieval.
 */
int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	int result;

	/* GPIO voltage */
	if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
					&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve VDDC table.", return result;);
	} else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		/* SVI2 VDDC voltage */
		result = iceland_get_svi2_voltage_table(hwmgr,
					hwmgr->dyn_state.vddc_dependency_on_mclk,
					&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
	}

	/* Keep only the highest entries if the table exceeds SMC capacity. */
	PP_ASSERT_WITH_CODE(
		(data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)),
		"Too many voltage values for VDDC. Trimming to fit state table.",
		iceland_trim_voltage_table_to_fit_state_table(hwmgr,
		SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
		);

	/* GPIO */
	if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
					VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve VDDCI table.", return result;);
	}

	/* SVI2 VDDCI voltage */
	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
		result = iceland_get_svi2_voltage_table(hwmgr,
					hwmgr->dyn_state.vddci_dependency_on_mclk,
					&data->vddci_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
	}

	PP_ASSERT_WITH_CODE(
		(data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)),
		"Too many voltage values for VDDCI. Trimming to fit state table.",
		iceland_trim_voltage_table_to_fit_state_table(hwmgr,
		SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
		);


	/* GPIO */
	if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
					VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve table.", return result;);
	}

	/* SVI2 voltage control */
	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		result = iceland_get_svi2_voltage_table(hwmgr,
					hwmgr->dyn_state.mvdd_dependency_on_mclk,
					&data->mvdd_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 MVDD table from dependancy table.", return result;);
	}

	PP_ASSERT_WITH_CODE(
		(data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)),
		"Too many voltage values for MVDD. Trimming to fit state table.",
		iceland_trim_voltage_table_to_fit_state_table(hwmgr,
		SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
		);

	return 0;
}
380
381/*---------------------------MC----------------------------*/
382
383uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr)
384{
385 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
386}
387
388bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
389{
390 bool result = true;
391
392 switch (inReg) {
393 case mmMC_SEQ_RAS_TIMING:
394 *outReg = mmMC_SEQ_RAS_TIMING_LP;
395 break;
396
397 case mmMC_SEQ_DLL_STBY:
398 *outReg = mmMC_SEQ_DLL_STBY_LP;
399 break;
400
401 case mmMC_SEQ_G5PDX_CMD0:
402 *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
403 break;
404
405 case mmMC_SEQ_G5PDX_CMD1:
406 *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
407 break;
408
409 case mmMC_SEQ_G5PDX_CTRL:
410 *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
411 break;
412
413 case mmMC_SEQ_CAS_TIMING:
414 *outReg = mmMC_SEQ_CAS_TIMING_LP;
415 break;
416
417 case mmMC_SEQ_MISC_TIMING:
418 *outReg = mmMC_SEQ_MISC_TIMING_LP;
419 break;
420
421 case mmMC_SEQ_MISC_TIMING2:
422 *outReg = mmMC_SEQ_MISC_TIMING2_LP;
423 break;
424
425 case mmMC_SEQ_PMG_DVS_CMD:
426 *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
427 break;
428
429 case mmMC_SEQ_PMG_DVS_CTL:
430 *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
431 break;
432
433 case mmMC_SEQ_RD_CTL_D0:
434 *outReg = mmMC_SEQ_RD_CTL_D0_LP;
435 break;
436
437 case mmMC_SEQ_RD_CTL_D1:
438 *outReg = mmMC_SEQ_RD_CTL_D1_LP;
439 break;
440
441 case mmMC_SEQ_WR_CTL_D0:
442 *outReg = mmMC_SEQ_WR_CTL_D0_LP;
443 break;
444
445 case mmMC_SEQ_WR_CTL_D1:
446 *outReg = mmMC_SEQ_WR_CTL_D1_LP;
447 break;
448
449 case mmMC_PMG_CMD_EMRS:
450 *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
451 break;
452
453 case mmMC_PMG_CMD_MRS:
454 *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
455 break;
456
457 case mmMC_PMG_CMD_MRS1:
458 *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
459 break;
460
461 case mmMC_SEQ_PMG_TIMING:
462 *outReg = mmMC_SEQ_PMG_TIMING_LP;
463 break;
464
465 case mmMC_PMG_CMD_MRS2:
466 *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
467 break;
468
469 case mmMC_SEQ_WR_CTL_2:
470 *outReg = mmMC_SEQ_WR_CTL_2_LP;
471 break;
472
473 default:
474 result = false;
475 break;
476 }
477
478 return result;
479}
480
481int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table)
482{
483 uint32_t i;
484 uint16_t address;
485
486 for (i = 0; i < table->last; i++) {
487 table->mc_reg_address[i].s0 =
488 iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
489 ? address : table->mc_reg_address[i].s1;
490 }
491 return 0;
492}
493
494int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table)
495{
496 uint8_t i, j;
497
498 PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
499 "Invalid VramInfo table.", return -1);
500 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
501 "Invalid VramInfo table.", return -1);
502
503 for (i = 0; i < table->last; i++) {
504 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
505 }
506 ni_table->last = table->last;
507
508 for (i = 0; i < table->num_entries; i++) {
509 ni_table->mc_reg_table_entry[i].mclk_max =
510 table->mc_reg_table_entry[i].mclk_max;
511 for (j = 0; j < table->last; j++) {
512 ni_table->mc_reg_table_entry[i].mc_data[j] =
513 table->mc_reg_table_entry[i].mc_data[j];
514 }
515 }
516
517 ni_table->num_entries = table->num_entries;
518
519 return 0;
520}
521
/**
 * VBIOS omits some information to reduce size, we need to recover them here.
 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
 *    Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
 * 3. need to set these data for each clock range
 *
 * New rows are appended at index j (initially table->last) while the
 * existing rows 0..last-1 are scanned at index i.
 *
 * NOTE(review): the bounds checks mix `j <` and `j <=` against
 * SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE; the `<=` variants allow j to
 * reach the array size before the next append — looks off-by-one, verify
 * against the SMU71 table layout.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param table the address of MCRegTable
 * @return 0 on success, -1 when the expanded table would overflow.
 */
static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table)
{
	uint8_t i, j, k;
	uint32_t temp_reg;
	const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);

	for (i = 0, j = table->last; i < table->last; i++) {
		PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -1);
		switch (table->mc_reg_address[i].s1) {
		/*
		 * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write
		 * to mmMC_PMG_CMD_EMRS/_LP[15:0]. Bit[15:0] MRS, need
		 * to be update mmMC_PMG_CMD_MRS/_LP[15:0]
		 */
		case mmMC_SEQ_MISC1:
			/* Append an EMRS row: current EMRS high half + MISC1 high half. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -1);

			/* Append an MRS row: current MRS high half + MISC1 low half. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);

				/* Non-GDDR5 parts need bit 8 set in the MRS command. */
				if (!data->is_memory_GDDR5) {
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
				}
			}
			j++;
			PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -1);

			/* Non-GDDR5 also gets an auto-command row from MISC1's high half. */
			if (!data->is_memory_GDDR5) {
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
					"Invalid VramInfo table.", return -1);
			}

			break;

		case mmMC_SEQ_RESERVE_M:
			/* Append an MRS1 row: current MRS1 high half + RESERVE_M low half. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -1);
			break;

		default:
			break;
		}

	}

	table->last = j;

	return 0;
}
615
616
617static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table)
618{
619 uint8_t i, j;
620 for (i = 0; i < table->last; i++) {
621 for (j = 1; j < table->num_entries; j++) {
622 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
623 table->mc_reg_table_entry[j].mc_data[i]) {
624 table->validflag |= (1<<i);
625 break;
626 }
627 }
628 }
629
630 return 0;
631}
632
/* Build the driver MC register table: mirror the non-LP registers into
 * their _LP shadows, read the VBIOS table, copy it, synthesize the
 * registers the VBIOS omits, and flag the per-clock-range ones. */
static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	pp_atomctrl_mc_reg_table *table;
	phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table;
	uint8_t module_index = iceland_get_memory_module_index(hwmgr);

	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);

	if (NULL == table)
		return -ENOMEM;

	/* Program additional LP registers that are no longer programmed by VBIOS */
	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));

	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));

	/* Read the raw table from the VBIOS for this memory module. */
	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);

	if (0 == result)
		result = iceland_copy_vbios_smc_reg_table(table, ni_table);

	if (0 == result) {
		iceland_set_s0_mc_reg_index(ni_table);
		result = iceland_set_mc_special_registers(hwmgr, ni_table);
	}

	if (0 == result)
		iceland_set_valid_flag(ni_table);

	kfree(table);
	return result;
}
686
687/**
688 * Programs static screed detection parameters
689 *
690 * @param hwmgr the address of the powerplay hardware manager.
691 * @return always 0
692 */
693int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
694{
695 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
696
697 /* Set static screen threshold unit*/
698 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
699 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
700 data->static_screen_threshold_unit);
701 /* Set static screen threshold*/
702 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
703 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
704 data->static_screen_threshold);
705
706 return 0;
707}
708
709/**
710 * Setup display gap for glitch free memory clock switching.
711 *
712 * @param hwmgr the address of the powerplay hardware manager.
713 * @return always 0
714 */
715int iceland_enable_display_gap(struct pp_hwmgr *hwmgr)
716{
717 uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
718 CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
719
720 display_gap = PHM_SET_FIELD(display_gap,
721 CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
722
723 display_gap = PHM_SET_FIELD(display_gap,
724 CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
725
726 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
727 ixCG_DISPLAY_GAP_CNTL, display_gap);
728
729 return 0;
730}
731
/**
 * Programs activity state transition voting clients
 *
 * Releases the voting-client counters from reset, then writes the eight
 * configured voting-rights masks into CG_FREQ_TRAN_VOTING_0..7.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int iceland_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
		SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
		SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

	return 0;
}
767
/* No-op at the hwmgr level; nothing is uploaded here. */
static int iceland_upload_firmware(struct pp_hwmgr *hwmgr)
{
	return 0;
}
772
773/**
774 * Get the location of various tables inside the FW image.
775 *
776 * @param hwmgr the address of the powerplay hardware manager.
777 * @return always 0
778 */
779static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
780{
781 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
782
783 uint32_t tmp;
784 int result;
785 bool error = 0;
786
787 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
788 SMU71_FIRMWARE_HEADER_LOCATION +
789 offsetof(SMU71_Firmware_Header, DpmTable),
790 &tmp, data->sram_end);
791
792 if (0 == result) {
793 data->dpm_table_start = tmp;
794 }
795
796 error |= (0 != result);
797
798 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
799 SMU71_FIRMWARE_HEADER_LOCATION +
800 offsetof(SMU71_Firmware_Header, SoftRegisters),
801 &tmp, data->sram_end);
802
803 if (0 == result) {
804 data->soft_regs_start = tmp;
805 }
806
807 error |= (0 != result);
808
809
810 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
811 SMU71_FIRMWARE_HEADER_LOCATION +
812 offsetof(SMU71_Firmware_Header, mcRegisterTable),
813 &tmp, data->sram_end);
814
815 if (0 == result) {
816 data->mc_reg_table_start = tmp;
817 }
818
819 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
820 SMU71_FIRMWARE_HEADER_LOCATION +
821 offsetof(SMU71_Firmware_Header, FanTable),
822 &tmp, data->sram_end);
823
824 if (0 == result) {
825 data->fan_table_start = tmp;
826 }
827
828 error |= (0 != result);
829
830 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
831 SMU71_FIRMWARE_HEADER_LOCATION +
832 offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
833 &tmp, data->sram_end);
834
835 if (0 == result) {
836 data->arb_table_start = tmp;
837 }
838
839 error |= (0 != result);
840
841
842 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
843 SMU71_FIRMWARE_HEADER_LOCATION +
844 offsetof(SMU71_Firmware_Header, Version),
845 &tmp, data->sram_end);
846
847 if (0 == result) {
848 hwmgr->microcode_version_info.SMC = tmp;
849 }
850
851 error |= (0 != result);
852
853 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
854 SMU71_FIRMWARE_HEADER_LOCATION +
855 offsetof(SMU71_Firmware_Header, UlvSettings),
856 &tmp, data->sram_end);
857
858 if (0 == result) {
859 data->ulv_settings_start = tmp;
860 }
861
862 error |= (0 != result);
863
864 return error ? 1 : 0;
865}
866
/*
 * Copy one arb setting to another and then switch the active set.
 * arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants.
 * Only F0 and F1 are supported; any other set returns -1.
 */
int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arbFreqSrc, uint32_t arbFreqDest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Capture the source set's DRAM timing and burst time. */
	switch (arbFreqSrc) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;

	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;

	default:
		return -1;
	}

	/* Replay it into the destination set. */
	switch (arbFreqDest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;

	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;

	default:
		return -1;
	}

	/* Enable CG on all four MC channels and request the destination set. */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);

	return 0;
}
920
921/**
922 * Initial switch from ARB F0->F1
923 *
924 * @param hwmgr the address of the powerplay hardware manager.
925 * @return always 0
926 * This function is to be called from the SetPowerState table.
927 */
928int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
929{
930 return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
931}
932
933/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
934
935
936static int iceland_reset_single_dpm_table(
937 struct pp_hwmgr *hwmgr,
938 struct iceland_single_dpm_table *dpm_table,
939 uint32_t count)
940{
941 uint32_t i;
942 if (!(count <= MAX_REGULAR_DPM_NUMBER))
943 printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \
944 table entries to exceed max number! \n");
945
946 dpm_table->count = count;
947 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
948 dpm_table->dpm_levels[i].enabled = 0;
949 }
950
951 return 0;
952}
953
954static void iceland_setup_pcie_table_entry(
955 struct iceland_single_dpm_table *dpm_table,
956 uint32_t index, uint32_t pcie_gen,
957 uint32_t pcie_lanes)
958{
959 dpm_table->dpm_levels[index].value = pcie_gen;
960 dpm_table->dpm_levels[index].param1 = pcie_lanes;
961 dpm_table->dpm_levels[index].enabled = 1;
962}
963
964/*
965 * Set up the PCIe DPM table as follows:
966 *
967 * A = Performance State, Max, Gen Speed
968 * C = Performance State, Min, Gen Speed
969 * 1 = Performance State, Max, Lane #
970 * 3 = Performance State, Min, Lane #
971 *
972 * B = Power Saving State, Max, Gen Speed
973 * D = Power Saving State, Min, Gen Speed
974 * 2 = Power Saving State, Max, Lane #
975 * 4 = Power Saving State, Min, Lane #
976 *
977 *
978 * DPM Index Gen Speed Lane #
979 * 5 A 1
980 * 4 B 2
981 * 3 C 1
982 * 2 D 2
983 * 1 C 3
984 * 0 D 4
985 *
986 */
987static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
988{
989 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
990
991 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
992 data->use_pcie_power_saving_levels),
993 "No pcie performance levels!", return -EINVAL);
994
995 if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
996 data->pcie_gen_power_saving = data->pcie_gen_performance;
997 data->pcie_lane_power_saving = data->pcie_lane_performance;
998 } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
999 data->pcie_gen_performance = data->pcie_gen_power_saving;
1000 data->pcie_lane_performance = data->pcie_lane_power_saving;
1001 }
1002
1003 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK);
1004
1005 /* Hardcode Pcie Table */
1006 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
1007 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
1008 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1009 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
1010 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
1011 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1012 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
1013 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1014 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1015 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
1016 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1017 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1018 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
1019 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1020 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1021 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
1022 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1023 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1024 data->dpm_table.pcie_speed_table.count = 6;
1025
1026 return 0;
1027
1028}
1029
1030
1031/*
1032 * This function is to initalize all DPM state tables for SMU7 based on the dependency table.
1033 * Dynamic state patching function will then trim these state tables to the allowed range based
1034 * on the power policy or external client requests, such as UVD request, etc.
1035 */
1036static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1037{
1038 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1039 uint32_t i;
1040
1041 struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
1042 hwmgr->dyn_state.vddc_dependency_on_sclk;
1043 struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
1044 hwmgr->dyn_state.vddc_dependency_on_mclk;
1045 struct phm_cac_leakage_table *std_voltage_table =
1046 hwmgr->dyn_state.cac_leakage_table;
1047
1048 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
1049 "SCLK dependency table is missing. This table is mandatory", return -1);
1050 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
1051 "SCLK dependency table has to have is missing. This table is mandatory", return -1);
1052
1053 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
1054 "MCLK dependency table is missing. This table is mandatory", return -1);
1055 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
1056 "VMCLK dependency table has to have is missing. This table is mandatory", return -1);
1057
1058 /* clear the state table to reset everything to default */
1059 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
1060 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS);
1061 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY);
1062 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC);
1063 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI);
1064 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD);
1065
1066 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
1067 "SCLK dependency table is missing. This table is mandatory", return -1);
1068 /* Initialize Sclk DPM table based on allow Sclk values*/
1069 data->dpm_table.sclk_table.count = 0;
1070
1071 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
1072 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
1073 allowed_vdd_sclk_table->entries[i].clk) {
1074 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
1075 allowed_vdd_sclk_table->entries[i].clk;
1076 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
1077 data->dpm_table.sclk_table.count++;
1078 }
1079 }
1080
1081 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
1082 "MCLK dependency table is missing. This table is mandatory", return -1);
1083 /* Initialize Mclk DPM table based on allow Mclk values */
1084 data->dpm_table.mclk_table.count = 0;
1085 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
1086 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
1087 allowed_vdd_mclk_table->entries[i].clk) {
1088 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
1089 allowed_vdd_mclk_table->entries[i].clk;
1090 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
1091 data->dpm_table.mclk_table.count++;
1092 }
1093 }
1094
1095 /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
1096 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
1097 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
1098 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
1099 /* param1 is for corresponding std voltage */
1100 data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
1101 }
1102
1103 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
1104 allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
1105
1106 if (NULL != allowed_vdd_mclk_table) {
1107 /* Initialize Vddci DPM table based on allow Mclk values */
1108 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
1109 data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
1110 data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
1111 }
1112 data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
1113 }
1114
1115 allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
1116
1117 if (NULL != allowed_vdd_mclk_table) {
1118 /*
1119 * Initialize MVDD DPM table based on allow Mclk
1120 * values
1121 */
1122 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
1123 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
1124 data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
1125 }
1126 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
1127 }
1128
1129 /* setup PCIE gen speed levels*/
1130 iceland_setup_default_pcie_tables(hwmgr);
1131
1132 /* save a copy of the default DPM table*/
1133 memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table));
1134
1135 return 0;
1136}
1137
1138/**
1139 * @brief PhwIceland_GetVoltageOrder
1140 * Returns index of requested voltage record in lookup(table)
1141 * @param hwmgr - pointer to hardware manager
1142 * @param lookutab - lookup list to search in
1143 * @param voltage - voltage to look for
1144 * @return 0 on success
1145 */
1146uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
1147 uint16_t voltage)
1148{
1149 uint8_t count = (uint8_t) (look_up_table->count);
1150 uint8_t i;
1151
1152 PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
1153 PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
1154
1155 for (i = 0; i < count; i++) {
1156 /* find first voltage equal or bigger than requested */
1157 if (look_up_table->entries[i].us_vdd >= voltage)
1158 return i;
1159 }
1160
1161 /* voltage is bigger than max voltage in the table */
1162 return i-1;
1163}
1164
1165
1166static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
1167 pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
1168 uint16_t *lo)
1169{
1170 uint16_t v_index;
1171 bool vol_found = false;
1172 *hi = tab->value * VOLTAGE_SCALE;
1173 *lo = tab->value * VOLTAGE_SCALE;
1174
1175 /* SCLK/VDDC Dependency Table has to exist. */
1176 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
1177 "The SCLK/VDDC Dependency Table does not exist.\n",
1178 return -EINVAL);
1179
1180 if (NULL == hwmgr->dyn_state.cac_leakage_table) {
1181 pr_warning("CAC Leakage Table does not exist, using vddc.\n");
1182 return 0;
1183 }
1184
1185 /*
1186 * Since voltage in the sclk/vddc dependency table is not
1187 * necessarily in ascending order because of ELB voltage
1188 * patching, loop through entire list to find exact voltage.
1189 */
1190 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
1191 if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
1192 vol_found = true;
1193 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
1194 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
1195 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
1196 } else {
1197 pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
1198 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
1199 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
1200 }
1201 break;
1202 }
1203 }
1204
1205 /*
1206 * If voltage is not found in the first pass, loop again to
1207 * find the best match, equal or higher value.
1208 */
1209 if (!vol_found) {
1210 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
1211 if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
1212 vol_found = true;
1213 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
1214 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
1215 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
1216 } else {
1217 pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
1218 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
1219 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
1220 }
1221 break;
1222 }
1223 }
1224
1225 if (!vol_found)
1226 pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
1227 }
1228
1229 return 0;
1230}
1231
1232static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
1233 pp_atomctrl_voltage_table_entry *tab,
1234 SMU71_Discrete_VoltageLevel *smc_voltage_tab) {
1235 int result;
1236
1237
1238 result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
1239 &smc_voltage_tab->StdVoltageHiSidd,
1240 &smc_voltage_tab->StdVoltageLoSidd);
1241 if (0 != result) {
1242 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
1243 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
1244 }
1245
1246 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
1247 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
1248 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
1249
1250 return 0;
1251}
1252
1253/**
1254 * Vddc table preparation for SMC.
1255 *
1256 * @param hwmgr the address of the hardware manager
1257 * @param table the SMC DPM table structure to be populated
1258 * @return always 0
1259 */
1260static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
1261 SMU71_Discrete_DpmTable *table)
1262{
1263 unsigned int count;
1264 int result;
1265
1266 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1267
1268 table->VddcLevelCount = data->vddc_voltage_table.count;
1269 for (count = 0; count < table->VddcLevelCount; count++) {
1270 result = iceland_populate_smc_voltage_table(hwmgr,
1271 &data->vddc_voltage_table.entries[count],
1272 &table->VddcLevel[count]);
1273 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
1274
1275 /* GPIO voltage control */
1276 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
1277 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
1278 else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1279 table->VddcLevel[count].Smio = 0;
1280 }
1281
1282 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
1283
1284 return 0;
1285}
1286
1287/**
1288 * Vddci table preparation for SMC.
1289 *
1290 * @param *hwmgr The address of the hardware manager.
1291 * @param *table The SMC DPM table structure to be populated.
1292 * @return 0
1293 */
1294static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
1295 SMU71_Discrete_DpmTable *table)
1296{
1297 int result;
1298 uint32_t count;
1299 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1300
1301 table->VddciLevelCount = data->vddci_voltage_table.count;
1302 for (count = 0; count < table->VddciLevelCount; count++) {
1303 result = iceland_populate_smc_voltage_table(hwmgr,
1304 &data->vddci_voltage_table.entries[count],
1305 &table->VddciLevel[count]);
1306 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL);
1307
1308 /* GPIO voltage control */
1309 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control)
1310 table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
1311 else
1312 table->VddciLevel[count].Smio = 0;
1313 }
1314
1315 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
1316
1317 return 0;
1318}
1319
1320/**
1321 * Mvdd table preparation for SMC.
1322 *
1323 * @param *hwmgr The address of the hardware manager.
1324 * @param *table The SMC DPM table structure to be populated.
1325 * @return 0
1326 */
1327static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
1328 SMU71_Discrete_DpmTable *table)
1329{
1330 int result;
1331 uint32_t count;
1332 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1333
1334 table->MvddLevelCount = data->mvdd_voltage_table.count;
1335 for (count = 0; count < table->MvddLevelCount; count++) {
1336 result = iceland_populate_smc_voltage_table(hwmgr,
1337 &data->mvdd_voltage_table.entries[count],
1338 &table->MvddLevel[count]);
1339 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL);
1340
1341 /* GPIO voltage control */
1342 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
1343 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
1344 else
1345 table->MvddLevel[count].Smio = 0;
1346 }
1347
1348 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
1349
1350 return 0;
1351}
1352
1353int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
1354{
1355 int i;
1356 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
1357 uint8_t * hi_vid = data->power_tune_table.BapmVddCVidHiSidd;
1358 uint8_t * lo_vid = data->power_tune_table.BapmVddCVidLoSidd;
1359
1360 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
1361 "The CAC Leakage table does not exist!", return -EINVAL);
1362 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
1363 "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
1364 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
1365 "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
1366
1367 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
1368 for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
1369 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
1370 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
1371 }
1372 } else {
1373 PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
1374 }
1375
1376 return 0;
1377}
1378
1379int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
1380{
1381 int i;
1382 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
1383 uint8_t *vid = data->power_tune_table.VddCVid;
1384
1385 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
1386 "There should never be more than 8 entries for VddcVid!!!",
1387 return -EINVAL);
1388
1389 for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
1390 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
1391 }
1392
1393 return 0;
1394}
1395
1396/**
1397 * Preparation of voltage tables for SMC.
1398 *
1399 * @param hwmgr the address of the hardware manager
1400 * @param table the SMC DPM table structure to be populated
1401 * @return always 0
1402 */
1403
1404int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
1405 SMU71_Discrete_DpmTable *table)
1406{
1407 int result;
1408
1409 result = iceland_populate_smc_vddc_table(hwmgr, table);
1410 PP_ASSERT_WITH_CODE(0 == result,
1411 "can not populate VDDC voltage table to SMC", return -1);
1412
1413 result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
1414 PP_ASSERT_WITH_CODE(0 == result,
1415 "can not populate VDDCI voltage table to SMC", return -1);
1416
1417 result = iceland_populate_smc_mvdd_table(hwmgr, table);
1418 PP_ASSERT_WITH_CODE(0 == result,
1419 "can not populate MVDD voltage table to SMC", return -1);
1420
1421 return 0;
1422}
1423
1424
1425/**
1426 * Re-generate the DPM level mask value
1427 * @param hwmgr the address of the hardware manager
1428 */
1429static uint32_t iceland_get_dpm_level_enable_mask_value(
1430 struct iceland_single_dpm_table * dpm_table)
1431{
1432 uint32_t i;
1433 uint32_t mask_value = 0;
1434
1435 for (i = dpm_table->count; i > 0; i--) {
1436 mask_value = mask_value << 1;
1437
1438 if (dpm_table->dpm_levels[i-1].enabled)
1439 mask_value |= 0x1;
1440 else
1441 mask_value &= 0xFFFFFFFE;
1442 }
1443 return mask_value;
1444}
1445
/*
 * Compute one MC ARB DRAM timing table entry for the given engine/memory
 * clock pair: program the timings via VBIOS, then read them back and
 * store them (in SMC byte order) in the entry.
 *
 * @param hwmgr        the address of the powerplay hardware manager
 * @param engine_clock sclk the entry is generated for
 * @param memory_clock mclk the entry is generated for
 * @param arb_regs     SMC ARB timing entry to fill in
 * @return 0 on success, the VBIOS call's error code otherwise
 */
int iceland_populate_memory_timing_parameters(
	struct pp_hwmgr *hwmgr,
	uint32_t engine_clock,
	uint32_t memory_clock,
	struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
	)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	/* Have the VBIOS program the DRAM timings for this clock pair. */
	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back what was programmed so it can be mirrored to the SMC. */
	dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
1474
1475/**
1476 * Setup parameters for the MC ARB.
1477 *
1478 * @param hwmgr the address of the powerplay hardware manager.
1479 * @return always 0
1480 * This function is to be called from the SetPowerState table.
1481 */
1482int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1483{
1484 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1485 int result = 0;
1486 SMU71_Discrete_MCArbDramTimingTable arb_regs;
1487 uint32_t i, j;
1488
1489 memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
1490
1491 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1492 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1493 result = iceland_populate_memory_timing_parameters
1494 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1495 data->dpm_table.mclk_table.dpm_levels[j].value,
1496 &arb_regs.entries[i][j]);
1497
1498 if (0 != result) {
1499 break;
1500 }
1501 }
1502 }
1503
1504 if (0 == result) {
1505 result = smu7_copy_bytes_to_smc(
1506 hwmgr->smumgr,
1507 data->arb_table_start,
1508 (uint8_t *)&arb_regs,
1509 sizeof(SMU71_Discrete_MCArbDramTimingTable),
1510 data->sram_end
1511 );
1512 }
1513
1514 return result;
1515}
1516
/*
 * Populate the SMC link (PCIe) DPM levels from the PCIe speed table and
 * refresh the pcie DPM enable mask.
 *
 * @param hwmgr the address of the powerplay hardware manager
 * @param table the SMC DPM table structure to be populated
 * @return always 0
 */
static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	struct iceland_dpm_table *dpm_table = &data->dpm_table;
	uint32_t i;

	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
	/* The '<=' bound is deliberate: the loop fills count+1 entries, the
	 * last being that reserved boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity =
			1;
		table->LinkLevel[i].SPC =
			(uint8_t)(data->pcie_spc_cap & 0xff);
		/* Up/down thresholds are fixed percentages, stored in SMC byte order. */
		table->LinkLevel[i].DownThreshold =
			PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold =
			PP_HOST_TO_SMC_UL(30);
	}

	data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
1546
/* No-op: no UVD level data is written into the SMC DPM table here. */
static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1552
1553uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
1554 uint32_t voltage)
1555{
1556 uint8_t count = (uint8_t) (voltage_table->count);
1557 uint8_t i = 0;
1558
1559 PP_ASSERT_WITH_CODE((NULL != voltage_table),
1560 "Voltage Table empty.", return 0;);
1561 PP_ASSERT_WITH_CODE((0 != count),
1562 "Voltage Table empty.", return 0;);
1563
1564 for (i = 0; i < count; i++) {
1565 /* find first voltage bigger than requested */
1566 if (voltage_table->entries[i].value >= voltage)
1567 return i;
1568 }
1569
1570 /* voltage is bigger than max voltage in the table */
1571 return i - 1;
1572}
1573
/* No-op: no VCE level data is written into the SMC DPM table here. */
static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1579
/* No-op: no ACP level data is written into the SMC DPM table here. */
static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1585
/* No-op: no SAMU level data is written into the SMC DPM table here. */
static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1591
1592
1593static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1594 SMU71_Discrete_DpmTable *tab)
1595{
1596 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1597
1598 if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1599 tab->SVI2Enable |= VDDC_ON_SVI2;
1600
1601 if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control)
1602 tab->SVI2Enable |= VDDCI_ON_SVI2;
1603 else
1604 tab->MergedVddci = 1;
1605
1606 if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
1607 tab->SVI2Enable |= MVDD_ON_SVI2;
1608
1609 PP_ASSERT_WITH_CODE( tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
1610 (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
1611
1612 return 0;
1613}
1614
1615static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
1616 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
1617 uint32_t clock, uint32_t *vol)
1618{
1619 uint32_t i = 0;
1620
1621 /* clock - voltage dependency table is empty table */
1622 if (allowed_clock_voltage_table->count == 0)
1623 return -EINVAL;
1624
1625 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1626 /* find first sclk bigger than request */
1627 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1628 *vol = allowed_clock_voltage_table->entries[i].v;
1629 return 0;
1630 }
1631 }
1632
1633 /* sclk is bigger than max sclk in the dependence table */
1634 *vol = allowed_clock_voltage_table->entries[i - 1].v;
1635
1636 return 0;
1637}
1638
1639static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
1640 bool strobe_mode)
1641{
1642 uint8_t mc_para_index;
1643
1644 if (strobe_mode) {
1645 if (memory_clock < 12500) {
1646 mc_para_index = 0x00;
1647 } else if (memory_clock > 47500) {
1648 mc_para_index = 0x0f;
1649 } else {
1650 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
1651 }
1652 } else {
1653 if (memory_clock < 65000) {
1654 mc_para_index = 0x00;
1655 } else if (memory_clock > 135000) {
1656 mc_para_index = 0x0f;
1657 } else {
1658 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
1659 }
1660 }
1661
1662 return mc_para_index;
1663}
1664
1665static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
1666{
1667 uint8_t mc_para_index;
1668
1669 if (memory_clock < 10000) {
1670 mc_para_index = 0;
1671 } else if (memory_clock >= 80000) {
1672 mc_para_index = 0x0f;
1673 } else {
1674 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
1675 }
1676
1677 return mc_para_index;
1678}
1679
1680static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1681 uint32_t sclk, uint32_t *p_shed)
1682{
1683 unsigned int i;
1684
1685 /* use the minimum phase shedding */
1686 *p_shed = 1;
1687
1688 /*
1689 * PPGen ensures the phase shedding limits table is sorted
1690 * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
1691 * VBIOS ensures the phase shedding masks table is sorted from
1692 * least phases enabled (phase shedding on) to most phases
1693 * enabled (phase shedding off).
1694 */
1695 for (i = 0; i < pl->count; i++) {
1696 if (sclk < pl->entries[i].Sclk) {
1697 /* Enable phase shedding */
1698 *p_shed = i;
1699 break;
1700 }
1701 }
1702
1703 return 0;
1704}
1705
1706static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1707 uint32_t memory_clock, uint32_t *p_shed)
1708{
1709 unsigned int i;
1710
1711 /* use the minimum phase shedding */
1712 *p_shed = 1;
1713
1714 /*
1715 * PPGen ensures the phase shedding limits table is sorted
1716 * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
1717 * VBIOS ensures the phase shedding masks table is sorted from
1718 * least phases enabled (phase shedding on) to most phases
1719 * enabled (phase shedding off).
1720 */
1721 for (i = 0; i < pl->count; i++) {
1722 if (memory_clock < pl->entries[i].Mclk) {
1723 /* Enable phase shedding */
1724 *p_shed = i;
1725 break;
1726 }
1727 }
1728
1729 return 0;
1730}
1731
1732/**
1733 * Populates the SMC MCLK structure using the provided memory clock
1734 *
1735 * @param hwmgr the address of the hardware manager
1736 * @param memory_clock the memory clock to use to populate the structure
1737 * @param sclk the SMC SCLK structure to be populated
1738 */
1739static int iceland_calculate_mclk_params(
1740 struct pp_hwmgr *hwmgr,
1741 uint32_t memory_clock,
1742 SMU71_Discrete_MemoryLevel *mclk,
1743 bool strobe_mode,
1744 bool dllStateOn
1745 )
1746{
1747 const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1748 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1749 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1750 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1751 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1752 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1753 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1754 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1755 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1756 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1757
1758 pp_atomctrl_memory_clock_param mpll_param;
1759 int result;
1760
1761 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1762 memory_clock, &mpll_param, strobe_mode);
1763 PP_ASSERT_WITH_CODE(0 == result,
1764 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1765
1766 /* MPLL_FUNC_CNTL setup*/
1767 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1768
1769 /* MPLL_FUNC_CNTL_1 setup*/
1770 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1771 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1772 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1773 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1774 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1775 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1776
1777 /* MPLL_AD_FUNC_CNTL setup*/
1778 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1779 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1780
1781 if (data->is_memory_GDDR5) {
1782 /* MPLL_DQ_FUNC_CNTL setup*/
1783 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1784 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1785 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1786 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1787 }
1788
1789 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1790 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1791 /*
1792 ************************************
1793 Fref = Reference Frequency
1794 NF = Feedback divider ratio
1795 NR = Reference divider ratio
1796 Fnom = Nominal VCO output frequency = Fref * NF / NR
1797 Fs = Spreading Rate
1798 D = Percentage down-spread / 2
1799 Fint = Reference input frequency to PFD = Fref / NR
1800 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
1801 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
1802 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
1803 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
1804 *************************************
1805 */
1806 pp_atomctrl_internal_ss_info ss_info;
1807 uint32_t freq_nom;
1808 uint32_t tmp;
1809 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1810
1811 /* for GDDR5 for all modes and DDR3 */
1812 if (1 == mpll_param.qdr)
1813 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1814 else
1815 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
1816
1817 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1818 tmp = (freq_nom / reference_clock);
1819 tmp = tmp * tmp;
1820
1821 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1822 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
1823 /* ss.Info.speed_spectrum_rate -- in unit of khz */
1824 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
1825 /* = reference_clock * 5 / speed_spectrum_rate */
1826 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1827
1828 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
1829 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
1830 uint32_t clkv =
1831 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1832 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1833
1834 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1835 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1836 }
1837 }
1838
1839 /* MCLK_PWRMGT_CNTL setup */
1840 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1841 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1842 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1843 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1844 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1845 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1846
1847
1848 /* Save the result data to outpupt memory level structure */
1849 mclk->MclkFrequency = memory_clock;
1850 mclk->MpllFuncCntl = mpll_func_cntl;
1851 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1852 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1853 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1854 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1855 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1856 mclk->DllCntl = dll_cntl;
1857 mclk->MpllSs1 = mpll_ss1;
1858 mclk->MpllSs2 = mpll_ss2;
1859
1860 return 0;
1861}
1862
/**
 * Populate a single SMC memory (MCLK) DPM level for the given memory clock.
 *
 * Looks up the minimum VDDC/VDDCI/MVDD voltages from the dependency tables,
 * fills in activity/hysteresis defaults, decides strobe mode, EDC read/write
 * enables and the DLL on/off state from the MC sequencer registers, then
 * computes the MPLL register values via iceland_calculate_mclk_params().
 * On success all multi-byte fields are converted to SMC (big-endian) order.
 *
 * @param hwmgr        the address of the hardware manager
 * @param memory_clock target memory clock, in 10KHz units
 * @param memory_level the SMC memory level structure to populate
 * @return 0 on success, otherwise the first failing sub-call's result
 */
static int iceland_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU71_Discrete_MemoryLevel *memory_level
		)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dllStateOn;
	struct cgs_display_info info = {0};


	if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	/* Without dedicated VDDCI control, VDDCI simply tracks VDDC. */
	if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) {
		memory_level->MinVddci = memory_level->MinVddc;
	} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
			hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* MCLK stutter mode is intentionally left disabled (StutterEnable = 0 above). */
	//if ((data->mclk_stutter_mode_threshold != 0) &&
	//    (memory_clock <= data->mclk_stutter_mode_threshold) &&
	//    (data->is_uvd_enabled == 0)
	//    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	//    && (data->display_timing.num_existing_displays <= 2)
	//    && (data->display_timing.num_existing_displays != 0))
	//	memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
			(memory_clock <= data->mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_GDDR5) {
		memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((data->mclk_edc_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((data->mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		/*
		 * DLL state comes from the MC sequencer fuse registers:
		 * MISC7[19:16] holds the strobe-ratio cutoff, MISC5/MISC6
		 * bit 1 holds the DLL-on default for the two ranges.
		 */
		if (memory_level->StrobeEnable) {
			if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dllStateOn = data->dll_defaule_on;
		}
	} else {
		memory_level->StrobeRatio =
			iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
		dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = iceland_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);

	if (0 == result) {
		/* Convert the multi-byte fields to SMC (big-endian) byte order. */
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
1996
1997/**
1998 * Populates the SMC MVDD structure using the provided memory clock.
1999 *
2000 * @param hwmgr the address of the hardware manager
2001 * @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
2002 * @param voltage the SMC VOLTAGE structure to be populated
2003 */
2004int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage)
2005{
2006 const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2007 uint32_t i = 0;
2008
2009 if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
2010 /* find mvdd value which clock is more than request */
2011 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
2012 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
2013 /* Always round to higher voltage. */
2014 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
2015 break;
2016 }
2017 }
2018
2019 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
2020 "MVDD Voltage is outside the supported range.", return -1);
2021
2022 } else {
2023 return -1;
2024 }
2025
2026 return 0;
2027}
2028
2029
/**
 * Populate the SMC ACPI (lowest power) level of the DPM table.
 *
 * Derives the ACPI SCLK/MCLK register settings from the boot-time clock
 * registers saved in the backend: the SPLL is powered off and held in
 * reset, both memory-channel DLLs are reset and disabled, and the DC flag
 * is cleared so this state never participates in DPM on DC power.
 *
 * @param hwmgr the address of the hardware manager
 * @param table the SMC DPM table whose ACPILevel/MemoryACPILevel to fill
 * @return 0 on success, a VBIOS divider lookup error otherwise
 */
static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU71_Discrete_DpmTable *table)
{
	int result = 0;
	const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	SMU71_Discrete_VoltageLevel voltage_level;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;

	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Fall back to the pptable minimum when no explicit ACPI VDDC is set. */
	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 0 : 1;

	/* Run the ACPI level at the reference clock. */
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power off the SPLL and hold it in reset while in ACPI state. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
		CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* The memory ACPI level shares the (already SMC-ordered) VDDC values. */
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	/* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/

	if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
2161
2162static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
2163{
2164 int result = 0;
2165 uint32_t i;
2166
2167 for (i = 0; i < table->count; i++) {
2168 if (value == table->dpm_levels[i].value) {
2169 *boot_level = i;
2170 result = 0;
2171 }
2172 }
2173 return result;
2174}
2175
2176/**
2177 * Calculates the SCLK dividers using the provided engine clock
2178 *
2179 * @param hwmgr the address of the hardware manager
2180 * @param engine_clock the engine clock to use to populate the structure
2181 * @param sclk the SMC SCLK structure to be populated
2182 */
2183int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
2184 uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
2185{
2186 const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2187 pp_atomctrl_clock_dividers_vi dividers;
2188 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
2189 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
2190 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
2191 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
2192 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
2193 uint32_t reference_clock;
2194 uint32_t reference_divider;
2195 uint32_t fbdiv;
2196 int result;
2197
2198 /* get the engine clock dividers for this clock value*/
2199 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
2200
2201 PP_ASSERT_WITH_CODE(result == 0,
2202 "Error retrieving Engine Clock dividers from VBIOS.", return result);
2203
2204 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
2205 reference_clock = atomctrl_get_reference_clock(hwmgr);
2206
2207 reference_divider = 1 + dividers.uc_pll_ref_div;
2208
2209 /* low 14 bits is fraction and high 12 bits is divider*/
2210 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
2211
2212 /* SPLL_FUNC_CNTL setup*/
2213 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2214 CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
2215 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2216 CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
2217
2218 /* SPLL_FUNC_CNTL_3 setup*/
2219 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
2220 CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
2221
2222 /* set to use fractional accumulation*/
2223 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
2224 CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
2225
2226 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2227 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
2228 pp_atomctrl_internal_ss_info ss_info;
2229
2230 uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
2231 if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
2232 /*
2233 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
2234 * ss_info.speed_spectrum_rate -- in unit of khz
2235 */
2236 /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
2237 uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
2238
2239 /* clkv = 2 * D * fbdiv / NS */
2240 uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
2241
2242 cg_spll_spread_spectrum =
2243 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
2244 cg_spll_spread_spectrum =
2245 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
2246 cg_spll_spread_spectrum_2 =
2247 PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
2248 }
2249 }
2250
2251 sclk->SclkFrequency = engine_clock;
2252 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2253 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2254 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2255 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2256 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
2257
2258 return 0;
2259}
2260
2261static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr,
2262 uint32_t engine_clock, uint32_t min_engine_clock_in_sr)
2263{
2264 uint32_t i, temp;
2265 uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ?
2266 min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK;
2267
2268 PP_ASSERT_WITH_CODE((engine_clock >= min),
2269 "Engine clock can't satisfy stutter requirement!", return 0);
2270
2271 for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
2272 temp = engine_clock / (1 << i);
2273
2274 if(temp >= min || i == 0)
2275 break;
2276 }
2277 return (uint8_t)i;
2278}
2279
2280/**
2281 * Populates single SMC SCLK structure using the provided engine clock
2282 *
2283 * @param hwmgr the address of the hardware manager
2284 * @param engine_clock the engine clock to use to populate the structure
2285 * @param sclk the SMC SCLK structure to be populated
2286 */
2287static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
2288 uint32_t engine_clock, uint16_t sclk_activity_level_threshold,
2289 SMU71_Discrete_GraphicsLevel *graphic_level)
2290{
2291 int result;
2292 uint32_t threshold;
2293 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2294
2295 result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
2296
2297
2298 /* populate graphics levels*/
2299 result = iceland_get_dependecy_volt_by_clk(hwmgr,
2300 hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc);
2301 PP_ASSERT_WITH_CODE((0 == result),
2302 "can not find VDDC voltage value for VDDC engine clock dependency table", return result);
2303
2304 /* SCLK frequency in units of 10KHz*/
2305 graphic_level->SclkFrequency = engine_clock;
2306
2307 /*
2308 * Minimum VDDC phases required to support this level, it
2309 * should get from dependence table.
2310 */
2311 graphic_level->MinVddcPhases = 1;
2312
2313 if (data->vddc_phase_shed_control) {
2314 iceland_populate_phase_value_based_on_sclk(hwmgr,
2315 hwmgr->dyn_state.vddc_phase_shed_limits_table,
2316 engine_clock,
2317 &graphic_level->MinVddcPhases);
2318 }
2319
2320 /* Indicates maximum activity level for this performance level. 50% for now*/
2321 graphic_level->ActivityLevel = sclk_activity_level_threshold;
2322
2323 graphic_level->CcPwrDynRm = 0;
2324 graphic_level->CcPwrDynRm1 = 0;
2325 /* this level can be used if activity is high enough.*/
2326 graphic_level->EnabledForActivity = 1;
2327 /* this level can be used for throttling.*/
2328 graphic_level->EnabledForThrottle = 1;
2329 graphic_level->UpHyst = 0;
2330 graphic_level->DownHyst = 100;
2331 graphic_level->VoltageDownHyst = 0;
2332 graphic_level->PowerThrottle = 0;
2333
2334 threshold = engine_clock * data->fast_watermark_threshold / 100;
2335
2336 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2337 PHM_PlatformCaps_SclkDeepSleep)) {
2338 graphic_level->DeepSleepDivId =
2339 iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock,
2340 data->display_timing.min_clock_insr);
2341 }
2342
2343 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
2344 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2345
2346 if (0 == result) {
2347 graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
2348 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
2349 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
2350 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
2351 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
2352 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
2353 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
2354 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
2355 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
2356 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
2357 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
2358 }
2359
2360 return result;
2361}
2362
2363/**
2364 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
2365 *
2366 * @param hwmgr the address of the hardware manager
2367 */
2368static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
2369{
2370 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2371 struct iceland_dpm_table *dpm_table = &data->dpm_table;
2372 int result = 0;
2373 uint32_t level_array_adress = data->dpm_table_start +
2374 offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
2375
2376 uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS;
2377 SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
2378 uint32_t i;
2379 uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
2380 memset(levels, 0x00, level_array_size);
2381
2382 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2383 result = iceland_populate_single_graphic_level(hwmgr,
2384 dpm_table->sclk_table.dpm_levels[i].value,
2385 (uint16_t)data->activity_target[i],
2386 &(data->smc_state_table.GraphicsLevel[i]));
2387 if (0 != result)
2388 return result;
2389
2390 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
2391 if (i > 1)
2392 data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
2393 }
2394
2395 /* set highest level watermark to high */
2396 if (dpm_table->sclk_table.count > 1)
2397 data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
2398 PPSMC_DISPLAY_WATERMARK_HIGH;
2399
2400 data->smc_state_table.GraphicsDpmLevelCount =
2401 (uint8_t)dpm_table->sclk_table.count;
2402 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
2403 iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2404
2405 while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2406 (1 << (highest_pcie_level_enabled + 1))) != 0) {
2407 highest_pcie_level_enabled++;
2408 }
2409
2410 while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2411 (1 << lowest_pcie_level_enabled)) == 0) {
2412 lowest_pcie_level_enabled++;
2413 }
2414
2415 while ((count < highest_pcie_level_enabled) &&
2416 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2417 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
2418 count++;
2419 }
2420
2421 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
2422 (lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled;
2423
2424 /* set pcieDpmLevel to highest_pcie_level_enabled*/
2425 for (i = 2; i < dpm_table->sclk_table.count; i++) {
2426 data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
2427 }
2428
2429 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
2430 data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
2431
2432 /* set pcieDpmLevel to mid_pcie_level_enabled*/
2433 data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
2434
2435 /* level count will send to smc once at init smc table and never change*/
2436 result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2437
2438 if (0 != result)
2439 return result;
2440
2441 return 0;
2442}
2443
2444/**
2445 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
2446 *
2447 * @param hwmgr the address of the hardware manager
2448 */
2449
2450static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
2451{
2452 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2453 struct iceland_dpm_table *dpm_table = &data->dpm_table;
2454 int result;
2455 /* populate MCLK dpm table to SMU7 */
2456 uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
2457 uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
2458 SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
2459 uint32_t i;
2460
2461 memset(levels, 0x00, level_array_size);
2462
2463 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2464 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
2465 "can not populate memory level as memory clock is zero", return -1);
2466 result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
2467 &(data->smc_state_table.MemoryLevel[i]));
2468 if (0 != result) {
2469 return result;
2470 }
2471 }
2472
2473 /* Only enable level 0 for now.*/
2474 data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
2475
2476 /*
2477 * in order to prevent MC activity from stutter mode to push DPM up.
2478 * the UVD change complements this by putting the MCLK in a higher state
2479 * by default such that we are not effected by up threshold or and MCLK DPM latency.
2480 */
2481 data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
2482 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
2483
2484 data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
2485 data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2486 /* set highest level watermark to high*/
2487 data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
2488
2489 /* level count will send to smc once at init smc table and never change*/
2490 result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
2491 level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2492
2493 if (0 != result) {
2494 return result;
2495 }
2496
2497 return 0;
2498}
2499
/* Maps a memory data-rate range [Min, Max] to the DLL_SPEED value to program. */
struct ICELAND_DLL_SPEED_SETTING
{
	uint16_t        Min;            /* Minimum Data Rate*/
	uint16_t        Max;            /* Maximum Data Rate*/
	uint32_t 	dll_speed;      /* The desired DLL_SPEED setting*/
};
2506
/**
 * Populate the SMC ULV (ultra-low-voltage) level structure.
 *
 * Reads the ULV voltage from the powerplay tables and programs a VDDC
 * offset below the lowest SCLK dependency voltage: as a raw voltage offset
 * (SMIO / non-SVI2 control) or as a VID offset (SVI2 control).  Disables
 * ULV support in the backend when the tables provide no ULV voltage.
 *
 * @param hwmgr  the address of the hardware manager
 * @param pstate the SMC ULV structure to populate
 * @return 0 on success (including the ULV-unsupported case), error otherwise
 */
static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate)
{
	int result = 0;
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	uint32_t voltage_response_time, ulv_voltage;

	pstate->CcPwrDynRm = 0;
	pstate->CcPwrDynRm1 = 0;

	/* backbiasResponseTime is used for the ULV state voltage value. */
	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	/* No ULV voltage in the tables: ULV cannot be supported. */
	if(!ulv_voltage) {
		data->ulv.ulv_supported = false;
		return 0;
	}

	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
			pstate->VddcOffset = 0;
		}
		else {
			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
			pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
		}
	} else {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if(ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
			pstate->VddcOffsetVid = 0;
		} else {
			/* used in SVI2 Mode */
			pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
		}
	}

	/* used in SVI2 Mode to shed phase */
	pstate->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;

	/* VddcOffsetVid is a single byte, so only the multi-byte fields are swapped. */
	if (0 == result) {
		CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm);
		CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1);
		CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset);
	}

	return result;
}
2555
2556static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv)
2557{
2558 return iceland_populate_ulv_level(hwmgr, ulv);
2559}
2560
2561static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
2562{
2563 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2564 uint8_t count, level;
2565
2566 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
2567
2568 for (level = 0; level < count; level++) {
2569 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
2570 >= data->vbios_boot_state.sclk_bootup_value) {
2571 data->smc_state_table.GraphicsBootLevel = level;
2572 break;
2573 }
2574 }
2575
2576 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
2577
2578 for (level = 0; level < count; level++) {
2579 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
2580 >= data->vbios_boot_state.mclk_bootup_value) {
2581 data->smc_state_table.MemoryBootLevel = level;
2582 break;
2583 }
2584 }
2585
2586 return 0;
2587}
2588
2589/**
2590 * Initializes the SMC table and uploads it
2591 *
2592 * @param hwmgr the address of the powerplay hardware manager.
2593 * @param pInput the pointer to input data (PowerState)
2594 * @return always 0
2595 */
static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	SMU71_Discrete_DpmTable *table = &(data->smc_state_table);
	const struct phw_iceland_ulv_parm *ulv = &(data->ulv);

	result = iceland_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to setup default DPM tables!", return result;);
	/* Start from a zeroed table; every field used below is set explicitly. */
	memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));

	if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) {
		iceland_populate_smc_voltage_tables(hwmgr, table);
	}

	/* Global flags that tell the SMC firmware about board capabilities. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition)) {
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc)) {
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
	}

	if (data->is_memory_GDDR5) {
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
	}

	if (ulv->ulv_supported) {
		result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting);
		PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ULV state!", return result;);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
	}

	/* Populate every per-level sub-table the SMC consumes.  The order
	 * matters: the ARB settings below rely on the levels populated here. */
	result = iceland_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Link Level!", return result;);

	result = iceland_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Graphics Level!", return result;);

	result = iceland_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Memory Level!", return result;);

	result = iceland_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACPI Level!", return result;);

	result = iceland_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize VCE Level!", return result;);

	result = iceland_populate_smc_acp_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACP Level!", return result;);

	result = iceland_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize SAMU Level!", return result;);

	/*
	 * Since only the initial state is completely set up at this
	 * point (the other states are just copies of the boot state)
	 * we only need to populate the ARB settings for the initial
	 * state.
	 */
	result = iceland_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to Write ARB settings for the initial state.", return result;);

	result = iceland_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize UVD Level!", return result;);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	result = iceland_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(data->smc_state_table.GraphicsBootLevel));

	/* a missing boot level is only a warning: level 0 is used instead */
	if (result)
		pr_warning("VBIOS did not find boot engine clock value in dependency table.\n");

	result = iceland_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(data->smc_state_table.MemoryBootLevel));

	if (result)
		pr_warning("VBIOS did not find boot memory clock value in dependency table.\n");

	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
	/* without separate VDDCI control, VDDCI follows VDDC */
	if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) {
		table->BootVddci = table->BootVddc;
	}
	else {
		table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
	}
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;

	result = iceland_populate_smc_initial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);

	result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);

	/* Interval fields below are in SMC firmware ticks; the 1s enable
	 * the corresponding periodic tasks in the firmware. */
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	/* temperature limits are converted to the SMC's Q8.8 fixed-point format */
	table->TemperatureLimitHigh =
		(data->thermal_temp_setting.temperature_high *
		ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	table->TemperatureLimitLow =
		(data->thermal_temp_setting.temperature_low *
		ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;
	table->PCIeGenInterval = 1;

	result = iceland_populate_smc_svi2_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to populate SVI2 setting!", return result);

	/* NOTE(review): magic values below (GPIO 17, step size 0x4000) match
	 * the SMU71 firmware contract — not derivable from this file. */
	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* byte-swap all multi-byte fields from host to SMC endianness */
	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
										offsetof(SMU71_Discrete_DpmTable, SystemFlags),
										(uint8_t *)&(table->SystemFlags),
										sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController),
										data->sram_end);

	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to upload dpm data to SMC memory!", return result);

	/* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
			data->ulv_settings_start,
			(uint8_t *)&(data->ulv_setting),
			sizeof(SMU71_Discrete_Ulv),
			data->sram_end);

#if 0
	/* Notify SMC to follow new GPIO scheme */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition)) {
		if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme))
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}
#endif

	return result;
}
2780
2781int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table)
2782{
2783 const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2784
2785 uint32_t i, j;
2786
2787 for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) {
2788 if (data->iceland_mc_reg_table.validflag & 1<<j) {
2789 PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
2790 "Index of mc_reg_table->address[] array out of boundary", return -1);
2791 mc_reg_table->address[i].s0 =
2792 PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0);
2793 mc_reg_table->address[i].s1 =
2794 PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1);
2795 i++;
2796 }
2797 }
2798
2799 mc_reg_table->last = (uint8_t)i;
2800
2801 return 0;
2802}
2803
2804/* convert register values from driver to SMC format */
2805void iceland_convert_mc_registers(
2806 const phw_iceland_mc_reg_entry * pEntry,
2807 SMU71_Discrete_MCRegisterSet *pData,
2808 uint32_t numEntries, uint32_t validflag)
2809{
2810 uint32_t i, j;
2811
2812 for (i = 0, j = 0; j < numEntries; j++) {
2813 if (validflag & 1<<j) {
2814 pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
2815 i++;
2816 }
2817 }
2818}
2819
2820/* find the entry in the memory range table, then populate the value to SMC's iceland_mc_reg_table */
2821int iceland_convert_mc_reg_table_entry_to_smc(
2822 struct pp_hwmgr *hwmgr,
2823 const uint32_t memory_clock,
2824 SMU71_Discrete_MCRegisterSet *mc_reg_table_data
2825 )
2826{
2827 const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2828 uint32_t i = 0;
2829
2830 for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) {
2831 if (memory_clock <=
2832 data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
2833 break;
2834 }
2835 }
2836
2837 if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0))
2838 --i;
2839
2840 iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i],
2841 mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag);
2842
2843 return 0;
2844}
2845
2846int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
2847 SMU71_Discrete_MCRegisters *mc_reg_table)
2848{
2849 int result = 0;
2850 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2851 int res;
2852 uint32_t i;
2853
2854 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
2855 res = iceland_convert_mc_reg_table_entry_to_smc(
2856 hwmgr,
2857 data->dpm_table.mclk_table.dpm_levels[i].value,
2858 &mc_reg_table->data[i]
2859 );
2860
2861 if (0 != res)
2862 result = res;
2863 }
2864
2865 return result;
2866}
2867
2868int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
2869{
2870 int result;
2871 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2872
2873 memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters));
2874 result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
2875 PP_ASSERT_WITH_CODE(0 == result,
2876 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
2877
2878 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
2879 PP_ASSERT_WITH_CODE(0 == result,
2880 "Failed to initialize MCRegTable for driver state!", return result;);
2881
2882 return smu7_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
2883 (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end);
2884}
2885
2886int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
2887{
2888 PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
2889
2890 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
2891}
2892
/* Re-enable engine clock power management by clearing the
 * SCLK_PWRMGT_OFF bit in SCLK_PWRMGT_CNTL.  Always returns 0. */
int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);

	return 0;
}
2899
/*
 * Enable SCLK DPM and MCLK DPM in the SMC firmware (each may be
 * individually disabled via the *_dpm_key_disabled knobs).  Enabling
 * MCLK DPM additionally arms the MC CAC (leakage measurement) blocks.
 * Returns 0 on success, -1 if an SMC enable message fails.
 */
int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (0 == data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						   PPSMC_MSG_DPM_Enable)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -1);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					     PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -1);

		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		/* NOTE(review): the LCAC control values and the read-then-write
		 * sequence with a 10us settle delay follow the hardware CAC
		 * programming procedure; values are not derivable from this file. */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x100005);/*Read */

		udelay(10);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x500005);/* write */

	}

	return 0;
}
2943
/*
 * Start dynamic power management: turn on global power management and
 * sclk deep sleep, enable voltage control, then SCLK/MCLK DPM and
 * (optionally) PCIe DPM in the SMC.  Returns 0 on success, -1 if any
 * mandatory SMC message fails.  The ordering of the register writes
 * and messages below is required by the bring-up sequence.
 */
int iceland_start_dpm(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

	/* enable general power management */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
	/* enable sclk deep sleep */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000);

	/* release the PCIe link controller from reset */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -1);

	if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) {
		PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -1
				);
	}

	/* let the SMC raise an interrupt on AC<->DC transitions */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt);
	}

	return 0;
}
2986
2987static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
2988 uint32_t sources)
2989{
2990 bool protection;
2991 enum DPM_EVENT_SRC src;
2992
2993 switch (sources) {
2994 default:
2995 printk(KERN_ERR "Unknown throttling event sources.");
2996 /* fall through */
2997 case 0:
2998 protection = false;
2999 /* src is unused */
3000 break;
3001 case (1 << PHM_AutoThrottleSource_Thermal):
3002 protection = true;
3003 src = DPM_EVENT_SRC_DIGITAL;
3004 break;
3005 case (1 << PHM_AutoThrottleSource_External):
3006 protection = true;
3007 src = DPM_EVENT_SRC_EXTERNAL;
3008 break;
3009 case (1 << PHM_AutoThrottleSource_External) |
3010 (1 << PHM_AutoThrottleSource_Thermal):
3011 protection = true;
3012 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
3013 break;
3014 }
3015 /* Order matters - don't enable thermal protection for the wrong source. */
3016 if (protection) {
3017 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
3018 DPM_EVENT_SRC, src);
3019 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3020 THERMAL_PROTECTION_DIS,
3021 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3022 PHM_PlatformCaps_ThermalController));
3023 } else
3024 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3025 THERMAL_PROTECTION_DIS, 1);
3026}
3027
3028static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3029 PHM_AutoThrottleSource source)
3030{
3031 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3032
3033 if (!(data->active_auto_throttle_sources & (1 << source))) {
3034 data->active_auto_throttle_sources |= 1 << source;
3035 iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3036 }
3037 return 0;
3038}
3039
3040static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
3041{
3042 return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
3043}
3044
3045
3046/**
3047* Programs the Deep Sleep registers
3048*
3049* @param pHwMgr the address of the powerplay hardware manager.
3050* @param pInput the pointer to input data (PhwEvergreen_DisplayConfiguration)
3051* @param pOutput the pointer to output data (unused)
3052* @param pStorage the pointer to temporary storage (unused)
3053* @param Result the last failure code (unused)
3054* @return always 0
3055*/
3056static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3057{
3058 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3059 PHM_PlatformCaps_SclkDeepSleep)) {
3060 if (smum_send_msg_to_smc(hwmgr->smumgr,
3061 PPSMC_MSG_MASTER_DeepSleep_ON) != 0)
3062 PP_ASSERT_WITH_CODE(false,
3063 "Attempt to enable Master Deep Sleep switch failed!",
3064 return -EINVAL);
3065 } else {
3066 if (smum_send_msg_to_smc(hwmgr->smumgr,
3067 PPSMC_MSG_MASTER_DeepSleep_OFF) != 0)
3068 PP_ASSERT_WITH_CODE(false,
3069 "Attempt to disable Master Deep Sleep switch failed!",
3070 return -EINVAL);
3071 }
3072
3073 return 0;
3074}
3075
3076static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
3077{
3078 int tmp_result, result = 0;
3079
3080 if (cf_iceland_voltage_control(hwmgr)) {
3081 tmp_result = iceland_enable_voltage_control(hwmgr);
3082 PP_ASSERT_WITH_CODE((0 == tmp_result),
3083 "Failed to enable voltage control!", return tmp_result);
3084
3085 tmp_result = iceland_construct_voltage_tables(hwmgr);
3086 PP_ASSERT_WITH_CODE((0 == tmp_result),
3087 "Failed to contruct voltage tables!", return tmp_result);
3088 }
3089
3090 tmp_result = iceland_initialize_mc_reg_table(hwmgr);
3091 PP_ASSERT_WITH_CODE((0 == tmp_result),
3092 "Failed to initialize MC reg table!", return tmp_result);
3093
3094 tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr);
3095 PP_ASSERT_WITH_CODE((0 == tmp_result),
3096 "Failed to program static screen threshold parameters!", return tmp_result);
3097
3098 tmp_result = iceland_enable_display_gap(hwmgr);
3099 PP_ASSERT_WITH_CODE((0 == tmp_result),
3100 "Failed to enable display gap!", return tmp_result);
3101
3102 tmp_result = iceland_program_voting_clients(hwmgr);
3103 PP_ASSERT_WITH_CODE((0 == tmp_result),
3104 "Failed to program voting clients!", return tmp_result);
3105
3106 tmp_result = iceland_upload_firmware(hwmgr);
3107 PP_ASSERT_WITH_CODE((0 == tmp_result),
3108 "Failed to upload firmware header!", return tmp_result);
3109
3110 tmp_result = iceland_process_firmware_header(hwmgr);
3111 PP_ASSERT_WITH_CODE((0 == tmp_result),
3112 "Failed to process firmware header!", return tmp_result);
3113
3114 tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr);
3115 PP_ASSERT_WITH_CODE((0 == tmp_result),
3116 "Failed to initialize switch from ArbF0 to F1!", return tmp_result);
3117
3118 tmp_result = iceland_init_smc_table(hwmgr);
3119 PP_ASSERT_WITH_CODE((0 == tmp_result),
3120 "Failed to initialize SMC table!", return tmp_result);
3121
3122 tmp_result = iceland_populate_initial_mc_reg_table(hwmgr);
3123 PP_ASSERT_WITH_CODE((0 == tmp_result),
3124 "Failed to populate initialize MC Reg table!", return tmp_result);
3125
3126 tmp_result = iceland_populate_pm_fuses(hwmgr);
3127 PP_ASSERT_WITH_CODE((0 == tmp_result),
3128 "Failed to populate PM fuses!", return tmp_result);
3129
3130
3131 /* enable SCLK control */
3132 tmp_result = iceland_enable_sclk_control(hwmgr);
3133 PP_ASSERT_WITH_CODE((0 == tmp_result),
3134 "Failed to enable SCLK control!", return tmp_result);
3135
3136 tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr);
3137 PP_ASSERT_WITH_CODE((tmp_result == 0),
3138 "Failed to enable deep sleep!", return tmp_result);
3139
3140 /* enable DPM */
3141 tmp_result = iceland_start_dpm(hwmgr);
3142 PP_ASSERT_WITH_CODE((0 == tmp_result),
3143 "Failed to start DPM!", return tmp_result);
3144
3145 tmp_result = iceland_enable_smc_cac(hwmgr);
3146 PP_ASSERT_WITH_CODE((0 == tmp_result),
3147 "Failed to enable SMC CAC!", return tmp_result);
3148
3149 tmp_result = iceland_enable_power_containment(hwmgr);
3150 PP_ASSERT_WITH_CODE((0 == tmp_result),
3151 "Failed to enable power containment!", return tmp_result);
3152
3153 tmp_result = iceland_power_control_set_level(hwmgr);
3154 PP_ASSERT_WITH_CODE((0 == tmp_result),
3155 "Failed to power control set level!", result = tmp_result);
3156
3157 tmp_result = iceland_enable_thermal_auto_throttle(hwmgr);
3158 PP_ASSERT_WITH_CODE((0 == tmp_result),
3159 "Failed to enable thermal auto throttle!", result = tmp_result);
3160
3161 return result;
3162}
3163
/* Tear down the hwmgr backend; all cleanup is delegated to the
 * generic phm backend teardown. */
static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
3168
3169static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
3170{
3171 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3172 struct phw_iceland_ulv_parm *ulv;
3173
3174 ulv = &data->ulv;
3175 ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT;
3176 data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0;
3177 data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1;
3178 data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2;
3179 data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3;
3180 data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4;
3181 data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5;
3182 data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6;
3183 data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7;
3184
3185 data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT;
3186 data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT;
3187
3188 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3189 PHM_PlatformCaps_ABM);
3190 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3191 PHM_PlatformCaps_NonABMSupportInPPLib);
3192
3193 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3194 PHM_PlatformCaps_DynamicACTiming);
3195
3196 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3197 PHM_PlatformCaps_DisableMemoryTransition);
3198
3199 iceland_initialize_power_tune_defaults(hwmgr);
3200
3201 data->mclk_strobe_mode_threshold = 40000;
3202 data->mclk_stutter_mode_threshold = 30000;
3203 data->mclk_edc_enable_threshold = 40000;
3204 data->mclk_edc_wr_enable_threshold = 40000;
3205
3206 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3207 PHM_PlatformCaps_DisableMCLS);
3208
3209 data->pcie_gen_performance.max = PP_PCIEGen1;
3210 data->pcie_gen_performance.min = PP_PCIEGen3;
3211 data->pcie_gen_power_saving.max = PP_PCIEGen1;
3212 data->pcie_gen_power_saving.min = PP_PCIEGen3;
3213
3214 data->pcie_lane_performance.max = 0;
3215 data->pcie_lane_performance.min = 16;
3216 data->pcie_lane_power_saving.max = 0;
3217 data->pcie_lane_power_saving.min = 16;
3218
3219 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3220 PHM_PlatformCaps_SclkThrottleLowNotification);
3221}
3222
3223static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr)
3224{
3225 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
3226 uint16_t virtual_voltage_id;
3227 uint16_t vddc = 0;
3228 uint16_t i;
3229
3230 /* the count indicates actual number of entries */
3231 data->vddc_leakage.count = 0;
3232 data->vddci_leakage.count = 0;
3233
3234 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
3235 pr_err("Iceland should always support EVV\n");
3236 return -EINVAL;
3237 }
3238
3239 /* retrieve voltage for leakage ID (0xff01 + i) */
3240 for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) {
3241 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
3242
3243 PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)),
3244 "Error retrieving EVV voltage value!\n", continue);
3245
3246 if (vddc >= 2000)
3247 pr_warning("Invalid VDDC value!\n");
3248
3249 if (vddc != 0 && vddc != virtual_voltage_id) {
3250 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
3251 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
3252 data->vddc_leakage.count++;
3253 }
3254 }
3255
3256 return 0;
3257}
3258
3259static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr,
3260 uint32_t *vddc)
3261{
3262 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3263 uint32_t leakage_index;
3264 struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage;
3265
3266 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
3267 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
3268 /*
3269 * If this voltage matches a leakage voltage ID, patch
3270 * with actual leakage voltage.
3271 */
3272 if (leakage_table->leakage_id[leakage_index] == *vddc) {
3273 /*
3274 * Need to make sure vddc is less than 2v or
3275 * else, it could burn the ASIC.
3276 */
3277 if (leakage_table->actual_voltage[leakage_index] >= 2000)
3278 pr_warning("Invalid VDDC value!\n");
3279 *vddc = leakage_table->actual_voltage[leakage_index];
3280 /* we found leakage voltage */
3281 break;
3282 }
3283 }
3284
3285 if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0)
3286 pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
3287}
3288
3289static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr,
3290 uint32_t *vddci)
3291{
3292 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3293 uint32_t leakage_index;
3294 struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage;
3295
3296 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
3297 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
3298 /*
3299 * If this voltage matches a leakage voltage ID, patch
3300 * with actual leakage voltage.
3301 */
3302 if (leakage_table->leakage_id[leakage_index] == *vddci) {
3303 *vddci = leakage_table->actual_voltage[leakage_index];
3304 /* we found leakage voltage */
3305 break;
3306 }
3307 }
3308
3309 if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0)
3310 pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
3311}
3312
3313static int iceland_patch_vddc(struct pp_hwmgr *hwmgr,
3314 struct phm_clock_voltage_dependency_table *tab)
3315{
3316 uint16_t i;
3317
3318 if (tab)
3319 for (i = 0; i < tab->count; i++)
3320 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3321
3322 return 0;
3323}
3324
3325static int iceland_patch_vddci(struct pp_hwmgr *hwmgr,
3326 struct phm_clock_voltage_dependency_table *tab)
3327{
3328 uint16_t i;
3329
3330 if (tab)
3331 for (i = 0; i < tab->count; i++)
3332 iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v);
3333
3334 return 0;
3335}
3336
3337static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr,
3338 struct phm_vce_clock_voltage_dependency_table *tab)
3339{
3340 uint16_t i;
3341
3342 if (tab)
3343 for (i = 0; i < tab->count; i++)
3344 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3345
3346 return 0;
3347}
3348
3349
3350static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
3351 struct phm_uvd_clock_voltage_dependency_table *tab)
3352{
3353 uint16_t i;
3354
3355 if (tab)
3356 for (i = 0; i < tab->count; i++)
3357 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3358
3359 return 0;
3360}
3361
3362static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
3363 struct phm_phase_shedding_limits_table *tab)
3364{
3365 uint16_t i;
3366
3367 if (tab)
3368 for (i = 0; i < tab->count; i++)
3369 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage);
3370
3371 return 0;
3372}
3373
3374static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr,
3375 struct phm_samu_clock_voltage_dependency_table *tab)
3376{
3377 uint16_t i;
3378
3379 if (tab)
3380 for (i = 0; i < tab->count; i++)
3381 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3382
3383 return 0;
3384}
3385
3386static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr,
3387 struct phm_acp_clock_voltage_dependency_table *tab)
3388{
3389 uint16_t i;
3390
3391 if (tab)
3392 for (i = 0; i < tab->count; i++)
3393 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3394
3395 return 0;
3396}
3397
3398static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr,
3399 struct phm_clock_and_voltage_limits *tab)
3400{
3401 if (tab) {
3402 iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc);
3403 iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci);
3404 }
3405
3406 return 0;
3407}
3408
3409static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
3410{
3411 uint32_t i;
3412 uint32_t vddc;
3413
3414 if (tab) {
3415 for (i = 0; i < tab->count; i++) {
3416 vddc = (uint32_t)(tab->entries[i].Vddc);
3417 iceland_patch_with_vddc_leakage(hwmgr, &vddc);
3418 tab->entries[i].Vddc = (uint16_t)vddc;
3419 }
3420 }
3421
3422 return 0;
3423}
3424
3425static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
3426{
3427 int tmp;
3428
3429 tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
3430 if(tmp)
3431 return -EINVAL;
3432
3433 tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
3434 if(tmp)
3435 return -EINVAL;
3436
3437 tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
3438 if(tmp)
3439 return -EINVAL;
3440
3441 tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
3442 if(tmp)
3443 return -EINVAL;
3444
3445 tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
3446 if(tmp)
3447 return -EINVAL;
3448
3449 tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
3450 if(tmp)
3451 return -EINVAL;
3452
3453 tmp = iceland_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
3454 if(tmp)
3455 return -EINVAL;
3456
3457 tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
3458 if(tmp)
3459 return -EINVAL;
3460
3461 tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
3462 if(tmp)
3463 return -EINVAL;
3464
3465 tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
3466 if(tmp)
3467 return -EINVAL;
3468
3469 tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
3470 if(tmp)
3471 return -EINVAL;
3472
3473 tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
3474 if(tmp)
3475 return -EINVAL;
3476
3477 return 0;
3478}
3479
3480static int iceland_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
3481{
3482 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
3483
3484 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
3485 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
3486 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
3487
3488 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
3489 "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
3490 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
3491 "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
3492
3493 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
3494 "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
3495 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
3496 "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
3497
3498 data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
3499 data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
3500
3501 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
3502 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
3503 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
3504 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
3505 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
3506 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
3507
3508 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
3509 data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
3510 data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
3511 }
3512
3513 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
3514 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
3515
3516 return 0;
3517}
3518
3519static int iceland_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
3520{
3521 uint32_t table_size;
3522 struct phm_clock_voltage_dependency_table *table_clk_vlt;
3523
3524 hwmgr->dyn_state.mclk_sclk_ratio = 4;
3525 hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
3526 hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
3527
3528 /* initialize vddc_dep_on_dal_pwrl table */
3529 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
3530 table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
3531
3532 if (NULL == table_clk_vlt) {
3533 pr_err("[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
3534 return -ENOMEM;
3535 } else {
3536 table_clk_vlt->count = 4;
3537 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
3538 table_clk_vlt->entries[0].v = 0;
3539 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
3540 table_clk_vlt->entries[1].v = 720;
3541 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
3542 table_clk_vlt->entries[2].v = 810;
3543 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
3544 table_clk_vlt->entries[3].v = 900;
3545 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
3546 }
3547
3548 return 0;
3549}
3550
/**
 * Initializes the Volcanic Islands Hardware Manager
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 if success; otherwise appropriate error code.
 */
static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	SMU71_Discrete_DpmTable *table = NULL;
	/*
	 * NOTE(review): hwmgr is dereferenced here, before the NULL check
	 * below, so the PP_ASSERT_WITH_CODE guard cannot catch a NULL hwmgr.
	 */
	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	bool stay_in_boot;
	struct phw_iceland_ulv_parm *ulv;
	struct cgs_system_info sys_info = {0};

	PP_ASSERT_WITH_CODE((NULL != hwmgr),
		"Invalid Parameter!", return -EINVAL;);

	data->dll_defaule_on = 0;
	data->sram_end = SMC_RAM_END;

	/* Default activity targets for all 8 SCLK DPM levels. */
	data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT;
	data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT;
	data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT;
	data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT;
	data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT;
	data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT;
	data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT;
	data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT;

	data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT;

	/* All three DPM domains (SCLK/MCLK/PCIe) enabled by default. */
	data->sclk_dpm_key_disabled = 0;
	data->mclk_dpm_key_disabled = 0;
	data->pcie_dpm_key_disabled = 0;
	data->pcc_monitor_enabled = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_UnTabledHardwareInterface);

	data->gpio_debug = 0;
	data->engine_clock_data = 0;
	data->memory_clock_data = 0;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_SclkDeepSleepAboveLow);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DynamicPatchPowerState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_TablelessHardwareInterface);

	/* Initializes DPM default values. */
	iceland_initialize_dpm_defaults(hwmgr);

	/* Enable Platform EVV support. */
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EVV);

	/* Get leakage voltage based on leakage ID. */
	result = iceland_get_evv_voltage(hwmgr);
	if (result)
		goto failed;

	/**
	 * Patch our voltage dependency table with actual leakage
	 * voltage. We need to perform leakage translation before it's
	 * used by other functions such as
	 * iceland_set_hwmgr_variables_based_on_pptable.
	 */
	result = iceland_patch_dependency_tables_with_leakage(hwmgr);
	if (result)
		goto failed;

	/* Parse pptable data read from VBIOS. */
	result = iceland_set_private_var_based_on_pptale(hwmgr);
	if (result)
		goto failed;

	/* ULV support */
	ulv = &(data->ulv);
	ulv->ulv_supported = 1;

	/* Initalize Dynamic State Adjustment Rule Settings*/
	result = iceland_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
	if (result) {
		pr_err("[ powerplay ] iceland_initializa_dynamic_state_adjustment_rule_settings failed!\n");
		goto failed;
	}

	/* Voltage control methods are detected below from the VBIOS. */
	data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE;
	data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE;

	/*
	 * Hardcode thermal temperature settings for now, these will
	 * be overwritten if a custom policy exists.
	 */
	data->thermal_temp_setting.temperature_low = 99500;
	data->thermal_temp_setting.temperature_high = 100000;
	data->thermal_temp_setting.temperature_shutdown = 104000;
	data->uvd_enabled = false;

	table = &data->smc_state_table;

	/* VRHot GPIO: enable the RegulatorHot cap only if the pin is assigned. */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
				       &gpio_pin_assignment)) {
		table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_RegulatorHot);
	} else {
		table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_RegulatorHot);
	}

	/* AC/DC switch GPIO: same pattern as above. */
	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
				       &gpio_pin_assignment)) {
		table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_AutomaticDCTransition);
	} else {
		table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_AutomaticDCTransition);
	}

	/*
	 * If ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak.
	 * Current Control feature is enabled and we should program
	 * PCC HW register
	 */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID,
				       &gpio_pin_assignment)) {
		uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
							  CGS_IND_REG__SMC,
							  ixCNB_PWRMGT_CNTL);

		/* Each GPIO bit position maps to a different CNB_PWRMGT_CNTL field. */
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg,
				CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg,
				CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg,
				CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg,
				CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg,
				CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			pr_warning("[ powerplay ] Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n");
			break;
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				       ixCNB_PWRMGT_CNTL, temp_reg);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EnableSMU7ThermalManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_SMU7);

	/* Detect the VDDC control method (GPIO LUT preferred over SVID2). */
	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
						     VOLTAGE_TYPE_VDDC,
						     VOLTAGE_OBJ_GPIO_LUT))
		data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
	else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
							  VOLTAGE_TYPE_VDDC,
							  VOLTAGE_OBJ_SVID2))
		data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;

	/* VDDCI control, only when the platform asked for it. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
							     VOLTAGE_TYPE_VDDCI,
							     VOLTAGE_OBJ_GPIO_LUT))
			data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
								  VOLTAGE_TYPE_VDDCI,
								  VOLTAGE_OBJ_SVID2))
			data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_ControlVDDCI);

	/* MVDD control, only when the platform asked for it. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
							     VOLTAGE_TYPE_MVDDC,
							     VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
								  VOLTAGE_TYPE_MVDDC,
								  VOLTAGE_OBJ_SVID2))
			data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_EnableMVDDControl);

	data->vddc_phase_shed_control = false;

	/* NOTE(review): stay_in_boot is computed but never read again in this function. */
	stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				       PHM_PlatformCaps_StayInBootState);

	/* Fixed platform capability set for Iceland. */
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_ActivityReporting);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_GFXClockGatingSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EngineSpreadSpectrumSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DynamicPCIEGen2Support);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_SMC);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DisablePowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_BACO);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_ThermalAutoThrottling);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DisableLSClockGating);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_SamuDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_AcpDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_OD6inACSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_EnablePlatformPowerManagement);

	/* NOTE(review): PauseMMSessions is set twice (also a few lines below). */
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_PauseMMSessions);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_OD6PlusinACSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_PauseMMSessions);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_GFXClockGatingManagedInCAIL);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_IcelandULPSSWWorkAround);


	/* iceland doesn't support UVD and VCE */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_VCEPowerGating);

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
	result = cgs_query_system_info(hwmgr->device, &sys_info);
	/*
	 * NOTE(review): everything below (TLU flag, performance level
	 * limits, PCIe gen/lane caps) only runs when the PG-flags query
	 * succeeds; on failure the backend is torn down via
	 * iceland_hwmgr_backend_fini() yet the function still returns 0.
	 * Confirm this is intentional.
	 */
	if (!result) {
		if (sys_info.value & AMD_PG_SUPPORT_UVD)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_UVDPowerGating);
		if (sys_info.value & AMD_PG_SUPPORT_VCE)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_VCEPowerGating);

		data->is_tlu_enabled = false;
		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			ICELAND_MAX_HARDWARE_POWERLEVELS;
		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

		/* PCIe generation capability, with a safe default on failure. */
		sys_info.size = sizeof(struct cgs_system_info);
		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
		result = cgs_query_system_info(hwmgr->device, &sys_info);
		if (result)
			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		else
			data->pcie_gen_cap = (uint32_t)sys_info.value;
		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
			data->pcie_spc_cap = 20;
		/* PCIe lane-width capability, same fallback pattern. */
		sys_info.size = sizeof(struct cgs_system_info);
		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
		result = cgs_query_system_info(hwmgr->device, &sys_info);
		if (result)
			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		else
			data->pcie_lane_cap = (uint32_t)sys_info.value;
	} else {
		/* Ignore return value in here, we are cleaning up a mess. */
		iceland_hwmgr_backend_fini(hwmgr);
	}

	return 0;
failed:
	return result;
}
3869
/* Number of power-state entries in the pptable; 0 when the query fails. */
static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr)
{
	unsigned long num_entries = 0;

	if (pp_tables_get_num_of_entries(hwmgr, &num_entries))
		return 0;

	return num_entries;
}
3879
3880static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic);
3881
3882struct iceland_power_state *cast_phw_iceland_power_state(
3883 struct pp_hw_power_state *hw_ps)
3884{
3885 if (hw_ps == NULL)
3886 return NULL;
3887
3888 PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
3889 "Invalid Powerstate Type!",
3890 return NULL);
3891
3892 return (struct iceland_power_state *)hw_ps;
3893}
3894
/*
 * Adjust the requested power state before it is committed: clamp the
 * clocks to the AC/DC limits, honour the gfx arbiter minimums and
 * overdrive requests, and disable mclk switching when more than one
 * display is active or frame lock requires it.
 */
static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
			struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	struct iceland_power_state *iceland_ps =
			cast_phw_iceland_power_state(&prequest_ps->hardware);

	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	struct cgs_display_info info = {0};
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);

	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	/* Empty "code" argument: a failed check only logs, it does not bail out. */
	PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2,
		"VI should always have 2 performance levels",
		);

	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* On battery the levels themselves are clamped to the DC limits. */
	if (PP_PowerSource_DC == hwmgr->power_source) {
		for (i = 0; i < iceland_ps->performance_level_count; i++) {
			if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk)
				iceland_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk)
				iceland_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
	iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {

		/* Stable pstate pins sclk at 75% of the AC max, snapped down to a table entry. */
		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) {
			if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				break;
			}
		}

		if (count < 0)
			stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;

	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;

	iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;

	/* Overdrive requests: clamp to the overdrive limit, then apply to the high level. */
	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
			"Overdrive sclk exceeds limit",
			hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);

		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
			iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
	}

	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
			"Overdrive mclk exceeds limit",
			hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);

		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
			iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
			hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);

	disable_mclk_switching = (1 < info.display_count) ||
				 disable_mclk_switching_for_frame_lock;

	sclk  = iceland_ps->performance_levels[0].engine_clock;
	mclk  = iceland_ps->performance_levels[0].memory_clock;

	/* With switching disabled, run even the low level at the highest mclk. */
	if (disable_mclk_switching)
		mclk  = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock;

	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;

	iceland_ps->performance_levels[0].engine_clock = sclk;
	iceland_ps->performance_levels[0].memory_clock = mclk;

	/* Keep the high level at or above the (possibly raised) low level. */
	iceland_ps->performance_levels[1].engine_clock =
		(iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ?
			      iceland_ps->performance_levels[1].engine_clock :
			      iceland_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		if (mclk < iceland_ps->performance_levels[1].memory_clock)
			mclk = iceland_ps->performance_levels[1].memory_clock;

		iceland_ps->performance_levels[0].memory_clock = mclk;
		iceland_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock)
			iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
		for (i=0; i < iceland_ps->performance_level_count; i++) {
			iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/*
			 * NOTE(review): pcie_lane is also taken from
			 * pcie_gen_performance.max — presumably it should come
			 * from the lane (not gen) performance range; confirm
			 * against the iceland_hwmgr struct definition.
			 */
			iceland_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}

	return 0;
}
4035
4036static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
4037{
4038 /*
4039 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
4040 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
4041 * whereas voltage control is a fundemental change that will not be disabled
4042 */
4043 return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4044 FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
4045}
4046
4047/**
4048 * force DPM power State
4049 *
4050 * @param hwmgr: the address of the powerplay hardware manager.
4051 * @param n : DPM level
4052 * @return The response that came from the SMC.
4053 */
4054int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
4055{
4056 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4057
4058 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
4059 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4060 "Trying to force SCLK when DPM is disabled", return -1;);
4061 if (0 == data->sclk_dpm_key_disabled)
4062 return (0 == smum_send_msg_to_smc_with_parameter(
4063 hwmgr->smumgr,
4064 PPSMC_MSG_DPM_ForceState,
4065 n) ? 0 : 1);
4066
4067 return 0;
4068}
4069
4070/**
4071 * force DPM power State
4072 *
4073 * @param hwmgr: the address of the powerplay hardware manager.
4074 * @param n : DPM level
4075 * @return The response that came from the SMC.
4076 */
4077int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
4078{
4079 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4080
4081 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
4082 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4083 "Trying to Force MCLK when DPM is disabled", return -1;);
4084 if (0 == data->mclk_dpm_key_disabled)
4085 return (0 == smum_send_msg_to_smc_with_parameter(
4086 hwmgr->smumgr,
4087 PPSMC_MSG_MCLKDPM_ForceState,
4088 n) ? 0 : 1);
4089
4090 return 0;
4091}
4092
4093/**
4094 * force DPM power State
4095 *
4096 * @param hwmgr: the address of the powerplay hardware manager.
4097 * @param n : DPM level
4098 * @return The response that came from the SMC.
4099 */
4100int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
4101{
4102 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4103
4104 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
4105 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4106 "Trying to Force PCIE level when DPM is disabled", return -1;);
4107 if (0 == data->pcie_dpm_key_disabled)
4108 return (0 == smum_send_msg_to_smc_with_parameter(
4109 hwmgr->smumgr,
4110 PPSMC_MSG_PCIeDPM_ForceLevel,
4111 n) ? 0 : 1);
4112
4113 return 0;
4114}
4115
4116static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr)
4117{
4118 uint32_t level, tmp;
4119 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4120
4121 if (0 == data->sclk_dpm_key_disabled) {
4122 /* SCLK */
4123 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
4124 level = 0;
4125 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
4126 while (tmp >>= 1)
4127 level++ ;
4128
4129 if (0 != level) {
4130 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
4131 "force highest sclk dpm state failed!", return -1);
4132 PHM_WAIT_INDIRECT_FIELD(hwmgr->device,
4133 SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level);
4134 }
4135 }
4136 }
4137
4138 if (0 == data->mclk_dpm_key_disabled) {
4139 /* MCLK */
4140 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
4141 level = 0;
4142 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
4143 while (tmp >>= 1)
4144 level++ ;
4145
4146 if (0 != level) {
4147 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)),
4148 "force highest mclk dpm state failed!", return -1);
4149 PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
4150 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level);
4151 }
4152 }
4153 }
4154
4155 if (0 == data->pcie_dpm_key_disabled) {
4156 /* PCIE */
4157 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
4158 level = 0;
4159 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4160 while (tmp >>= 1)
4161 level++ ;
4162
4163 if (0 != level) {
4164 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)),
4165 "force highest pcie dpm state failed!", return -1);
4166 }
4167 }
4168 }
4169
4170 return 0;
4171}
4172
4173static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr,
4174 uint32_t level_mask)
4175{
4176 uint32_t level = 0;
4177
4178 while (0 == (level_mask & (1 << level)))
4179 level++;
4180
4181 return level;
4182}
4183
4184static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4185{
4186 uint32_t level;
4187 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4188
4189 /* for now force only sclk */
4190 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4191 level = iceland_get_lowest_enable_level(hwmgr,
4192 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4193
4194 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
4195 "force sclk dpm state failed!", return -1);
4196
4197 PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
4198 TARGET_AND_CURRENT_PROFILE_INDEX,
4199 CURR_SCLK_INDEX,
4200 level);
4201 }
4202
4203 return 0;
4204}
4205
4206int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4207{
4208 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4209
4210 PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr),
4211 "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
4212 return -1);
4213
4214 if (0 == data->sclk_dpm_key_disabled) {
4215 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
4216 hwmgr->smumgr,
4217 PPSMC_MSG_NoForcedLevel)),
4218 "unforce sclk dpm state failed!",
4219 return -1);
4220 }
4221
4222 if (0 == data->mclk_dpm_key_disabled) {
4223 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
4224 hwmgr->smumgr,
4225 PPSMC_MSG_MCLKDPM_NoForcedLevel)),
4226 "unforce mclk dpm state failed!",
4227 return -1);
4228 }
4229
4230 if (0 == data->pcie_dpm_key_disabled) {
4231 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
4232 hwmgr->smumgr,
4233 PPSMC_MSG_PCIeDPM_UnForceLevel)),
4234 "unforce pcie level failed!",
4235 return -1);
4236 }
4237
4238 return 0;
4239}
4240
4241static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr,
4242 enum amd_dpm_forced_level level)
4243{
4244 int ret = 0;
4245
4246 switch (level) {
4247 case AMD_DPM_FORCED_LEVEL_HIGH:
4248 ret = iceland_force_dpm_highest(hwmgr);
4249 if (ret)
4250 return ret;
4251 break;
4252 case AMD_DPM_FORCED_LEVEL_LOW:
4253 ret = iceland_force_dpm_lowest(hwmgr);
4254 if (ret)
4255 return ret;
4256 break;
4257 case AMD_DPM_FORCED_LEVEL_AUTO:
4258 ret = iceland_unforce_dpm_levels(hwmgr);
4259 if (ret)
4260 return ret;
4261 break;
4262 default:
4263 break;
4264 }
4265
4266 hwmgr->dpm_level = level;
4267 return ret;
4268}
4269
4270const struct iceland_power_state *cast_const_phw_iceland_power_state(
4271 const struct pp_hw_power_state *hw_ps)
4272{
4273 if (hw_ps == NULL)
4274 return NULL;
4275
4276 PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
4277 "Invalid Powerstate Type!",
4278 return NULL);
4279
4280 return (const struct iceland_power_state *)hw_ps;
4281}
4282
4283static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
4284{
4285 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
4286 const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
4287 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4288 struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
4289 uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
4290 struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
4291 uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
4292 struct PP_Clocks min_clocks = {0};
4293 uint32_t i;
4294 struct cgs_display_info info = {0};
4295
4296 data->need_update_smu7_dpm_table = 0;
4297
4298 for (i = 0; i < psclk_table->count; i++) {
4299 if (sclk == psclk_table->dpm_levels[i].value)
4300 break;
4301 }
4302
4303 if (i >= psclk_table->count)
4304 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4305 else {
4306 /*
4307 * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep
4308 * divider update is required.
4309 */
4310 if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
4311 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4312 }
4313
4314 for (i = 0; i < pmclk_table->count; i++) {
4315 if (mclk == pmclk_table->dpm_levels[i].value)
4316 break;
4317 }
4318
4319 if (i >= pmclk_table->count)
4320 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4321
4322 cgs_get_active_displays_info(hwmgr->device, &info);
4323
4324 if (data->display_timing.num_existing_displays != info.display_count)
4325 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4326
4327 return 0;
4328}
4329
4330static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps)
4331{
4332 uint32_t i;
4333 uint32_t pcie_speed, max_speed = 0;
4334
4335 for (i = 0; i < hw_ps->performance_level_count; i++) {
4336 pcie_speed = hw_ps->performance_levels[i].pcie_gen;
4337 if (max_speed < pcie_speed)
4338 max_speed = pcie_speed;
4339 }
4340
4341 return max_speed;
4342}
4343
4344static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
4345{
4346 uint32_t speed_cntl = 0;
4347
4348 speed_cntl = cgs_read_ind_register(hwmgr->device,
4349 CGS_IND_REG__PCIE,
4350 ixPCIE_LC_SPEED_CNTL);
4351 return((uint16_t)PHM_GET_FIELD(speed_cntl,
4352 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
4353}
4354
4355
4356static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
4357{
4358 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
4359 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4360 const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state);
4361 const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state);
4362
4363 uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps);
4364 uint16_t current_link_speed;
4365
4366 if (data->force_pcie_gen == PP_PCIEGenInvalid)
4367 current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps);
4368 else
4369 current_link_speed = data->force_pcie_gen;
4370
4371 data->force_pcie_gen = PP_PCIEGenInvalid;
4372 data->pspp_notify_required = false;
4373 if (target_link_speed > current_link_speed) {
4374 switch(target_link_speed) {
4375 case PP_PCIEGen3:
4376 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
4377 break;
4378 data->force_pcie_gen = PP_PCIEGen2;
4379 if (current_link_speed == PP_PCIEGen2)
4380 break;
4381 case PP_PCIEGen2:
4382 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
4383 break;
4384 default:
4385 data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr);
4386 break;
4387 }
4388 } else {
4389 if (target_link_speed < current_link_speed)
4390 data->pspp_notify_required = true;
4391 }
4392
4393 return 0;
4394}
4395
4396static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4397{
4398 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4399
4400 if (0 == data->need_update_smu7_dpm_table)
4401 return 0;
4402
4403 if ((0 == data->sclk_dpm_key_disabled) &&
4404 (data->need_update_smu7_dpm_table &
4405 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4406 PP_ASSERT_WITH_CODE(
4407 0 == iceland_is_dpm_running(hwmgr),
4408 "Trying to freeze SCLK DPM when DPM is disabled",
4409 );
4410 PP_ASSERT_WITH_CODE(
4411 0 == smum_send_msg_to_smc(hwmgr->smumgr,
4412 PPSMC_MSG_SCLKDPM_FreezeLevel),
4413 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4414 return -1);
4415 }
4416
4417 if ((0 == data->mclk_dpm_key_disabled) &&
4418 (data->need_update_smu7_dpm_table &
4419 DPMTABLE_OD_UPDATE_MCLK)) {
4420 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4421 "Trying to freeze MCLK DPM when DPM is disabled",
4422 );
4423 PP_ASSERT_WITH_CODE(
4424 0 == smum_send_msg_to_smc(hwmgr->smumgr,
4425 PPSMC_MSG_MCLKDPM_FreezeLevel),
4426 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4427 return -1);
4428 }
4429
4430 return 0;
4431}
4432
4433static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
4434{
4435 int result = 0;
4436
4437 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
4438 const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
4439 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4440 uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
4441 uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
4442 struct iceland_dpm_table *pdpm_table = &data->dpm_table;
4443
4444 struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
4445 uint32_t dpm_count, clock_percent;
4446 uint32_t i;
4447
4448 if (0 == data->need_update_smu7_dpm_table)
4449 return 0;
4450
4451 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4452 pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
4453
4454 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
4455 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
4456 /*
4457 * Need to do calculation based on the golden DPM table
4458 * as the Heatmap GPU Clock axis is also based on the default values
4459 */
4460 PP_ASSERT_WITH_CODE(
4461 (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
4462 "Divide by 0!",
4463 return -1);
4464 dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
4465 for (i = dpm_count; i > 1; i--) {
4466 if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
4467 clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
4468 pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
4469
4470 pdpm_table->sclk_table.dpm_levels[i].value =
4471 pgolden_dpm_table->sclk_table.dpm_levels[i].value +
4472 (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
4473
4474 } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
4475 clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
4476 pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
4477
4478 pdpm_table->sclk_table.dpm_levels[i].value =
4479 pgolden_dpm_table->sclk_table.dpm_levels[i].value -
4480 (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
4481 } else
4482 pdpm_table->sclk_table.dpm_levels[i].value =
4483 pgolden_dpm_table->sclk_table.dpm_levels[i].value;
4484 }
4485 }
4486 }
4487
4488 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4489 pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;
4490
4491 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
4492 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
4493
4494 PP_ASSERT_WITH_CODE(
4495 (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
4496 "Divide by 0!",
4497 return -1);
4498 dpm_count = pdpm_table->mclk_table.count < 2? 0 : pdpm_table->mclk_table.count-2;
4499 for (i = dpm_count; i > 1; i--) {
4500 if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
4501 clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
4502 pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
4503
4504 pdpm_table->mclk_table.dpm_levels[i].value =
4505 pgolden_dpm_table->mclk_table.dpm_levels[i].value +
4506 (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
4507
4508 } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
4509 clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
4510 pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
4511
4512 pdpm_table->mclk_table.dpm_levels[i].value =
4513 pgolden_dpm_table->mclk_table.dpm_levels[i].value -
4514 (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
4515 } else
4516 pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
4517 }
4518 }
4519 }
4520
4521
4522 if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
4523 result = iceland_populate_all_graphic_levels(hwmgr);
4524 PP_ASSERT_WITH_CODE((0 == result),
4525 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4526 return result);
4527 }
4528
4529 if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
4530 /*populate MCLK dpm table to SMU7 */
4531 result = iceland_populate_all_memory_levels(hwmgr);
4532 PP_ASSERT_WITH_CODE((0 == result),
4533 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4534 return result);
4535 }
4536
4537 return result;
4538}
4539
4540static int iceland_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4541 struct iceland_single_dpm_table *pdpm_table,
4542 uint32_t low_limit, uint32_t high_limit)
4543{
4544 uint32_t i;
4545
4546 for (i = 0; i < pdpm_table->count; i++) {
4547 if ((pdpm_table->dpm_levels[i].value < low_limit) ||
4548 (pdpm_table->dpm_levels[i].value > high_limit))
4549 pdpm_table->dpm_levels[i].enabled = false;
4550 else
4551 pdpm_table->dpm_levels[i].enabled = true;
4552 }
4553 return 0;
4554}
4555
4556static int iceland_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_state)
4557{
4558 int result = 0;
4559 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4560 uint32_t high_limit_count;
4561
4562 PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
4563 "power state did not have any performance level",
4564 return -1);
4565
4566 high_limit_count = (1 == hw_state->performance_level_count) ? 0: 1;
4567
4568 iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.sclk_table),
4569 hw_state->performance_levels[0].engine_clock,
4570 hw_state->performance_levels[high_limit_count].engine_clock);
4571
4572 iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.mclk_table),
4573 hw_state->performance_levels[0].memory_clock,
4574 hw_state->performance_levels[high_limit_count].memory_clock);
4575
4576 return result;
4577}
4578
4579static int iceland_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
4580{
4581 int result;
4582 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
4583 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4584 const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
4585
4586 result = iceland_trim_dpm_states(hwmgr, iceland_ps);
4587 if (0 != result)
4588 return result;
4589
4590 data->dpm_level_enable_mask.sclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4591 data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4592 data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
4593 if (data->uvd_enabled && (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1))
4594 data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4595
4596 data->dpm_level_enable_mask.pcie_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4597
4598 return 0;
4599}
4600
/*
 * VCE DPM hook for the set-power-state sequence.  Intentionally a no-op
 * for Iceland; kept so the sequence matches the other hwmgr backends.
 */
static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
{
	return 0;
}
4605
4606static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
4607{
4608 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4609
4610 int result = 0;
4611 uint32_t low_sclk_interrupt_threshold = 0;
4612
4613 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4614 PHM_PlatformCaps_SclkThrottleLowNotification)
4615 && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
4616 data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
4617 low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
4618
4619 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
4620
4621 result = smu7_copy_bytes_to_smc(
4622 hwmgr->smumgr,
4623 data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable,
4624 LowSclkInterruptThreshold),
4625 (uint8_t *)&low_sclk_interrupt_threshold,
4626 sizeof(uint32_t),
4627 data->sram_end
4628 );
4629 }
4630
4631 return result;
4632}
4633
4634static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
4635{
4636 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4637
4638 uint32_t address;
4639 int32_t result;
4640
4641 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4642 return 0;
4643
4644
4645 memset(&data->mc_reg_table, 0, sizeof(SMU71_Discrete_MCRegisters));
4646
4647 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
4648
4649 if(result != 0)
4650 return result;
4651
4652
4653 address = data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
4654
4655 return smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
4656 (uint8_t *)&data->mc_reg_table.data[0],
4657 sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
4658 data->sram_end);
4659}
4660
4661static int iceland_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
4662{
4663 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4664
4665 if (data->need_update_smu7_dpm_table &
4666 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4667 return iceland_program_memory_timing_parameters(hwmgr);
4668
4669 return 0;
4670}
4671
4672static int iceland_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4673{
4674 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4675
4676 if (0 == data->need_update_smu7_dpm_table)
4677 return 0;
4678
4679 if ((0 == data->sclk_dpm_key_disabled) &&
4680 (data->need_update_smu7_dpm_table &
4681 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4682
4683 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4684 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4685 );
4686 PP_ASSERT_WITH_CODE(
4687 0 == smum_send_msg_to_smc(hwmgr->smumgr,
4688 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4689 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4690 return -1);
4691 }
4692
4693 if ((0 == data->mclk_dpm_key_disabled) &&
4694 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4695
4696 PP_ASSERT_WITH_CODE(
4697 0 == iceland_is_dpm_running(hwmgr),
4698 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4699 );
4700 PP_ASSERT_WITH_CODE(
4701 0 == smum_send_msg_to_smc(hwmgr->smumgr,
4702 PPSMC_MSG_MCLKDPM_UnfreezeLevel),
4703 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4704 return -1);
4705 }
4706
4707 data->need_update_smu7_dpm_table = 0;
4708
4709 return 0;
4710}
4711
4712static int iceland_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
4713{
4714 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
4715 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4716 const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
4717 uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_ps);
4718 uint8_t request;
4719
4720 if (data->pspp_notify_required ||
4721 data->pcie_performance_request) {
4722 if (target_link_speed == PP_PCIEGen3)
4723 request = PCIE_PERF_REQ_GEN3;
4724 else if (target_link_speed == PP_PCIEGen2)
4725 request = PCIE_PERF_REQ_GEN2;
4726 else
4727 request = PCIE_PERF_REQ_GEN1;
4728
4729 if(request == PCIE_PERF_REQ_GEN1 && iceland_get_current_pcie_speed(hwmgr) > 0) {
4730 data->pcie_performance_request = false;
4731 return 0;
4732 }
4733
4734 if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
4735 if (PP_PCIEGen2 == target_link_speed)
4736 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
4737 else
4738 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
4739 }
4740 }
4741
4742 data->pcie_performance_request = false;
4743 return 0;
4744}
4745
4746int iceland_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
4747{
4748 PPSMC_Result result;
4749 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4750
4751 if (0 == data->sclk_dpm_key_disabled) {
4752 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
4753 if (0 != iceland_is_dpm_running(hwmgr))
4754 printk(KERN_ERR "[ powerplay ] Trying to set Enable Sclk Mask when DPM is disabled \n");
4755
4756 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4757 result = smum_send_msg_to_smc_with_parameter(
4758 hwmgr->smumgr,
4759 (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
4760 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4761 PP_ASSERT_WITH_CODE((0 == result),
4762 "Set Sclk Dpm enable Mask failed", return -1);
4763 }
4764 }
4765
4766 if (0 == data->mclk_dpm_key_disabled) {
4767 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
4768 if (0 != iceland_is_dpm_running(hwmgr))
4769 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mclk Mask when DPM is disabled \n");
4770
4771 if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4772 result = smum_send_msg_to_smc_with_parameter(
4773 hwmgr->smumgr,
4774 (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
4775 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
4776 PP_ASSERT_WITH_CODE((0 == result),
4777 "Set Mclk Dpm enable Mask failed", return -1);
4778 }
4779 }
4780
4781 return 0;
4782}
4783
/*
 * Top-level set-power-state sequence: freeze DPM, rewrite and upload the
 * DPM tables for the new state, then unfreeze and push the enable masks.
 * Each step logs on failure but the sequence continues; the first failing
 * step's code is returned at the end.
 */
static int iceland_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;

	tmp_result = iceland_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);

	/* Request a faster PCIe link before the switch (downgrades happen after). */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = iceland_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
	}

	tmp_result = iceland_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = iceland_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);

	tmp_result = iceland_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);

	tmp_result = iceland_update_vce_dpm(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);

	tmp_result = iceland_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);

	tmp_result = iceland_update_and_upload_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);

	tmp_result = iceland_program_memory_timing_parameters_conditionally(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);

	tmp_result = iceland_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = iceland_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);

	/* Pending PCIe link-speed downgrades are notified after the switch. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = iceland_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
	}

	return result;
}
4830
/* Size of the iceland-specific hardware power state blob, for the core. */
static int iceland_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct iceland_power_state);
}
4835
4836static int iceland_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
4837{
4838 struct pp_power_state *ps;
4839 struct iceland_power_state *iceland_ps;
4840
4841 if (hwmgr == NULL)
4842 return -EINVAL;
4843
4844 ps = hwmgr->request_ps;
4845
4846 if (ps == NULL)
4847 return -EINVAL;
4848
4849 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
4850
4851 if (low)
4852 return iceland_ps->performance_levels[0].memory_clock;
4853 else
4854 return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
4855}
4856
4857static int iceland_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
4858{
4859 struct pp_power_state *ps;
4860 struct iceland_power_state *iceland_ps;
4861
4862 if (hwmgr == NULL)
4863 return -EINVAL;
4864
4865 ps = hwmgr->request_ps;
4866
4867 if (ps == NULL)
4868 return -EINVAL;
4869
4870 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
4871
4872 if (low)
4873 return iceland_ps->performance_levels[0].engine_clock;
4874 else
4875 return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
4876}
4877
4878static int iceland_get_current_pcie_lane_number(
4879 struct pp_hwmgr *hwmgr)
4880{
4881 uint32_t link_width;
4882
4883 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
4884 CGS_IND_REG__PCIE,
4885 PCIE_LC_LINK_WIDTH_CNTL,
4886 LC_LINK_WIDTH_RD);
4887
4888 PP_ASSERT_WITH_CODE((7 >= link_width),
4889 "Invalid PCIe lane width!", return 0);
4890
4891 return decode_pcie_lane_width(link_width);
4892}
4893
/*
 * Fill the boot power state from the VBIOS firmware info table: cache
 * the boot clocks/voltages in vbios_boot_state and copy them into the
 * state's first performance level.  Returns 0 even when no firmware
 * info table exists (test environments).
 */
static int iceland_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_power_state *ps = (struct iceland_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. */
	/* ATOM tables are little-endian; convert before caching. */
	data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* Link speed/width are read live from the PCIe registers, not the VBIOS. */
	data->vbios_boot_state.pcie_gen_bootup_value = iceland_get_current_pcie_speed(hwmgr);
	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)iceland_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
4932
/*
 * Per-clock-level callback used by pp_tables_get_entry(): decode one
 * ATOM_PPLIB_CI_CLOCK_INFO entry and append it as the next performance
 * level of the power state being built.  Returns -1 when the level
 * count exceeds the SMC or driver limit.
 */
static int iceland_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *power_state,
					unsigned int index, const void *clock_info)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_power_state *iceland_power_state = cast_phw_iceland_power_state(power_state);
	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
	struct iceland_performance_level *performance_level;
	uint32_t engine_clock, memory_clock;
	uint16_t pcie_gen_from_bios;

	/* Clocks are stored split across a 16-bit low and 8-bit high part. */
	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;

	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
		data->highest_mclk = memory_clock;

	performance_level = &(iceland_power_state->performance_levels
			[iceland_power_state->performance_level_count++]);

	/* NOTE(review): the limit checks below run after the increment, so the
	 * highest slot is written before the count is validated — confirm the
	 * tables guarantee counts within SMU71_MAX_LEVELS_GRAPHICS. */
	PP_ASSERT_WITH_CODE(
			(iceland_power_state->performance_level_count < SMU71_MAX_LEVELS_GRAPHICS),
			"Performance levels exceeds SMC limit!",
			return -1);

	PP_ASSERT_WITH_CODE(
			(iceland_power_state->performance_level_count <=
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -1);

	/* Performance levels are arranged from low to high. */
	performance_level->memory_clock = memory_clock;
	performance_level->engine_clock = engine_clock;

	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;

	/* Clamp the VBIOS PCIe gen/lane values to what the platform supports. */
	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);

	return 0;
}
4975
/*
 * Build one driver power state from powerplay table entry @entry_index:
 * decode its clock levels via the callback, sanity-check it against the
 * VBIOS boot values, and update the cached PCIe gen/lane min-max ranges
 * for performance and battery states.
 */
static int iceland_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct iceland_power_state *ps;
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *dep_mclk_table =
			hwmgr->dyn_state.vddci_dependency_on_mclk;

	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct iceland_power_state *)(&state->hardware);

	result = pp_tables_get_entry(hwmgr, entry_index, state,
			iceland_get_pp_table_entry_callback_func);

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state as
	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
	 * state if there is only one VDDCI/MCLK level, check if it's
	 * the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].v !=
				data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
	else if (0 != (state->classification.flags & PP_StateClassificationFlag_Boot)) {
		/* 0xffff means no BACO base state was chosen yet. */
		if (data->bacos.best_match == 0xffff) {
			/* For C.I. use boot state as base BACO state */
			data->bacos.best_match = PP_StateClassificationFlag_Boot;
			data->bacos.performance_level = ps->performance_levels[0];
		}
	}


	ps->uvd_clocks.VCLK = state->uvd_clocks.VCLK;
	ps->uvd_clocks.DCLK = state->uvd_clocks.DCLK;

	/* Track the widest PCIe gen/lane range seen per UI label class. */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
5090
5091static void
5092iceland_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
5093{
5094 uint32_t sclk, mclk, activity_percent;
5095 uint32_t offset;
5096 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5097
5098 smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));
5099
5100 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
5101
5102 smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));
5103
5104 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
5105 seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);
5106
5107 offset = data->soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
5108 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
5109 activity_percent += 0x80;
5110 activity_percent >>= 8;
5111
5112 seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
5113
5114 seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
5115
5116 seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
5117}
5118
5119int iceland_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
5120{
5121 uint32_t num_active_displays = 0;
5122 struct cgs_display_info info = {0};
5123 info.mode_info = NULL;
5124
5125 cgs_get_active_displays_info(hwmgr->device, &info);
5126
5127 num_active_displays = info.display_count;
5128
5129 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
5130 iceland_notify_smc_display_change(hwmgr, false);
5131 else
5132 iceland_notify_smc_display_change(hwmgr, true);
5133
5134 return 0;
5135}
5136
5137/**
5138* Programs the display gap
5139*
5140* @param hwmgr the address of the powerplay hardware manager.
5141* @return always OK
5142*/
5143int iceland_program_display_gap(struct pp_hwmgr *hwmgr)
5144{
5145 uint32_t num_active_displays = 0;
5146 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
5147 uint32_t display_gap2;
5148 uint32_t pre_vbi_time_in_us;
5149 uint32_t frame_time_in_us;
5150 uint32_t ref_clock;
5151 uint32_t refresh_rate = 0;
5152 struct cgs_display_info info = {0};
5153 struct cgs_mode_info mode_info;
5154
5155 info.mode_info = &mode_info;
5156
5157 cgs_get_active_displays_info(hwmgr->device, &info);
5158 num_active_displays = info.display_count;
5159
5160 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
5161 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
5162
5163 ref_clock = mode_info.ref_clock;
5164 refresh_rate = mode_info.refresh_rate;
5165
5166 if(0 == refresh_rate)
5167 refresh_rate = 60;
5168
5169 frame_time_in_us = 1000000 / refresh_rate;
5170
5171 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
5172 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
5173
5174 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
5175
5176 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_4, PreVBlankGap, 0x64);
5177
5178 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_5, VBlankTimeout, (frame_time_in_us - pre_vbi_time_in_us));
5179
5180 if (num_active_displays == 1)
5181 iceland_notify_smc_display_change(hwmgr, true);
5182
5183 return 0;
5184}
5185
/* React to a display configuration change by reprogramming the display gap. */
int iceland_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	iceland_program_display_gap(hwmgr);

	return 0;
}
5192
5193/**
5194* Set maximum target operating fan output PWM
5195*
5196* @param pHwMgr: the address of the powerplay hardware manager.
5197* @param usMaxFanPwm: max operating fan PWM in percents
5198* @return The response that came from the SMC.
5199*/
5200static int iceland_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5201{
5202 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
5203
5204 if (phm_is_hw_access_blocked(hwmgr))
5205 return 0;
5206
5207 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
5208}
5209
5210/**
5211* Set maximum target operating fan output RPM
5212*
5213* @param pHwMgr: the address of the powerplay hardware manager.
5214* @param usMaxFanRpm: max operating fan RPM value.
5215* @return The response that came from the SMC.
5216*/
5217static int iceland_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5218{
5219 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm;
5220
5221 if (phm_is_hw_access_blocked(hwmgr))
5222 return 0;
5223
5224 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1);
5225}
5226
5227static int iceland_dpm_set_interrupt_state(void *private_data,
5228 unsigned src_id, unsigned type,
5229 int enabled)
5230{
5231 uint32_t cg_thermal_int;
5232 struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
5233
5234 if (hwmgr == NULL)
5235 return -EINVAL;
5236
5237 switch (type) {
5238 case AMD_THERMAL_IRQ_LOW_TO_HIGH:
5239 if (enabled) {
5240 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5241 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5242 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5243 } else {
5244 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5245 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5246 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5247 }
5248 break;
5249
5250 case AMD_THERMAL_IRQ_HIGH_TO_LOW:
5251 if (enabled) {
5252 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5253 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
5254 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5255 } else {
5256 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5257 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
5258 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5259 }
5260 break;
5261 default:
5262 break;
5263 }
5264 return 0;
5265}
5266
5267static int iceland_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
5268 const void *thermal_interrupt_info)
5269{
5270 int result;
5271 const struct pp_interrupt_registration_info *info =
5272 (const struct pp_interrupt_registration_info *)thermal_interrupt_info;
5273
5274 if (info == NULL)
5275 return -EINVAL;
5276
5277 result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
5278 iceland_dpm_set_interrupt_state,
5279 info->call_back, info->context);
5280
5281 if (result)
5282 return -EINVAL;
5283
5284 result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
5285 iceland_dpm_set_interrupt_state,
5286 info->call_back, info->context);
5287
5288 if (result)
5289 return -EINVAL;
5290
5291 return 0;
5292}
5293
5294
5295static bool iceland_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
5296{
5297 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5298 bool is_update_required = false;
5299 struct cgs_display_info info = {0,0,NULL};
5300
5301 cgs_get_active_displays_info(hwmgr->device, &info);
5302
5303 if (data->display_timing.num_existing_displays != info.display_count)
5304 is_update_required = true;
5305/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
5306 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
5307 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
5308 if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
5309 is_update_required = true;
5310*/
5311 return is_update_required;
5312}
5313
5314
5315static inline bool iceland_are_power_levels_equal(const struct iceland_performance_level *pl1,
5316 const struct iceland_performance_level *pl2)
5317{
5318 return ((pl1->memory_clock == pl2->memory_clock) &&
5319 (pl1->engine_clock == pl2->engine_clock) &&
5320 (pl1->pcie_gen == pl2->pcie_gen) &&
5321 (pl1->pcie_lane == pl2->pcie_lane));
5322}
5323
5324int iceland_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1,
5325 const struct pp_hw_power_state *pstate2, bool *equal)
5326{
5327 const struct iceland_power_state *psa = cast_const_phw_iceland_power_state(pstate1);
5328 const struct iceland_power_state *psb = cast_const_phw_iceland_power_state(pstate2);
5329 int i;
5330
5331 if (equal == NULL || psa == NULL || psb == NULL)
5332 return -EINVAL;
5333
5334 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
5335 if (psa->performance_level_count != psb->performance_level_count) {
5336 *equal = false;
5337 return 0;
5338 }
5339
5340 for (i = 0; i < psa->performance_level_count; i++) {
5341 if (!iceland_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
5342 /* If we have found even one performance level pair that is different the states are different. */
5343 *equal = false;
5344 return 0;
5345 }
5346 }
5347
5348 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
5349 *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
5350 *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
5351 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
5352 *equal &= (psa->acp_clk == psb->acp_clk);
5353
5354 return 0;
5355}
5356
5357static int iceland_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5358{
5359 if (mode) {
5360 /* stop auto-manage */
5361 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5362 PHM_PlatformCaps_MicrocodeFanControl))
5363 iceland_fan_ctrl_stop_smc_fan_control(hwmgr);
5364 iceland_fan_ctrl_set_static_mode(hwmgr, mode);
5365 } else
5366 /* restart auto-manage */
5367 iceland_fan_ctrl_reset_fan_speed_to_default(hwmgr);
5368
5369 return 0;
5370}
5371
5372static int iceland_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5373{
5374 if (hwmgr->fan_ctrl_is_in_default_mode)
5375 return hwmgr->fan_ctrl_default_mode;
5376 else
5377 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
5378 CG_FDO_CTRL2, FDO_PWM_MODE);
5379}
5380
5381static int iceland_force_clock_level(struct pp_hwmgr *hwmgr,
5382 enum pp_clock_type type, uint32_t mask)
5383{
5384 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5385
5386 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
5387 return -EINVAL;
5388
5389 switch (type) {
5390 case PP_SCLK:
5391 if (!data->sclk_dpm_key_disabled)
5392 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5393 PPSMC_MSG_SCLKDPM_SetEnabledMask,
5394 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
5395 break;
5396 case PP_MCLK:
5397 if (!data->mclk_dpm_key_disabled)
5398 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5399 PPSMC_MSG_MCLKDPM_SetEnabledMask,
5400 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
5401 break;
5402 case PP_PCIE:
5403 {
5404 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
5405 uint32_t level = 0;
5406
5407 while (tmp >>= 1)
5408 level++;
5409
5410 if (!data->pcie_dpm_key_disabled)
5411 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5412 PPSMC_MSG_PCIeDPM_ForceLevel,
5413 level);
5414 break;
5415 }
5416 default:
5417 break;
5418 }
5419
5420 return 0;
5421}
5422
/*
 * Print the SCLK/MCLK/PCIe DPM level table for @type into @buf (sysfs),
 * marking the level matching the current hardware state with '*'.
 * Returns the number of bytes written.
 */
static int iceland_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct iceland_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Query the SMC for the current engine clock; reply is in ARG_0. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		/* First level at or above the current clock is the active one. */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* value/100: level values appear to be in 10 kHz units - TODO confirm. */
		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		/* Same scheme as PP_SCLK, for the memory clock. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* PCIe levels are matched exactly against the current link speed. */
		pcie_speed = iceland_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
5487
5488static int iceland_get_sclk_od(struct pp_hwmgr *hwmgr)
5489{
5490 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5491 struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5492 struct iceland_single_dpm_table *golden_sclk_table =
5493 &(data->golden_dpm_table.sclk_table);
5494 int value;
5495
5496 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5497 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5498 100 /
5499 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5500
5501 return value;
5502}
5503
5504static int iceland_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5505{
5506 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5507 struct iceland_single_dpm_table *golden_sclk_table =
5508 &(data->golden_dpm_table.sclk_table);
5509 struct pp_power_state *ps;
5510 struct iceland_power_state *iceland_ps;
5511
5512 if (value > 20)
5513 value = 20;
5514
5515 ps = hwmgr->request_ps;
5516
5517 if (ps == NULL)
5518 return -EINVAL;
5519
5520 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
5521
5522 iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].engine_clock =
5523 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5524 value / 100 +
5525 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5526
5527 return 0;
5528}
5529
5530static int iceland_get_mclk_od(struct pp_hwmgr *hwmgr)
5531{
5532 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5533 struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5534 struct iceland_single_dpm_table *golden_mclk_table =
5535 &(data->golden_dpm_table.mclk_table);
5536 int value;
5537
5538 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5539 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5540 100 /
5541 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5542
5543 return value;
5544}
5545
5546uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr)
5547{
5548 uint32_t reference_clock;
5549 uint32_t tc;
5550 uint32_t divide;
5551
5552 ATOM_FIRMWARE_INFO *fw_info;
5553 uint16_t size;
5554 uint8_t frev, crev;
5555 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5556
5557 tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
5558
5559 if (tc)
5560 return TCLK;
5561
5562 fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
5563 &size, &frev, &crev);
5564
5565 if (!fw_info)
5566 return 0;
5567
5568 reference_clock = le16_to_cpu(fw_info->usReferenceClock);
5569
5570 divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
5571
5572 if (0 != divide)
5573 return reference_clock / 4;
5574
5575 return reference_clock;
5576}
5577
5578static int iceland_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5579{
5580 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5581 struct iceland_single_dpm_table *golden_mclk_table =
5582 &(data->golden_dpm_table.mclk_table);
5583 struct pp_power_state *ps;
5584 struct iceland_power_state *iceland_ps;
5585
5586 if (value > 20)
5587 value = 20;
5588
5589 ps = hwmgr->request_ps;
5590
5591 if (ps == NULL)
5592 return -EINVAL;
5593
5594 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
5595
5596 iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock =
5597 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5598 value / 100 +
5599 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5600
5601 return 0;
5602}
5603
5604static const struct pp_hwmgr_func iceland_hwmgr_funcs = {
5605 .backend_init = &iceland_hwmgr_backend_init,
5606 .backend_fini = &iceland_hwmgr_backend_fini,
5607 .asic_setup = &iceland_setup_asic_task,
5608 .dynamic_state_management_enable = &iceland_enable_dpm_tasks,
5609 .apply_state_adjust_rules = iceland_apply_state_adjust_rules,
5610 .force_dpm_level = &iceland_force_dpm_level,
5611 .power_state_set = iceland_set_power_state_tasks,
5612 .get_power_state_size = iceland_get_power_state_size,
5613 .get_mclk = iceland_dpm_get_mclk,
5614 .get_sclk = iceland_dpm_get_sclk,
5615 .patch_boot_state = iceland_dpm_patch_boot_state,
5616 .get_pp_table_entry = iceland_get_pp_table_entry,
5617 .get_num_of_pp_table_entries = iceland_get_num_of_entries,
5618 .print_current_perforce_level = iceland_print_current_perforce_level,
5619 .powerdown_uvd = iceland_phm_powerdown_uvd,
5620 .powergate_uvd = iceland_phm_powergate_uvd,
5621 .powergate_vce = iceland_phm_powergate_vce,
5622 .disable_clock_power_gating = iceland_phm_disable_clock_power_gating,
5623 .update_clock_gatings = iceland_phm_update_clock_gatings,
5624 .notify_smc_display_config_after_ps_adjustment = iceland_notify_smc_display_config_after_ps_adjustment,
5625 .display_config_changed = iceland_display_configuration_changed_task,
5626 .set_max_fan_pwm_output = iceland_set_max_fan_pwm_output,
5627 .set_max_fan_rpm_output = iceland_set_max_fan_rpm_output,
5628 .get_temperature = iceland_thermal_get_temperature,
5629 .stop_thermal_controller = iceland_thermal_stop_thermal_controller,
5630 .get_fan_speed_info = iceland_fan_ctrl_get_fan_speed_info,
5631 .get_fan_speed_percent = iceland_fan_ctrl_get_fan_speed_percent,
5632 .set_fan_speed_percent = iceland_fan_ctrl_set_fan_speed_percent,
5633 .reset_fan_speed_to_default = iceland_fan_ctrl_reset_fan_speed_to_default,
5634 .get_fan_speed_rpm = iceland_fan_ctrl_get_fan_speed_rpm,
5635 .set_fan_speed_rpm = iceland_fan_ctrl_set_fan_speed_rpm,
5636 .uninitialize_thermal_controller = iceland_thermal_ctrl_uninitialize_thermal_controller,
5637 .register_internal_thermal_interrupt = iceland_register_internal_thermal_interrupt,
5638 .check_smc_update_required_for_display_configuration = iceland_check_smc_update_required_for_display_configuration,
5639 .check_states_equal = iceland_check_states_equal,
5640 .set_fan_control_mode = iceland_set_fan_control_mode,
5641 .get_fan_control_mode = iceland_get_fan_control_mode,
5642 .force_clock_level = iceland_force_clock_level,
5643 .print_clock_levels = iceland_print_clock_levels,
5644 .get_sclk_od = iceland_get_sclk_od,
5645 .set_sclk_od = iceland_set_sclk_od,
5646 .get_mclk_od = iceland_get_mclk_od,
5647 .set_mclk_od = iceland_set_mclk_od,
5648};
5649
5650int iceland_hwmgr_init(struct pp_hwmgr *hwmgr)
5651{
5652 iceland_hwmgr *data;
5653
5654 data = kzalloc (sizeof(iceland_hwmgr), GFP_KERNEL);
5655 if (data == NULL)
5656 return -ENOMEM;
5657 memset(data, 0x00, sizeof(iceland_hwmgr));
5658
5659 hwmgr->backend = data;
5660 hwmgr->hwmgr_func = &iceland_hwmgr_funcs;
5661 hwmgr->pptable_func = &pptable_funcs;
5662
5663 /* thermal */
5664 pp_iceland_thermal_initialize(hwmgr);
5665 return 0;
5666}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h
deleted file mode 100644
index f253988de2d2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h
+++ /dev/null
@@ -1,424 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25#ifndef ICELAND_HWMGR_H
26#define ICELAND_HWMGR_H
27
28#include "hwmgr.h"
29#include "ppatomctrl.h"
30#include "ppinterrupt.h"
31#include "ppsmc.h"
32#include "iceland_powertune.h"
33#include "pp_endian.h"
34#include "smu71_discrete.h"
35
/* Number of discrete hardware performance levels per power state. */
#define ICELAND_MAX_HARDWARE_POWERLEVELS 2
#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15

/* One DPM performance level: clocks plus the PCIe link configuration. */
struct iceland_performance_level {
	uint32_t memory_clock;
	uint32_t engine_clock;
	uint16_t pcie_gen;
	uint16_t pcie_lane;
};
45
/* BACO selection data (best-match level and flags). */
struct _phw_iceland_bacos {
	uint32_t best_match;
	uint32_t baco_flags;
	struct iceland_performance_level performance_level;
};
typedef struct _phw_iceland_bacos phw_iceland_bacos;

/* UVD (video decode) clock pair. */
struct _phw_iceland_uvd_clocks {
	uint32_t VCLK;
	uint32_t DCLK;
};

typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks;

/* VCE (video encode) clock pair. */
struct _phw_iceland_vce_clocks {
	uint32_t EVCLK;
	uint32_t ECCLK;
};

typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks;
66
/* A complete Iceland hardware power state as tracked by the hwmgr. */
struct iceland_power_state {
	uint32_t magic;
	phw_iceland_uvd_clocks uvd_clocks;
	phw_iceland_vce_clocks vce_clocks;
	uint32_t sam_clk;
	uint32_t acp_clk;
	/* number of valid entries in performance_levels[] */
	uint16_t performance_level_count;
	bool dc_compatible;
	uint32_t sclk_threshold;
	struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS];
};
78
/* One entry of a DPM level table. */
struct _phw_iceland_dpm_level {
	bool enabled;
	uint32_t value;
	uint32_t param1;
};
typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level;

#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define ICELAND_MINIMUM_ENGINE_CLOCK 5000

/* Fixed-capacity table of DPM levels for one clock/voltage domain. */
struct iceland_single_dpm_table {
	uint32_t count;
	phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
94
/* Per-domain DPM level tables used by the hwmgr back end. */
struct iceland_dpm_table {
	struct iceland_single_dpm_table sclk_table;
	struct iceland_single_dpm_table mclk_table;
	struct iceland_single_dpm_table pcie_speed_table;
	struct iceland_single_dpm_table vddc_table;
	struct iceland_single_dpm_table vdd_gfx_table;
	struct iceland_single_dpm_table vdd_ci_table;
	struct iceland_single_dpm_table mvdd_table;
};
/*
 * NOTE(review): the tag "_phw_iceland_dpm_table" is never defined in this
 * header, so this typedef names an incomplete type - looks like a leftover
 * from a rename of "iceland_dpm_table"; confirm before relying on it.
 */
typedef struct _phw_iceland_dpm_table phw_iceland_dpm_table;
105
106
/*
 * Cached copies of the clock PLL registers.
 * (The "regisiters" spelling in the struct tag is in the original source.)
 */
struct _phw_iceland_clock_regisiters {
	uint32_t vCG_SPLL_FUNC_CNTL;
	uint32_t vCG_SPLL_FUNC_CNTL_2;
	uint32_t vCG_SPLL_FUNC_CNTL_3;
	uint32_t vCG_SPLL_FUNC_CNTL_4;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t vDLL_CNTL;
	uint32_t vMCLK_PWRMGT_CNTL;
	uint32_t vMPLL_AD_FUNC_CNTL;
	uint32_t vMPLL_DQ_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL_1;
	uint32_t vMPLL_FUNC_CNTL_2;
	uint32_t vMPLL_SS1;
	uint32_t vMPLL_SS2;
};
typedef struct _phw_iceland_clock_regisiters phw_iceland_clock_registers;

/* Cached voltage SMIO control register. */
struct _phw_iceland_voltage_smio_registers {
	uint32_t vs0_vid_lower_smio_cntl;
};
typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers;
130
131
/* One memory-controller register set, valid up to mclk_max. */
struct _phw_iceland_mc_reg_entry {
	uint32_t mclk_max;
	uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry;

struct _phw_iceland_mc_reg_table {
	uint8_t last; /* number of registers*/
	uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
	uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
	phw_iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
	SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table;

#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2
149
150
/* Ultra Low Voltage (ULV) parameter structure. */
struct phw_iceland_ulv_parm{
	bool ulv_supported;
	uint32_t ch_ulv_parameter;
	uint32_t ulv_volt_change_delay;
	struct iceland_performance_level ulv_power_level;
};

#define ICELAND_MAX_LEAKAGE_COUNT 8

/* Leakage-ID to actual-voltage translation table. */
struct phw_iceland_leakage_voltage {
	uint16_t count;
	uint16_t leakage_id[ICELAND_MAX_LEAKAGE_COUNT];
	uint16_t actual_voltage[ICELAND_MAX_LEAKAGE_COUNT];
};

/* Cached display data, compared against the live configuration to decide
 * whether the SMC needs reprogramming. */
struct _phw_iceland_display_timing {
	uint32_t min_clock_insr;
	uint32_t num_existing_displays;
};
typedef struct _phw_iceland_display_timing phw_iceland_display_timing;
172
173
/* Thermal trip points: low/high activation range plus shutdown limit. */
struct phw_iceland_thermal_temperature_setting
{
	long temperature_low;
	long temperature_high;
	long temperature_shutdown;
};

/* Per-IP-block DPM level enable bitmasks. */
struct _phw_iceland_dpmlevel_enable_mask {
	uint32_t uvd_dpm_enable_mask;
	uint32_t vce_dpm_enable_mask;
	uint32_t acp_dpm_enable_mask;
	uint32_t samu_dpm_enable_mask;
	uint32_t sclk_dpm_enable_mask;
	uint32_t mclk_dpm_enable_mask;
	uint32_t pcie_dpm_enable_mask;
};
typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask;

/* Min/max range for PCIe performance settings. */
struct _phw_iceland_pcie_perf_range {
	uint16_t max;
	uint16_t min;
};
typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range;

/* Clock, voltage and PCIe values sampled at VBIOS boot time. */
struct _phw_iceland_vbios_boot_state {
	uint16_t mvdd_bootup_value;
	uint16_t vddc_bootup_value;
	uint16_t vddci_bootup_value;
	uint16_t vddgfx_bootup_value;
	uint32_t sclk_bootup_value;
	uint32_t mclk_bootup_value;
	uint16_t pcie_gen_bootup_value;
	uint16_t pcie_lane_bootup_value;
};
typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state;
209
/* Flags for need_update_smu7_dpm_table (overdrive vs. plain updates). */
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008

/* We need to review which fields are needed. */
/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
/* Private back-end state for the Iceland hwmgr (hwmgr->backend). */
struct iceland_hwmgr {
	struct iceland_dpm_table dpm_table;
	/* factory/golden copy, kept for overdrive calculations */
	struct iceland_dpm_table golden_dpm_table;

	uint32_t voting_rights_clients0;
	uint32_t voting_rights_clients1;
	uint32_t voting_rights_clients2;
	uint32_t voting_rights_clients3;
	uint32_t voting_rights_clients4;
	uint32_t voting_rights_clients5;
	uint32_t voting_rights_clients6;
	uint32_t voting_rights_clients7;
	uint32_t static_screen_threshold_unit;
	uint32_t static_screen_threshold;
	uint32_t voltage_control;
	uint32_t vdd_gfx_control;

	uint32_t vddc_vddci_delta;
	uint32_t vddc_vddgfx_delta;

	struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
	struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
	struct pp_interrupt_registration_info smc_to_host_interrupt_info;
	uint32_t active_auto_throttle_sources;

	struct pp_interrupt_registration_info external_throttle_interrupt;
	irq_handler_func_t external_throttle_callback;
	void *external_throttle_context;

	struct pp_interrupt_registration_info ctf_interrupt_info;
	irq_handler_func_t ctf_callback;
	void *ctf_context;

	phw_iceland_clock_registers clock_registers;
	phw_iceland_voltage_smio_registers voltage_smio_registers;

	bool is_memory_GDDR5;
	uint16_t acpi_vddc;
	bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
	uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
	uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
	uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
	uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
	uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
	struct phw_iceland_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
	struct phw_iceland_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
	struct phw_iceland_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */

	uint32_t mvdd_control;
	uint32_t vddc_mask_low;
	uint32_t mvdd_mask_low;
	uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
	uint16_t min_vddc_in_pp_table;
	uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
	uint16_t min_vddci_in_pp_table;
	uint32_t mclk_strobe_mode_threshold;
	uint32_t mclk_stutter_mode_threshold;
	uint32_t mclk_edc_enable_threshold;
	uint32_t mclk_edc_wr_enable_threshold;
	bool is_uvd_enabled;
	bool is_xdma_enabled;
	phw_iceland_vbios_boot_state vbios_boot_state;

	bool battery_state;
	bool is_tlu_enabled;
	bool pcie_performance_request;

	/* -------------- SMC SRAM Address of firmware header tables ----------------*/
	uint32_t sram_end; /* The first address after the SMC SRAM. */
	uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
	uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
	uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
	uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
	uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
	uint32_t ulv_settings_start;
	SMU71_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
	SMU71_Discrete_MCRegisters mc_reg_table;
	SMU71_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */

	/* -------------- Stuff originally coming from Evergreen --------------------*/
	phw_iceland_mc_reg_table iceland_mc_reg_table;
	uint32_t vdd_ci_control;
	pp_atomctrl_voltage_table vddc_voltage_table;
	pp_atomctrl_voltage_table vddci_voltage_table;
	pp_atomctrl_voltage_table vddgfx_voltage_table;
	pp_atomctrl_voltage_table mvdd_voltage_table;

	uint32_t mgcg_cgtt_local2;
	uint32_t mgcg_cgtt_local3;
	uint32_t gpio_debug;
	uint32_t mc_micro_code_feature;
	uint32_t highest_mclk;
	uint16_t acpi_vdd_ci;
	uint8_t mvdd_high_index;
	uint8_t mvdd_low_index;
	bool dll_defaule_on;
	bool performance_request_registered;

	/* ----------------- Low Power Features ---------------------*/
	phw_iceland_bacos bacos;
	struct phw_iceland_ulv_parm ulv;

	/* ----------------- CAC Stuff ---------------------*/
	uint32_t cac_table_start;
	bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
	bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
	bool cac_enabled;

	/* ----------------- DPM2 Parameters ---------------------*/
	uint32_t power_containment_features;
	bool enable_bapm_feature;
	bool enable_dte_feature;
	bool enable_tdc_limit_feature;
	bool enable_pkg_pwr_tracking_feature;
	bool disable_uvd_power_tune_feature;
	struct iceland_pt_defaults *power_tune_defaults;
	SMU71_Discrete_PmFuses power_tune_table;
	uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
	uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */

	/* ----------------- Phase Shedding ---------------------*/
	bool vddc_phase_shed_control;

	/* --------------------- DI/DT --------------------------*/
	phw_iceland_display_timing display_timing;

	/* --------- ReadRegistry data for memory and engine clock margins ---- */
	uint32_t engine_clock_data;
	uint32_t memory_clock_data;

	/* -------- Thermal Temperature Setting --------------*/
	struct phw_iceland_thermal_temperature_setting thermal_temp_setting;
	phw_iceland_dpmlevel_enable_mask dpm_level_enable_mask;

	uint32_t need_update_smu7_dpm_table;
	uint32_t sclk_dpm_key_disabled;
	uint32_t mclk_dpm_key_disabled;
	uint32_t pcie_dpm_key_disabled;
	/* used to store the previous dal min sclock */
	uint32_t min_engine_clocks;
	phw_iceland_pcie_perf_range pcie_gen_performance;
	phw_iceland_pcie_perf_range pcie_lane_performance;
	phw_iceland_pcie_perf_range pcie_gen_power_saving;
	phw_iceland_pcie_perf_range pcie_lane_power_saving;
	bool use_pcie_performance_levels;
	bool use_pcie_power_saving_levels;
	/* percentage value from 0-100, default 50 */
	uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
	uint32_t mclk_activity_target;
	uint32_t low_sclk_interrupt_threshold;
	uint32_t last_mclk_dpm_enable_mask;
	bool uvd_enabled;
	uint32_t pcc_monitor_enabled;

	/* --------- Power Gating States ------------*/
	bool uvd_power_gated; /* 1: gated, 0:not gated */
	bool vce_power_gated; /* 1: gated, 0:not gated */
	bool samu_power_gated; /* 1: gated, 0:not gated */
	bool acp_power_gated; /* 1: gated, 0:not gated */
	bool pg_acp_init;

	/* soft pptable for re-uploading into smu */
	void *soft_pp_table;
};

typedef struct iceland_hwmgr iceland_hwmgr;
383
/* Entry points exported to the rest of the powerplay module. */
int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr);
int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr);
int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr);

/* DPM2 tuning parameters. */
#define ICELAND_DPM2_NEAR_TDP_DEC 10
#define ICELAND_DPM2_ABOVE_SAFE_INC 5
#define ICELAND_DPM2_BELOW_SAFE_INC 20

/*
 * Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size
 * is 128, then this value should be Log2(128) = 7.
 */
#define ICELAND_DPM2_LTA_WINDOW_SIZE 7

#define ICELAND_DPM2_LTS_TRUNCATE 0

#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT 80 // Maximum 100

#define ICELAND_DPM2_MAXPS_PERCENT_H 90 // Maximum 0xFF
#define ICELAND_DPM2_MAXPS_PERCENT_M 90 // Maximum 0xFF

#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN 50

#define ICELAND_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
#define ICELAND_DPM2_SQ_RAMP_MIN_POWER 0x12
#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF

/* Selectors for the voltage_control/mvdd_control fields above. */
#define ICELAND_VOLTAGE_CONTROL_NONE 0x0
#define ICELAND_VOLTAGE_CONTROL_BY_GPIO 0x1
#define ICELAND_VOLTAGE_CONTROL_BY_SVID2 0x2

/* convert to Q8.8 format for firmware */
#define ICELAND_Q88_FORMAT_CONVERSION_UNIT 256

#define ICELAND_UNUSED_GPIO_PIN 0x7F

#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c
deleted file mode 100644
index 766280626836..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c
+++ /dev/null
@@ -1,490 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
26#include "amdgpu.h"
27#include "hwmgr.h"
28#include "smumgr.h"
29#include "iceland_hwmgr.h"
30#include "iceland_powertune.h"
31#include "iceland_smumgr.h"
32#include "smu71_discrete.h"
33#include "smu71.h"
34#include "pp_debug.h"
35#include "cgs_common.h"
36#include "pp_endian.h"
37
38#include "bif/bif_5_0_d.h"
39#include "bif/bif_5_0_sh_mask.h"
40
41#define VOLTAGE_SCALE 4
42#define POWERTUNE_DEFAULT_SET_MAX 1
43
44#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
45#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
46#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
47#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
48
49
/*
 * Per-SKU PowerTune default tables.  Field order in each initializer:
 *   svi_load_line_en, svi_load_line_vddc,
 *   tdc_vddc_throttle_release_limit_perc, tdc_mawt,
 *   tdc_waterfall_ctl, dte_ambient_temp_base, display_cac,
 *   bamp_temp_gradient, then the bapmti_r and bapmti_rc arrays.
 *
 * NOTE(review): within this file these tables are only reached through
 * data->power_tune_defaults; they look like candidates for
 * "static const", but confirm no extern references before changing
 * linkage.
 */

/* Generic fallback table (used for unknown device IDs). */
struct iceland_pt_defaults defaults_iceland =
{
	/*
	 * svi_load_line_en, svi_load_line_vddc, tdc_vddc_throttle_release_limit_perc,
	 * tdc_mawt, tdc_waterfall_ctl, dte_ambient_temp_base, display_cac, bamp_temp_gradient
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

/* 35W - XT, XTL */
struct iceland_pt_defaults defaults_icelandxt =
{
	/*
	 * svi_load_line_en, svi_load_line_vddc,
	 * tdc_vddc_throttle_release_limit_perc, tdc_mawt,
	 * tdc_waterfall_ctl, dte_ambient_temp_base, display_cac,
	 * bamp_temp_gradient
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xA7,  0x0, 0x0, 0xB5,  0x0, 0x0, 0x9F,  0x0, 0x0, 0xD6,  0x0, 0x0, 0xD7,  0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};

/* 25W - PRO, LE */
struct iceland_pt_defaults defaults_icelandpro =
{
	/*
	 * svi_load_line_en, svi_load_line_vddc,
	 * tdc_vddc_throttle_release_limit_perc, tdc_mawt,
	 * tdc_waterfall_ctl, dte_ambient_temp_base, display_cac,
	 * bamp_temp_gradient
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xB7,  0x0, 0x0, 0xC3,  0x0, 0x0, 0xB5,  0x0, 0x0, 0xEA,  0x0, 0x0, 0xE6,  0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};
88
89void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
90{
91 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
92 uint32_t tmp = 0;
93 struct cgs_system_info sys_info = {0};
94 uint32_t pdev_id;
95
96 sys_info.size = sizeof(struct cgs_system_info);
97 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
98 cgs_query_system_info(hwmgr->device, &sys_info);
99 pdev_id = (uint32_t)sys_info.value;
100
101 switch (pdev_id) {
102 case DEVICE_ID_VI_ICELAND_M_6900:
103 case DEVICE_ID_VI_ICELAND_M_6903:
104 data->power_tune_defaults = &defaults_icelandxt;
105 break;
106
107 case DEVICE_ID_VI_ICELAND_M_6901:
108 case DEVICE_ID_VI_ICELAND_M_6902:
109 data->power_tune_defaults = &defaults_icelandpro;
110 break;
111 default:
112 /* TODO: need to assign valid defaults */
113 data->power_tune_defaults = &defaults_iceland;
114 pr_warning("Unknown V.I. Device ID.\n");
115 break;
116 }
117
118 /* Assume disabled */
119 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
120 PHM_PlatformCaps_PowerContainment);
121 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
122 PHM_PlatformCaps_CAC);
123 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
124 PHM_PlatformCaps_SQRamping);
125 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
126 PHM_PlatformCaps_DBRamping);
127 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
128 PHM_PlatformCaps_TDRamping);
129 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
130 PHM_PlatformCaps_TCPRamping);
131
132 data->ul_dte_tj_offset = tmp;
133
134 if (!tmp) {
135 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
136 PHM_PlatformCaps_CAC);
137
138 data->fast_watermark_threshold = 100;
139
140 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
141 PHM_PlatformCaps_PowerContainment)) {
142 tmp = 1;
143 data->enable_dte_feature = tmp ? false : true;
144 data->enable_tdc_limit_feature = tmp ? true : false;
145 data->enable_pkg_pwr_tracking_feature = tmp ? true : false;
146 }
147 }
148}
149
/*
 * iceland_populate_bapm_parameters_in_dpm_table - fill the BAPM/TDP
 * fields of the SMU71 DPM table from the dynamic state and the per-SKU
 * PowerTune defaults.  Values are byte-swapped to SMC endianness.
 * Always returns 0.
 */
int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_pt_defaults *defaults = data->power_tune_defaults;
	SMU71_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	uint16_t *def1, *def2;
	int i, j, k;

	/*
	 * TDP number of fraction bits are changed from 8 to 7 for Iceland
	 * as requested by SMC team
	 */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset;

	/* GpuTjMax in whole degrees; hysteresis fixed at 8. */
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	/* The following are for new Iceland Multi-input fan/thermal control */
	if(NULL != ppm) {
		/* dgpu_tdp is presumably in mW — hence /1000 — TODO confirm */
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	/* Copy the flat per-SKU R/RC tables into the 3-D SMC layout;
	 * the flat arrays are laid out iterations × sources × sinks. */
	for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU71_DTE_SOURCES; j++) {
			for (k = 0; k < SMU71_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
203
204static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
205{
206 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
207 const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
208
209 data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
210 data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
211 data->power_tune_table.SviLoadLineTrimVddC = 3;
212 data->power_tune_table.SviLoadLineOffsetVddC = 0;
213
214 return 0;
215}
216
217static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
218{
219 uint16_t tdc_limit;
220 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
221 const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
222
223 /* TDC number of fraction bits are changed from 8 to 7
224 * for Iceland as requested by SMC team
225 */
226 tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
227 data->power_tune_table.TDC_VDDC_PkgLimit =
228 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
229 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
230 defaults->tdc_vddc_throttle_release_limit_perc;
231 data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
232
233 return 0;
234}
235
236static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
237{
238 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
239 const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
240 uint32_t temp;
241
242 if (smu7_read_smc_sram_dword(hwmgr->smumgr,
243 fuse_table_offset +
244 offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
245 (uint32_t *)&temp, data->sram_end))
246 PP_ASSERT_WITH_CODE(false,
247 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
248 return -EINVAL);
249 else
250 data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
251
252 return 0;
253}
254
/* Stub: the LPML temperature scaler (PM fuse DW9-DW12) is intentionally
 * not programmed on Iceland; always succeeds. */
static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
	return 0;
}
259
260static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
261{
262 int i;
263 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
264
265 /* Currently not used. Set all to zero. */
266 for (i = 0; i < 8; i++)
267 data->power_tune_table.GnbLPML[i] = 0;
268
269 return 0;
270}
271
/* Stub: GnbLPML min/max VID (PM fuse DW17) is intentionally not derived
 * on Iceland; always succeeds. */
static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
276
277static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
278{
279 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
280 uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
281 uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
282 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
283
284 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
285 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
286
287 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
288 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
289 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
290 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
291
292 return 0;
293}
294
/*
 * iceland_populate_pm_fuses - build the SMU71 PM fuse table (DW0-DW18)
 * and upload it to SMC SRAM.
 * Only acts when the PowerContainment platform cap is enabled; each
 * helper fills one dword range of data->power_tune_table, and any
 * failure aborts with -EINVAL via PP_ASSERT_WITH_CODE.
 */
int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Locate the PM fuse table inside the SMC firmware image. */
		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
				SMU71_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU71_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		/* DW0 - DW3 */
		if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate bapm vddc vid Failed!",
					return -EINVAL);

		/* DW4 - DW5 */
		if (iceland_populate_vddc_vid(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate vddc vid Failed!",
					return -EINVAL);

		/* DW6 */
		if (iceland_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);
		/* DW7 */
		if (iceland_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		/* DW9-DW12 */
		if (0 != iceland_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		/* DW13-DW16 */
		if (iceland_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		/* DW17 */
		if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Min and Max Vid Failed!",
					return -EINVAL);

		/* DW18 */
		if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
					return -EINVAL);

		/* Push the completed table into SMC SRAM. */
		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&data->power_tune_table,
				sizeof(struct SMU71_Discrete_PmFuses), data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
371
372int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr)
373{
374 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
375 int result = 0;
376
377 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
378 PHM_PlatformCaps_CAC)) {
379 int smc_result;
380 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
381 (uint16_t)(PPSMC_MSG_EnableCac));
382 PP_ASSERT_WITH_CODE((0 == smc_result),
383 "Failed to enable CAC in SMC.", result = -1);
384
385 data->cac_enabled = (0 == smc_result) ? true : false;
386 }
387 return result;
388}
389
390static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
391{
392 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
393
394 if(data->power_containment_features &
395 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
396 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
397 PPSMC_MSG_PkgPwrSetLimit, n);
398 return 0;
399}
400
401static int iceland_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
402{
403 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
404 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
405}
406
/*
 * iceland_enable_power_containment - enable BAPM/DTE, TDC limiting and
 * package power tracking in the SMC, according to the feature flags
 * seeded by iceland_initialize_power_tune_defaults().
 * Each successfully enabled feature is recorded in
 * data->power_containment_features.  Returns -1 if any SMC message
 * fails; later features are still attempted.
 */
int iceland_enable_power_containment(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table;
	int smc_result;
	int result = 0;
	uint32_t is_asic_kicker;

	data->power_containment_features = 0;
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Strap bit 12 identifies "kicker" ASIC revisions, which use
		 * a different DTE configuration below. */
		is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2);
		is_asic_kicker = (is_asic_kicker >> 12) & 0x01;

		/* BAPM/DTE: skipped on kicker parts unless actual-temperature
		 * power calculation is disabled by platform cap. */
		if (data->enable_bapm_feature &&
		    (!is_asic_kicker ||
		     phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_EnableDTE));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable BAPM in SMC.", result = -1;);
			if (0 == smc_result)
				data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
		}

		/* DTEMode 2: meaning defined by SMC firmware — TODO confirm */
		if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))
			dpm_table->DTEMode = 2;

		if (data->enable_tdc_limit_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable TDCLimit in SMC.", result = -1;);
			if (0 == smc_result)
				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_TDCLimit;
		}

		if (data->enable_pkg_pwr_tracking_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
			if (0 == smc_result) {
				struct phm_cac_tdp_table *cac_table =
						hwmgr->dyn_state.cac_dtp_table;
				/* Limit in the SMC's 8-bit-fraction format. */
				uint32_t default_limit =
					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);

				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_PkgPwrLimit;

				if (iceland_set_power_limit(hwmgr, default_limit))
					printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
			}
		}
	}
	return result;
}
468
469int iceland_power_control_set_level(struct pp_hwmgr *hwmgr)
470{
471 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
472 int adjust_percent, target_tdp;
473 int result = 0;
474
475 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
476 PHM_PlatformCaps_PowerContainment)) {
477 /* adjustment percentage has already been validated */
478 adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
479 hwmgr->platform_descriptor.TDPAdjustment :
480 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
481 /*
482 * SMC requested that target_tdp to be 7 bit fraction in DPM table
483 * but message to be 8 bit fraction for messages
484 */
485 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
486 result = iceland_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
487 }
488
489 return result;
490}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h
deleted file mode 100644
index 4008d49617e4..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
#ifndef ICELAND_POWERTUNE_H
#define ICELAND_POWERTUNE_H

#include "smu71.h"

/* Address spaces a PowerTune config-register access can target. */
enum iceland_pt_config_reg_type {
	ICELAND_CONFIGREG_MMR = 0,
	ICELAND_CONFIGREG_SMC_IND,
	ICELAND_CONFIGREG_DIDT_IND,
	ICELAND_CONFIGREG_CACHE,
	ICELAND_CONFIGREG_MAX
};

/* PowerContainment Features (bit flags for power_containment_features) */
#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
/*
 * NOTE(review): BAPM shares bit 0 with DTE above.  The DTE flag is never
 * set in iceland_powertune.c, so the collision is harmless today, but a
 * distinct bit (e.g. 0x00000008) would be safer — confirm before use.
 */
#define POWERCONTAINMENT_FEATURE_BAPM            0x00000001

/* One entry of a PowerTune register-programming table. */
struct iceland_pt_config_reg {
	uint32_t                           offset;
	uint32_t                           mask;
	uint32_t                           shift;
	uint32_t                           value;
	enum iceland_pt_config_reg_type    type;
};

void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr);
int iceland_enable_power_containment(struct pp_hwmgr *hwmgr);
int iceland_power_control_set_level(struct pp_hwmgr *hwmgr);

#endif /* ICELAND_POWERTUNE_H */
60
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c
deleted file mode 100644
index 45d17d715640..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c
+++ /dev/null
@@ -1,595 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25#include <asm/div64.h>
26#include "iceland_thermal.h"
27#include "iceland_hwmgr.h"
28#include "iceland_smumgr.h"
29#include "atombios.h"
30#include "ppsmc.h"
31
32#include "gmc/gmc_8_1_d.h"
33#include "gmc/gmc_8_1_sh_mask.h"
34
35#include "bif/bif_5_0_d.h"
36#include "bif/bif_5_0_sh_mask.h"
37
38#include "smu/smu_7_1_1_d.h"
39#include "smu/smu_7_1_1_sh_mask.h"
40
41
42/**
43* Get Fan Speed Control Parameters.
44* @param hwmgr the address of the powerplay hardware manager.
45* @param pSpeed is the address of the structure where the result is to be placed.
46* @exception Always succeeds except if we cannot zero out the output structure.
47*/
48int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
49 struct phm_fan_speed_info *fan_speed_info)
50{
51
52 if (hwmgr->thermal_controller.fanInfo.bNoFan)
53 return 0;
54
55 fan_speed_info->supports_percent_read = true;
56 fan_speed_info->supports_percent_write = true;
57 fan_speed_info->min_percent = 0;
58 fan_speed_info->max_percent = 100;
59
60 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
61 fan_speed_info->supports_rpm_read = true;
62 fan_speed_info->supports_rpm_write = true;
63 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
64 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
65 } else {
66 fan_speed_info->min_rpm = 0;
67 fan_speed_info->max_rpm = 0;
68 }
69
70 return 0;
71}
72
73/**
74* Get Fan Speed in percent.
75* @param hwmgr the address of the powerplay hardware manager.
76* @param pSpeed is the address of the structure where the result is to be placed.
77* @exception Fails is the 100% setting appears to be 0.
78*/
79int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
80{
81 uint32_t duty100;
82 uint32_t duty;
83 uint64_t tmp64;
84
85 if (hwmgr->thermal_controller.fanInfo.bNoFan)
86 return 0;
87
88 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
89 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
90
91 if (0 == duty100)
92 return -EINVAL;
93
94
95 tmp64 = (uint64_t)duty * 100;
96 do_div(tmp64, duty100);
97 *speed = (uint32_t)tmp64;
98
99 if (*speed > 100)
100 *speed = 100;
101
102 return 0;
103}
104
/**
* Get Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the address where the RPM result would be placed.
*
* NOTE(review): stub — returns 0 (success) without writing *speed,
* although the original header claimed "not supported" is returned when
* no fan/tach is present.  Callers must not rely on *speed here.
*/
int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
	return 0;
}
115
116/**
117* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
118* @param hwmgr the address of the powerplay hardware manager.
119* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
120* @exception Should always succeed.
121*/
122int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
123{
124
125 if (hwmgr->fan_ctrl_is_in_default_mode) {
126 hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
127 hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
128 hwmgr->fan_ctrl_is_in_default_mode = false;
129 }
130
131 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
132 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
133
134 return 0;
135}
136
137/**
138* Reset Fan Speed Control to default mode.
139* @param hwmgr the address of the powerplay hardware manager.
140* @exception Should always succeed.
141*/
142static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
143{
144 if (!hwmgr->fan_ctrl_is_in_default_mode) {
145 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
146 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
147 hwmgr->fan_ctrl_is_in_default_mode = true;
148 }
149
150 return 0;
151}
152
153int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
154{
155 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
156}
157
158
159int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
160{
161 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
162}
163
164/**
165* Set Fan Speed in percent.
166* @param hwmgr the address of the powerplay hardware manager.
167* @param speed is the percentage value (0% - 100%) to be set.
168* @exception Fails is the 100% setting appears to be 0.
169*/
170int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
171{
172 uint32_t duty100;
173 uint32_t duty;
174 uint64_t tmp64;
175
176 if (hwmgr->thermal_controller.fanInfo.bNoFan)
177 return -EINVAL;
178
179 if (speed > 100) {
180 pr_warning("Cannot set more than 100%% duty cycle. Set it to 100.\n");
181 speed = 100;
182 }
183
184 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
185 iceland_fan_ctrl_stop_smc_fan_control(hwmgr);
186
187 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
188
189 if (0 == duty100)
190 return -EINVAL;
191
192 tmp64 = (uint64_t)speed * duty100;
193 do_div(tmp64, 100);
194 duty = (uint32_t)tmp64;
195
196 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
197
198 return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
199}
200
201/**
202* Reset Fan Speed to default.
203* @param hwmgr the address of the powerplay hardware manager.
204* @exception Always succeeds.
205*/
206int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
207{
208 int result;
209
210 if (hwmgr->thermal_controller.fanInfo.bNoFan)
211 return 0;
212
213 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
214 result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
215 if (0 == result)
216 result = iceland_fan_ctrl_start_smc_fan_control(hwmgr);
217 } else
218 result = iceland_fan_ctrl_set_default_mode(hwmgr);
219
220 return result;
221}
222
/**
* Set Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the RPM value to be set.
*
* NOTE(review): stub — always returns 0 without programming anything,
* and performs none of the min/max validation the original header
* described.
*/
int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
	return 0;
}
233
234/**
235* Reads the remote temperature from the SIslands thermal controller.
236*
237* @param hwmgr The address of the hardware manager.
238*/
239int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr)
240{
241 int temp;
242
243 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
244
245 /*
246 * Bit 9 means the reading is lower than the lowest usable
247 * value.
248 */
249 if (0 != (0x200 & temp))
250 temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING;
251 else
252 temp = (temp & 0x1ff);
253
254 temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
255
256 return temp;
257}
258
259/**
260* Set the requested temperature range for high and low alert signals
261*
262* @param hwmgr The address of the hardware manager.
263* @param range Temperature range to be programmed for high and low alert signals
264* @exception PP_Result_BadInput if the input data is not valid.
265*/
266static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
267{
268 uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
269 uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
270
271 if (low < low_temp)
272 low = low_temp;
273 if (high > high_temp)
274 high = high_temp;
275
276 if (low > high)
277 return -EINVAL;
278
279 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
280 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
281 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
282
283 return 0;
284}
285
286/**
287* Programs thermal controller one-time setting registers
288*
289* @param hwmgr The address of the hardware manager.
290*/
291static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
292{
293 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
294 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
295 CG_TACH_CTRL, EDGE_PER_REV,
296 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
297
298 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
299
300 return 0;
301}
302
/**
* Enable thermal high/low alert interrupts on the Iceland thermal
* controller (the original comment wrongly said "RV770").
* Clears the high/low alert bits in CG_THERMAL_INT's interrupt mask —
* clearing presumably unmasks the alerts, mirroring the disable path —
* then asks the SMU to enable its internal thermal interrupts.
* @param hwmgr The address of the hardware manager.
*/
static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
	uint32_t alert;

	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
	alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);

	/* send message to SMU to enable internal thermal interrupts */
	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
}
319
/**
* Disable thermal high/low alert interrupts on the Iceland thermal
* controller (the original comment wrongly said "RV770").
* Sets the high/low alert bits in CG_THERMAL_INT's interrupt mask, then
* asks the SMU to disable its internal thermal interrupts.
* @param hwmgr The address of the hardware manager.
*/
static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
	uint32_t alert;

	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
	alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);

	/* send message to SMU to disable internal thermal interrupts */
	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
}
335
336/**
337* Uninitialize the thermal controller.
338* Currently just disables alerts.
339* @param hwmgr The address of the hardware manager.
340*/
341int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
342{
343 int result = iceland_thermal_disable_alert(hwmgr);
344
345 if (result)
346 pr_warning("Failed to disable thermal alerts!\n");
347
348 if (hwmgr->thermal_controller.fanInfo.bNoFan)
349 iceland_fan_ctrl_set_default_mode(hwmgr);
350
351 return result;
352}
353
/**
* Set up the fan table to control the fan using the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param input the pointer to input data (unused)
* @param output the pointer to output data (unused)
* @param storage the pointer to temporary storage (unused)
* @param result the last failure code (unused)
* @return always 0; on any problem SMC fan control is disabled instead
*         of returning an error (the cap is cleared so later tasks skip it).
*/
int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	/* Nothing to do unless SMC-microcode fan control is in use. */
	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
		return 0;

	/* No fan-table address in SMC RAM: give up on microcode fan control. */
	if (0 == data->fan_table_start) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Duty value corresponding to 100% fan speed, read from the FDO block. */
	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);

	/* A zero 100%-duty reading makes all the scaling below meaningless. */
	if (0 == duty100) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Scale the minimum PWM (in units of 0.01%) into a duty value. */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/*
	 * Temperature and PWM deltas between the min/med/high set points.
	 * NOTE(review): assumes usTMed > usTMin and usTHigh > usTMed; a zero
	 * delta would divide by zero in the slope math below — confirm the
	 * pptable guarantees strictly increasing set points.
	 */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* Fixed-point slopes; the +50 before /100 rounds to nearest. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Set points are stored in centi-units; +50 rounds, /100 converts. */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = iceland_get_xclk(hwmgr);

	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	/* Which thermal sensor the fan curve should react to. */
	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);

	//fan_table.FanControl_GL_Flag = 1;

	/* NOTE(review): res is assigned but never checked or returned here;
	 * the follow-up handling is the commented-out block below. */
	res = smu7_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
				hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);

	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
				hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);

	if (0 != res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
*/
	return 0;
}
444
445/**
446* Start the fan control on the SMC.
447* @param hwmgr the address of the powerplay hardware manager.
448* @param pInput the pointer to input data
449* @param pOutput the pointer to output data
450* @param pStorage the pointer to temporary storage
451* @param Result the last failure code
452* @return result from set temperature range routine
453*/
454int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
455{
456/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
457 * Make sure that we still think controlling the fan is OK.
458*/
459 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
460 iceland_fan_ctrl_start_smc_fan_control(hwmgr);
461 iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
462 }
463
464 return 0;
465}
466
467/**
468* Set temperature range for high and low alerts
469* @param hwmgr the address of the powerplay hardware manager.
470* @param pInput the pointer to input data
471* @param pOutput the pointer to output data
472* @param pStorage the pointer to temporary storage
473* @param Result the last failure code
474* @return result from set temperature range routine
475*/
476static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
477 void *input, void *output, void *storage, int result)
478{
479 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
480
481 if (range == NULL)
482 return -EINVAL;
483
484 return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max);
485}
486
/**
* Program the one-time thermal setting registers.
* @param hwmgr the address of the powerplay hardware manager.
* @param input the pointer to input data (unused)
* @param output the pointer to output data (unused)
* @param storage the pointer to temporary storage (unused)
* @param result the last failure code (unused)
* @return result from the thermal-controller initialize routine
*/
static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input,
		void *output, void *storage, int result)
{
	/* All table-task arguments are unused; just run the one-time setup. */
	return iceland_thermal_initialize(hwmgr);
}
501
/**
* Enable the high and low thermal alerts (table-task wrapper).
* @param hwmgr the address of the powerplay hardware manager.
* @param input the pointer to input data (unused)
* @param output the pointer to output data (unused)
* @param storage the pointer to temporary storage (unused)
* @param result the last failure code (unused)
* @return result from the enable-alert routine
*/
static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	/* Thin adapter so the alert helper fits the phm table-task signature. */
	return iceland_thermal_enable_alert(hwmgr);
}
516
/**
* Disable the high and low thermal alerts (table-task wrapper).
* @param hwmgr the address of the powerplay hardware manager.
* @param input the pointer to input data (unused)
* @param output the pointer to output data (unused)
* @param storage the pointer to temporary storage (unused)
* @param result the last failure code (unused)
* @return result from the disable-alert routine
*/
static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	/* Thin adapter so the alert helper fits the phm table-task signature. */
	return iceland_thermal_disable_alert(hwmgr);
}
530
/*
 * Task list run when the thermal controller is started.  Order matters:
 * registers are initialized and the alert range set before alerts are
 * enabled, and the fan table is uploaded before SMC fan control starts.
 */
static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = {
	{ NULL, tf_iceland_thermal_initialize },
	{ NULL, tf_iceland_thermal_set_temperature_range },
	{ NULL, tf_iceland_thermal_enable_alert },
	/*
	 * We should restrict performance levels to low before we halt
	 * the SMC. On the other hand we are still in boot state when
	 * we do this so it would be pointless. If this assumption
	 * changes we have to revisit this table.
	 */
	{ NULL, tf_iceland_thermal_setup_fan_table},
	{ NULL, tf_iceland_thermal_start_smc_fan_control},
	{ NULL, NULL }
};

/* Header wrapping the start-controller task list (no storage, no flags). */
static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = {
	0,
	PHM_MasterTableFlag_None,
	iceland_thermal_start_thermal_controller_master_list
};

/*
 * Task list for changing the alert temperature range at runtime: alerts
 * are masked while the range is reprogrammed, then re-enabled.
 */
static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = {
	{ NULL, tf_iceland_thermal_disable_alert},
	{ NULL, tf_iceland_thermal_set_temperature_range},
	{ NULL, tf_iceland_thermal_enable_alert},
	{ NULL, NULL }
};

/* Header wrapping the set-temperature-range task list. */
static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = {
	0,
	PHM_MasterTableFlag_None,
	iceland_thermal_set_temperature_range_master_list
};
564
/*
 * Tear down thermal control: restore the default (manual) fan control
 * mode, but only when a fan is actually present.
 */
int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
		iceland_fan_ctrl_set_default_mode(hwmgr);
	return 0;
}
571
572/**
573* Initializes the thermal controller related functions in the Hardware Manager structure.
574* @param hwmgr The address of the hardware manager.
575* @exception Any error code from the low-level communication.
576*/
577int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
578{
579 int result;
580
581 result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
582
583 if (0 == result) {
584 result = phm_construct_table(hwmgr,
585 &iceland_thermal_start_thermal_controller_master,
586 &(hwmgr->start_thermal_controller));
587 if (0 != result)
588 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
589 }
590
591 if (0 == result)
592 hwmgr->fan_ctrl_is_in_default_mode = true;
593 return result;
594}
595
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
deleted file mode 100644
index 267945f4df71..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
#ifndef ICELAND_THERMAL_H
#define ICELAND_THERMAL_H

#include "hwmgr.h"

/* Bits in CG_THERMAL_INT.THERM_INT_MASK for the high/low temperature alerts. */
#define ICELAND_THERMAL_HIGH_ALERT_MASK 0x1
#define ICELAND_THERMAL_LOW_ALERT_MASK 0x2

/* Bounds for raw sensor readings and for the programmable alert range,
 * in degrees Celsius. */
#define ICELAND_THERMAL_MINIMUM_TEMP_READING -256
#define ICELAND_THERMAL_MAXIMUM_TEMP_READING 255

#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP 0
#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP 255

/* Fan duty-output (FDO) static PWM modes. */
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5


/* Thermal/fan control entry points implemented in iceland_thermal.c. */
extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);

#endif
58
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
deleted file mode 100644
index 7e405b04c2c5..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ /dev/null
@@ -1,444 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "polaris10_clockpowergating.h"
25
26int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
27{
28 if (phm_cf_want_uvd_power_gating(hwmgr))
29 return smum_send_msg_to_smc(hwmgr->smumgr,
30 PPSMC_MSG_UVDPowerOFF);
31 return 0;
32}
33
34static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
35{
36 if (phm_cf_want_uvd_power_gating(hwmgr)) {
37 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
38 PHM_PlatformCaps_UVDDynamicPowerGating)) {
39 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
40 PPSMC_MSG_UVDPowerON, 1);
41 } else {
42 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
43 PPSMC_MSG_UVDPowerON, 0);
44 }
45 }
46
47 return 0;
48}
49
50static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
51{
52 if (phm_cf_want_vce_power_gating(hwmgr))
53 return smum_send_msg_to_smc(hwmgr->smumgr,
54 PPSMC_MSG_VCEPowerOFF);
55 return 0;
56}
57
58static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
59{
60 if (phm_cf_want_vce_power_gating(hwmgr))
61 return smum_send_msg_to_smc(hwmgr->smumgr,
62 PPSMC_MSG_VCEPowerON);
63 return 0;
64}
65
66static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
67{
68 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
69 PHM_PlatformCaps_SamuPowerGating))
70 return smum_send_msg_to_smc(hwmgr->smumgr,
71 PPSMC_MSG_SAMPowerOFF);
72 return 0;
73}
74
75static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
76{
77 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
78 PHM_PlatformCaps_SamuPowerGating))
79 return smum_send_msg_to_smc(hwmgr->smumgr,
80 PPSMC_MSG_SAMPowerON);
81 return 0;
82}
83
/*
 * Disable clock power gating: mark UVD/VCE/SAMU as ungated and power
 * each block back up.  The power-up return codes are not propagated;
 * this always reports success.
 */
int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	data->uvd_power_gated = false;
	data->vce_power_gated = false;
	data->samu_power_gated = false;

	polaris10_phm_powerup_uvd(hwmgr);
	polaris10_phm_powerup_vce(hwmgr);
	polaris10_phm_powerup_samu(hwmgr);

	return 0;
}
98
99int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
100{
101 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
102
103 if (data->uvd_power_gated == bgate)
104 return 0;
105
106 data->uvd_power_gated = bgate;
107
108 if (bgate) {
109 cgs_set_clockgating_state(hwmgr->device,
110 AMD_IP_BLOCK_TYPE_UVD,
111 AMD_CG_STATE_GATE);
112 polaris10_update_uvd_dpm(hwmgr, true);
113 polaris10_phm_powerdown_uvd(hwmgr);
114 } else {
115 polaris10_phm_powerup_uvd(hwmgr);
116 polaris10_update_uvd_dpm(hwmgr, false);
117 cgs_set_clockgating_state(hwmgr->device,
118 AMD_IP_BLOCK_TYPE_UVD,
119 AMD_CG_STATE_UNGATE);
120 }
121
122 return 0;
123}
124
125int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
126{
127 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
128
129 if (data->vce_power_gated == bgate)
130 return 0;
131
132 data->vce_power_gated = bgate;
133
134 if (bgate) {
135 cgs_set_clockgating_state(hwmgr->device,
136 AMD_IP_BLOCK_TYPE_VCE,
137 AMD_CG_STATE_GATE);
138 polaris10_update_vce_dpm(hwmgr, true);
139 polaris10_phm_powerdown_vce(hwmgr);
140 } else {
141 polaris10_phm_powerup_vce(hwmgr);
142 polaris10_update_vce_dpm(hwmgr, false);
143 cgs_set_clockgating_state(hwmgr->device,
144 AMD_IP_BLOCK_TYPE_VCE,
145 AMD_CG_STATE_UNGATE);
146 }
147 return 0;
148}
149
150int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
151{
152 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
153
154 if (data->samu_power_gated == bgate)
155 return 0;
156
157 data->samu_power_gated = bgate;
158
159 if (bgate) {
160 polaris10_update_samu_dpm(hwmgr, true);
161 polaris10_phm_powerdown_samu(hwmgr);
162 } else {
163 polaris10_phm_powerup_samu(hwmgr);
164 polaris10_update_samu_dpm(hwmgr, false);
165 }
166
167 return 0;
168}
169
170int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
171 const uint32_t *msg_id)
172{
173 PPSMC_Msg msg;
174 uint32_t value;
175
176 switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
177 case PP_GROUP_GFX:
178 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
179 case PP_BLOCK_GFX_CG:
180 if (PP_STATE_SUPPORT_CG & *msg_id) {
181 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
182 PPSMC_MSG_EnableClockGatingFeature :
183 PPSMC_MSG_DisableClockGatingFeature;
184 value = CG_GFX_CGCG_MASK;
185
186 if (smum_send_msg_to_smc_with_parameter(
187 hwmgr->smumgr, msg, value))
188 return -1;
189 }
190 if (PP_STATE_SUPPORT_LS & *msg_id) {
191 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
192 ? PPSMC_MSG_EnableClockGatingFeature
193 : PPSMC_MSG_DisableClockGatingFeature;
194 value = CG_GFX_CGLS_MASK;
195
196 if (smum_send_msg_to_smc_with_parameter(
197 hwmgr->smumgr, msg, value))
198 return -1;
199 }
200 break;
201
202 case PP_BLOCK_GFX_3D:
203 if (PP_STATE_SUPPORT_CG & *msg_id) {
204 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
205 PPSMC_MSG_EnableClockGatingFeature :
206 PPSMC_MSG_DisableClockGatingFeature;
207 value = CG_GFX_3DCG_MASK;
208
209 if (smum_send_msg_to_smc_with_parameter(
210 hwmgr->smumgr, msg, value))
211 return -1;
212 }
213
214 if (PP_STATE_SUPPORT_LS & *msg_id) {
215 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
216 PPSMC_MSG_EnableClockGatingFeature :
217 PPSMC_MSG_DisableClockGatingFeature;
218 value = CG_GFX_3DLS_MASK;
219
220 if (smum_send_msg_to_smc_with_parameter(
221 hwmgr->smumgr, msg, value))
222 return -1;
223 }
224 break;
225
226 case PP_BLOCK_GFX_RLC:
227 if (PP_STATE_SUPPORT_LS & *msg_id) {
228 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
229 PPSMC_MSG_EnableClockGatingFeature :
230 PPSMC_MSG_DisableClockGatingFeature;
231 value = CG_GFX_RLC_LS_MASK;
232
233 if (smum_send_msg_to_smc_with_parameter(
234 hwmgr->smumgr, msg, value))
235 return -1;
236 }
237 break;
238
239 case PP_BLOCK_GFX_CP:
240 if (PP_STATE_SUPPORT_LS & *msg_id) {
241 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
242 PPSMC_MSG_EnableClockGatingFeature :
243 PPSMC_MSG_DisableClockGatingFeature;
244 value = CG_GFX_CP_LS_MASK;
245
246 if (smum_send_msg_to_smc_with_parameter(
247 hwmgr->smumgr, msg, value))
248 return -1;
249 }
250 break;
251
252 case PP_BLOCK_GFX_MG:
253 if (PP_STATE_SUPPORT_CG & *msg_id) {
254 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
255 PPSMC_MSG_EnableClockGatingFeature :
256 PPSMC_MSG_DisableClockGatingFeature;
257 value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK |
258 CG_GFX_OTHERS_MGCG_MASK);
259
260 if (smum_send_msg_to_smc_with_parameter(
261 hwmgr->smumgr, msg, value))
262 return -1;
263 }
264 break;
265
266 default:
267 return -1;
268 }
269 break;
270
271 case PP_GROUP_SYS:
272 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
273 case PP_BLOCK_SYS_BIF:
274 if (PP_STATE_SUPPORT_CG & *msg_id) {
275 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
276 PPSMC_MSG_EnableClockGatingFeature :
277 PPSMC_MSG_DisableClockGatingFeature;
278 value = CG_SYS_BIF_MGCG_MASK;
279
280 if (smum_send_msg_to_smc_with_parameter(
281 hwmgr->smumgr, msg, value))
282 return -1;
283 }
284 if (PP_STATE_SUPPORT_LS & *msg_id) {
285 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
286 PPSMC_MSG_EnableClockGatingFeature :
287 PPSMC_MSG_DisableClockGatingFeature;
288 value = CG_SYS_BIF_MGLS_MASK;
289
290 if (smum_send_msg_to_smc_with_parameter(
291 hwmgr->smumgr, msg, value))
292 return -1;
293 }
294 break;
295
296 case PP_BLOCK_SYS_MC:
297 if (PP_STATE_SUPPORT_CG & *msg_id) {
298 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
299 PPSMC_MSG_EnableClockGatingFeature :
300 PPSMC_MSG_DisableClockGatingFeature;
301 value = CG_SYS_MC_MGCG_MASK;
302
303 if (smum_send_msg_to_smc_with_parameter(
304 hwmgr->smumgr, msg, value))
305 return -1;
306 }
307
308 if (PP_STATE_SUPPORT_LS & *msg_id) {
309 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
310 PPSMC_MSG_EnableClockGatingFeature :
311 PPSMC_MSG_DisableClockGatingFeature;
312 value = CG_SYS_MC_MGLS_MASK;
313
314 if (smum_send_msg_to_smc_with_parameter(
315 hwmgr->smumgr, msg, value))
316 return -1;
317 }
318 break;
319
320 case PP_BLOCK_SYS_DRM:
321 if (PP_STATE_SUPPORT_CG & *msg_id) {
322 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
323 PPSMC_MSG_EnableClockGatingFeature :
324 PPSMC_MSG_DisableClockGatingFeature;
325 value = CG_SYS_DRM_MGCG_MASK;
326
327 if (smum_send_msg_to_smc_with_parameter(
328 hwmgr->smumgr, msg, value))
329 return -1;
330 }
331 if (PP_STATE_SUPPORT_LS & *msg_id) {
332 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
333 PPSMC_MSG_EnableClockGatingFeature :
334 PPSMC_MSG_DisableClockGatingFeature;
335 value = CG_SYS_DRM_MGLS_MASK;
336
337 if (smum_send_msg_to_smc_with_parameter(
338 hwmgr->smumgr, msg, value))
339 return -1;
340 }
341 break;
342
343 case PP_BLOCK_SYS_HDP:
344 if (PP_STATE_SUPPORT_CG & *msg_id) {
345 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
346 PPSMC_MSG_EnableClockGatingFeature :
347 PPSMC_MSG_DisableClockGatingFeature;
348 value = CG_SYS_HDP_MGCG_MASK;
349
350 if (smum_send_msg_to_smc_with_parameter(
351 hwmgr->smumgr, msg, value))
352 return -1;
353 }
354
355 if (PP_STATE_SUPPORT_LS & *msg_id) {
356 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
357 PPSMC_MSG_EnableClockGatingFeature :
358 PPSMC_MSG_DisableClockGatingFeature;
359 value = CG_SYS_HDP_MGLS_MASK;
360
361 if (smum_send_msg_to_smc_with_parameter(
362 hwmgr->smumgr, msg, value))
363 return -1;
364 }
365 break;
366
367 case PP_BLOCK_SYS_SDMA:
368 if (PP_STATE_SUPPORT_CG & *msg_id) {
369 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
370 PPSMC_MSG_EnableClockGatingFeature :
371 PPSMC_MSG_DisableClockGatingFeature;
372 value = CG_SYS_SDMA_MGCG_MASK;
373
374 if (smum_send_msg_to_smc_with_parameter(
375 hwmgr->smumgr, msg, value))
376 return -1;
377 }
378
379 if (PP_STATE_SUPPORT_LS & *msg_id) {
380 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
381 PPSMC_MSG_EnableClockGatingFeature :
382 PPSMC_MSG_DisableClockGatingFeature;
383 value = CG_SYS_SDMA_MGLS_MASK;
384
385 if (smum_send_msg_to_smc_with_parameter(
386 hwmgr->smumgr, msg, value))
387 return -1;
388 }
389 break;
390
391 case PP_BLOCK_SYS_ROM:
392 if (PP_STATE_SUPPORT_CG & *msg_id) {
393 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
394 PPSMC_MSG_EnableClockGatingFeature :
395 PPSMC_MSG_DisableClockGatingFeature;
396 value = CG_SYS_ROM_MASK;
397
398 if (smum_send_msg_to_smc_with_parameter(
399 hwmgr->smumgr, msg, value))
400 return -1;
401 }
402 break;
403
404 default:
405 return -1;
406
407 }
408 break;
409
410 default:
411 return -1;
412
413 }
414
415 return 0;
416}
417
418/* This function is for Polaris11 only for now,
419 * Powerplay will only control the static per CU Power Gating.
420 * Dynamic per CU Power Gating will be done in gfx.
421 */
422int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
423{
424 struct cgs_system_info sys_info = {0};
425 uint32_t active_cus;
426 int result;
427
428 sys_info.size = sizeof(struct cgs_system_info);
429 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
430
431 result = cgs_query_system_info(hwmgr->device, &sys_info);
432
433 if (result)
434 return -EINVAL;
435 else
436 active_cus = sys_info.value;
437
438 if (enable)
439 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
440 PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
441 else
442 return smum_send_msg_to_smc(hwmgr->smumgr,
443 PPSMC_MSG_GFX_CU_PG_DISABLE);
444}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
deleted file mode 100644
index 88d68cb6e89d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
#define _POLARIS10_CLOCK_POWER_GATING_H_

#include "polaris10_hwmgr.h"
#include "pp_asicblocks.h"

/* Clock/power-gating entry points implemented in polaris10_clockpowergating.c.
 * NOTE(review): polaris10_phm_powergate_acp is declared here but no
 * definition is visible in the corresponding .c file — confirm it exists. */
int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
					const uint32_t *msg_id);
int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);

#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
deleted file mode 100644
index f78ffd935cee..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef POLARIS10_DYN_DEFAULTS_H
#define POLARIS10_DYN_DEFAULTS_H


/* Direction hint for DPM trend detection. */
enum Polaris10dpm_TrendDetection {
	Polaris10Adpm_TrendDetection_AUTO,
	Polaris10Adpm_TrendDetection_UP,
	Polaris10Adpm_TrendDetection_DOWN
};
typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;

/* We need to fill in the default values */


/* Default voting-rights-clients register values (one per VRC register). */
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000


/* Default thermal-protection and screen/idle threshold parameters. */
#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200
#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0
#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8
#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4

/* Default ULV (ultra-low voltage) parameters. */
#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687

#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035
#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450
/* Default DPM activity targets (percent) for SCLK and MCLK. */
#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10

#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
deleted file mode 100644
index 970e3930452d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ /dev/null
@@ -1,5290 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include <asm/div64.h>
27#include "linux/delay.h"
28#include "pp_acpi.h"
29#include "hwmgr.h"
30#include "polaris10_hwmgr.h"
31#include "polaris10_powertune.h"
32#include "polaris10_dyn_defaults.h"
33#include "polaris10_smumgr.h"
34#include "pp_debug.h"
35#include "ppatomctrl.h"
36#include "atombios.h"
37#include "pptable_v1_0.h"
38#include "pppcielanes.h"
39#include "amd_pcie_helpers.h"
40#include "hardwaremanager.h"
41#include "process_pptables_v1_0.h"
42#include "cgs_common.h"
43#include "smu74.h"
44#include "smu_ucode_xfer_vi.h"
45#include "smu74_discrete.h"
46#include "smu/smu_7_1_3_d.h"
47#include "smu/smu_7_1_3_sh_mask.h"
48#include "gmc/gmc_8_1_d.h"
49#include "gmc/gmc_8_1_sh_mask.h"
50#include "oss/oss_3_0_d.h"
51#include "gca/gfx_8_0_d.h"
52#include "bif/bif_5_0_d.h"
53#include "bif/bif_5_0_sh_mask.h"
54#include "gmc/gmc_8_1_d.h"
55#include "gmc/gmc_8_1_sh_mask.h"
56#include "bif/bif_5_0_d.h"
57#include "bif/bif_5_0_sh_mask.h"
58#include "dce/dce_10_0_d.h"
59#include "dce/dce_10_0_sh_mask.h"
60
61#include "polaris10_thermal.h"
62#include "polaris10_clockpowergating.h"
63
/* Memory-controller clock-gating arbitration frequency set indices. */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

/* MC clock-gating sequencer command values. */
#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a


/* End of addressable SMC SRAM. */
#define SMC_RAM_END 0x40000

/* SMC clock-gating indirect register window. */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000

/* Voltage scaling factors (voltages are carried in units of 0.25 mV
 * toward the SMC; VID offsets scale by 6.25 mV steps). */
#define VOLTAGE_SCALE 4
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

/* Assumed VDDC-to-VDDCI delta in mV. */
#define VDDC_VDDCI_DELTA 200

/* Memory frequency breakpoints (10 kHz units) for latency selection. */
#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000

#define MEM_LATENCY_HIGH 45
#define MEM_LATENCY_LOW 35
#define MEM_LATENCY_ERR 0xFFFF

/* MC_SEQ_MISC0 field identifying GDDR5 memory. */
#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5


/* PCIe reference bus clock (10 kHz units) and derived TCLK. */
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

/* Magic value stamped into pp_hw_power_state.magic to tag Polaris10 states. */
static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
111
112static struct polaris10_power_state *cast_phw_polaris10_power_state(
113 struct pp_hw_power_state *hw_ps)
114{
115 PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
116 "Invalid Powerstate Type!",
117 return NULL);
118
119 return (struct polaris10_power_state *)hw_ps;
120}
121
122static const struct polaris10_power_state *
123cast_const_phw_polaris10_power_state(
124 const struct pp_hw_power_state *hw_ps)
125{
126 PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
127 "Invalid Powerstate Type!",
128 return NULL);
129
130 return (const struct polaris10_power_state *)hw_ps;
131}
132
133static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
134{
135 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
136 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
137 ? true : false;
138}
139
140/**
141 * Find the MC microcode version and store it in the HwMgr struct
142 *
143 * @param hwmgr the address of the powerplay hardware manager.
144 * @return always 0
145 */
146static int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
147{
148 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
149
150 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
151
152 return 0;
153}
154
155static uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
156{
157 uint32_t speedCntl = 0;
158
159 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
160 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
161 ixPCIE_LC_SPEED_CNTL);
162 return((uint16_t)PHM_GET_FIELD(speedCntl,
163 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
164}
165
166static int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
167{
168 uint32_t link_width;
169
170 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
171 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
172 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
173
174 PP_ASSERT_WITH_CODE((7 >= link_width),
175 "Invalid PCIe lane width!", return 0);
176
177 return decode_pcie_lane_width(link_width);
178}
179
/**
* Enable the SMC voltage controller.
*
* Sends PPSMC_MSG_Voltage_Cntl_Enable to the SMC via the smumgr.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, 1 if the SMC message failed.
*	  NOTE(review): a positive error return is unusual for kernel code
*	  (negative errno is the convention) — verify what callers check
*	  before changing it.
*/
static int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	PP_ASSERT_WITH_CODE(
		(hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
		"Failed to enable voltage DPM during DPM Start Function!",
		return 1;
	);

	return 0;
}
196
197/**
198* Checks if we want to support voltage control
199*
200* @param hwmgr the address of the powerplay hardware manager.
201*/
202static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
203{
204 const struct polaris10_hwmgr *data =
205 (const struct polaris10_hwmgr *)(hwmgr->backend);
206
207 return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
208}
209
/**
* Enable voltage control in hardware.
*
* Sets GENERAL_PWRMGT.VOLT_PWRMGT_EN through the SMC indirect space.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
224
/**
* Create Voltage Tables.
*
* Builds the MVDD, VDDCI and VDDC voltage tables in the backend state,
* sourcing each from either the ATOM GPIO lookup table or the SVI2
* dependency/lookup tables depending on the configured control method,
* then trims each table to the SMU74 level limits.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, or the first table-retrieval error code.
*/
static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result;

	/* MVDD: GPIO LUT from ATOM, or SVI2 from the MCLK dependency table. */
	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
				table_info->vdd_dep_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependancy table.",
				return result;);
	}

	/* VDDCI: same two-way selection as MVDD. */
	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
				table_info->vdd_dep_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependancy table.",
				return result);
	}

	/* VDDC is only constructed here for SVI2 control. */
	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
				table_info->vddc_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDC table from lookup table.",
				return result);
	}

	/* Oversized tables are trimmed (not failed) to the SMU74 limits. */
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
			"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
			&(data->vddc_voltage_table)));

	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
			&(data->vddci_voltage_table)));

	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
			"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
			&(data->mvdd_voltage_table)));

	return 0;
}
296
/**
* Programs static screen detection parameters.
*
* Writes the backend's static-screen threshold unit and threshold count
* into CG_STATIC_SCREEN_PARAMETER.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int polaris10_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}
319
320/**
321* Setup display gap for glitch free memory clock switching.
322*
323* @param hwmgr the address of the powerplay hardware manager.
324* @return always 0
325*/
326static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
327{
328 uint32_t display_gap =
329 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
330 ixCG_DISPLAY_GAP_CNTL);
331
332 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
333 DISP_GAP, DISPLAY_GAP_IGNORE);
334
335 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
336 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
337
338 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
339 ixCG_DISPLAY_GAP_CNTL, display_gap);
340
341 return 0;
342}
343
/**
* Programs activity state transition voting clients.
*
* Clears the SCLK/BUSY counter resets, then loads each of the eight
* CG_FREQ_TRAN_VOTING_x registers with the corresponding voting-rights
* mask held in the backend state.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

	return 0;
}
379
380static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
381{
382 /* Reset voting clients before disabling DPM */
383 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
384 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
385 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
386 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
387
388 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
389 ixCG_FREQ_TRAN_VOTING_0, 0);
390 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
391 ixCG_FREQ_TRAN_VOTING_1, 0);
392 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
393 ixCG_FREQ_TRAN_VOTING_2, 0);
394 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
395 ixCG_FREQ_TRAN_VOTING_3, 0);
396 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
397 ixCG_FREQ_TRAN_VOTING_4, 0);
398 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
399 ixCG_FREQ_TRAN_VOTING_5, 0);
400 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
401 ixCG_FREQ_TRAN_VOTING_6, 0);
402 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
403 ixCG_FREQ_TRAN_VOTING_7, 0);
404
405 return 0;
406}
407
/**
* Get the location of various tables inside the FW image.
*
* Reads SMU74_Firmware_Header field offsets out of SMC SRAM and caches
* them (DPM table, soft registers, MC register table, fan table, arb
* DRAM timing table, SMC version) in the backend/smumgr state.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, -1 if any of the accumulated reads failed.
*/
static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
	uint32_t tmp;
	int result;
	bool error = false;

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, DpmTable),
			&tmp, data->sram_end);

	if (0 == result)
		data->dpm_table_start = tmp;

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, SoftRegisters),
			&tmp, data->sram_end);

	/* Soft-register base is needed by both hwmgr and smumgr. */
	if (!result) {
		data->soft_regs_start = tmp;
		smu_data->soft_regs_start = tmp;
	}

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, mcRegisterTable),
			&tmp, data->sram_end);

	/* NOTE(review): unlike the other reads, this result is not folded
	 * into 'error' — the MC register table appears to be treated as
	 * optional; confirm before "fixing". */
	if (!result)
		data->mc_reg_table_start = tmp;

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, FanTable),
			&tmp, data->sram_end);

	if (!result)
		data->fan_table_start = tmp;

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
			&tmp, data->sram_end);

	if (!result)
		data->arb_table_start = tmp;

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, Version),
			&tmp, data->sram_end);

	if (!result)
		hwmgr->microcode_version_info.SMC = tmp;

	error |= (0 != result);

	return error ? -1 : 0;
}
484
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 *
 * Only F0 and F1 are supported as source or destination; any other
 * value returns -EINVAL. Returns 0 on success.
 */
static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Capture DRAM timing registers and burst time of the source set. */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Replay them into the destination set. */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	/* Set low-order MC_CG_CONFIG bits, then request the arb switch.
	 * NOTE(review): 0x0000000F is an undocumented magic mask here —
	 * consult the MC register spec for the field names. */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
533
/* Ask the SMC to restore its default settings; returns the smum_send
 * result. */
static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
}
538
/**
* Initial switch from ARB F0->F1.
*
* Copies the F0 arb settings into F1 and makes F1 the active set.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return result of polaris10_copy_and_switch_arb_sets (0 on success).
* This function is to be called from the SetPowerState table.
*/
static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return polaris10_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
551
552static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
553{
554 uint32_t tmp;
555
556 tmp = (cgs_read_ind_register(hwmgr->device,
557 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
558 0x0000ff00) >> 8;
559
560 if (tmp == MC_CG_ARB_FREQ_F0)
561 return 0;
562
563 return polaris10_copy_and_switch_arb_sets(hwmgr,
564 tmp, MC_CG_ARB_FREQ_F0);
565}
566
567static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
568{
569 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
570 struct phm_ppt_v1_information *table_info =
571 (struct phm_ppt_v1_information *)(hwmgr->pptable);
572 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
573 uint32_t i, max_entry;
574
575 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
576 data->use_pcie_power_saving_levels), "No pcie performance levels!",
577 return -EINVAL);
578
579 if (data->use_pcie_performance_levels &&
580 !data->use_pcie_power_saving_levels) {
581 data->pcie_gen_power_saving = data->pcie_gen_performance;
582 data->pcie_lane_power_saving = data->pcie_lane_performance;
583 } else if (!data->use_pcie_performance_levels &&
584 data->use_pcie_power_saving_levels) {
585 data->pcie_gen_performance = data->pcie_gen_power_saving;
586 data->pcie_lane_performance = data->pcie_lane_power_saving;
587 }
588
589 phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
590 SMU74_MAX_LEVELS_LINK,
591 MAX_REGULAR_DPM_NUMBER);
592
593 if (pcie_table != NULL) {
594 /* max_entry is used to make sure we reserve one PCIE level
595 * for boot level (fix for A+A PSPP issue).
596 * If PCIE table from PPTable have ULV entry + 8 entries,
597 * then ignore the last entry.*/
598 max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
599 SMU74_MAX_LEVELS_LINK : pcie_table->count;
600 for (i = 1; i < max_entry; i++) {
601 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
602 get_pcie_gen_support(data->pcie_gen_cap,
603 pcie_table->entries[i].gen_speed),
604 get_pcie_lane_support(data->pcie_lane_cap,
605 pcie_table->entries[i].lane_width));
606 }
607 data->dpm_table.pcie_speed_table.count = max_entry - 1;
608
609 /* Setup BIF_SCLK levels */
610 for (i = 0; i < max_entry; i++)
611 data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
612 } else {
613 /* Hardcode Pcie Table */
614 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
615 get_pcie_gen_support(data->pcie_gen_cap,
616 PP_Min_PCIEGen),
617 get_pcie_lane_support(data->pcie_lane_cap,
618 PP_Max_PCIELane));
619 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
620 get_pcie_gen_support(data->pcie_gen_cap,
621 PP_Min_PCIEGen),
622 get_pcie_lane_support(data->pcie_lane_cap,
623 PP_Max_PCIELane));
624 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
625 get_pcie_gen_support(data->pcie_gen_cap,
626 PP_Max_PCIEGen),
627 get_pcie_lane_support(data->pcie_lane_cap,
628 PP_Max_PCIELane));
629 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
630 get_pcie_gen_support(data->pcie_gen_cap,
631 PP_Max_PCIEGen),
632 get_pcie_lane_support(data->pcie_lane_cap,
633 PP_Max_PCIELane));
634 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
635 get_pcie_gen_support(data->pcie_gen_cap,
636 PP_Max_PCIEGen),
637 get_pcie_lane_support(data->pcie_lane_cap,
638 PP_Max_PCIELane));
639 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
640 get_pcie_gen_support(data->pcie_gen_cap,
641 PP_Max_PCIEGen),
642 get_pcie_lane_support(data->pcie_lane_cap,
643 PP_Max_PCIELane));
644
645 data->dpm_table.pcie_speed_table.count = 6;
646 }
647 /* Populate last level for boot PCIE level, but do not increment count. */
648 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
649 data->dpm_table.pcie_speed_table.count,
650 get_pcie_gen_support(data->pcie_gen_cap,
651 PP_Min_PCIEGen),
652 get_pcie_lane_support(data->pcie_lane_cap,
653 PP_Max_PCIELane));
654
655 return 0;
656}
657
658/*
659 * This function is to initalize all DPM state tables
660 * for SMU7 based on the dependency table.
661 * Dynamic state patching function will then trim these
662 * state tables to the allowed range based
663 * on the power policy or external client requests,
664 * such as UVD request, etc.
665 */
666static int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
667{
668 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
669 struct phm_ppt_v1_information *table_info =
670 (struct phm_ppt_v1_information *)(hwmgr->pptable);
671 uint32_t i;
672
673 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
674 table_info->vdd_dep_on_sclk;
675 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
676 table_info->vdd_dep_on_mclk;
677
678 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
679 "SCLK dependency table is missing. This table is mandatory",
680 return -EINVAL);
681 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
682 "SCLK dependency table has to have is missing."
683 "This table is mandatory",
684 return -EINVAL);
685
686 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
687 "MCLK dependency table is missing. This table is mandatory",
688 return -EINVAL);
689 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
690 "MCLK dependency table has to have is missing."
691 "This table is mandatory",
692 return -EINVAL);
693
694 /* clear the state table to reset everything to default */
695 phm_reset_single_dpm_table(
696 &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
697 phm_reset_single_dpm_table(
698 &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
699
700
701 /* Initialize Sclk DPM table based on allow Sclk values */
702 data->dpm_table.sclk_table.count = 0;
703 for (i = 0; i < dep_sclk_table->count; i++) {
704 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
705 dep_sclk_table->entries[i].clk) {
706
707 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
708 dep_sclk_table->entries[i].clk;
709
710 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
711 (i == 0) ? true : false;
712 data->dpm_table.sclk_table.count++;
713 }
714 }
715
716 /* Initialize Mclk DPM table based on allow Mclk values */
717 data->dpm_table.mclk_table.count = 0;
718 for (i = 0; i < dep_mclk_table->count; i++) {
719 if (i == 0 || data->dpm_table.mclk_table.dpm_levels
720 [data->dpm_table.mclk_table.count - 1].value !=
721 dep_mclk_table->entries[i].clk) {
722 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
723 dep_mclk_table->entries[i].clk;
724 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
725 (i == 0) ? true : false;
726 data->dpm_table.mclk_table.count++;
727 }
728 }
729
730 /* setup PCIE gen speed levels */
731 polaris10_setup_default_pcie_table(hwmgr);
732
733 /* save a copy of the default DPM table */
734 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
735 sizeof(struct polaris10_dpm_table));
736
737 return 0;
738}
739
740/**
741 * Mvdd table preparation for SMC.
742 *
743 * @param *hwmgr The address of the hardware manager.
744 * @param *table The SMC DPM table structure to be populated.
745 * @return 0
746 */
747static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
748 SMU74_Discrete_DpmTable *table)
749{
750 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
751 uint32_t count, level;
752
753 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
754 count = data->mvdd_voltage_table.count;
755 if (count > SMU_MAX_SMIO_LEVELS)
756 count = SMU_MAX_SMIO_LEVELS;
757 for (level = 0; level < count; level++) {
758 table->SmioTable2.Pattern[level].Voltage =
759 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
760 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
761 table->SmioTable2.Pattern[level].Smio =
762 (uint8_t) level;
763 table->Smio[level] |=
764 data->mvdd_voltage_table.entries[level].smio_low;
765 }
766 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
767
768 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
769 }
770
771 return 0;
772}
773
774static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
775 struct SMU74_Discrete_DpmTable *table)
776{
777 uint32_t count, level;
778 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
779
780 count = data->vddci_voltage_table.count;
781
782 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
783 if (count > SMU_MAX_SMIO_LEVELS)
784 count = SMU_MAX_SMIO_LEVELS;
785 for (level = 0; level < count; ++level) {
786 table->SmioTable1.Pattern[level].Voltage =
787 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
788 table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
789
790 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
791 }
792 }
793
794 table->SmioMask1 = data->vddci_voltage_table.mask_low;
795
796 return 0;
797}
798
/**
* Preparation of vddc and vddgfx CAC tables for SMC.
*
* For every VDDC level, looks up the matching entry in the VDDC lookup
* table and converts its low/mid/high leakage voltages to VIDs in the
* BapmVddc arrays.
*
* @param hwmgr the address of the hardware manager
* @param table the SMC DPM table structure to be populated
* @return always 0
*/
static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	uint32_t count;
	uint8_t index;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
			table_info->vddc_lookup_table;
	/* tables is already swapped, so in order to use the value from it,
	 * we need to swap it back.
	 * We are populating vddc CAC data to BapmVddc table
	 * in split and merged mode
	 */
	for (count = 0; count < lookup_table->count; count++) {
		index = phm_get_voltage_index(lookup_table,
				data->vddc_voltage_table.entries[count].value);
		table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
		table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
		table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
	}

	return 0;
}
831
/**
* Preparation of voltage tables for SMC.
*
* Convenience wrapper that populates the VDDCI, MVDD and CAC portions
* of the SMC DPM table in sequence.
*
* @param hwmgr the address of the hardware manager
* @param table the SMC DPM table structure to be populated
* @return always 0
*/

static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	polaris10_populate_smc_vddci_table(hwmgr, table);
	polaris10_populate_smc_mvdd_table(hwmgr, table);
	polaris10_populate_cac_table(hwmgr, table);

	return 0;
}
849
/* Fill one SMC ULV (ultra-low voltage) state from the PPTable's ULV
 * voltage offset, converting the offset to a VID and byte-swapping the
 * multi-byte fields for the SMC. Always returns 0. */
static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_Ulv *state)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
	/* Offset in mV scaled into 6.25 mV VID steps
	 * (SCALE2/SCALE1 = 100/625). */
	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);

	state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}
872
/* Populate the DPM table's ULV member via polaris10_populate_ulv_level(). */
static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
}
878
/* Fill the SMC LinkLevel entries from the PCIe speed DPM table (one
 * extra, uncounted entry for the boot level), then record the level
 * count and the PCIe DPM enable mask. Always returns 0. */
static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
				dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		/* Down/Up activity thresholds, byte-swapped for the SMC. */
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	data->smc_state_table.LinkLevelCount =
			(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
906
907static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
908{
909 uint32_t reference_clock, tmp;
910 struct cgs_display_info info = {0};
911 struct cgs_mode_info mode_info;
912
913 info.mode_info = &mode_info;
914
915 tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
916
917 if (tmp)
918 return TCLK;
919
920 cgs_get_active_displays_info(hwmgr->device, &info);
921 reference_clock = mode_info.ref_clock;
922
923 tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
924
925 if (0 != tmp)
926 return reference_clock / 4;
927
928 return reference_clock;
929}
930
931/**
932* Calculates the SCLK dividers using the provided engine clock
933*
934* @param hwmgr the address of the hardware manager
935* @param clock the engine clock to use to populate the structure
936* @param sclk the SMC SCLK structure to be populated
937*/
938static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
939 uint32_t clock, SMU_SclkSetting *sclk_setting)
940{
941 const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
942 const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
943 struct pp_atomctrl_clock_dividers_ai dividers;
944
945 uint32_t ref_clock;
946 uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
947 uint8_t i;
948 int result;
949 uint64_t temp;
950
951 sclk_setting->SclkFrequency = clock;
952 /* get the engine clock dividers for this clock value */
953 result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
954 if (result == 0) {
955 sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
956 sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
957 sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
958 sclk_setting->PllRange = dividers.ucSclkPllRange;
959 sclk_setting->Sclk_slew_rate = 0x400;
960 sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
961 sclk_setting->Pcc_down_slew_rate = 0xffff;
962 sclk_setting->SSc_En = dividers.ucSscEnable;
963 sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
964 sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
965 sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
966 return result;
967 }
968
969 ref_clock = polaris10_get_xclk(hwmgr);
970
971 for (i = 0; i < NUM_SCLK_RANGE; i++) {
972 if (clock > data->range_table[i].trans_lower_frequency
973 && clock <= data->range_table[i].trans_upper_frequency) {
974 sclk_setting->PllRange = i;
975 break;
976 }
977 }
978
979 sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
980 temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
981 temp <<= 0x10;
982 do_div(temp, ref_clock);
983 sclk_setting->Fcw_frac = temp & 0xffff;
984
985 pcc_target_percent = 10; /* Hardcode 10% for now. */
986 pcc_target_freq = clock - (clock * pcc_target_percent / 100);
987 sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
988
989 ss_target_percent = 2; /* Hardcode 2% for now. */
990 sclk_setting->SSc_En = 0;
991 if (ss_target_percent) {
992 sclk_setting->SSc_En = 1;
993 ss_target_freq = clock - (clock * ss_target_percent / 100);
994 sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
995 temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
996 temp <<= 0x10;
997 do_div(temp, ref_clock);
998 sclk_setting->Fcw1_frac = temp & 0xffff;
999 }
1000
1001 return 0;
1002}
1003
1004static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
1005 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1006 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1007{
1008 uint32_t i;
1009 uint16_t vddci;
1010 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1011
1012 *voltage = *mvdd = 0;
1013
1014 /* clock - voltage dependency table is empty table */
1015 if (dep_table->count == 0)
1016 return -EINVAL;
1017
1018 for (i = 0; i < dep_table->count; i++) {
1019 /* find first sclk bigger than request */
1020 if (dep_table->entries[i].clk >= clock) {
1021 *voltage |= (dep_table->entries[i].vddc *
1022 VOLTAGE_SCALE) << VDDC_SHIFT;
1023 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
1024 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1025 VOLTAGE_SCALE) << VDDCI_SHIFT;
1026 else if (dep_table->entries[i].vddci)
1027 *voltage |= (dep_table->entries[i].vddci *
1028 VOLTAGE_SCALE) << VDDCI_SHIFT;
1029 else {
1030 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
1031 (dep_table->entries[i].vddc -
1032 (uint16_t)data->vddc_vddci_delta));
1033 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1034 }
1035
1036 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1037 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
1038 VOLTAGE_SCALE;
1039 else if (dep_table->entries[i].mvdd)
1040 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
1041 VOLTAGE_SCALE;
1042
1043 *voltage |= 1 << PHASES_SHIFT;
1044 return 0;
1045 }
1046 }
1047
1048 /* sclk is bigger than max sclk in the dependence table */
1049 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1050
1051 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
1052 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1053 VOLTAGE_SCALE) << VDDCI_SHIFT;
1054 else if (dep_table->entries[i-1].vddci) {
1055 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
1056 (dep_table->entries[i].vddc -
1057 (uint16_t)data->vddc_vddci_delta));
1058 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1059 }
1060
1061 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1062 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
1063 else if (dep_table->entries[i].mvdd)
1064 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
1065
1066 return 0;
1067}
1068
/* Built-in fallback SCLK FCW range table, used by
 * polaris10_get_sclk_range_table() when the VBIOS does not provide one.
 * Fields per entry (in declaration order): vco setting, post divider,
 * fcw_pcc, fcw_trans_upper, fcw_trans_lower. */
static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] =
{ {VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
  {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
  {VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
  {VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
  {VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
  {VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
  {VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
  {VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
1078
/* Fill the SMC SclkFcwRangeTable, preferring the table from the VBIOS and
 * falling back to the built-in Range_Table.  In the fallback path the
 * driver-side range_table (trans_lower/upper frequencies) is also derived
 * from the reference clock for later lookup in
 * polaris10_calculate_sclk_params(). */
static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
{
	uint32_t i, ref_clk;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };

	ref_clk = polaris10_get_xclk(hwmgr);

	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
		/* VBIOS supplied a table; copy it into the SMC state,
		 * byte-swapping the 16-bit fields for the SMC. */
		for (i = 0; i < NUM_SCLK_RANGE; i++) {
			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;

			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
			/* NOTE(review): "usRcw_trans_lower" looks like a typo for
			 * "usFcw_trans_lower" in the atomctrl struct definition —
			 * confirm against the atomctrl header. */
			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;

			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
		}
		return;
	}

	/* Fallback: use the hard-coded Range_Table and also compute the
	 * driver-side transition frequencies from the reference clock. */
	for (i = 0; i < NUM_SCLK_RANGE; i++) {

		data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
		data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;

		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;

		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;

		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
	}
}
1121
1122/**
1123* Populates single SMC SCLK structure using the provided engine clock
1124*
1125* @param hwmgr the address of the hardware manager
1126* @param clock the engine clock to use to populate the structure
1127* @param sclk the SMC SCLK structure to be populated
1128*/
1129
1130static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
1131 uint32_t clock, uint16_t sclk_al_threshold,
1132 struct SMU74_Discrete_GraphicsLevel *level)
1133{
1134 int result, i, temp;
1135 /* PP_Clocks minClocks; */
1136 uint32_t mvdd;
1137 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1138 struct phm_ppt_v1_information *table_info =
1139 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1140 SMU_SclkSetting curr_sclk_setting = { 0 };
1141
1142 result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
1143
1144 /* populate graphics levels */
1145 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1146 table_info->vdd_dep_on_sclk, clock,
1147 &level->MinVoltage, &mvdd);
1148
1149 PP_ASSERT_WITH_CODE((0 == result),
1150 "can not find VDDC voltage value for "
1151 "VDDC engine clock dependency table",
1152 return result);
1153 level->ActivityLevel = sclk_al_threshold;
1154
1155 level->CcPwrDynRm = 0;
1156 level->CcPwrDynRm1 = 0;
1157 level->EnabledForActivity = 0;
1158 level->EnabledForThrottle = 1;
1159 level->UpHyst = 10;
1160 level->DownHyst = 0;
1161 level->VoltageDownHyst = 0;
1162 level->PowerThrottle = 0;
1163
1164 /*
1165 * TODO: get minimum clocks from dal configaration
1166 * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
1167 */
1168 /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
1169
1170 /* get level->DeepSleepDivId
1171 if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
1172 level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
1173 */
1174 PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
1175 for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1176 temp = clock >> i;
1177
1178 if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
1179 break;
1180 }
1181
1182 level->DeepSleepDivId = i;
1183
1184 /* Default to slow, highest DPM level will be
1185 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
1186 */
1187 if (data->update_up_hyst)
1188 level->UpHyst = (uint8_t)data->up_hyst;
1189 if (data->update_down_hyst)
1190 level->DownHyst = (uint8_t)data->down_hyst;
1191
1192 level->SclkSetting = curr_sclk_setting;
1193
1194 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
1195 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
1196 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
1197 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
1198 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
1199 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
1200 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
1201 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
1202 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
1203 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
1204 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
1205 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
1206 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
1207 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
1208 return 0;
1209}
1210
1211/**
1212* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1213*
1214* @param hwmgr the address of the hardware manager
1215*/
1216static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1217{
1218 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1219 struct polaris10_dpm_table *dpm_table = &data->dpm_table;
1220 struct phm_ppt_v1_information *table_info =
1221 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1222 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
1223 uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
1224 int result = 0;
1225 uint32_t array = data->dpm_table_start +
1226 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
1227 uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
1228 SMU74_MAX_LEVELS_GRAPHICS;
1229 struct SMU74_Discrete_GraphicsLevel *levels =
1230 data->smc_state_table.GraphicsLevel;
1231 uint32_t i, max_entry;
1232 uint8_t hightest_pcie_level_enabled = 0,
1233 lowest_pcie_level_enabled = 0,
1234 mid_pcie_level_enabled = 0,
1235 count = 0;
1236
1237 polaris10_get_sclk_range_table(hwmgr);
1238
1239 for (i = 0; i < dpm_table->sclk_table.count; i++) {
1240
1241 result = polaris10_populate_single_graphic_level(hwmgr,
1242 dpm_table->sclk_table.dpm_levels[i].value,
1243 (uint16_t)data->activity_target[i],
1244 &(data->smc_state_table.GraphicsLevel[i]));
1245 if (result)
1246 return result;
1247
1248 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
1249 if (i > 1)
1250 levels[i].DeepSleepDivId = 0;
1251 }
1252 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1253 PHM_PlatformCaps_SPLLShutdownSupport))
1254 data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
1255
1256 data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
1257 data->smc_state_table.GraphicsDpmLevelCount =
1258 (uint8_t)dpm_table->sclk_table.count;
1259 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
1260 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
1261
1262
1263 if (pcie_table != NULL) {
1264 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
1265 "There must be 1 or more PCIE levels defined in PPTable.",
1266 return -EINVAL);
1267 max_entry = pcie_entry_cnt - 1;
1268 for (i = 0; i < dpm_table->sclk_table.count; i++)
1269 levels[i].pcieDpmLevel =
1270 (uint8_t) ((i < max_entry) ? i : max_entry);
1271 } else {
1272 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
1273 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1274 (1 << (hightest_pcie_level_enabled + 1))) != 0))
1275 hightest_pcie_level_enabled++;
1276
1277 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
1278 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1279 (1 << lowest_pcie_level_enabled)) == 0))
1280 lowest_pcie_level_enabled++;
1281
1282 while ((count < hightest_pcie_level_enabled) &&
1283 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1284 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
1285 count++;
1286
1287 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
1288 hightest_pcie_level_enabled ?
1289 (lowest_pcie_level_enabled + 1 + count) :
1290 hightest_pcie_level_enabled;
1291
1292 /* set pcieDpmLevel to hightest_pcie_level_enabled */
1293 for (i = 2; i < dpm_table->sclk_table.count; i++)
1294 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
1295
1296 /* set pcieDpmLevel to lowest_pcie_level_enabled */
1297 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
1298
1299 /* set pcieDpmLevel to mid_pcie_level_enabled */
1300 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
1301 }
1302 /* level count will send to smc once at init smc table and never change */
1303 result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
1304 (uint32_t)array_size, data->sram_end);
1305
1306 return result;
1307}
1308
/* Populate a single SMC memory level for the given memory clock:
 * voltage lookup, hysteresis defaults, stutter-mode decision, and SMC
 * byte-order conversion of the multi-byte fields. */
static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	struct cgs_display_info info = {0, 0, NULL};

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (table_info->vdd_dep_on_mclk) {
		result = polaris10_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk, clock,
				&mem_level->MinVoltage, &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	mem_level->MclkFrequency = clock;
	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->UpHyst = 0;
	mem_level->DownHyst = 100;
	mem_level->VoltageDownHyst = 0;
	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	mem_level->StutterEnable = false;
	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = info.display_count;

	/* Enable memory stutter mode only when a threshold is configured,
	 * this clock is at or below it, and the display pipe already has
	 * stutter enabled. */
	if ((data->mclk_stutter_mode_threshold) &&
			(clock <= data->mclk_stutter_mode_threshold) &&
			(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
					STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	if (!result) {
		/* Convert multi-byte fields to SMC byte order. */
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}
	return result;
}
1355
1356/**
1357* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
1358*
1359* @param hwmgr the address of the hardware manager
1360*/
1361static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1362{
1363 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1364 struct polaris10_dpm_table *dpm_table = &data->dpm_table;
1365 int result;
1366 /* populate MCLK dpm table to SMU7 */
1367 uint32_t array = data->dpm_table_start +
1368 offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
1369 uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
1370 SMU74_MAX_LEVELS_MEMORY;
1371 struct SMU74_Discrete_MemoryLevel *levels =
1372 data->smc_state_table.MemoryLevel;
1373 uint32_t i;
1374
1375 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1376 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1377 "can not populate memory level as memory clock is zero",
1378 return -EINVAL);
1379 result = polaris10_populate_single_memory_level(hwmgr,
1380 dpm_table->mclk_table.dpm_levels[i].value,
1381 &levels[i]);
1382 if (i == dpm_table->mclk_table.count - 1) {
1383 levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1384 levels[i].EnabledForActivity = 1;
1385 }
1386 if (result)
1387 return result;
1388 }
1389
1390 /* In order to prevent MC activity from stutter mode to push DPM up,
1391 * the UVD change complements this by putting the MCLK in
1392 * a higher state by default such that we are not affected by
1393 * up threshold or and MCLK DPM latency.
1394 */
1395 levels[0].ActivityLevel = 0x1f;
1396 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
1397
1398 data->smc_state_table.MemoryDpmLevelCount =
1399 (uint8_t)dpm_table->mclk_table.count;
1400 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
1401 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1402
1403 /* level count will send to smc once at init smc table and never change */
1404 result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
1405 (uint32_t)array_size, data->sram_end);
1406
1407 return result;
1408}
1409
1410/**
1411* Populates the SMC MVDD structure using the provided memory clock.
1412*
1413* @param hwmgr the address of the hardware manager
1414* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
1415* @param voltage the SMC VOLTAGE structure to be populated
1416*/
1417static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1418 uint32_t mclk, SMIO_Pattern *smio_pat)
1419{
1420 const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1421 struct phm_ppt_v1_information *table_info =
1422 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1423 uint32_t i = 0;
1424
1425 if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1426 /* find mvdd value which clock is more than request */
1427 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1428 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1429 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
1430 break;
1431 }
1432 }
1433 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1434 "MVDD Voltage is outside the supported range.",
1435 return -EINVAL);
1436 } else
1437 return -EINVAL;
1438
1439 return 0;
1440}
1441
/* Populate the SMC ACPI (lowest-power) levels for both the engine and
 * memory: boot clocks and voltages from the VBIOS boot state, SCLK
 * divider settings, MVDD selection, and SMC byte-order conversion. */
static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU74_Discrete_DpmTable *table)
{
	int result = 0;
	uint32_t sclk_frequency;
	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;

	/* ACPI level is never a "DC" (battery) state. */
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;


	/* Get MinVoltage and Frequency from DPM0,
	 * already converted to SMC_UL */
	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk,
			sclk_frequency,
			&table->ACPILevel.MinVoltage, &mvdd);
	/* Non-fatal: proceed with whatever voltage was found. */
	PP_ASSERT_WITH_CODE((0 == result),
			"Cannot find ACPI VDDC voltage value "
			"in Clock Dependency Table",
			);


	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);

	table->ACPILevel.DeepSleepDivId = 0;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* Convert ACPI engine-level fields to SMC byte order. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);


	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_mclk,
			table->MemoryACPILevel.MclkFrequency,
			&table->MemoryACPILevel.MinVoltage, &mvdd);
	/* Non-fatal: proceed with whatever voltage was found. */
	PP_ASSERT_WITH_CODE((0 == result),
			"Cannot find ACPI VDDCI voltage value "
			"in Clock Dependency Table",
			);

	/* Choose MVDD: bootup value when not controlled or MCLK DPM is
	 * disabled, otherwise look it up for the lowest MCLK level.
	 * NOTE(review): us_mvdd computed here is not used afterwards —
	 * only the mclk==0 lookup below feeds MinMvdd; confirm intent. */
	us_mvdd = 0;
	if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!polaris10_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	table->MemoryACPILevel.StutterEnable = false;

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
1536
/* Populate the SMC VCE levels from the multimedia clock/voltage
 * dependency table: frequency, packed min voltage, and PLL divider. */
static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
		SMU74_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t vddci;

	table->VceLevelCount = (uint8_t)(mm_table->count);
	table->VceBootLevel = 0;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
		table->VceLevel[count].MinVoltage = 0;
		table->VceLevel[count].MinVoltage |=
				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;

		/* Derive vddci from the control mode. */
		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
		else
			/* NOTE(review): the bootup value is scaled and shifted
			 * here AND again below when OR-ed into MinVoltage —
			 * looks like a double application; confirm against the
			 * SMC voltage encoding. */
			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;


		table->VceLevel[count].MinVoltage |=
				(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/*retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->VceLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for VCE engine clock",
				return result);

		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		/* Convert to SMC byte order. */
		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
	}
	return result;
}
1586
1587static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1588 SMU74_Discrete_DpmTable *table)
1589{
1590 int result = -EINVAL;
1591 uint8_t count;
1592 struct pp_atomctrl_clock_dividers_vi dividers;
1593 struct phm_ppt_v1_information *table_info =
1594 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1595 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1596 table_info->mm_dep_table;
1597 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1598 uint32_t vddci;
1599
1600 table->SamuBootLevel = 0;
1601 table->SamuLevelCount = (uint8_t)(mm_table->count);
1602
1603 for (count = 0; count < table->SamuLevelCount; count++) {
1604 /* not sure whether we need evclk or not */
1605 table->SamuLevel[count].MinVoltage = 0;
1606 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1607 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1608 VOLTAGE_SCALE) << VDDC_SHIFT;
1609
1610 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1611 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1612 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1613 else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1614 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1615 else
1616 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1617
1618 table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1619 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1620
1621 /* retrieve divider value for VBIOS */
1622 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1623 table->SamuLevel[count].Frequency, &dividers);
1624 PP_ASSERT_WITH_CODE((0 == result),
1625 "can not find divide id for samu clock", return result);
1626
1627 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1628
1629 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1630 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1631 }
1632 return result;
1633}
1634
1635static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1636 int32_t eng_clock, int32_t mem_clock,
1637 SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1638{
1639 uint32_t dram_timing;
1640 uint32_t dram_timing2;
1641 uint32_t burst_time;
1642 int result;
1643
1644 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1645 eng_clock, mem_clock);
1646 PP_ASSERT_WITH_CODE(result == 0,
1647 "Error calling VBIOS to set DRAM_TIMING.", return result);
1648
1649 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1650 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1651 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1652
1653
1654 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1655 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1656 arb_regs->McArbBurstTime = (uint8_t)burst_time;
1657
1658 return 0;
1659}
1660
/* Build the full sclk x mclk MC arbiter DRAM timing table and upload it
 * to the SMC.  For every (sclk, mclk) pair the timings are programmed
 * via the VBIOS, read back, and the AC timing for that mclk is set. */
static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
	uint32_t i, j;
	int result = 0;

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
			result = polaris10_populate_memory_timing_parameters(hwmgr,
					data->dpm_table.sclk_table.dpm_levels[i].value,
					data->dpm_table.mclk_table.dpm_levels[j].value,
					&arb_regs.entries[i][j]);
			/* NOTE(review): AC timing for mclk level j is re-set
			 * once per sclk level i — appears redundant across the
			 * outer loop; confirm whether it is required per pair. */
			if (result == 0)
				result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
			if (result != 0)
				return result;
		}
	}

	result = polaris10_copy_bytes_to_smc(
			hwmgr->smumgr,
			data->arb_table_start,
			(uint8_t *)&arb_regs,
			sizeof(SMU74_Discrete_MCArbDramTimingTable),
			data->sram_end);
	return result;
}
1689
/* Populate the SMC UVD levels from the multimedia clock/voltage
 * dependency table: VCLK/DCLK frequencies, packed min voltage, and the
 * PLL dividers for both clocks. */
static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t vddci;

	table->UvdLevelCount = (uint8_t)(mm_table->count);
	table->UvdBootLevel = 0;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].MinVoltage = 0;
		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
				VOLTAGE_SCALE) << VDDC_SHIFT;

		/* Derive vddci from the control mode. */
		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
		else
			/* NOTE(review): the bootup value is scaled and shifted
			 * here AND again below when OR-ed into MinVoltage —
			 * looks like a double application; confirm against the
			 * SMC voltage encoding. */
			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;

		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].VclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Vclk clock", return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Dclk clock", return result);

		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;

		/* Convert to SMC byte order. */
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
	}

	return result;
}
1746
static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	/* Fill in the SMC boot DPM levels and boot voltages from the
	 * VBIOS boot-up state.  Always returns 0.
	 */
	int result = 0;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	/* NOTE(review): both phm_find_boot_level() results are ignored —
	 * the first assignment to 'result' is overwritten and the second
	 * is never checked, and the function returns 0 unconditionally.
	 * On lookup failure the boot levels silently stay 0; confirm this
	 * fallback is intended.
	 */
	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));

	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(table->MemoryBootLevel));

	/* Boot voltages are scaled by VOLTAGE_SCALE before upload. */
	table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
			VOLTAGE_SCALE;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
			VOLTAGE_SCALE;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
			VOLTAGE_SCALE;

	/* Byte-swap 16-bit fields to the SMC's endianness. */
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);

	return 0;
}
1778
1779
1780static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1781{
1782 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1783 struct phm_ppt_v1_information *table_info =
1784 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1785 uint8_t count, level;
1786
1787 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1788
1789 for (level = 0; level < count; level++) {
1790 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1791 data->vbios_boot_state.sclk_bootup_value) {
1792 data->smc_state_table.GraphicsBootLevel = level;
1793 break;
1794 }
1795 }
1796
1797 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1798 for (level = 0; level < count; level++) {
1799 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1800 data->vbios_boot_state.mclk_bootup_value) {
1801 data->smc_state_table.MemoryBootLevel = level;
1802 break;
1803 }
1804 }
1805
1806 return 0;
1807}
1808
static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	/* Populate the clock-stretcher (CKS) fields of the cached SMC
	 * state table: per-level master enables, per-level voltage
	 * offsets derived from the RO fuse, and the LDO reference select.
	 * Returns 0 on success, -EINVAL for an unsupported stretch amount.
	 */
	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint8_t i, stretch_amount, volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	/* RO fuse lives in the top byte of efuse word 67. */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (67 * 4));
	efuse &= 0xFF000000;
	efuse = efuse >> 24;

	/* Per-chip RO range used to linearly map the 8-bit fuse value. */
	if (hwmgr->chip_id == CHIP_POLARIS10) {
		min = 1000;
		max = 2300;
	} else {
		min = 1100;
		max = 2100;
	}

	ro = efuse * (max -min)/255 + min;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* Fused polynomial fits for voltage with/without clock
		 * stretching as a function of sclk and RO.  The constants
		 * come from characterization data — do not simplify;
		 * integer-division order is significant.
		 */
		if (hwmgr->chip_id == CHIP_POLARIS10) {
			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
			volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
					(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
		} else {
			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
						(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
			volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
					(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
		}

		/* Offset in 6.25mV steps, rounded up.  Note volt_offset is
		 * only updated when the delta is non-negative, so a level
		 * that fails the check inherits the previous level's offset.
		 */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);

		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* LDO reference select from the pptable, defaulting to 6. */
	data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
	/* Populate CKS Lookup Table */
	if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
			stretch_amount != 4 && stretch_amount != 5) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	/* Clear bit 0 of PWR_CKS_CNTL (read-modify-write). */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
1879
1880/**
1881* Populates the SMC VRConfig field in DPM table.
1882*
1883* @param hwmgr the address of the hardware manager
1884* @param table the SMC DPM table structure to be populated
1885* @return always 0
1886*/
1887static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
1888 struct SMU74_Discrete_DpmTable *table)
1889{
1890 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1891 uint16_t config;
1892
1893 config = VR_MERGED_WITH_VDDC;
1894 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1895
1896 /* Set Vddc Voltage Controller */
1897 if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1898 config = VR_SVI2_PLANE_1;
1899 table->VRConfig |= config;
1900 } else {
1901 PP_ASSERT_WITH_CODE(false,
1902 "VDDC should be on SVI2 control in merged mode!",
1903 );
1904 }
1905 /* Set Vddci Voltage Controller */
1906 if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1907 config = VR_SVI2_PLANE_2; /* only in merged mode */
1908 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1909 } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1910 config = VR_SMIO_PATTERN_1;
1911 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1912 } else {
1913 config = VR_STATIC_VOLTAGE;
1914 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1915 }
1916 /* Set Mvdd Voltage Controller */
1917 if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1918 config = VR_SVI2_PLANE_2;
1919 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1920 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
1921 offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
1922 } else {
1923 config = VR_STATIC_VOLTAGE;
1924 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1925 }
1926
1927 return 0;
1928}
1929
1930
static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
	/* Populate the AVFS (adaptive voltage/frequency scaling) fields of
	 * the SMC DPM table from the VBIOS AVFS parameters, and upload the
	 * mean/sigma and sclk-offset side tables to SMC SRAM.  A no-op
	 * when AVFS BTC is unsupported.
	 */
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
	int result = 0;
	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
	uint32_t tmp, i;
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;


	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
		return result;

	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);

	if (0 == result) {
		/* Voltage-droop and AVFS fuse-table coefficients, converted
		 * to SMC endianness.  Index 0 = CKS on, index 1 = CKS off.
		 */
		table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
		table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
		table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
		table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
		table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
		table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
		table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
		table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
		table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
		table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
		table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
		table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
		table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
		table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
		table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
		table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
		table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
		AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
		AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
		AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
		AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
		AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
		AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
		AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);

		/* Static voltage offsets in 6.25mV units; sclk offsets in
		 * 100 units of the pptable value.
		 */
		for (i = 0; i < NUM_VFT_COLUMNS; i++) {
			AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
			AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
		}

		/* Look up the SRAM address of each side table from the SMU
		 * firmware header, then upload the table there.
		 * NOTE(review): the read result is not checked before 'tmp'
		 * is used as the upload address, and the copy results are
		 * discarded (the second read's result is what the caller
		 * ultimately sees).  Confirm whether failures here should
		 * abort the upload.
		 */
		result = polaris10_read_smc_sram_dword(smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
				&tmp, data->sram_end);

		polaris10_copy_bytes_to_smc(smumgr,
					tmp,
					(uint8_t *)&AVFS_meanNsigma,
					sizeof(AVFS_meanNsigma_t),
					data->sram_end);

		result = polaris10_read_smc_sram_dword(smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
				&tmp, data->sram_end);
		polaris10_copy_bytes_to_smc(smumgr,
					tmp,
					(uint8_t *)&AVFS_SclkOffset,
					sizeof(AVFS_Sclk_Offset_t),
					data->sram_end);

		/* Pack the four vdroop/fuse enable bits for later use. */
		data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
						(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
						(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
						(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
	}
	return result;
}
2012
2013
2014/**
2015* Initializes the SMC table and uploads it
2016*
2017* @param hwmgr the address of the powerplay hardware manager.
2018* @return always 0
2019*/
static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
{
	/* Build the complete SMU74 discrete DPM table (levels, voltages,
	 * GPIO assignments, thermal limits) in the cached state table and
	 * upload it to SMC SRAM.  The population order below matters: the
	 * ARB timing parameters are written between the SAMU and UVD
	 * levels because only the initial state is fully set up here.
	 */
	int result;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
	const struct polaris10_ulv_parm *ulv = &(data->ulv);
	uint8_t i;
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
	pp_atomctrl_clock_dividers_vi dividers;

	result = polaris10_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
		polaris10_populate_smc_voltage_tables(hwmgr, table);

	/* System flags: AC/DC transition GPIO, stepped VDDC, GDDR5. */
	table->SystemFlags = 0;
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
		result = polaris10_populate_ulv_state(hwmgr, table);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to initialize ULV state!", return result);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
	}

	result = polaris10_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Link Level!", return result);

	result = polaris10_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Graphics Level!", return result);

	result = polaris10_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Memory Level!", return result);

	result = polaris10_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ACPI Level!", return result);

	result = polaris10_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize VCE Level!", return result);

	result = polaris10_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize SAMU Level!", return result);

	/* Since only the initial state is completely set up at this point
	 * (the other states are just copies of the boot state) we only
	 * need to populate the ARB settings for the initial state.
	 */
	result = polaris10_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to Write ARB settings for the initial state.", return result);

	result = polaris10_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize UVD Level!", return result);

	result = polaris10_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot Level!", return result);

	result = polaris10_populate_smc_initailial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot State!", return result);

	result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate BAPM Parameters!", return result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ClockStretcher)) {
		result = polaris10_populate_clock_stretcher_data_table(hwmgr);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to populate Clock Stretcher Data Table!",
				return result);
	}

	result = polaris10_populate_avfs_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);

	/* Scalar table fields.  Temperature limits are in Q8.8 format. */
	table->CurrSclkPllRange = 0xff;
	table->GraphicsVoltageChangeEnable  = 1;
	table->GraphicsThermThrottleEnable  = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval  = 1;
	table->ThermalInterval  = 1;
	table->TemperatureLimitHigh =
			table_info->cac_dtp_table->usTargetOperatingTemp *
			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
	table->TemperatureLimitLow  =
			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;
	table->PCIeGenInterval = 1;
	table->VRConfig = 0;

	result = polaris10_populate_vr_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate VRConfig setting!", return result);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* VRHot GPIO; without a pin assignment the RegulatorHot cap is
	 * dropped.
	 */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
	} else {
		table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	/* AC/DC switch GPIO; the AutomaticDCTransition cap tracks whether
	 * a pin exists.
	 */
	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
			&gpio_pin)) {
		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	} else {
		table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	}

	/* Thermal Output GPIO */
	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
			&gpio_pin)) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ThermalOutGPIO);

		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;

		/* For porlarity read GPIOPAD_A with assigned Gpio pin
		 * since VBIOS will program this register to set 'inactive state',
		 * driver can then determine 'active state' from this and
		 * program SMU with correct polarity
		 */
		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;

		/* if required, combine VRHot/PCC with thermal out GPIO */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
		&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
	} else {
		table->ThermOutGpio = 17;
		table->ThermOutPolarity = 1;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
	}

	/* Populate BIF_SCLK levels into SMC DPM table */
	/* NOTE: the '<=' bound is deliberate — bif_sclk_table holds
	 * count+1 entries; entry 0 feeds the ULV state and entries
	 * 1..count feed LinkLevel[0..count-1].
	 */
	for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);

		if (i == 0)
			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
		else
			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
	}

	/* Byte-swap the remaining multi-byte fields for the SMC. */
	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);

	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
			data->dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
			(uint8_t *)&(table->SystemFlags),
			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
			data->sram_end);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload dpm data to SMC memory!", return result);

	return 0;
}
2229
2230/**
2231* Initialize the ARB DRAM timing table's index field.
2232*
2233* @param hwmgr the address of the powerplay hardware manager.
2234* @return always 0
2235*/
2236static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
2237{
2238 const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2239 uint32_t tmp;
2240 int result;
2241
2242 /* This is a read-modify-write on the first byte of the ARB table.
2243 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
2244 * is the field 'current'.
2245 * This solution is ugly, but we never write the whole table only
2246 * individual fields in it.
2247 * In reality this field should not be in that structure
2248 * but in a soft register.
2249 */
2250 result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
2251 data->arb_table_start, &tmp, data->sram_end);
2252
2253 if (result)
2254 return result;
2255
2256 tmp &= 0x00FFFFFF;
2257 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
2258
2259 return polaris10_write_smc_sram_dword(hwmgr->smumgr,
2260 data->arb_table_start, tmp, data->sram_end);
2261}
2262
2263static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
2264{
2265 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2266 PHM_PlatformCaps_RegulatorHot))
2267 return smum_send_msg_to_smc(hwmgr->smumgr,
2268 PPSMC_MSG_EnableVRHotGPIOInterrupt);
2269
2270 return 0;
2271}
2272
2273static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
2274{
2275 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
2276 SCLK_PWRMGT_OFF, 0);
2277 return 0;
2278}
2279
2280static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
2281{
2282 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2283 struct polaris10_ulv_parm *ulv = &(data->ulv);
2284
2285 if (ulv->ulv_supported)
2286 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
2287
2288 return 0;
2289}
2290
2291static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
2292{
2293 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2294 struct polaris10_ulv_parm *ulv = &(data->ulv);
2295
2296 if (ulv->ulv_supported)
2297 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
2298
2299 return 0;
2300}
2301
2302static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2303{
2304 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2305 PHM_PlatformCaps_SclkDeepSleep)) {
2306 if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
2307 PP_ASSERT_WITH_CODE(false,
2308 "Attempt to enable Master Deep Sleep switch failed!",
2309 return -1);
2310 } else {
2311 if (smum_send_msg_to_smc(hwmgr->smumgr,
2312 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
2313 PP_ASSERT_WITH_CODE(false,
2314 "Attempt to disable Master Deep Sleep switch failed!",
2315 return -1);
2316 }
2317 }
2318
2319 return 0;
2320}
2321
2322static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2323{
2324 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2325 PHM_PlatformCaps_SclkDeepSleep)) {
2326 if (smum_send_msg_to_smc(hwmgr->smumgr,
2327 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
2328 PP_ASSERT_WITH_CODE(false,
2329 "Attempt to disable Master Deep Sleep switch failed!",
2330 return -1);
2331 }
2332 }
2333
2334 return 0;
2335}
2336
static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	/* Enable SCLK DPM and then MCLK DPM in the SMC (each skipped if
	 * its key-disable flag is set).  Returns -1 on any SMC failure.
	 */
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ offsetof(SMU74_SoftRegisters, HandshakeDisables);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -1);

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		/* Disable UVD - SMU handshake for MCLK by setting the
		 * corresponding bit in the HandshakeDisables soft register.
		 */
		soft_register_value = cgs_read_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, handshake_disables_offset);
		soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				handshake_disables_offset, soft_register_value);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -1);

		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		/* Two-phase LCAC (MC0/MC1/CPL) programming with a settle
		 * delay in between.  NOTE(review): the exact magic values
		 * come from hardware programming guides — do not reorder.
		 */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
		udelay(10);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
	}

	return 0;
}
2379
static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
{
	/* Bring up dynamic power management: global PM enable, sclk deep
	 * sleep, PCIe DPM preparation, then SCLK/MCLK and PCIe DPM.
	 * Returns -1 on any failure.
	 */
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/*enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM: set the voltage-change timeout soft
	 * register and release the PCIe link-control reset.
	 */

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + offsetof(SMU74_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);
/* NOTE(review): the voltage DPM enable below is deliberately commented
 * out; confirm whether PPSMC_MSG_Voltage_Cntl_Enable should be sent here.
	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -1);
*/

	if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
		return -1;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -1);
	}

	/* AC/DC GPIO interrupt is best-effort: a failure is logged via the
	 * assert but does not abort DPM start.
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
2433
2434static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2435{
2436 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2437
2438 /* disable SCLK dpm */
2439 if (!data->sclk_dpm_key_disabled)
2440 PP_ASSERT_WITH_CODE(
2441 (smum_send_msg_to_smc(hwmgr->smumgr,
2442 PPSMC_MSG_DPM_Disable) == 0),
2443 "Failed to disable SCLK DPM!",
2444 return -1);
2445
2446 /* disable MCLK dpm */
2447 if (!data->mclk_dpm_key_disabled) {
2448 PP_ASSERT_WITH_CODE(
2449 (smum_send_msg_to_smc(hwmgr->smumgr,
2450 PPSMC_MSG_MCLKDPM_Disable) == 0),
2451 "Failed to disable MCLK DPM!",
2452 return -1);
2453 }
2454
2455 return 0;
2456}
2457
static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
{
	/* Tear down dynamic power management: global PM disable, sclk deep
	 * sleep disable, then PCIe and SCLK/MCLK DPM.  Returns -1 on any
	 * failure.
	 */
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm (skipped when key-disabled) */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -1);
	}

	if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
		return -1;
	}

	return 0;
}
2485
2486static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
2487{
2488 bool protection;
2489 enum DPM_EVENT_SRC src;
2490
2491 switch (sources) {
2492 default:
2493 printk(KERN_ERR "Unknown throttling event sources.");
2494 /* fall through */
2495 case 0:
2496 protection = false;
2497 /* src is unused */
2498 break;
2499 case (1 << PHM_AutoThrottleSource_Thermal):
2500 protection = true;
2501 src = DPM_EVENT_SRC_DIGITAL;
2502 break;
2503 case (1 << PHM_AutoThrottleSource_External):
2504 protection = true;
2505 src = DPM_EVENT_SRC_EXTERNAL;
2506 break;
2507 case (1 << PHM_AutoThrottleSource_External) |
2508 (1 << PHM_AutoThrottleSource_Thermal):
2509 protection = true;
2510 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
2511 break;
2512 }
2513 /* Order matters - don't enable thermal protection for the wrong source. */
2514 if (protection) {
2515 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
2516 DPM_EVENT_SRC, src);
2517 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2518 THERMAL_PROTECTION_DIS,
2519 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2520 PHM_PlatformCaps_ThermalController));
2521 } else
2522 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2523 THERMAL_PROTECTION_DIS, 1);
2524}
2525
2526static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
2527 PHM_AutoThrottleSource source)
2528{
2529 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2530
2531 if (!(data->active_auto_throttle_sources & (1 << source))) {
2532 data->active_auto_throttle_sources |= 1 << source;
2533 polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
2534 }
2535 return 0;
2536}
2537
2538static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
2539{
2540 return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
2541}
2542
2543static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
2544 PHM_AutoThrottleSource source)
2545{
2546 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2547
2548 if (data->active_auto_throttle_sources & (1 << source)) {
2549 data->active_auto_throttle_sources &= ~(1 << source);
2550 polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
2551 }
2552 return 0;
2553}
2554
/* Convenience wrapper: disable thermal as an auto-throttle source. */
static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
2559
2560static int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
2561{
2562 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2563 data->pcie_performance_request = true;
2564
2565 return 0;
2566}
2567
2568static int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2569{
2570 int tmp_result, result = 0;
2571 tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
2572 PP_ASSERT_WITH_CODE(result == 0,
2573 "DPM is already running right now, no need to enable DPM!",
2574 return 0);
2575
2576 if (polaris10_voltage_control(hwmgr)) {
2577 tmp_result = polaris10_enable_voltage_control(hwmgr);
2578 PP_ASSERT_WITH_CODE(tmp_result == 0,
2579 "Failed to enable voltage control!",
2580 result = tmp_result);
2581
2582 tmp_result = polaris10_construct_voltage_tables(hwmgr);
2583 PP_ASSERT_WITH_CODE((0 == tmp_result),
2584 "Failed to contruct voltage tables!",
2585 result = tmp_result);
2586 }
2587
2588 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2589 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
2590 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2591 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
2592
2593 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2594 PHM_PlatformCaps_ThermalController))
2595 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2596 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
2597
2598 tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
2599 PP_ASSERT_WITH_CODE((0 == tmp_result),
2600 "Failed to program static screen threshold parameters!",
2601 result = tmp_result);
2602
2603 tmp_result = polaris10_enable_display_gap(hwmgr);
2604 PP_ASSERT_WITH_CODE((0 == tmp_result),
2605 "Failed to enable display gap!", result = tmp_result);
2606
2607 tmp_result = polaris10_program_voting_clients(hwmgr);
2608 PP_ASSERT_WITH_CODE((0 == tmp_result),
2609 "Failed to program voting clients!", result = tmp_result);
2610
2611 tmp_result = polaris10_process_firmware_header(hwmgr);
2612 PP_ASSERT_WITH_CODE((0 == tmp_result),
2613 "Failed to process firmware header!", result = tmp_result);
2614
2615 tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
2616 PP_ASSERT_WITH_CODE((0 == tmp_result),
2617 "Failed to initialize switch from ArbF0 to F1!",
2618 result = tmp_result);
2619
2620 tmp_result = polaris10_init_smc_table(hwmgr);
2621 PP_ASSERT_WITH_CODE((0 == tmp_result),
2622 "Failed to initialize SMC table!", result = tmp_result);
2623
2624 tmp_result = polaris10_init_arb_table_index(hwmgr);
2625 PP_ASSERT_WITH_CODE((0 == tmp_result),
2626 "Failed to initialize ARB table index!", result = tmp_result);
2627
2628 tmp_result = polaris10_populate_pm_fuses(hwmgr);
2629 PP_ASSERT_WITH_CODE((0 == tmp_result),
2630 "Failed to populate PM fuses!", result = tmp_result);
2631
2632 tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
2633 PP_ASSERT_WITH_CODE((0 == tmp_result),
2634 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
2635
2636 smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
2637
2638 tmp_result = polaris10_enable_sclk_control(hwmgr);
2639 PP_ASSERT_WITH_CODE((0 == tmp_result),
2640 "Failed to enable SCLK control!", result = tmp_result);
2641
2642 tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
2643 PP_ASSERT_WITH_CODE((0 == tmp_result),
2644 "Failed to enable voltage control!", result = tmp_result);
2645
2646 tmp_result = polaris10_enable_ulv(hwmgr);
2647 PP_ASSERT_WITH_CODE((0 == tmp_result),
2648 "Failed to enable ULV!", result = tmp_result);
2649
2650 tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
2651 PP_ASSERT_WITH_CODE((0 == tmp_result),
2652 "Failed to enable deep sleep master switch!", result = tmp_result);
2653
2654 tmp_result = polaris10_enable_didt_config(hwmgr);
2655 PP_ASSERT_WITH_CODE((tmp_result == 0),
2656 "Failed to enable deep sleep master switch!", result = tmp_result);
2657
2658 tmp_result = polaris10_start_dpm(hwmgr);
2659 PP_ASSERT_WITH_CODE((0 == tmp_result),
2660 "Failed to start DPM!", result = tmp_result);
2661
2662 tmp_result = polaris10_enable_smc_cac(hwmgr);
2663 PP_ASSERT_WITH_CODE((0 == tmp_result),
2664 "Failed to enable SMC CAC!", result = tmp_result);
2665
2666 tmp_result = polaris10_enable_power_containment(hwmgr);
2667 PP_ASSERT_WITH_CODE((0 == tmp_result),
2668 "Failed to enable power containment!", result = tmp_result);
2669
2670 tmp_result = polaris10_power_control_set_level(hwmgr);
2671 PP_ASSERT_WITH_CODE((0 == tmp_result),
2672 "Failed to power control set level!", result = tmp_result);
2673
2674 tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
2675 PP_ASSERT_WITH_CODE((0 == tmp_result),
2676 "Failed to enable thermal auto throttle!", result = tmp_result);
2677
2678 tmp_result = polaris10_pcie_performance_request(hwmgr);
2679 PP_ASSERT_WITH_CODE((0 == tmp_result),
2680 "pcie performance request failed!", result = tmp_result);
2681
2682 return result;
2683}
2684
/**
 * Disable the DPM stack, tearing features down in roughly the reverse
 * order polaris10_enable_dpm_tasks() brought them up.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, otherwise the error of the last failing step.
 */
int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is not running right now, no need to disable DPM!",
			return 0);

	/* Force thermal protection off before dismantling DPM. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = polaris10_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = polaris10_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	/* Turn off engine spread spectrum. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = polaris10_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = polaris10_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = polaris10_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = polaris10_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	/* Return the memory arbiter to its boot-time ArbF0 set. */
	tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
2742
/* ASIC reset hook: intentionally a no-op on polaris10. Always returns 0. */
int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{

	return 0;
}
2748
/* Tear down the hwmgr backend; delegates to the generic phm fini. */
static int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
2753
2754static int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
2755{
2756 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2757
2758 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2759 PHM_PlatformCaps_DynamicPatchPowerState);
2760
2761 if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
2762 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2763 PHM_PlatformCaps_EnableMVDDControl);
2764
2765 if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
2766 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2767 PHM_PlatformCaps_ControlVDDCI);
2768
2769 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2770 PHM_PlatformCaps_TablelessHardwareInterface);
2771
2772 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2773 PHM_PlatformCaps_EnableSMU7ThermalManagement);
2774
2775 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2776 PHM_PlatformCaps_DynamicPowerManagement);
2777
2778 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2779 PHM_PlatformCaps_UnTabledHardwareInterface);
2780
2781 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2782 PHM_PlatformCaps_TablelessHardwareInterface);
2783
2784 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2785 PHM_PlatformCaps_SMC);
2786
2787 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2788 PHM_PlatformCaps_NonABMSupportInPPLib);
2789
2790 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2791 PHM_PlatformCaps_DynamicUVDState);
2792
2793 /* power tune caps Assume disabled */
2794 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2795 PHM_PlatformCaps_SQRamping);
2796 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2797 PHM_PlatformCaps_DBRamping);
2798 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2799 PHM_PlatformCaps_TDRamping);
2800 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2801 PHM_PlatformCaps_TCPRamping);
2802
2803 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2804 PHM_PlatformCaps_CAC);
2805
2806 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2807 PHM_PlatformCaps_RegulatorHot);
2808
2809 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2810 PHM_PlatformCaps_AutomaticDCTransition);
2811
2812 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2813 PHM_PlatformCaps_ODFuzzyFanControlSupport);
2814
2815 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2816 PHM_PlatformCaps_FanSpeedInTableIsRPM);
2817
2818 if (hwmgr->chip_id == CHIP_POLARIS11)
2819 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2820 PHM_PlatformCaps_SPLLShutdownSupport);
2821 return 0;
2822}
2823
/* Seed DPM defaults: the power-tune table plus the PCIe gen/lane ranges
 * used when arbitrating link speed and width. */
static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	polaris10_initialize_power_tune_defaults(hwmgr);

	/* NOTE(review): max is set to Gen1/0 lanes and min to Gen3/16 lanes,
	 * which reads inverted -- confirm against the consumers of these
	 * ranges before changing anything here. */
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;
}
2839
2840/**
2841* Get Leakage VDDC based on leakage ID.
2842*
2843* @param hwmgr the address of the powerplay hardware manager.
2844* @return always 0
2845*/
2846static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
2847{
2848 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2849 uint16_t vv_id;
2850 uint32_t vddc = 0;
2851 uint16_t i, j;
2852 uint32_t sclk = 0;
2853 struct phm_ppt_v1_information *table_info =
2854 (struct phm_ppt_v1_information *)hwmgr->pptable;
2855 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2856 table_info->vdd_dep_on_sclk;
2857 int result;
2858
2859 for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
2860 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2861 if (!phm_get_sclk_for_voltage_evv(hwmgr,
2862 table_info->vddc_lookup_table, vv_id, &sclk)) {
2863 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2864 PHM_PlatformCaps_ClockStretcher)) {
2865 for (j = 1; j < sclk_table->count; j++) {
2866 if (sclk_table->entries[j].clk == sclk &&
2867 sclk_table->entries[j].cks_enable == 0) {
2868 sclk += 5000;
2869 break;
2870 }
2871 }
2872 }
2873
2874 if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
2875 VOLTAGE_TYPE_VDDC,
2876 sclk, vv_id, &vddc) != 0) {
2877 printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
2878 continue;
2879 }
2880
2881 /* need to make sure vddc is less than 2V or else, it could burn the ASIC.
2882 * real voltage level in unit of 0.01mV */
2883 PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
2884 "Invalid VDDC value", result = -EINVAL;);
2885
2886 /* the voltage should not be zero nor equal to leakage ID */
2887 if (vddc != 0 && vddc != vv_id) {
2888 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
2889 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2890 data->vddc_leakage.count++;
2891 }
2892 }
2893 }
2894
2895 return 0;
2896}
2897
2898/**
2899 * Change virtual leakage voltage to actual value.
2900 *
2901 * @param hwmgr the address of the powerplay hardware manager.
2902 * @param pointer to changing voltage
2903 * @param pointer to leakage table
2904 */
2905static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2906 uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
2907{
2908 uint32_t index;
2909
2910 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2911 for (index = 0; index < leakage_table->count; index++) {
2912 /* if this voltage matches a leakage voltage ID */
2913 /* patch with actual leakage voltage */
2914 if (leakage_table->leakage_id[index] == *voltage) {
2915 *voltage = leakage_table->actual_voltage[index];
2916 break;
2917 }
2918 }
2919
2920 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2921 printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
2922}
2923
2924/**
2925* Patch voltage lookup table by EVV leakages.
2926*
2927* @param hwmgr the address of the powerplay hardware manager.
2928* @param pointer to voltage lookup table
2929* @param pointer to leakage table
2930* @return always 0
2931*/
2932static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2933 phm_ppt_v1_voltage_lookup_table *lookup_table,
2934 struct polaris10_leakage_voltage *leakage_table)
2935{
2936 uint32_t i;
2937
2938 for (i = 0; i < lookup_table->count; i++)
2939 polaris10_patch_with_vdd_leakage(hwmgr,
2940 &lookup_table->entries[i].us_vdd, leakage_table);
2941
2942 return 0;
2943}
2944
2945static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
2946 struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
2947 uint16_t *vddc)
2948{
2949 struct phm_ppt_v1_information *table_info =
2950 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2951 polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2952 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2953 table_info->max_clock_voltage_on_dc.vddc;
2954 return 0;
2955}
2956
2957static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
2958 struct pp_hwmgr *hwmgr)
2959{
2960 uint8_t entryId;
2961 uint8_t voltageId;
2962 struct phm_ppt_v1_information *table_info =
2963 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2964
2965 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2966 table_info->vdd_dep_on_sclk;
2967 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2968 table_info->vdd_dep_on_mclk;
2969 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2970 table_info->mm_dep_table;
2971
2972 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
2973 voltageId = sclk_table->entries[entryId].vddInd;
2974 sclk_table->entries[entryId].vddc =
2975 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2976 }
2977
2978 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
2979 voltageId = mclk_table->entries[entryId].vddInd;
2980 mclk_table->entries[entryId].vddc =
2981 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2982 }
2983
2984 for (entryId = 0; entryId < mm_table->count; ++entryId) {
2985 voltageId = mm_table->entries[entryId].vddcInd;
2986 mm_table->entries[entryId].vddc =
2987 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2988 }
2989
2990 return 0;
2991
2992}
2993
/* Placeholder: no calculated voltages are required yet. Always returns 0. */
static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	/* Need to determine if we need calculated voltage. */
	return 0;
}
2999
/* Placeholder: no calculated MM-table voltages are required yet. Always 0. */
static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	/* Need to determine if we need calculated voltage from mm table. */
	return 0;
}
3005
3006static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
3007 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
3008{
3009 uint32_t table_size, i, j;
3010 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
3011 table_size = lookup_table->count;
3012
3013 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
3014 "Lookup table is empty", return -EINVAL);
3015
3016 /* Sorting voltages */
3017 for (i = 0; i < table_size - 1; i++) {
3018 for (j = i + 1; j > 0; j--) {
3019 if (lookup_table->entries[j].us_vdd <
3020 lookup_table->entries[j - 1].us_vdd) {
3021 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
3022 lookup_table->entries[j - 1] = lookup_table->entries[j];
3023 lookup_table->entries[j] = tmp_voltage_lookup_record;
3024 }
3025 }
3026 }
3027
3028 return 0;
3029}
3030
/**
 * Run the full pptable post-processing pipeline: patch leakage voltages
 * into the lookup table and DC limits, resolve dependency-table indices,
 * and sort the lookup table.  Steps keep running after a failure; the
 * last error encountered is returned.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, otherwise the last failing step's error.
 */
static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
			table_info->vddc_lookup_table, &(data->vddc_leakage));
	if (tmp_result)
		result = tmp_result;

	tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
	if (tmp_result)
		result = tmp_result;

	tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
	if (tmp_result)
		result = tmp_result;

	tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	/* Sort last, after all voltages have been patched/calculated. */
	tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
	if (tmp_result)
		result = tmp_result;

	return result;
}
3067
3068static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
3069{
3070 struct phm_ppt_v1_information *table_info =
3071 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3072
3073 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
3074 table_info->vdd_dep_on_sclk;
3075 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
3076 table_info->vdd_dep_on_mclk;
3077
3078 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
3079 "VDD dependency on SCLK table is missing. \
3080 This table is mandatory", return -EINVAL);
3081 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
3082 "VDD dependency on SCLK table has to have is missing. \
3083 This table is mandatory", return -EINVAL);
3084
3085 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
3086 "VDD dependency on MCLK table is missing. \
3087 This table is mandatory", return -EINVAL);
3088 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
3089 "VDD dependency on MCLK table has to have is missing. \
3090 This table is mandatory", return -EINVAL);
3091
3092 table_info->max_clock_voltage_on_ac.sclk =
3093 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
3094 table_info->max_clock_voltage_on_ac.mclk =
3095 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
3096 table_info->max_clock_voltage_on_ac.vddc =
3097 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3098 table_info->max_clock_voltage_on_ac.vddci =
3099 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
3100
3101 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
3102 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
3103 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
3104 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =table_info->max_clock_voltage_on_ac.vddci;
3105
3106 return 0;
3107}
3108
/**
 * Board-specific voltage workaround for certain Polaris10 (rev 0xC7)
 * boards, matched by PCI subsystem vendor/device IDs: if the top MCLK
 * dependency entry resolves to a voltage below 1000 (units as stored in
 * us_vdd), repoint it at the first lookup entry that is a real voltage
 * (< 0xff01, i.e. not a leakage ID) and >= 1000.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
			table_info->vddc_lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct cgs_system_info sys_info = {0};

	sys_info.size = sizeof(struct cgs_system_info);

	/* Identify the exact board via PCIe revision and subsystem IDs. */
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	hw_revision = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
	cgs_query_system_info(hwmgr->device, &sys_info);
	sub_sys_id = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
	cgs_query_system_info(hwmgr->device, &sys_info);
	sub_vendor_id = (uint32_t)sys_info.value;

	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
		/* Already high enough: nothing to patch. */
		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}
3151
3152
3153static int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
3154{
3155 struct polaris10_hwmgr *data;
3156 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
3157 uint32_t temp_reg;
3158 int result;
3159 struct phm_ppt_v1_information *table_info =
3160 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3161
3162 data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
3163 if (data == NULL)
3164 return -ENOMEM;
3165
3166 hwmgr->backend = data;
3167
3168 data->dll_default_on = false;
3169 data->sram_end = SMC_RAM_END;
3170 data->mclk_dpm0_activity_target = 0xa;
3171 data->disable_dpm_mask = 0xFF;
3172 data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
3173 data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
3174 data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3175 data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3176 data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3177 data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3178 data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3179 data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3180 data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3181 data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3182
3183 data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
3184 data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
3185 data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
3186 data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
3187 data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
3188 data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
3189 data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
3190 data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
3191
3192 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
3193
3194 data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
3195
3196 /* need to set voltage control types before EVV patching */
3197 data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
3198 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
3199 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
3200
3201 data->enable_tdc_limit_feature = true;
3202 data->enable_pkg_pwr_tracking_feature = true;
3203 data->force_pcie_gen = PP_PCIEGenInvalid;
3204 data->mclk_stutter_mode_threshold = 40000;
3205
3206 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3207 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
3208 data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
3209
3210 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3211 PHM_PlatformCaps_EnableMVDDControl)) {
3212 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3213 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
3214 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
3215 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3216 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
3217 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
3218 }
3219
3220 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3221 PHM_PlatformCaps_ControlVDDCI)) {
3222 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3223 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
3224 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
3225 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3226 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
3227 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
3228 }
3229
3230 if (table_info->cac_dtp_table->usClockStretchAmount != 0)
3231 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3232 PHM_PlatformCaps_ClockStretcher);
3233
3234 polaris10_set_features_platform_caps(hwmgr);
3235
3236 polaris10_patch_voltage_workaround(hwmgr);
3237 polaris10_init_dpm_defaults(hwmgr);
3238
3239 /* Get leakage voltage based on leakage ID. */
3240 result = polaris10_get_evv_voltages(hwmgr);
3241
3242 if (result) {
3243 printk("Get EVV Voltage Failed. Abort Driver loading!\n");
3244 return -1;
3245 }
3246
3247 polaris10_complete_dependency_tables(hwmgr);
3248 polaris10_set_private_data_based_on_pptable(hwmgr);
3249
3250 /* Initalize Dynamic State Adjustment Rule Settings */
3251 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
3252
3253 if (0 == result) {
3254 struct cgs_system_info sys_info = {0};
3255
3256 data->is_tlu_enabled = false;
3257
3258 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
3259 POLARIS10_MAX_HARDWARE_POWERLEVELS;
3260 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
3261 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
3262
3263
3264 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
3265 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
3266 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
3267 case 0:
3268 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
3269 break;
3270 case 1:
3271 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
3272 break;
3273 case 2:
3274 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
3275 break;
3276 case 3:
3277 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
3278 break;
3279 case 4:
3280 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
3281 break;
3282 default:
3283 PP_ASSERT_WITH_CODE(0,
3284 "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
3285 );
3286 break;
3287 }
3288 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
3289 }
3290
3291 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
3292 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
3293 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
3294 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
3295
3296 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
3297 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
3298
3299 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
3300
3301 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
3302
3303 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
3304 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
3305
3306 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
3307
3308 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
3309 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp -50) : 0;
3310
3311 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
3312 table_info->cac_dtp_table->usOperatingTempStep = 1;
3313 table_info->cac_dtp_table->usOperatingTempHyst = 1;
3314
3315 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
3316 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
3317
3318 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
3319 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
3320
3321 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
3322 table_info->cac_dtp_table->usOperatingTempMinLimit;
3323
3324 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
3325 table_info->cac_dtp_table->usOperatingTempMaxLimit;
3326
3327 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
3328 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
3329
3330 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
3331 table_info->cac_dtp_table->usOperatingTempStep;
3332
3333 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
3334 table_info->cac_dtp_table->usTargetOperatingTemp;
3335 }
3336
3337 sys_info.size = sizeof(struct cgs_system_info);
3338 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
3339 result = cgs_query_system_info(hwmgr->device, &sys_info);
3340 if (result)
3341 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3342 else
3343 data->pcie_gen_cap = (uint32_t)sys_info.value;
3344 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
3345 data->pcie_spc_cap = 20;
3346 sys_info.size = sizeof(struct cgs_system_info);
3347 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
3348 result = cgs_query_system_info(hwmgr->device, &sys_info);
3349 if (result)
3350 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3351 else
3352 data->pcie_lane_cap = (uint32_t)sys_info.value;
3353
3354 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
3355/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
3356 hwmgr->platform_descriptor.clockStep.engineClock = 500;
3357 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
3358 } else {
3359 /* Ignore return value in here, we are cleaning up a mess. */
3360 polaris10_hwmgr_backend_fini(hwmgr);
3361 }
3362
3363 return 0;
3364}
3365
3366static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3367{
3368 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3369 uint32_t level, tmp;
3370
3371 if (!data->pcie_dpm_key_disabled) {
3372 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3373 level = 0;
3374 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3375 while (tmp >>= 1)
3376 level++;
3377
3378 if (level)
3379 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3380 PPSMC_MSG_PCIeDPM_ForceLevel, level);
3381 }
3382 }
3383
3384 if (!data->sclk_dpm_key_disabled) {
3385 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3386 level = 0;
3387 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3388 while (tmp >>= 1)
3389 level++;
3390
3391 if (level)
3392 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3393 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3394 (1 << level));
3395 }
3396 }
3397
3398 if (!data->mclk_dpm_key_disabled) {
3399 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3400 level = 0;
3401 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3402 while (tmp >>= 1)
3403 level++;
3404
3405 if (level)
3406 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3407 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3408 (1 << level));
3409 }
3410 }
3411
3412 return 0;
3413}
3414
3415static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3416{
3417 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3418
3419 phm_apply_dal_min_voltage_request(hwmgr);
3420
3421 if (!data->sclk_dpm_key_disabled) {
3422 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3423 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3424 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3425 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3426 }
3427
3428 if (!data->mclk_dpm_key_disabled) {
3429 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
3430 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3431 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3432 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3433 }
3434
3435 return 0;
3436}
3437
3438static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3439{
3440 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3441
3442 if (!polaris10_is_dpm_running(hwmgr))
3443 return -EINVAL;
3444
3445 if (!data->pcie_dpm_key_disabled) {
3446 smum_send_msg_to_smc(hwmgr->smumgr,
3447 PPSMC_MSG_PCIeDPM_UnForceLevel);
3448 }
3449
3450 return polaris10_upload_dpm_level_enable_mask(hwmgr);
3451}
3452
3453static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3454{
3455 struct polaris10_hwmgr *data =
3456 (struct polaris10_hwmgr *)(hwmgr->backend);
3457 uint32_t level;
3458
3459 if (!data->sclk_dpm_key_disabled)
3460 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3461 level = phm_get_lowest_enabled_level(hwmgr,
3462 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3463 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3464 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3465 (1 << level));
3466
3467 }
3468
3469 if (!data->mclk_dpm_key_disabled) {
3470 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3471 level = phm_get_lowest_enabled_level(hwmgr,
3472 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3473 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3474 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3475 (1 << level));
3476 }
3477 }
3478
3479 if (!data->pcie_dpm_key_disabled) {
3480 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3481 level = phm_get_lowest_enabled_level(hwmgr,
3482 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3483 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3484 PPSMC_MSG_PCIeDPM_ForceLevel,
3485 (level));
3486 }
3487 }
3488
3489 return 0;
3490
3491}
3492static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
3493 enum amd_dpm_forced_level level)
3494{
3495 int ret = 0;
3496
3497 switch (level) {
3498 case AMD_DPM_FORCED_LEVEL_HIGH:
3499 ret = polaris10_force_dpm_highest(hwmgr);
3500 if (ret)
3501 return ret;
3502 break;
3503 case AMD_DPM_FORCED_LEVEL_LOW:
3504 ret = polaris10_force_dpm_lowest(hwmgr);
3505 if (ret)
3506 return ret;
3507 break;
3508 case AMD_DPM_FORCED_LEVEL_AUTO:
3509 ret = polaris10_unforce_dpm_levels(hwmgr);
3510 if (ret)
3511 return ret;
3512 break;
3513 default:
3514 break;
3515 }
3516
3517 hwmgr->dpm_level = level;
3518
3519 return ret;
3520}
3521
/* Size in bytes of one polaris10 hardware power state (used by the core
 * powerplay code to size its power-state allocations). */
static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct polaris10_power_state);
}
3526
3527
3528static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3529 struct pp_power_state *request_ps,
3530 const struct pp_power_state *current_ps)
3531{
3532
3533 struct polaris10_power_state *polaris10_ps =
3534 cast_phw_polaris10_power_state(&request_ps->hardware);
3535 uint32_t sclk;
3536 uint32_t mclk;
3537 struct PP_Clocks minimum_clocks = {0};
3538 bool disable_mclk_switching;
3539 bool disable_mclk_switching_for_frame_lock;
3540 struct cgs_display_info info = {0};
3541 const struct phm_clock_and_voltage_limits *max_limits;
3542 uint32_t i;
3543 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3544 struct phm_ppt_v1_information *table_info =
3545 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3546 int32_t count;
3547 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3548
3549 data->battery_state = (PP_StateUILabel_Battery ==
3550 request_ps->classification.ui_label);
3551
3552 PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
3553 "VI should always have 2 performance levels",
3554 );
3555
3556 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3557 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3558 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3559
3560 /* Cap clock DPM tables at DC MAX if it is in DC. */
3561 if (PP_PowerSource_DC == hwmgr->power_source) {
3562 for (i = 0; i < polaris10_ps->performance_level_count; i++) {
3563 if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
3564 polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
3565 if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
3566 polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
3567 }
3568 }
3569
3570 polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3571 polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3572
3573 cgs_get_active_displays_info(hwmgr->device, &info);
3574
3575 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3576
3577 /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
3578
3579 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3580 PHM_PlatformCaps_StablePState)) {
3581 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3582 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3583
3584 for (count = table_info->vdd_dep_on_sclk->count - 1;
3585 count >= 0; count--) {
3586 if (stable_pstate_sclk >=
3587 table_info->vdd_dep_on_sclk->entries[count].clk) {
3588 stable_pstate_sclk =
3589 table_info->vdd_dep_on_sclk->entries[count].clk;
3590 break;
3591 }
3592 }
3593
3594 if (count < 0)
3595 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3596
3597 stable_pstate_mclk = max_limits->mclk;
3598
3599 minimum_clocks.engineClock = stable_pstate_sclk;
3600 minimum_clocks.memoryClock = stable_pstate_mclk;
3601 }
3602
3603 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3604 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3605
3606 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3607 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3608
3609 polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3610
3611 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
3612 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3613 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3614 "Overdrive sclk exceeds limit",
3615 hwmgr->gfx_arbiter.sclk_over_drive =
3616 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3617
3618 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3619 polaris10_ps->performance_levels[1].engine_clock =
3620 hwmgr->gfx_arbiter.sclk_over_drive;
3621 }
3622
3623 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
3624 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3625 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3626 "Overdrive mclk exceeds limit",
3627 hwmgr->gfx_arbiter.mclk_over_drive =
3628 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3629
3630 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3631 polaris10_ps->performance_levels[1].memory_clock =
3632 hwmgr->gfx_arbiter.mclk_over_drive;
3633 }
3634
3635 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3636 hwmgr->platform_descriptor.platformCaps,
3637 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3638
3639
3640 disable_mclk_switching = (1 < info.display_count) ||
3641 disable_mclk_switching_for_frame_lock;
3642
3643 sclk = polaris10_ps->performance_levels[0].engine_clock;
3644 mclk = polaris10_ps->performance_levels[0].memory_clock;
3645
3646 if (disable_mclk_switching)
3647 mclk = polaris10_ps->performance_levels
3648 [polaris10_ps->performance_level_count - 1].memory_clock;
3649
3650 if (sclk < minimum_clocks.engineClock)
3651 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3652 max_limits->sclk : minimum_clocks.engineClock;
3653
3654 if (mclk < minimum_clocks.memoryClock)
3655 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3656 max_limits->mclk : minimum_clocks.memoryClock;
3657
3658 polaris10_ps->performance_levels[0].engine_clock = sclk;
3659 polaris10_ps->performance_levels[0].memory_clock = mclk;
3660
3661 polaris10_ps->performance_levels[1].engine_clock =
3662 (polaris10_ps->performance_levels[1].engine_clock >=
3663 polaris10_ps->performance_levels[0].engine_clock) ?
3664 polaris10_ps->performance_levels[1].engine_clock :
3665 polaris10_ps->performance_levels[0].engine_clock;
3666
3667 if (disable_mclk_switching) {
3668 if (mclk < polaris10_ps->performance_levels[1].memory_clock)
3669 mclk = polaris10_ps->performance_levels[1].memory_clock;
3670
3671 polaris10_ps->performance_levels[0].memory_clock = mclk;
3672 polaris10_ps->performance_levels[1].memory_clock = mclk;
3673 } else {
3674 if (polaris10_ps->performance_levels[1].memory_clock <
3675 polaris10_ps->performance_levels[0].memory_clock)
3676 polaris10_ps->performance_levels[1].memory_clock =
3677 polaris10_ps->performance_levels[0].memory_clock;
3678 }
3679
3680 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3681 PHM_PlatformCaps_StablePState)) {
3682 for (i = 0; i < polaris10_ps->performance_level_count; i++) {
3683 polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3684 polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3685 polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3686 polaris10_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
3687 }
3688 }
3689 return 0;
3690}
3691
3692
3693static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3694{
3695 struct pp_power_state *ps;
3696 struct polaris10_power_state *polaris10_ps;
3697
3698 if (hwmgr == NULL)
3699 return -EINVAL;
3700
3701 ps = hwmgr->request_ps;
3702
3703 if (ps == NULL)
3704 return -EINVAL;
3705
3706 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
3707
3708 if (low)
3709 return polaris10_ps->performance_levels[0].memory_clock;
3710 else
3711 return polaris10_ps->performance_levels
3712 [polaris10_ps->performance_level_count-1].memory_clock;
3713}
3714
3715static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3716{
3717 struct pp_power_state *ps;
3718 struct polaris10_power_state *polaris10_ps;
3719
3720 if (hwmgr == NULL)
3721 return -EINVAL;
3722
3723 ps = hwmgr->request_ps;
3724
3725 if (ps == NULL)
3726 return -EINVAL;
3727
3728 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
3729
3730 if (low)
3731 return polaris10_ps->performance_levels[0].engine_clock;
3732 else
3733 return polaris10_ps->performance_levels
3734 [polaris10_ps->performance_level_count-1].engine_clock;
3735}
3736
3737static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3738 struct pp_hw_power_state *hw_ps)
3739{
3740 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3741 struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
3742 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3743 uint16_t size;
3744 uint8_t frev, crev;
3745 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3746
3747 /* First retrieve the Boot clocks and VDDC from the firmware info table.
3748 * We assume here that fw_info is unchanged if this call fails.
3749 */
3750 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
3751 hwmgr->device, index,
3752 &size, &frev, &crev);
3753 if (!fw_info)
3754 /* During a test, there is no firmware info table. */
3755 return 0;
3756
3757 /* Patch the state. */
3758 data->vbios_boot_state.sclk_bootup_value =
3759 le32_to_cpu(fw_info->ulDefaultEngineClock);
3760 data->vbios_boot_state.mclk_bootup_value =
3761 le32_to_cpu(fw_info->ulDefaultMemoryClock);
3762 data->vbios_boot_state.mvdd_bootup_value =
3763 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3764 data->vbios_boot_state.vddc_bootup_value =
3765 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3766 data->vbios_boot_state.vddci_bootup_value =
3767 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3768 data->vbios_boot_state.pcie_gen_bootup_value =
3769 phm_get_current_pcie_speed(hwmgr);
3770
3771 data->vbios_boot_state.pcie_lane_bootup_value =
3772 (uint16_t)phm_get_current_pcie_lane_number(hwmgr);
3773
3774 /* set boot power state */
3775 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3776 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3777 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3778 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3779
3780 return 0;
3781}
3782
3783static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3784 void *state, struct pp_power_state *power_state,
3785 void *pp_table, uint32_t classification_flag)
3786{
3787 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3788 struct polaris10_power_state *polaris10_power_state =
3789 (struct polaris10_power_state *)(&(power_state->hardware));
3790 struct polaris10_performance_level *performance_level;
3791 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3792 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3793 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3794 PPTable_Generic_SubTable_Header *sclk_dep_table =
3795 (PPTable_Generic_SubTable_Header *)
3796 (((unsigned long)powerplay_table) +
3797 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3798
3799 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3800 (ATOM_Tonga_MCLK_Dependency_Table *)
3801 (((unsigned long)powerplay_table) +
3802 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3803
3804 /* The following fields are not initialized here: id orderedList allStatesList */
3805 power_state->classification.ui_label =
3806 (le16_to_cpu(state_entry->usClassification) &
3807 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3808 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3809 power_state->classification.flags = classification_flag;
3810 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3811
3812 power_state->classification.temporary_state = false;
3813 power_state->classification.to_be_deleted = false;
3814
3815 power_state->validation.disallowOnDC =
3816 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3817 ATOM_Tonga_DISALLOW_ON_DC));
3818
3819 power_state->pcie.lanes = 0;
3820
3821 power_state->display.disableFrameModulation = false;
3822 power_state->display.limitRefreshrate = false;
3823 power_state->display.enableVariBright =
3824 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3825 ATOM_Tonga_ENABLE_VARIBRIGHT));
3826
3827 power_state->validation.supportedPowerLevels = 0;
3828 power_state->uvd_clocks.VCLK = 0;
3829 power_state->uvd_clocks.DCLK = 0;
3830 power_state->temperatures.min = 0;
3831 power_state->temperatures.max = 0;
3832
3833 performance_level = &(polaris10_power_state->performance_levels
3834 [polaris10_power_state->performance_level_count++]);
3835
3836 PP_ASSERT_WITH_CODE(
3837 (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
3838 "Performance levels exceeds SMC limit!",
3839 return -1);
3840
3841 PP_ASSERT_WITH_CODE(
3842 (polaris10_power_state->performance_level_count <=
3843 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3844 "Performance levels exceeds Driver limit!",
3845 return -1);
3846
3847 /* Performance levels are arranged from low to high. */
3848 performance_level->memory_clock = mclk_dep_table->entries
3849 [state_entry->ucMemoryClockIndexLow].ulMclk;
3850 if (sclk_dep_table->ucRevId == 0)
3851 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3852 [state_entry->ucEngineClockIndexLow].ulSclk;
3853 else if (sclk_dep_table->ucRevId == 1)
3854 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3855 [state_entry->ucEngineClockIndexLow].ulSclk;
3856 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3857 state_entry->ucPCIEGenLow);
3858 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3859 state_entry->ucPCIELaneHigh);
3860
3861 performance_level = &(polaris10_power_state->performance_levels
3862 [polaris10_power_state->performance_level_count++]);
3863 performance_level->memory_clock = mclk_dep_table->entries
3864 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3865
3866 if (sclk_dep_table->ucRevId == 0)
3867 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3868 [state_entry->ucEngineClockIndexHigh].ulSclk;
3869 else if (sclk_dep_table->ucRevId == 1)
3870 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3871 [state_entry->ucEngineClockIndexHigh].ulSclk;
3872
3873 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3874 state_entry->ucPCIEGenHigh);
3875 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3876 state_entry->ucPCIELaneHigh);
3877
3878 return 0;
3879}
3880
3881static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3882 unsigned long entry_index, struct pp_power_state *state)
3883{
3884 int result;
3885 struct polaris10_power_state *ps;
3886 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3887 struct phm_ppt_v1_information *table_info =
3888 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3889 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3890 table_info->vdd_dep_on_mclk;
3891
3892 state->hardware.magic = PHM_VIslands_Magic;
3893
3894 ps = (struct polaris10_power_state *)(&state->hardware);
3895
3896 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3897 polaris10_get_pp_table_entry_callback_func);
3898
3899 /* This is the earliest time we have all the dependency table and the VBIOS boot state
3900 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
3901 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
3902 */
3903 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3904 if (dep_mclk_table->entries[0].clk !=
3905 data->vbios_boot_state.mclk_bootup_value)
3906 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
3907 "does not match VBIOS boot MCLK level");
3908 if (dep_mclk_table->entries[0].vddci !=
3909 data->vbios_boot_state.vddci_bootup_value)
3910 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
3911 "does not match VBIOS boot VDDCI level");
3912 }
3913
3914 /* set DC compatible flag if this state supports DC */
3915 if (!state->validation.disallowOnDC)
3916 ps->dc_compatible = true;
3917
3918 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3919 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3920
3921 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3922 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3923
3924 if (!result) {
3925 uint32_t i;
3926
3927 switch (state->classification.ui_label) {
3928 case PP_StateUILabel_Performance:
3929 data->use_pcie_performance_levels = true;
3930 for (i = 0; i < ps->performance_level_count; i++) {
3931 if (data->pcie_gen_performance.max <
3932 ps->performance_levels[i].pcie_gen)
3933 data->pcie_gen_performance.max =
3934 ps->performance_levels[i].pcie_gen;
3935
3936 if (data->pcie_gen_performance.min >
3937 ps->performance_levels[i].pcie_gen)
3938 data->pcie_gen_performance.min =
3939 ps->performance_levels[i].pcie_gen;
3940
3941 if (data->pcie_lane_performance.max <
3942 ps->performance_levels[i].pcie_lane)
3943 data->pcie_lane_performance.max =
3944 ps->performance_levels[i].pcie_lane;
3945 if (data->pcie_lane_performance.min >
3946 ps->performance_levels[i].pcie_lane)
3947 data->pcie_lane_performance.min =
3948 ps->performance_levels[i].pcie_lane;
3949 }
3950 break;
3951 case PP_StateUILabel_Battery:
3952 data->use_pcie_power_saving_levels = true;
3953
3954 for (i = 0; i < ps->performance_level_count; i++) {
3955 if (data->pcie_gen_power_saving.max <
3956 ps->performance_levels[i].pcie_gen)
3957 data->pcie_gen_power_saving.max =
3958 ps->performance_levels[i].pcie_gen;
3959
3960 if (data->pcie_gen_power_saving.min >
3961 ps->performance_levels[i].pcie_gen)
3962 data->pcie_gen_power_saving.min =
3963 ps->performance_levels[i].pcie_gen;
3964
3965 if (data->pcie_lane_power_saving.max <
3966 ps->performance_levels[i].pcie_lane)
3967 data->pcie_lane_power_saving.max =
3968 ps->performance_levels[i].pcie_lane;
3969
3970 if (data->pcie_lane_power_saving.min >
3971 ps->performance_levels[i].pcie_lane)
3972 data->pcie_lane_power_saving.min =
3973 ps->performance_levels[i].pcie_lane;
3974 }
3975 break;
3976 default:
3977 break;
3978 }
3979 }
3980 return 0;
3981}
3982
3983static void
3984polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
3985{
3986 uint32_t sclk, mclk, activity_percent;
3987 uint32_t offset;
3988 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3989
3990 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
3991
3992 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3993
3994 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
3995
3996 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3997 seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
3998 mclk / 100, sclk / 100);
3999
4000 offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
4001 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
4002 activity_percent += 0x80;
4003 activity_percent >>= 8;
4004
4005 seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
4006
4007 seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
4008
4009 seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
4010}
4011
/*
 * Determine which SMU DPM tables need regenerating/re-uploading for the
 * new power state, recording the result as flag bits in
 * data->need_update_smu7_dpm_table:
 *   DPMTABLE_OD_UPDATE_SCLK/_MCLK - requested clock not present in the
 *                                   current table (e.g. overdrive value),
 *   DPMTABLE_UPDATE_SCLK          - deep-sleep divider may need a refresh,
 *   DPMTABLE_UPDATE_MCLK          - active display count changed.
 */
static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct polaris10_power_state *polaris10_ps =
			cast_const_phw_polaris10_power_state(states->pnew_state);
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	/* Target clocks come from the state's highest performance level. */
	uint32_t sclk = polaris10_ps->performance_levels
			[polaris10_ps->performance_level_count - 1].engine_clock;
	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = polaris10_ps->performance_levels
			[polaris10_ps->performance_level_count - 1].memory_clock;
	/* NOTE(review): min_clocks stays zero-initialized (see TODO below),
	 * so the deep-sleep check currently compares against 0. */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_smu7_dpm_table = 0;

	/* Is the requested SCLK one of the table's existing levels? */
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
	/* TODO: Check SCLK in DAL's minimum clocks
	 * in case DeepSleep divider update is required.
	 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* Same membership test for the requested MCLK. */
	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/* A display-count change alters MCLK-switching constraints. */
	if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
4063
4064static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4065 const struct polaris10_power_state *polaris10_ps)
4066{
4067 uint32_t i;
4068 uint32_t sclk, max_sclk = 0;
4069 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4070 struct polaris10_dpm_table *dpm_table = &data->dpm_table;
4071
4072 for (i = 0; i < polaris10_ps->performance_level_count; i++) {
4073 sclk = polaris10_ps->performance_levels[i].engine_clock;
4074 if (max_sclk < sclk)
4075 max_sclk = sclk;
4076 }
4077
4078 for (i = 0; i < dpm_table->sclk_table.count; i++) {
4079 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4080 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4081 dpm_table->pcie_speed_table.dpm_levels
4082 [dpm_table->pcie_speed_table.count - 1].value :
4083 dpm_table->pcie_speed_table.dpm_levels[i].value);
4084 }
4085
4086 return 0;
4087}
4088
/*
 * Ask the platform (via ACPI) to raise the PCIe link speed before
 * switching to a power state that needs a faster link.  When the request
 * is denied, remember a fallback gen in data->force_pcie_gen.  When the
 * new state needs a *slower* link, only flag that a PSPP notification is
 * required after the state change.
 */
static int polaris10_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	const struct polaris10_power_state *polaris10_nps =
			cast_const_phw_polaris10_power_state(states->pnew_state);
	const struct polaris10_power_state *polaris10_cps =
			cast_const_phw_polaris10_power_state(states->pcurrent_state);

	uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
	uint16_t current_link_speed;

	/* A pending forced gen, if any, overrides the current state's speed. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 denied: try Gen2 instead (deliberate fall-through). */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request denied, keep the current speed */
		default:
			data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
4133
/*
 * Freeze the SCLK/MCLK DPM state machines in the SMC while the driver
 * rebuilds and re-uploads the corresponding DPM tables, so the SMC does
 * not switch levels against a half-updated table.  Only the domains whose
 * update flags are set in need_update_smu7_dpm_table are frozen.
 */
static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* Nothing to freeze if no table update is pending. */
	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
				"Trying to freeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel),
				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
				return -1);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
				"Trying to freeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel),
				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
				return -1);
	}

	return 0;
}
4167
4168static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
4169 struct pp_hwmgr *hwmgr, const void *input)
4170{
4171 int result = 0;
4172 const struct phm_set_power_state_input *states =
4173 (const struct phm_set_power_state_input *)input;
4174 const struct polaris10_power_state *polaris10_ps =
4175 cast_const_phw_polaris10_power_state(states->pnew_state);
4176 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4177 uint32_t sclk = polaris10_ps->performance_levels
4178 [polaris10_ps->performance_level_count - 1].engine_clock;
4179 uint32_t mclk = polaris10_ps->performance_levels
4180 [polaris10_ps->performance_level_count - 1].memory_clock;
4181 struct polaris10_dpm_table *dpm_table = &data->dpm_table;
4182
4183 struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
4184 uint32_t dpm_count, clock_percent;
4185 uint32_t i;
4186
4187 if (0 == data->need_update_smu7_dpm_table)
4188 return 0;
4189
4190 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4191 dpm_table->sclk_table.dpm_levels
4192 [dpm_table->sclk_table.count - 1].value = sclk;
4193
4194 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
4195 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
4196 /* Need to do calculation based on the golden DPM table
4197 * as the Heatmap GPU Clock axis is also based on the default values
4198 */
4199 PP_ASSERT_WITH_CODE(
4200 (golden_dpm_table->sclk_table.dpm_levels
4201 [golden_dpm_table->sclk_table.count - 1].value != 0),
4202 "Divide by 0!",
4203 return -1);
4204 dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
4205
4206 for (i = dpm_count; i > 1; i--) {
4207 if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
4208 clock_percent =
4209 ((sclk
4210 - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
4211 ) * 100)
4212 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
4213
4214 dpm_table->sclk_table.dpm_levels[i].value =
4215 golden_dpm_table->sclk_table.dpm_levels[i].value +
4216 (golden_dpm_table->sclk_table.dpm_levels[i].value *
4217 clock_percent)/100;
4218
4219 } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
4220 clock_percent =
4221 ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
4222 - sclk) * 100)
4223 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
4224
4225 dpm_table->sclk_table.dpm_levels[i].value =
4226 golden_dpm_table->sclk_table.dpm_levels[i].value -
4227 (golden_dpm_table->sclk_table.dpm_levels[i].value *
4228 clock_percent) / 100;
4229 } else
4230 dpm_table->sclk_table.dpm_levels[i].value =
4231 golden_dpm_table->sclk_table.dpm_levels[i].value;
4232 }
4233 }
4234 }
4235
4236 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4237 dpm_table->mclk_table.dpm_levels
4238 [dpm_table->mclk_table.count - 1].value = mclk;
4239
4240 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
4241 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
4242
4243 PP_ASSERT_WITH_CODE(
4244 (golden_dpm_table->mclk_table.dpm_levels
4245 [golden_dpm_table->mclk_table.count-1].value != 0),
4246 "Divide by 0!",
4247 return -1);
4248 dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
4249 for (i = dpm_count; i > 1; i--) {
4250 if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
4251 clock_percent = ((mclk -
4252 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
4253 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
4254
4255 dpm_table->mclk_table.dpm_levels[i].value =
4256 golden_dpm_table->mclk_table.dpm_levels[i].value +
4257 (golden_dpm_table->mclk_table.dpm_levels[i].value *
4258 clock_percent) / 100;
4259
4260 } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
4261 clock_percent = (
4262 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
4263 * 100)
4264 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
4265
4266 dpm_table->mclk_table.dpm_levels[i].value =
4267 golden_dpm_table->mclk_table.dpm_levels[i].value -
4268 (golden_dpm_table->mclk_table.dpm_levels[i].value *
4269 clock_percent) / 100;
4270 } else
4271 dpm_table->mclk_table.dpm_levels[i].value =
4272 golden_dpm_table->mclk_table.dpm_levels[i].value;
4273 }
4274 }
4275 }
4276
4277 if (data->need_update_smu7_dpm_table &
4278 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
4279 result = polaris10_populate_all_graphic_levels(hwmgr);
4280 PP_ASSERT_WITH_CODE((0 == result),
4281 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4282 return result);
4283 }
4284
4285 if (data->need_update_smu7_dpm_table &
4286 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
4287 /*populate MCLK dpm table to SMU7 */
4288 result = polaris10_populate_all_memory_levels(hwmgr);
4289 PP_ASSERT_WITH_CODE((0 == result),
4290 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4291 return result);
4292 }
4293
4294 return result;
4295}
4296
4297static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4298 struct polaris10_single_dpm_table *dpm_table,
4299 uint32_t low_limit, uint32_t high_limit)
4300{
4301 uint32_t i;
4302
4303 for (i = 0; i < dpm_table->count; i++) {
4304 if ((dpm_table->dpm_levels[i].value < low_limit)
4305 || (dpm_table->dpm_levels[i].value > high_limit))
4306 dpm_table->dpm_levels[i].enabled = false;
4307 else
4308 dpm_table->dpm_levels[i].enabled = true;
4309 }
4310
4311 return 0;
4312}
4313
4314static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
4315 const struct polaris10_power_state *polaris10_ps)
4316{
4317 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4318 uint32_t high_limit_count;
4319
4320 PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
4321 "power state did not have any performance level",
4322 return -1);
4323
4324 high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
4325
4326 polaris10_trim_single_dpm_states(hwmgr,
4327 &(data->dpm_table.sclk_table),
4328 polaris10_ps->performance_levels[0].engine_clock,
4329 polaris10_ps->performance_levels[high_limit_count].engine_clock);
4330
4331 polaris10_trim_single_dpm_states(hwmgr,
4332 &(data->dpm_table.mclk_table),
4333 polaris10_ps->performance_levels[0].memory_clock,
4334 polaris10_ps->performance_levels[high_limit_count].memory_clock);
4335
4336 return 0;
4337}
4338
4339static int polaris10_generate_dpm_level_enable_mask(
4340 struct pp_hwmgr *hwmgr, const void *input)
4341{
4342 int result;
4343 const struct phm_set_power_state_input *states =
4344 (const struct phm_set_power_state_input *)input;
4345 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4346 const struct polaris10_power_state *polaris10_ps =
4347 cast_const_phw_polaris10_power_state(states->pnew_state);
4348
4349 result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
4350 if (result)
4351 return result;
4352
4353 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4354 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4355 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4356 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4357 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4358 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4359
4360 return 0;
4361}
4362
4363static int
4364polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4365{
4366 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
4367 PPSMC_MSG_UVDDPM_Enable :
4368 PPSMC_MSG_UVDDPM_Disable);
4369}
4370
4371int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
4372{
4373 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4374 PPSMC_MSG_VCEDPM_Enable :
4375 PPSMC_MSG_VCEDPM_Disable);
4376}
4377
4378static int
4379polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
4380{
4381 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4382 PPSMC_MSG_SAMUDPM_Enable :
4383 PPSMC_MSG_SAMUDPM_Disable);
4384}
4385
/*
 * Gate or ungate UVD DPM. When ungating, program the UVD boot level
 * byte in the SMC's copy of the DPM table and, when required by the
 * platform caps, restrict the enabled-level mask to that boot level.
 */
int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot at the highest multimedia dependency level, if any. */
		data->smc_state_table.UvdBootLevel = 0;
		if (table_info->mm_dep_table->count > 0)
			data->smc_state_table.UvdBootLevel =
					(uint8_t) (table_info->mm_dep_table->count - 1);
		/* Round the byte offset of UvdBootLevel down to its dword,
		 * since SMC indirect accesses are dword-wide.
		 */
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		/* Read-modify-write only the UvdBootLevel byte (bits 31:24). */
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0x00FFFFFF;
		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Pin the enabled mask to the boot level when UVD DPM is off
		 * or a stable pstate is requested.
		 */
		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDPM) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_UVDDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
	}

	return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
4420
4421int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
4422{
4423 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4424 uint32_t mm_boot_level_offset, mm_boot_level_value;
4425 struct phm_ppt_v1_information *table_info =
4426 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4427
4428 if (!bgate) {
4429 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4430 PHM_PlatformCaps_StablePState))
4431 data->smc_state_table.VceBootLevel =
4432 (uint8_t) (table_info->mm_dep_table->count - 1);
4433 else
4434 data->smc_state_table.VceBootLevel = 0;
4435
4436 mm_boot_level_offset = data->dpm_table_start +
4437 offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
4438 mm_boot_level_offset /= 4;
4439 mm_boot_level_offset *= 4;
4440 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
4441 CGS_IND_REG__SMC, mm_boot_level_offset);
4442 mm_boot_level_value &= 0xFF00FFFF;
4443 mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
4444 cgs_write_ind_register(hwmgr->device,
4445 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4446
4447 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
4448 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4449 PPSMC_MSG_VCEDPM_SetEnabledMask,
4450 (uint32_t)1 << data->smc_state_table.VceBootLevel);
4451 }
4452
4453 polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
4454
4455 return 0;
4456}
4457
/*
 * Gate or ungate SAMU DPM. When ungating, program the SAMU boot level
 * byte in the SMC's copy of the DPM table and, in stable-pstate mode,
 * restrict the enabled-level mask to that boot level.
 */
int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;

	if (!bgate) {
		/* SAMU always boots at level 0. */
		data->smc_state_table.SamuBootLevel = 0;
		/* Round the byte offset of SamuBootLevel down to its dword,
		 * since SMC indirect accesses are dword-wide.
		 */
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		/* Read-modify-write only the SamuBootLevel byte (bits 7:0). */
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0xFFFFFF00;
		mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SAMUDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.SamuBootLevel));
	}

	return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
}
4485
/*
 * Push the low-SCLK interrupt threshold to the SMC, but only when the
 * feature is enabled and the arbiter's requested threshold actually
 * changed since the last upload.
 */
static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	int result = 0;
	uint32_t low_sclk_interrupt_threshold = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification)
		&& (hwmgr->gfx_arbiter.sclk_threshold !=
				data->low_sclk_interrupt_threshold)) {
		/* Cache the new threshold so the change check above works. */
		data->low_sclk_interrupt_threshold =
				hwmgr->gfx_arbiter.sclk_threshold;
		low_sclk_interrupt_threshold =
				data->low_sclk_interrupt_threshold;

		/* The SMC expects big-endian values in its SRAM tables. */
		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);

		result = polaris10_copy_bytes_to_smc(
				hwmgr->smumgr,
				data->dpm_table_start +
				offsetof(SMU74_Discrete_DpmTable,
					LowSclkInterruptThreshold),
				(uint8_t *)&low_sclk_interrupt_threshold,
				sizeof(uint32_t),
				data->sram_end);
	}

	return result;
}
4516
4517static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
4518{
4519 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4520
4521 if (data->need_update_smu7_dpm_table &
4522 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4523 return polaris10_program_memory_timing_parameters(hwmgr);
4524
4525 return 0;
4526}
4527
4528static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4529{
4530 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4531
4532 if (0 == data->need_update_smu7_dpm_table)
4533 return 0;
4534
4535 if ((0 == data->sclk_dpm_key_disabled) &&
4536 (data->need_update_smu7_dpm_table &
4537 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4538
4539 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4540 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4541 );
4542 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4543 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4544 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4545 return -1);
4546 }
4547
4548 if ((0 == data->mclk_dpm_key_disabled) &&
4549 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4550
4551 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4552 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4553 );
4554 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4555 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4556 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4557 return -1);
4558 }
4559
4560 data->need_update_smu7_dpm_table = 0;
4561
4562 return 0;
4563}
4564
/*
 * After a state change, ask the platform (via ACPI) to switch the PCIe
 * link to the speed required by the new power state, if a PSPP
 * notification was flagged as needed earlier in the sequence.
 */
static int polaris10_notify_link_speed_change_after_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	const struct polaris10_power_state *polaris10_ps =
			cast_const_phw_polaris10_power_state(states->pnew_state);
	uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
	uint8_t request;

	if (data->pspp_notify_required) {
		/* Map the PP PCIe generation to the ACPI performance request. */
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		/* Skip the Gen1 request when the link already runs faster. */
		if (request == PCIE_PERF_REQ_GEN1 &&
				phm_get_current_pcie_speed(hwmgr) > 0)
			return 0;

		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
			else
				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
		}
	}

	return 0;
}
4598
4599static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
4600{
4601 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4602
4603 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4604 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
4605 return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
4606}
4607
4608
4609
/*
 * Apply a new power state end-to-end: find the requested clocks in the
 * DPM tables, freeze DPM, upload the updated level tables, regenerate
 * and upload the enable masks, then unfreeze DPM again. The individual
 * steps must run in this order; each failure is recorded but the
 * sequence continues so the hardware is left in a consistent state.
 */
static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* Request a faster PCIe link before raising clocks, if supported. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			polaris10_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = polaris10_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program memory timing parameters!",
			result = tmp_result);

	tmp_result = polaris10_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* Allow the link to drop back after the state change completes. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
4679
4680static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4681{
4682 hwmgr->thermal_controller.
4683 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4684
4685 if (phm_is_hw_access_blocked(hwmgr))
4686 return 0;
4687
4688 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4689 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4690}
4691
4692
4693static int
4694polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4695{
4696 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4697
4698 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
4699}
4700
4701static int
4702polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4703{
4704 uint32_t num_active_displays = 0;
4705 struct cgs_display_info info = {0};
4706 info.mode_info = NULL;
4707
4708 cgs_get_active_displays_info(hwmgr->device, &info);
4709
4710 num_active_displays = info.display_count;
4711
4712 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
4713 polaris10_notify_smc_display_change(hwmgr, false);
4714
4715
4716 return 0;
4717}
4718
4719/**
4720* Programs the display gap
4721*
4722* @param hwmgr the address of the powerplay hardware manager.
4723* @return always OK
4724*/
4725static int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
4726{
4727 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4728 uint32_t num_active_displays = 0;
4729 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4730 uint32_t display_gap2;
4731 uint32_t pre_vbi_time_in_us;
4732 uint32_t frame_time_in_us;
4733 uint32_t ref_clock;
4734 uint32_t refresh_rate = 0;
4735 struct cgs_display_info info = {0};
4736 struct cgs_mode_info mode_info;
4737
4738 info.mode_info = &mode_info;
4739
4740 cgs_get_active_displays_info(hwmgr->device, &info);
4741 num_active_displays = info.display_count;
4742
4743 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4744 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4745
4746 ref_clock = mode_info.ref_clock;
4747 refresh_rate = mode_info.refresh_rate;
4748
4749 if (0 == refresh_rate)
4750 refresh_rate = 60;
4751
4752 frame_time_in_us = 1000000 / refresh_rate;
4753
4754 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
4755 data->frame_time_x2 = frame_time_in_us * 2 / 100;
4756
4757 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4758
4759 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4760
4761 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
4762
4763 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
4764
4765
4766 return 0;
4767}
4768
4769
/* A display topology change only requires reprogramming the display gap. */
static int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return polaris10_program_display_gap(hwmgr);
}
4774
4775/**
4776* Set maximum target operating fan output RPM
4777*
4778* @param hwmgr: the address of the powerplay hardware manager.
4779* @param usMaxFanRpm: max operating fan RPM value.
4780* @return The response that came from the SMC.
4781*/
4782static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4783{
4784 hwmgr->thermal_controller.
4785 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4786
4787 if (phm_is_hw_access_blocked(hwmgr))
4788 return 0;
4789
4790 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4791 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4792}
4793
/* No internal thermal interrupt registration is needed on Polaris10. */
static int
polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
		const void *thermal_interrupt_info)
{
	return 0;
}
4800
4801static bool polaris10_check_smc_update_required_for_display_configuration(
4802 struct pp_hwmgr *hwmgr)
4803{
4804 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4805 bool is_update_required = false;
4806 struct cgs_display_info info = {0, 0, NULL};
4807
4808 cgs_get_active_displays_info(hwmgr->device, &info);
4809
4810 if (data->display_timing.num_existing_displays != info.display_count)
4811 is_update_required = true;
4812/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
4813 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4814 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
4815 if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
4816 (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
4817 data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
4818 is_update_required = true;
4819*/
4820 return is_update_required;
4821}
4822
4823static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
4824 const struct polaris10_performance_level *pl2)
4825{
4826 return ((pl1->memory_clock == pl2->memory_clock) &&
4827 (pl1->engine_clock == pl2->engine_clock) &&
4828 (pl1->pcie_gen == pl2->pcie_gen) &&
4829 (pl1->pcie_lane == pl2->pcie_lane));
4830}
4831
4832static int polaris10_check_states_equal(struct pp_hwmgr *hwmgr,
4833 const struct pp_hw_power_state *pstate1,
4834 const struct pp_hw_power_state *pstate2, bool *equal)
4835{
4836 const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
4837 const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
4838 int i;
4839
4840 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4841 return -EINVAL;
4842
4843 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4844 if (psa->performance_level_count != psb->performance_level_count) {
4845 *equal = false;
4846 return 0;
4847 }
4848
4849 for (i = 0; i < psa->performance_level_count; i++) {
4850 if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4851 /* If we have found even one performance level pair that is different the states are different. */
4852 *equal = false;
4853 return 0;
4854 }
4855 }
4856
4857 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4858 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4859 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4860 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4861
4862 return 0;
4863}
4864
/*
 * Check whether the VBIOS already loaded the full MC ucode and flag
 * whether long memory training is needed.
 *
 * NOTE(review): both paths currently set need_long_memory_training to
 * false and the actual ucode load below is commented out, so the
 * vbios_version check has no effect beyond the early return — this
 * appears to be placeholder logic; confirm before relying on it.
 */
static int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	uint32_t vbios_version;

	/* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/

	phm_get_mc_microcode_version(hwmgr);
	vbios_version = hwmgr->microcode_version_info.MC & 0xf;
	/* Full version of MC ucode has already been loaded. */
	if (vbios_version == 0) {
		data->need_long_memory_training = false;
		return 0;
	}

	data->need_long_memory_training = false;

/*
 *	PPMCME_FirmwareDescriptorEntry *pfd = NULL;
	pfd = &tonga_mcmeFirmware;
	if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
		polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
			pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
			pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
*/
	return 0;
}
4893
4894/**
4895 * Read clock related registers.
4896 *
4897 * @param hwmgr the address of the powerplay hardware manager.
4898 * @return always 0
4899 */
4900static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
4901{
4902 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4903
4904 data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
4905 CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
4906 & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
4907
4908 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
4909 CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
4910 & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
4911
4912 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
4913 CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
4914 & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
4915
4916 return 0;
4917}
4918
4919/**
4920 * Find out if memory is GDDR5.
4921 *
4922 * @param hwmgr the address of the powerplay hardware manager.
4923 * @return always 0
4924 */
4925static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
4926{
4927 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4928 uint32_t temp;
4929
4930 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
4931
4932 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
4933 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
4934 MC_SEQ_MISC0_GDDR5_SHIFT));
4935
4936 return 0;
4937}
4938
4939/**
4940 * Enables Dynamic Power Management by SMC
4941 *
4942 * @param hwmgr the address of the powerplay hardware manager.
4943 * @return always 0
4944 */
4945static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4946{
4947 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4948 GENERAL_PWRMGT, STATIC_PM_EN, 1);
4949
4950 return 0;
4951}
4952
4953/**
4954 * Initialize PowerGating States for different engines
4955 *
4956 * @param hwmgr the address of the powerplay hardware manager.
4957 * @return always 0
4958 */
4959static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
4960{
4961 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4962
4963 data->uvd_power_gated = false;
4964 data->vce_power_gated = false;
4965 data->samu_power_gated = false;
4966
4967 return 0;
4968}
4969
4970static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4971{
4972 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4973 data->low_sclk_interrupt_threshold = 0;
4974
4975 return 0;
4976}
4977
/*
 * One-time ASIC setup: upload MC firmware, snapshot clock registers,
 * detect the memory type, enable SMC power management, and initialize
 * power-gating and SCLK-threshold state. Failures are recorded but the
 * remaining steps still run.
 */
static int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	polaris10_upload_mc_firmware(hwmgr);

	tmp_result = polaris10_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = polaris10_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = polaris10_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = polaris10_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = phm_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = polaris10_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
5010
5011static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
5012 enum pp_clock_type type, uint32_t mask)
5013{
5014 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5015
5016 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
5017 return -EINVAL;
5018
5019 switch (type) {
5020 case PP_SCLK:
5021 if (!data->sclk_dpm_key_disabled)
5022 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5023 PPSMC_MSG_SCLKDPM_SetEnabledMask,
5024 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
5025 break;
5026 case PP_MCLK:
5027 if (!data->mclk_dpm_key_disabled)
5028 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5029 PPSMC_MSG_MCLKDPM_SetEnabledMask,
5030 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
5031 break;
5032 case PP_PCIE:
5033 {
5034 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
5035 uint32_t level = 0;
5036
5037 while (tmp >>= 1)
5038 level++;
5039
5040 if (!data->pcie_dpm_key_disabled)
5041 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5042 PPSMC_MSG_PCIeDPM_ForceLevel,
5043 level);
5044 break;
5045 }
5046 default:
5047 break;
5048 }
5049
5050 return 0;
5051}
5052
5053static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
5054{
5055 uint32_t speedCntl = 0;
5056
5057 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
5058 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
5059 ixPCIE_LC_SPEED_CNTL);
5060 return((uint16_t)PHM_GET_FIELD(speedCntl,
5061 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
5062}
5063
/*
 * Print the SCLK/MCLK/PCIe DPM level tables into buf for sysfs, marking
 * the currently active level with '*'. Returns the number of bytes
 * written.
 */
static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Ask the SMC for the current SCLK; the reply lands in ARG_0. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		/* Find the first level at or above the current clock. */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* Table values are in 10 kHz units; /100 converts to MHz. */
		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		/* Ask the SMC for the current MCLK; the reply lands in ARG_0. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* Match the current link data rate against the PCIe table. */
		pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
5128
5129static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5130{
5131 if (mode) {
5132 /* stop auto-manage */
5133 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5134 PHM_PlatformCaps_MicrocodeFanControl))
5135 polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
5136 polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
5137 } else
5138 /* restart auto-manage */
5139 polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
5140
5141 return 0;
5142}
5143
5144static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5145{
5146 if (hwmgr->fan_ctrl_is_in_default_mode)
5147 return hwmgr->fan_ctrl_default_mode;
5148 else
5149 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
5150 CG_FDO_CTRL2, FDO_PWM_MODE);
5151}
5152
5153static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
5154{
5155 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5156 struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5157 struct polaris10_single_dpm_table *golden_sclk_table =
5158 &(data->golden_dpm_table.sclk_table);
5159 int value;
5160
5161 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5162 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5163 100 /
5164 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5165
5166 return value;
5167}
5168
5169static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5170{
5171 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5172 struct polaris10_single_dpm_table *golden_sclk_table =
5173 &(data->golden_dpm_table.sclk_table);
5174 struct pp_power_state *ps;
5175 struct polaris10_power_state *polaris10_ps;
5176
5177 if (value > 20)
5178 value = 20;
5179
5180 ps = hwmgr->request_ps;
5181
5182 if (ps == NULL)
5183 return -EINVAL;
5184
5185 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
5186
5187 polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
5188 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5189 value / 100 +
5190 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5191
5192 return 0;
5193}
5194
5195static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
5196{
5197 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5198 struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5199 struct polaris10_single_dpm_table *golden_mclk_table =
5200 &(data->golden_dpm_table.mclk_table);
5201 int value;
5202
5203 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5204 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5205 100 /
5206 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5207
5208 return value;
5209}
5210
5211static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5212{
5213 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5214 struct polaris10_single_dpm_table *golden_mclk_table =
5215 &(data->golden_dpm_table.mclk_table);
5216 struct pp_power_state *ps;
5217 struct polaris10_power_state *polaris10_ps;
5218
5219 if (value > 20)
5220 value = 20;
5221
5222 ps = hwmgr->request_ps;
5223
5224 if (ps == NULL)
5225 return -EINVAL;
5226
5227 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
5228
5229 polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
5230 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5231 value / 100 +
5232 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5233
5234 return 0;
5235}
5236static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
5237 .backend_init = &polaris10_hwmgr_backend_init,
5238 .backend_fini = &polaris10_hwmgr_backend_fini,
5239 .asic_setup = &polaris10_setup_asic_task,
5240 .dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
5241 .apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
5242 .force_dpm_level = &polaris10_force_dpm_level,
5243 .power_state_set = polaris10_set_power_state_tasks,
5244 .get_power_state_size = polaris10_get_power_state_size,
5245 .get_mclk = polaris10_dpm_get_mclk,
5246 .get_sclk = polaris10_dpm_get_sclk,
5247 .patch_boot_state = polaris10_dpm_patch_boot_state,
5248 .get_pp_table_entry = polaris10_get_pp_table_entry,
5249 .get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
5250 .print_current_perforce_level = polaris10_print_current_perforce_level,
5251 .powerdown_uvd = polaris10_phm_powerdown_uvd,
5252 .powergate_uvd = polaris10_phm_powergate_uvd,
5253 .powergate_vce = polaris10_phm_powergate_vce,
5254 .disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
5255 .update_clock_gatings = polaris10_phm_update_clock_gatings,
5256 .notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
5257 .display_config_changed = polaris10_display_configuration_changed_task,
5258 .set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
5259 .set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
5260 .get_temperature = polaris10_thermal_get_temperature,
5261 .stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
5262 .get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
5263 .get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
5264 .set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
5265 .reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
5266 .get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
5267 .set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
5268 .uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
5269 .register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
5270 .check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
5271 .check_states_equal = polaris10_check_states_equal,
5272 .set_fan_control_mode = polaris10_set_fan_control_mode,
5273 .get_fan_control_mode = polaris10_get_fan_control_mode,
5274 .force_clock_level = polaris10_force_clock_level,
5275 .print_clock_levels = polaris10_print_clock_levels,
5276 .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
5277 .get_sclk_od = polaris10_get_sclk_od,
5278 .set_sclk_od = polaris10_set_sclk_od,
5279 .get_mclk_od = polaris10_get_mclk_od,
5280 .set_mclk_od = polaris10_set_mclk_od,
5281};
5282
5283int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
5284{
5285 hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
5286 hwmgr->pptable_func = &pptable_v1_0_funcs;
5287 pp_polaris10_thermal_initialize(hwmgr);
5288
5289 return 0;
5290}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
deleted file mode 100644
index 378ab342c257..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ /dev/null
@@ -1,354 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef POLARIS10_HWMGR_H
25#define POLARIS10_HWMGR_H
26
27#include "hwmgr.h"
28#include "smu74.h"
29#include "smu74_discrete.h"
30#include "ppatomctrl.h"
31#include "polaris10_ppsmc.h"
32#include "polaris10_powertune.h"
33#include "polaris10_smumgr.h"
34
35#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2
36
37#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0
38#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1
39#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2
40#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3
41
42#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
43#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
44#define DPMTABLE_UPDATE_SCLK 0x00000004
45#define DPMTABLE_UPDATE_MCLK 0x00000008
46
47struct polaris10_performance_level {
48 uint32_t memory_clock;
49 uint32_t engine_clock;
50 uint16_t pcie_gen;
51 uint16_t pcie_lane;
52};
53
54struct polaris10_uvd_clocks {
55 uint32_t vclk;
56 uint32_t dclk;
57};
58
59struct polaris10_vce_clocks {
60 uint32_t evclk;
61 uint32_t ecclk;
62};
63
64struct polaris10_power_state {
65 uint32_t magic;
66 struct polaris10_uvd_clocks uvd_clks;
67 struct polaris10_vce_clocks vce_clks;
68 uint32_t sam_clk;
69 uint16_t performance_level_count;
70 bool dc_compatible;
71 uint32_t sclk_threshold;
72 struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
73};
74
75struct polaris10_dpm_level {
76 bool enabled;
77 uint32_t value;
78 uint32_t param1;
79};
80
81#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
82#define MAX_REGULAR_DPM_NUMBER 8
83#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
84
85struct polaris10_single_dpm_table {
86 uint32_t count;
87 struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
88};
89
90struct polaris10_dpm_table {
91 struct polaris10_single_dpm_table sclk_table;
92 struct polaris10_single_dpm_table mclk_table;
93 struct polaris10_single_dpm_table pcie_speed_table;
94 struct polaris10_single_dpm_table vddc_table;
95 struct polaris10_single_dpm_table vddci_table;
96 struct polaris10_single_dpm_table mvdd_table;
97};
98
99struct polaris10_clock_registers {
100 uint32_t vCG_SPLL_FUNC_CNTL;
101 uint32_t vCG_SPLL_FUNC_CNTL_2;
102 uint32_t vCG_SPLL_FUNC_CNTL_3;
103 uint32_t vCG_SPLL_FUNC_CNTL_4;
104 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
105 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
106 uint32_t vDLL_CNTL;
107 uint32_t vMCLK_PWRMGT_CNTL;
108 uint32_t vMPLL_AD_FUNC_CNTL;
109 uint32_t vMPLL_DQ_FUNC_CNTL;
110 uint32_t vMPLL_FUNC_CNTL;
111 uint32_t vMPLL_FUNC_CNTL_1;
112 uint32_t vMPLL_FUNC_CNTL_2;
113 uint32_t vMPLL_SS1;
114 uint32_t vMPLL_SS2;
115};
116
117#define DISABLE_MC_LOADMICROCODE 1
118#define DISABLE_MC_CFGPROGRAMMING 2
119
120struct polaris10_voltage_smio_registers {
121 uint32_t vS0_VID_LOWER_SMIO_CNTL;
122};
123
124#define POLARIS10_MAX_LEAKAGE_COUNT 8
125
126struct polaris10_leakage_voltage {
127 uint16_t count;
128 uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
129 uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
130};
131
132struct polaris10_vbios_boot_state {
133 uint16_t mvdd_bootup_value;
134 uint16_t vddc_bootup_value;
135 uint16_t vddci_bootup_value;
136 uint32_t sclk_bootup_value;
137 uint32_t mclk_bootup_value;
138 uint16_t pcie_gen_bootup_value;
139 uint16_t pcie_lane_bootup_value;
140};
141
142/* Ultra Low Voltage parameter structure */
143struct polaris10_ulv_parm {
144 bool ulv_supported;
145 uint32_t cg_ulv_parameter;
146 uint32_t ulv_volt_change_delay;
147 struct polaris10_performance_level ulv_power_level;
148};
149
150struct polaris10_display_timing {
151 uint32_t min_clock_in_sr;
152 uint32_t num_existing_displays;
153};
154
155struct polaris10_dpmlevel_enable_mask {
156 uint32_t uvd_dpm_enable_mask;
157 uint32_t vce_dpm_enable_mask;
158 uint32_t acp_dpm_enable_mask;
159 uint32_t samu_dpm_enable_mask;
160 uint32_t sclk_dpm_enable_mask;
161 uint32_t mclk_dpm_enable_mask;
162 uint32_t pcie_dpm_enable_mask;
163};
164
165struct polaris10_pcie_perf_range {
166 uint16_t max;
167 uint16_t min;
168};
169
170struct polaris10_hwmgr {
171 struct polaris10_dpm_table dpm_table;
172 struct polaris10_dpm_table golden_dpm_table;
173 SMU74_Discrete_DpmTable smc_state_table;
174 struct SMU74_Discrete_Ulv ulv_setting;
175
176 struct polaris10_range_table range_table[NUM_SCLK_RANGE];
177 uint32_t voting_rights_clients0;
178 uint32_t voting_rights_clients1;
179 uint32_t voting_rights_clients2;
180 uint32_t voting_rights_clients3;
181 uint32_t voting_rights_clients4;
182 uint32_t voting_rights_clients5;
183 uint32_t voting_rights_clients6;
184 uint32_t voting_rights_clients7;
185 uint32_t static_screen_threshold_unit;
186 uint32_t static_screen_threshold;
187 uint32_t voltage_control;
188 uint32_t vddc_vddci_delta;
189
190 uint32_t active_auto_throttle_sources;
191
192 struct polaris10_clock_registers clock_registers;
193 struct polaris10_voltage_smio_registers voltage_smio_registers;
194
195 bool is_memory_gddr5;
196 uint16_t acpi_vddc;
197 bool pspp_notify_required;
198 uint16_t force_pcie_gen;
199 uint16_t acpi_pcie_gen;
200 uint32_t pcie_gen_cap;
201 uint32_t pcie_lane_cap;
202 uint32_t pcie_spc_cap;
203 struct polaris10_leakage_voltage vddc_leakage;
204 struct polaris10_leakage_voltage Vddci_leakage;
205
206 uint32_t mvdd_control;
207 uint32_t vddc_mask_low;
208 uint32_t mvdd_mask_low;
209 uint16_t max_vddc_in_pptable;
210 uint16_t min_vddc_in_pptable;
211 uint16_t max_vddci_in_pptable;
212 uint16_t min_vddci_in_pptable;
213 uint32_t mclk_strobe_mode_threshold;
214 uint32_t mclk_stutter_mode_threshold;
215 uint32_t mclk_edc_enable_threshold;
216 uint32_t mclk_edcwr_enable_threshold;
217 bool is_uvd_enabled;
218 struct polaris10_vbios_boot_state vbios_boot_state;
219
220 bool pcie_performance_request;
221 bool battery_state;
222 bool is_tlu_enabled;
223
224 /* ---- SMC SRAM Address of firmware header tables ---- */
225 uint32_t sram_end;
226 uint32_t dpm_table_start;
227 uint32_t soft_regs_start;
228 uint32_t mc_reg_table_start;
229 uint32_t fan_table_start;
230 uint32_t arb_table_start;
231
232 /* ---- Stuff originally coming from Evergreen ---- */
233 uint32_t vddci_control;
234 struct pp_atomctrl_voltage_table vddc_voltage_table;
235 struct pp_atomctrl_voltage_table vddci_voltage_table;
236 struct pp_atomctrl_voltage_table mvdd_voltage_table;
237
238 uint32_t mgcg_cgtt_local2;
239 uint32_t mgcg_cgtt_local3;
240 uint32_t gpio_debug;
241 uint32_t mc_micro_code_feature;
242 uint32_t highest_mclk;
243 uint16_t acpi_vddci;
244 uint8_t mvdd_high_index;
245 uint8_t mvdd_low_index;
246 bool dll_default_on;
247 bool performance_request_registered;
248
249 /* ---- Low Power Features ---- */
250 struct polaris10_ulv_parm ulv;
251
252 /* ---- CAC Stuff ---- */
253 uint32_t cac_table_start;
254 bool cac_configuration_required;
255 bool driver_calculate_cac_leakage;
256 bool cac_enabled;
257
258 /* ---- DPM2 Parameters ---- */
259 uint32_t power_containment_features;
260 bool enable_dte_feature;
261 bool enable_tdc_limit_feature;
262 bool enable_pkg_pwr_tracking_feature;
263 bool disable_uvd_power_tune_feature;
264 const struct polaris10_pt_defaults *power_tune_defaults;
265 struct SMU74_Discrete_PmFuses power_tune_table;
266 uint32_t dte_tj_offset;
267 uint32_t fast_watermark_threshold;
268
269 /* ---- Phase Shedding ---- */
270 bool vddc_phase_shed_control;
271
272 /* ---- DI/DT ---- */
273 struct polaris10_display_timing display_timing;
274 uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
275
276 /* ---- Thermal Temperature Setting ---- */
277 struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask;
278 uint32_t need_update_smu7_dpm_table;
279 uint32_t sclk_dpm_key_disabled;
280 uint32_t mclk_dpm_key_disabled;
281 uint32_t pcie_dpm_key_disabled;
282 uint32_t min_engine_clocks;
283 struct polaris10_pcie_perf_range pcie_gen_performance;
284 struct polaris10_pcie_perf_range pcie_lane_performance;
285 struct polaris10_pcie_perf_range pcie_gen_power_saving;
286 struct polaris10_pcie_perf_range pcie_lane_power_saving;
287 bool use_pcie_performance_levels;
288 bool use_pcie_power_saving_levels;
289 uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
290 uint32_t mclk_activity_target;
291 uint32_t mclk_dpm0_activity_target;
292 uint32_t low_sclk_interrupt_threshold;
293 uint32_t last_mclk_dpm_enable_mask;
294 bool uvd_enabled;
295
296 /* ---- Power Gating States ---- */
297 bool uvd_power_gated;
298 bool vce_power_gated;
299 bool samu_power_gated;
300 bool need_long_memory_training;
301
302 /* Application power optimization parameters */
303 bool update_up_hyst;
304 bool update_down_hyst;
305 uint32_t down_hyst;
306 uint32_t up_hyst;
307 uint32_t disable_dpm_mask;
308 bool apply_optimized_settings;
309 uint32_t avfs_vdroop_override_setting;
310 bool apply_avfs_cks_off_voltage;
311 uint32_t frame_time_x2;
312};
313
314/* To convert to Q8.8 format for firmware */
315#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256
316
317enum Polaris10_I2CLineID {
318 Polaris10_I2CLineID_DDC1 = 0x90,
319 Polaris10_I2CLineID_DDC2 = 0x91,
320 Polaris10_I2CLineID_DDC3 = 0x92,
321 Polaris10_I2CLineID_DDC4 = 0x93,
322 Polaris10_I2CLineID_DDC5 = 0x94,
323 Polaris10_I2CLineID_DDC6 = 0x95,
324 Polaris10_I2CLineID_SCLSDA = 0x96,
325 Polaris10_I2CLineID_DDCVGA = 0x97
326};
327
328#define POLARIS10_I2C_DDC1DATA 0
329#define POLARIS10_I2C_DDC1CLK 1
330#define POLARIS10_I2C_DDC2DATA 2
331#define POLARIS10_I2C_DDC2CLK 3
332#define POLARIS10_I2C_DDC3DATA 4
333#define POLARIS10_I2C_DDC3CLK 5
334#define POLARIS10_I2C_SDA 40
335#define POLARIS10_I2C_SCL 41
336#define POLARIS10_I2C_DDC4DATA 65
337#define POLARIS10_I2C_DDC4CLK 66
338#define POLARIS10_I2C_DDC5DATA 0x48
339#define POLARIS10_I2C_DDC5CLK 0x49
340#define POLARIS10_I2C_DDC6DATA 0x4a
341#define POLARIS10_I2C_DDC6CLK 0x4b
342#define POLARIS10_I2C_DDCVGADATA 0x4c
343#define POLARIS10_I2C_DDCVGACLK 0x4d
344
345#define POLARIS10_UNUSED_GPIO_PIN 0x7F
346
347int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
348
349int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
350int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
351int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
352int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
353#endif
354
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
deleted file mode 100644
index b9cb240a135d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ /dev/null
@@ -1,988 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "smumgr.h"
26#include "polaris10_hwmgr.h"
27#include "polaris10_powertune.h"
28#include "polaris10_smumgr.h"
29#include "smu74_discrete.h"
30#include "pp_debug.h"
31#include "gca/gfx_8_0_d.h"
32#include "gca/gfx_8_0_sh_mask.h"
33#include "oss/oss_3_0_sh_mask.h"
34
35#define VOLTAGE_SCALE 4
36#define POWERTUNE_DEFAULT_SET_MAX 1
37
38uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
39
40struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
41/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
42 * Offset Mask Shift Value Type
43 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
44 */
45 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
46 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
47 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
48 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
49 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
50 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
51 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
52 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
53 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
54
55 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
56 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
57 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
58 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
59 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
60
61 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
62 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
63 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
64 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
65 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
66 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
67
68 { 0xFFFFFFFF }
69};
70
71struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
72/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
73 * Offset Mask Shift Value Type
74 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
75 */
76 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
77 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
78 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
79 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
80 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
81 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
82 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
83 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
84 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
85
86 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
87 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
88 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
89 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
90 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
91
92 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
93 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
94 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
95 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
96 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
97 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
98
99 { 0xFFFFFFFF }
100};
101
102struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
103/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
104 * Offset Mask Shift Value Type
105 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
106 */
107 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
108 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
109 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
110 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
111
112 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
113 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
114 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
115 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
116
117 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
118 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
119 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
120 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
121
122 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
123 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
124
125 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
126 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
127
128 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
129 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
130 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
131 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
132 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
133 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
134
135 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
136 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
137 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
138 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
139 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
140
141 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
142 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
143 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
144 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
145
146 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
147 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
148 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
149 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
150 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
151 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
152 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
153 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
154
155 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
156 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
157 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
158 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
159
160 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
161 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
162 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
163 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
164
165 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
166 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
167
168 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
169 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
170
171 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
172 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
173 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
174 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
175 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
176 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
177
178 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
179 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
180 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
181 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
182 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
183
184 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
185 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
186 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
187 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
188
189 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
190 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
191 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
192 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
193 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
194 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
195 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
196 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
197
198 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
199 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
200 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
201 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
202
203 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
204 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
205 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
206 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
207
208 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
209 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
210
211 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
212 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
213
214 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
215 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
216 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
217 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
218 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
219 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
220
221 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
222 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
223 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
224 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
225 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
226
227 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
228 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
229 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
230 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
231
232 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
233 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
234 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
235 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
236 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
237 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
238 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
239 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
240
241 { 0xFFFFFFFF }
242};
243
244struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
245/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
246 * Offset Mask Shift Value Type
247 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
248 */
249 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
250 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
251 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
252 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
253
254 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
255 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
256 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
257 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
258
259 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
260 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
261 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
262 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
263
264 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
265 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
266
267 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
268 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
269
270 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
271 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
272 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
273 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
274 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
275 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
276
277 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
278 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
279 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
280 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
281 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
282
283 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
284 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
285 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
286 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
287
288 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
289 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
290 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
291 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
292 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
293 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
294 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
295 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
296
297 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
298 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
299 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
300 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
301
302 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
303 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
304 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
305 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
306
307 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
308 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
309
310 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
311 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
312
313 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
314 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
315 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
316 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
317 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
318 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
319
320 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
321 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
322 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
323 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
324 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
325
326 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
327 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
328 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
329 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
330
331 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
332 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
333 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
334 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
335 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
336 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
337 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
338 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
339
340 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
341 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
342 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
343 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
344
345 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
346 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
347 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
348 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
349
350 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
351 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
352
353 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
354 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
355
356 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
357 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
358 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
359 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
360 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
361 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
362
363 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
364 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
365 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
366 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
367 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
368
369 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
370 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
371 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
372 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
373
374 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
375 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
376 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
377 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
378 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
379 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
380 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
381 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
382 { 0xFFFFFFFF }
383};
384
/* Powertune default parameter sets.  A set is selected (1-based) by
 * usPowerTuneDataSetID from the platform's cac_dtp_table in
 * polaris10_initialize_power_tune_defaults(); entry 0 doubles as the
 * fallback when no valid ID is present.
 * Scalar fields per entry:
 *   SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
 *   TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT,
 * followed by the BAPMTI_R and BAPMTI_RC coefficient tables consumed by
 * polaris10_populate_bapm_parameters_in_dpm_table(). */
static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
	{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
};
392
393void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
394{
395 struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
396 struct phm_ppt_v1_information *table_info =
397 (struct phm_ppt_v1_information *)(hwmgr->pptable);
398
399 if (table_info &&
400 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
401 table_info->cac_dtp_table->usPowerTuneDataSetID)
402 polaris10_hwmgr->power_tune_defaults =
403 &polaris10_power_tune_data_set_array
404 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
405 else
406 polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
407
408}
409
/* Rescale a fan gain given in percent (0..100) onto a 0..4096 range,
 * truncating toward zero. */
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	return (uint16_t)((uint32_t)raw_setting * 4096 / 100);
}
416
/*
 * Fill the BAPM (power-containment) fields of the SMC DPM table from the
 * platform cac_dtp_table, the fan-control parameters and the selected
 * powertune defaults.
 *
 * TDP values are scaled by 128 (the SMC expects a 7-bit fraction in the
 * DPM table, per the note in polaris10_power_control_set_level()); the
 * temperature limits are scaled by 256 — presumably 8.8 fixed point, TODO
 * confirm against the SMU74 firmware spec.  All multi-byte values are
 * byte-swapped to SMC endianness via PP_HOST_TO_SMC_US.
 *
 * Always returns 0; an out-of-range target temperature only logs a
 * warning (the PP_ASSERT_WITH_CODE has no recovery code).
 */
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
	SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	struct pp_advance_fan_control_parameters *fan_table=
			&hwmgr->thermal_controller.advanceFanControlParameters;
	int i, j, k;
	const uint16_t *pdef1;
	const uint16_t *pdef2;

	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));

	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
			"Target Operating Temp is out of Range!",
			);

	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTargetOperatingTemp * 256);
	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitHotspot * 256);
	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainEdge));
	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHotspot));

	/* Copy the flat BAPMTI_R/BAPMTI_RC default tables into the SMC's
	 * [iterations][sources][sinks] layout, walking both in lockstep. */
	pdef1 = defaults->BAPMTI_R;
	pdef2 = defaults->BAPMTI_RC;

	for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU74_DTE_SOURCES; j++) {
			for (k = 0; k < SMU74_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
				pdef1++;
				pdef2++;
			}
		}
	}

	return 0;
}
463
464static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
465{
466 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
467 const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
468
469 data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
470 data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
471 data->power_tune_table.SviLoadLineTrimVddC = 3;
472 data->power_tune_table.SviLoadLineOffsetVddC = 0;
473
474 return 0;
475}
476
477static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
478{
479 uint16_t tdc_limit;
480 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
481 struct phm_ppt_v1_information *table_info =
482 (struct phm_ppt_v1_information *)(hwmgr->pptable);
483 const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
484
485 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
486 data->power_tune_table.TDC_VDDC_PkgLimit =
487 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
488 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
489 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
490 data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
491
492 return 0;
493}
494
495static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
496{
497 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
498 const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
499 uint32_t temp;
500
501 if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
502 fuse_table_offset +
503 offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
504 (uint32_t *)&temp, data->sram_end))
505 PP_ASSERT_WITH_CODE(false,
506 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
507 return -EINVAL);
508 else {
509 data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
510 data->power_tune_table.LPMLTemperatureMin =
511 (uint8_t)((temp >> 16) & 0xff);
512 data->power_tune_table.LPMLTemperatureMax =
513 (uint8_t)((temp >> 8) & 0xff);
514 data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
515 }
516 return 0;
517}
518
519static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
520{
521 int i;
522 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
523
524 /* Currently not used. Set all to zero. */
525 for (i = 0; i < 16; i++)
526 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
527
528 return 0;
529}
530
531static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
532{
533 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
534
535 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
536 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
537 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
538 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
539
540 data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
541 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
542 return 0;
543}
544
545static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
546{
547 int i;
548 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
549
550 /* Currently not used. Set all to zero. */
551 for (i = 0; i < 16; i++)
552 data->power_tune_table.GnbLPML[i] = 0;
553
554 return 0;
555}
556
/* Stub: deriving the GnbLPML min/max VID from BapmVddc is not
 * implemented here; kept so polaris10_populate_pm_fuses() can run the
 * same populate sequence as other asics.  Always returns 0. */
static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
561
/*
 * Set or clear the DIDT_CTRL_EN bit for every DIDT block (SQ/DB/TD/TCP)
 * whose ramping feature is enabled in platformCaps, via read-modify-write
 * of that block's CTRL0 register in the DIDT indirect space.
 *
 * Each change is mirrored into DIDTBlock_Info — presumably a file-scope
 * accumulator defined earlier in this file; TODO confirm its definition
 * and initial value.
 *
 * NOTE: the SMC is only notified (PPSMC_MSG_Didt_Block_Function) on
 * enable, never on disable.
 *
 * Returns 0, or the SMC messaging result when enabling.
 */
static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
{

	uint32_t en = enable ? 1 : 0;
	int32_t result = 0;
	uint32_t data;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
		data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
		DIDTBlock_Info &= ~SQ_Enable_MASK;
		DIDTBlock_Info |= en << SQ_Enable_SHIFT;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
		data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
		DIDTBlock_Info &= ~DB_Enable_MASK;
		DIDTBlock_Info |= en << DB_Enable_SHIFT;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
		data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
		DIDTBlock_Info &= ~TD_Enable_MASK;
		DIDTBlock_Info |= en << TD_Enable_SHIFT;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
		data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
		DIDTBlock_Info &= ~TCP_Enable_MASK;
		DIDTBlock_Info |= en << TCP_Enable_SHIFT;
	}

	/* Tell the SMC which blocks are now active (enable path only). */
	if (enable)
		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);

	return result;
}
610
/*
 * Apply a 0xFFFFFFFF-terminated table of register field updates.
 *
 * Entries of type POLARIS10_CONFIGREG_CACHE touch no hardware: their
 * shifted/masked values accumulate in 'cache' and are OR-ed into the
 * next non-cache entry's write, after which the cache is cleared.  All
 * other entry types perform a read-modify-write through the matching
 * register space (SMC indirect, DIDT indirect, GC CAC indirect, or a
 * plain MMIO register for the default/MMR case).
 *
 * Returns 0 on success, -EINVAL for a NULL table pointer.
 */
static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
		struct polaris10_pt_config_reg *cac_config_regs)
{
	struct polaris10_pt_config_reg *config_regs = cac_config_regs;
	uint32_t cache = 0;
	uint32_t data = 0;

	PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		else {
			switch (config_regs->type) {
			case POLARIS10_CONFIGREG_SMC_IND:
				data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
				break;

			case POLARIS10_CONFIGREG_DIDT_IND:
				data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
				break;

			case POLARIS10_CONFIGREG_GC_CAC_IND:
				data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
				break;

			default:
				data = cgs_read_register(hwmgr->device, config_regs->offset);
				break;
			}

			/* Merge this entry's field plus any cached fields. */
			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case POLARIS10_CONFIGREG_SMC_IND:
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
				break;

			case POLARIS10_CONFIGREG_DIDT_IND:
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
				break;

			case POLARIS10_CONFIGREG_GC_CAC_IND:
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
				break;

			default:
				cgs_write_register(hwmgr->device, config_regs->offset, data);
				break;
			}
			cache = 0;
		}

		config_regs++;
	}

	return 0;
}
671
/*
 * Program the DIDT/GC-CAC config tables for every shader engine and turn
 * DIDT on, if any ramping feature (SQ/DB/TD/TCP) is enabled.
 *
 * GRBM_GFX_INDEX is pointed at each SE in turn (with instance/SH
 * broadcast) so the per-SE indirect registers are programmed everywhere,
 * then restored to its original value.  The config tables differ between
 * Polaris10 and Polaris11.
 *
 * Returns 0 on success or when nothing is enabled; otherwise the first
 * failing step's error code.  Clock-gating hand-off around the sequence
 * is still a TODO.
 */
int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
{
	int result;
	uint32_t num_se = 0;
	uint32_t count, value, value2;
	struct cgs_system_info sys_info = {0};

	/* Query the number of shader engines from CGS. */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
	result = cgs_query_system_info(hwmgr->device, &sys_info);


	if (result == 0)
		num_se = sys_info.value;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {

		/* TO DO Pre DIDT disable clock gating */
		value = 0;
		value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);	/* saved for restore below */
		for (count = 0; count < num_se; count++) {
			value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK
				| SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK
				| (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);
			cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);

			if (hwmgr->chip_id == CHIP_POLARIS10) {
				result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
				result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
			} else if (hwmgr->chip_id == CHIP_POLARIS11) {
				result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
				result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
			}
		}
		cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);

		result = polaris10_enable_didt(hwmgr, true);
		PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);

		/* TO DO Post DIDT enable clock gating */
	}

	return 0;
}
723
724int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
725{
726 int result;
727
728 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
729 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
730 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
731 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
732 /* TO DO Pre DIDT disable clock gating */
733
734 result = polaris10_enable_didt(hwmgr, false);
735 PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
736 /* TO DO Post DIDT enable clock gating */
737 }
738
739 return 0;
740}
741
742
743static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
744{
745 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
746 struct phm_ppt_v1_information *table_info =
747 (struct phm_ppt_v1_information *)(hwmgr->pptable);
748 uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
749 uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
750 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
751
752 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
753 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
754
755 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
756 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
757 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
758 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
759
760 return 0;
761}
762
/*
 * Build the complete PM fuse table and download it to the SMC, when the
 * PowerContainment cap is enabled (otherwise a no-op returning 0).
 *
 * The table's SRAM location is read from the SMU74 firmware header, then
 * each section is filled in turn (load line, TDC limit, DW8, temperature
 * scaler, fuzzy fan, GnbLPML, min/max VID, base leakage SIDD) before the
 * host copy is copied into SMC SRAM.
 *
 * Returns 0 on success, -EINVAL on the first failing step.
 */
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU74_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		if (polaris10_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);

		if (polaris10_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);

		if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		if (0 != polaris10_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		if (polaris10_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate Fuzzy Fan Control parameters Failed!",
					return -EINVAL);

		if (polaris10_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Min and Max Vid Failed!",
					return -EINVAL);

		if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
					"Sidd Failed!", return -EINVAL);

		/* The last 92 bytes of SMU74_Discrete_PmFuses are not
		 * downloaded — presumably SMU-owned trailing fields; TODO
		 * confirm against the SMU74 firmware layout. */
		if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&data->power_tune_table,
				(sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
827
828int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
829{
830 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
831 int result = 0;
832
833 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
834 PHM_PlatformCaps_CAC)) {
835 int smc_result;
836 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
837 (uint16_t)(PPSMC_MSG_EnableCac));
838 PP_ASSERT_WITH_CODE((0 == smc_result),
839 "Failed to enable CAC in SMC.", result = -1);
840
841 data->cac_enabled = (0 == smc_result) ? true : false;
842 }
843 return result;
844}
845
846int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
847{
848 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
849 int result = 0;
850
851 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
852 PHM_PlatformCaps_CAC) && data->cac_enabled) {
853 int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
854 (uint16_t)(PPSMC_MSG_DisableCac));
855 PP_ASSERT_WITH_CODE((smc_result == 0),
856 "Failed to disable CAC in SMC.", result = -1);
857
858 data->cac_enabled = false;
859 }
860 return result;
861}
862
863int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
864{
865 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
866
867 if (data->power_containment_features &
868 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
869 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
870 PPSMC_MSG_PkgPwrSetLimit, n);
871 return 0;
872}
873
874static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
875{
876 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
877 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
878}
879
/*
 * Enable the power-containment features (TDC limit, package power
 * tracking) in the SMC, when the PowerContainment cap is set, and record
 * which ones succeeded in data->power_containment_features.
 *
 * When package power tracking comes up, the default limit from the
 * platform table (usMaximumPowerDeliveryLimit * 256 — presumably 8.8
 * fixed point, TODO confirm) is pushed via polaris10_set_power_limit().
 *
 * Returns 0 if everything requested succeeded, -1 if any SMC message
 * failed (remaining features are still attempted).
 */
int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int smc_result;
	int result = 0;

	data->power_containment_features = 0;
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {

		if (data->enable_tdc_limit_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable TDCLimit in SMC.", result = -1;);
			if (0 == smc_result)
				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_TDCLimit;
		}

		if (data->enable_pkg_pwr_tracking_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
			if (0 == smc_result) {
				struct phm_cac_tdp_table *cac_table =
						table_info->cac_dtp_table;
				uint32_t default_limit =
					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);

				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_PkgPwrLimit;

				if (polaris10_set_power_limit(hwmgr, default_limit))
					printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
			}
		}
	}
	return result;
}
923
924int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
925{
926 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
927 int result = 0;
928
929 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
930 PHM_PlatformCaps_PowerContainment) &&
931 data->power_containment_features) {
932 int smc_result;
933
934 if (data->power_containment_features &
935 POWERCONTAINMENT_FEATURE_TDCLimit) {
936 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
937 (uint16_t)(PPSMC_MSG_TDCLimitDisable));
938 PP_ASSERT_WITH_CODE((smc_result == 0),
939 "Failed to disable TDCLimit in SMC.",
940 result = smc_result);
941 }
942
943 if (data->power_containment_features &
944 POWERCONTAINMENT_FEATURE_DTE) {
945 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
946 (uint16_t)(PPSMC_MSG_DisableDTE));
947 PP_ASSERT_WITH_CODE((smc_result == 0),
948 "Failed to disable DTE in SMC.",
949 result = smc_result);
950 }
951
952 if (data->power_containment_features &
953 POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
954 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
955 (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
956 PP_ASSERT_WITH_CODE((smc_result == 0),
957 "Failed to disable PkgPwrTracking in SMC.",
958 result = smc_result);
959 }
960 data->power_containment_features = 0;
961 }
962
963 return result;
964}
965
966int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
967{
968 struct phm_ppt_v1_information *table_info =
969 (struct phm_ppt_v1_information *)(hwmgr->pptable);
970 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
971 int adjust_percent, target_tdp;
972 int result = 0;
973
974 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
975 PHM_PlatformCaps_PowerContainment)) {
976 /* adjustment percentage has already been validated */
977 adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
978 hwmgr->platform_descriptor.TDPAdjustment :
979 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
980 /* SMC requested that target_tdp to be 7 bit fraction in DPM table
981 * but message to be 8 bit fraction for messages
982 */
983 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
984 result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
985 }
986
987 return result;
988}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
deleted file mode 100644
index 329119d6cc71..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef POLARIS10_POWERTUNE_H
24#define POLARIS10_POWERTUNE_H
25
/* Register space selector for polaris10_pt_config_reg entries; decides
 * which CGS accessor polaris10_program_pt_config_registers() uses. */
enum polaris10_pt_config_reg_type {
	POLARIS10_CONFIGREG_MMR = 0,	/* plain register via cgs_read/write_register */
	POLARIS10_CONFIGREG_SMC_IND,	/* SMC indirect space */
	POLARIS10_CONFIGREG_DIDT_IND,	/* DIDT indirect space */
	POLARIS10_CONFIGREG_GC_CAC_IND,	/* GC CAC indirect space */
	POLARIS10_CONFIGREG_CACHE,	/* no HW access: value cached and merged into next write */
	POLARIS10_CONFIGREG_MAX
};
34
35#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
36#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
37#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000
38#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12
39#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000
40#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
41#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
42#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
43#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
44#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
45#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
46#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
47
48/* PowerContainment Features */
49#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
50#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
51#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
52
53#define ixGC_CAC_CNTL 0x0000
54#define ixDIDT_SQ_STALL_CTRL 0x0004
55#define ixDIDT_SQ_TUNING_CTRL 0x0005
56#define ixDIDT_TD_STALL_CTRL 0x0044
57#define ixDIDT_TD_TUNING_CTRL 0x0045
58#define ixDIDT_TCP_STALL_CTRL 0x0064
59#define ixDIDT_TCP_TUNING_CTRL 0x0065
60
/* One field update in a powertune config table.  Tables are terminated
 * by an entry whose offset is 0xFFFFFFFF. */
struct polaris10_pt_config_reg {
	uint32_t offset;	/* register offset within the space named by 'type' */
	uint32_t mask;		/* field mask applied after shifting 'value' */
	uint32_t shift;		/* left shift applied to 'value' */
	uint32_t value;		/* un-shifted field value */
	enum polaris10_pt_config_reg_type type;
};
68
69
70void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
71int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
72int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
73int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
74int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
75int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
76int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
77int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
78int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
79int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
80#endif /* POLARIS10_POWERTUNE_H */
81
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
deleted file mode 100644
index 41f835adba91..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ /dev/null
@@ -1,716 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <asm/div64.h>
25#include "polaris10_thermal.h"
26#include "polaris10_hwmgr.h"
27#include "polaris10_smumgr.h"
28#include "polaris10_ppsmc.h"
29#include "smu/smu_7_1_3_d.h"
30#include "smu/smu_7_1_3_sh_mask.h"
31
32int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
33 struct phm_fan_speed_info *fan_speed_info)
34{
35 if (hwmgr->thermal_controller.fanInfo.bNoFan)
36 return 0;
37
38 fan_speed_info->supports_percent_read = true;
39 fan_speed_info->supports_percent_write = true;
40 fan_speed_info->min_percent = 0;
41 fan_speed_info->max_percent = 100;
42
43 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
44 PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
45 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
46 fan_speed_info->supports_rpm_read = true;
47 fan_speed_info->supports_rpm_write = true;
48 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
49 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
50 } else {
51 fan_speed_info->min_rpm = 0;
52 fan_speed_info->max_rpm = 0;
53 }
54
55 return 0;
56}
57
58int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
59 uint32_t *speed)
60{
61 uint32_t duty100;
62 uint32_t duty;
63 uint64_t tmp64;
64
65 if (hwmgr->thermal_controller.fanInfo.bNoFan)
66 return 0;
67
68 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
69 CG_FDO_CTRL1, FMAX_DUTY100);
70 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
71 CG_THERMAL_STATUS, FDO_PWM_DUTY);
72
73 if (duty100 == 0)
74 return -EINVAL;
75
76
77 tmp64 = (uint64_t)duty * 100;
78 do_div(tmp64, duty100);
79 *speed = (uint32_t)tmp64;
80
81 if (*speed > 100)
82 *speed = 100;
83
84 return 0;
85}
86
87int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
88{
89 uint32_t tach_period;
90 uint32_t crystal_clock_freq;
91
92 if (hwmgr->thermal_controller.fanInfo.bNoFan ||
93 (hwmgr->thermal_controller.fanInfo.
94 ucTachometerPulsesPerRevolution == 0))
95 return 0;
96
97 tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
98 CG_TACH_STATUS, TACH_PERIOD);
99
100 if (tach_period == 0)
101 return -EINVAL;
102
103 crystal_clock_freq = tonga_get_xclk(hwmgr);
104
105 *speed = 60 * crystal_clock_freq * 10000 / tach_period;
106
107 return 0;
108}
109
110/**
111* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
112* @param hwmgr the address of the powerplay hardware manager.
113* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
114* @exception Should always succeed.
115*/
116int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
117{
118
119 if (hwmgr->fan_ctrl_is_in_default_mode) {
120 hwmgr->fan_ctrl_default_mode =
121 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
122 CG_FDO_CTRL2, FDO_PWM_MODE);
123 hwmgr->tmin =
124 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
125 CG_FDO_CTRL2, TMIN);
126 hwmgr->fan_ctrl_is_in_default_mode = false;
127 }
128
129 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
130 CG_FDO_CTRL2, TMIN, 0);
131 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
132 CG_FDO_CTRL2, FDO_PWM_MODE, mode);
133
134 return 0;
135}
136
137/**
138* Reset Fan Speed Control to default mode.
139* @param hwmgr the address of the powerplay hardware manager.
140* @exception Should always succeed.
141*/
142int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
143{
144 if (!hwmgr->fan_ctrl_is_in_default_mode) {
145 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
146 CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
147 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
148 CG_FDO_CTRL2, TMIN, hwmgr->tmin);
149 hwmgr->fan_ctrl_is_in_default_mode = true;
150 }
151
152 return 0;
153}
154
155static int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
156{
157 int result;
158
159 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
160 PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
161 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
162 result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
163
164 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
165 PHM_PlatformCaps_FanSpeedInTableIsRPM))
166 hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
167 hwmgr->thermal_controller.
168 advanceFanControlParameters.usMaxFanRPM);
169 else
170 hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
171 hwmgr->thermal_controller.
172 advanceFanControlParameters.usMaxFanPWM);
173
174 } else {
175 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
176 result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
177 }
178
179 if (!result && hwmgr->thermal_controller.
180 advanceFanControlParameters.ucTargetTemperature)
181 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
182 PPSMC_MSG_SetFanTemperatureTarget,
183 hwmgr->thermal_controller.
184 advanceFanControlParameters.ucTargetTemperature);
185
186 return result;
187}
188
189
190int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
191{
192 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
193}
194
195/**
196* Set Fan Speed in percent.
197* @param hwmgr the address of the powerplay hardware manager.
198* @param speed is the percentage value (0% - 100%) to be set.
199* @exception Fails is the 100% setting appears to be 0.
200*/
201int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
202 uint32_t speed)
203{
204 uint32_t duty100;
205 uint32_t duty;
206 uint64_t tmp64;
207
208 if (hwmgr->thermal_controller.fanInfo.bNoFan)
209 return 0;
210
211 if (speed > 100)
212 speed = 100;
213
214 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_MicrocodeFanControl))
216 polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
217
218 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
219 CG_FDO_CTRL1, FMAX_DUTY100);
220
221 if (duty100 == 0)
222 return -EINVAL;
223
224 tmp64 = (uint64_t)speed * duty100;
225 do_div(tmp64, 100);
226 duty = (uint32_t)tmp64;
227
228 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
229 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
230
231 return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
232}
233
234/**
235* Reset Fan Speed to default.
236* @param hwmgr the address of the powerplay hardware manager.
237* @exception Always succeeds.
238*/
239int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
240{
241 int result;
242
243 if (hwmgr->thermal_controller.fanInfo.bNoFan)
244 return 0;
245
246 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_MicrocodeFanControl)) {
248 result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
249 if (!result)
250 result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
251 } else
252 result = polaris10_fan_ctrl_set_default_mode(hwmgr);
253
254 return result;
255}
256
257/**
258* Set Fan Speed in RPM.
259* @param hwmgr the address of the powerplay hardware manager.
260* @param speed is the percentage value (min - max) to be set.
261* @exception Fails is the speed not lie between min and max.
262*/
263int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
264{
265 uint32_t tach_period;
266 uint32_t crystal_clock_freq;
267
268 if (hwmgr->thermal_controller.fanInfo.bNoFan ||
269 (hwmgr->thermal_controller.fanInfo.
270 ucTachometerPulsesPerRevolution == 0) ||
271 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
272 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
273 return 0;
274
275 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
276 PHM_PlatformCaps_MicrocodeFanControl))
277 polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
278
279 crystal_clock_freq = tonga_get_xclk(hwmgr);
280
281 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
282
283 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
284 CG_TACH_STATUS, TACH_PERIOD, tach_period);
285
286 return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
287}
288
289/**
290* Reads the remote temperature from the SIslands thermal controller.
291*
292* @param hwmgr The address of the hardware manager.
293*/
294int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
295{
296 int temp;
297
298 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
299 CG_MULT_THERMAL_STATUS, CTF_TEMP);
300
301 /* Bit 9 means the reading is lower than the lowest usable value. */
302 if (temp & 0x200)
303 temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
304 else
305 temp = temp & 0x1ff;
306
307 temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
308
309 return temp;
310}
311
312/**
313* Set the requested temperature range for high and low alert signals
314*
315* @param hwmgr The address of the hardware manager.
316* @param range Temperature range to be programmed for high and low alert signals
317* @exception PP_Result_BadInput if the input data is not valid.
318*/
319static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
320 uint32_t low_temp, uint32_t high_temp)
321{
322 uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
323 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
324 uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
325 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
326
327 if (low < low_temp)
328 low = low_temp;
329 if (high > high_temp)
330 high = high_temp;
331
332 if (low > high)
333 return -EINVAL;
334
335 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
336 CG_THERMAL_INT, DIG_THERM_INTH,
337 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
338 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
339 CG_THERMAL_INT, DIG_THERM_INTL,
340 (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
341 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
342 CG_THERMAL_CTRL, DIG_THERM_DPM,
343 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
344
345 return 0;
346}
347
348/**
349* Programs thermal controller one-time setting registers
350*
351* @param hwmgr The address of the hardware manager.
352*/
353static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
354{
355 if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
356 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
357 CG_TACH_CTRL, EDGE_PER_REV,
358 hwmgr->thermal_controller.fanInfo.
359 ucTachometerPulsesPerRevolution - 1);
360
361 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
362 CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
363
364 return 0;
365}
366
367/**
368* Enable thermal alerts on the RV770 thermal controller.
369*
370* @param hwmgr The address of the hardware manager.
371*/
372static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
373{
374 uint32_t alert;
375
376 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
377 CG_THERMAL_INT, THERM_INT_MASK);
378 alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
379 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
380 CG_THERMAL_INT, THERM_INT_MASK, alert);
381
382 /* send message to SMU to enable internal thermal interrupts */
383 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
384}
385
386/**
387* Disable thermal alerts on the RV770 thermal controller.
388* @param hwmgr The address of the hardware manager.
389*/
390static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
391{
392 uint32_t alert;
393
394 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
395 CG_THERMAL_INT, THERM_INT_MASK);
396 alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
397 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
398 CG_THERMAL_INT, THERM_INT_MASK, alert);
399
400 /* send message to SMU to disable internal thermal interrupts */
401 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
402}
403
404/**
405* Uninitialize the thermal controller.
406* Currently just disables alerts.
407* @param hwmgr The address of the hardware manager.
408*/
409int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
410{
411 int result = polaris10_thermal_disable_alert(hwmgr);
412
413 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
414 polaris10_fan_ctrl_set_default_mode(hwmgr);
415
416 return result;
417}
418
419/**
420* Set up the fan table to control the fan using the SMC.
421* @param hwmgr the address of the powerplay hardware manager.
422* @param pInput the pointer to input data
423* @param pOutput the pointer to output data
424* @param pStorage the pointer to temporary storage
425* @param Result the last failure code
426* @return result from set temperature range routine
427*/
428static int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
429 void *input, void *output, void *storage, int result)
430{
431 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
432 SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
433 uint32_t duty100;
434 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
435 uint16_t fdo_min, slope1, slope2;
436 uint32_t reference_clock;
437 int res;
438 uint64_t tmp64;
439
440 if (data->fan_table_start == 0) {
441 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
442 PHM_PlatformCaps_MicrocodeFanControl);
443 return 0;
444 }
445
446 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
447 CG_FDO_CTRL1, FMAX_DUTY100);
448
449 if (duty100 == 0) {
450 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
451 PHM_PlatformCaps_MicrocodeFanControl);
452 return 0;
453 }
454
455 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
456 usPWMMin * duty100;
457 do_div(tmp64, 10000);
458 fdo_min = (uint16_t)tmp64;
459
460 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
461 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
462 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
463 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
464
465 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
466 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
467 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
468 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
469
470 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
471 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
472
473 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
474 thermal_controller.advanceFanControlParameters.usTMin) / 100);
475 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
476 thermal_controller.advanceFanControlParameters.usTMed) / 100);
477 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
478 thermal_controller.advanceFanControlParameters.usTMax) / 100);
479
480 fan_table.Slope1 = cpu_to_be16(slope1);
481 fan_table.Slope2 = cpu_to_be16(slope2);
482
483 fan_table.FdoMin = cpu_to_be16(fdo_min);
484
485 fan_table.HystDown = cpu_to_be16(hwmgr->
486 thermal_controller.advanceFanControlParameters.ucTHyst);
487
488 fan_table.HystUp = cpu_to_be16(1);
489
490 fan_table.HystSlope = cpu_to_be16(1);
491
492 fan_table.TempRespLim = cpu_to_be16(5);
493
494 reference_clock = tonga_get_xclk(hwmgr);
495
496 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
497 thermal_controller.advanceFanControlParameters.ulCycleDelay *
498 reference_clock) / 1600);
499
500 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
501
502 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
503 hwmgr->device, CGS_IND_REG__SMC,
504 CG_MULT_THERMAL_CTRL, TEMP_SEL);
505
506 res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
507 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
508 data->sram_end);
509
510 if (!res && hwmgr->thermal_controller.
511 advanceFanControlParameters.ucMinimumPWMLimit)
512 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
513 PPSMC_MSG_SetFanMinPwm,
514 hwmgr->thermal_controller.
515 advanceFanControlParameters.ucMinimumPWMLimit);
516
517 if (!res && hwmgr->thermal_controller.
518 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
519 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
520 PPSMC_MSG_SetFanSclkTarget,
521 hwmgr->thermal_controller.
522 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
523
524 if (res)
525 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
526 PHM_PlatformCaps_MicrocodeFanControl);
527
528 return 0;
529}
530
531/**
532* Start the fan control on the SMC.
533* @param hwmgr the address of the powerplay hardware manager.
534* @param pInput the pointer to input data
535* @param pOutput the pointer to output data
536* @param pStorage the pointer to temporary storage
537* @param Result the last failure code
538* @return result from set temperature range routine
539*/
540static int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
541 void *input, void *output, void *storage, int result)
542{
543/* If the fantable setup has failed we could have disabled
544 * PHM_PlatformCaps_MicrocodeFanControl even after
545 * this function was included in the table.
546 * Make sure that we still think controlling the fan is OK.
547*/
548 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
549 PHM_PlatformCaps_MicrocodeFanControl)) {
550 polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
551 polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
552 }
553
554 return 0;
555}
556
557/**
558* Set temperature range for high and low alerts
559* @param hwmgr the address of the powerplay hardware manager.
560* @param pInput the pointer to input data
561* @param pOutput the pointer to output data
562* @param pStorage the pointer to temporary storage
563* @param Result the last failure code
564* @return result from set temperature range routine
565*/
566int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
567 void *input, void *output, void *storage, int result)
568{
569 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
570
571 if (range == NULL)
572 return -EINVAL;
573
574 return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
575}
576
577/**
578* Programs one-time setting registers
579* @param hwmgr the address of the powerplay hardware manager.
580* @param pInput the pointer to input data
581* @param pOutput the pointer to output data
582* @param pStorage the pointer to temporary storage
583* @param Result the last failure code
584* @return result from initialize thermal controller routine
585*/
586int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
587 void *input, void *output, void *storage, int result)
588{
589 return polaris10_thermal_initialize(hwmgr);
590}
591
592/**
593* Enable high and low alerts
594* @param hwmgr the address of the powerplay hardware manager.
595* @param pInput the pointer to input data
596* @param pOutput the pointer to output data
597* @param pStorage the pointer to temporary storage
598* @param Result the last failure code
599* @return result from enable alert routine
600*/
601int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
602 void *input, void *output, void *storage, int result)
603{
604 return polaris10_thermal_enable_alert(hwmgr);
605}
606
607/**
608* Disable high and low alerts
609* @param hwmgr the address of the powerplay hardware manager.
610* @param pInput the pointer to input data
611* @param pOutput the pointer to output data
612* @param pStorage the pointer to temporary storage
613* @param Result the last failure code
614* @return result from disable alert routine
615*/
616static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
617 void *input, void *output, void *storage, int result)
618{
619 return polaris10_thermal_disable_alert(hwmgr);
620}
621
622static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
623 void *input, void *output, void *storage, int result)
624{
625 int ret;
626 struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
627 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
628 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
629
630 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
631 return 0;
632
633 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
634 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
635
636 ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
637 0 : -1;
638
639 if (!ret)
640 /* If this param is not changed, this function could fire unnecessarily */
641 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
642
643 return ret;
644}
645
646static const struct phm_master_table_item
647polaris10_thermal_start_thermal_controller_master_list[] = {
648 {NULL, tf_polaris10_thermal_initialize},
649 {NULL, tf_polaris10_thermal_set_temperature_range},
650 {NULL, tf_polaris10_thermal_enable_alert},
651 {NULL, tf_polaris10_thermal_avfs_enable},
652/* We should restrict performance levels to low before we halt the SMC.
653 * On the other hand we are still in boot state when we do this
654 * so it would be pointless.
655 * If this assumption changes we have to revisit this table.
656 */
657 {NULL, tf_polaris10_thermal_setup_fan_table},
658 {NULL, tf_polaris10_thermal_start_smc_fan_control},
659 {NULL, NULL}
660};
661
662static const struct phm_master_table_header
663polaris10_thermal_start_thermal_controller_master = {
664 0,
665 PHM_MasterTableFlag_None,
666 polaris10_thermal_start_thermal_controller_master_list
667};
668
669static const struct phm_master_table_item
670polaris10_thermal_set_temperature_range_master_list[] = {
671 {NULL, tf_polaris10_thermal_disable_alert},
672 {NULL, tf_polaris10_thermal_set_temperature_range},
673 {NULL, tf_polaris10_thermal_enable_alert},
674 {NULL, NULL}
675};
676
677static const struct phm_master_table_header
678polaris10_thermal_set_temperature_range_master = {
679 0,
680 PHM_MasterTableFlag_None,
681 polaris10_thermal_set_temperature_range_master_list
682};
683
684int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
685{
686 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
687 polaris10_fan_ctrl_set_default_mode(hwmgr);
688 return 0;
689}
690
691/**
692* Initializes the thermal controller related functions in the Hardware Manager structure.
693* @param hwmgr The address of the hardware manager.
694* @exception Any error code from the low-level communication.
695*/
696int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
697{
698 int result;
699
700 result = phm_construct_table(hwmgr,
701 &polaris10_thermal_set_temperature_range_master,
702 &(hwmgr->set_temperature_range));
703
704 if (!result) {
705 result = phm_construct_table(hwmgr,
706 &polaris10_thermal_start_thermal_controller_master,
707 &(hwmgr->start_thermal_controller));
708 if (result)
709 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
710 }
711
712 if (!result)
713 hwmgr->fan_ctrl_is_in_default_mode = true;
714 return result;
715}
716
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
deleted file mode 100644
index 62f8cbc2d590..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _POLARIS10_THERMAL_H_
25#define _POLARIS10_THERMAL_H_
26
27#include "hwmgr.h"
28
29#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1
30#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2
31
32#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256
33#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255
34
35#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0
36#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255
37
38#define FDO_PWM_MODE_STATIC 1
39#define FDO_PWM_MODE_STATIC_RPM 5
40
41
42extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
43extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
44extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
45
46extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
47extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
48extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
49extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
50extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
51extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
52extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
53extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
54extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
55extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
56extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
57extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
58extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
59extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
60
61#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
deleted file mode 100644
index e58d038a997b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
+++ /dev/null
@@ -1,350 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "tonga_clockpowergating.h"
26#include "tonga_ppsmc.h"
27#include "tonga_hwmgr.h"
28
29int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
30{
31 if (phm_cf_want_uvd_power_gating(hwmgr))
32 return smum_send_msg_to_smc(hwmgr->smumgr,
33 PPSMC_MSG_UVDPowerOFF);
34 return 0;
35}
36
37int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
38{
39 if (phm_cf_want_uvd_power_gating(hwmgr)) {
40 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
41 PHM_PlatformCaps_UVDDynamicPowerGating)) {
42 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
43 PPSMC_MSG_UVDPowerON, 1);
44 } else {
45 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
46 PPSMC_MSG_UVDPowerON, 0);
47 }
48 }
49
50 return 0;
51}
52
53int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
54{
55 if (phm_cf_want_vce_power_gating(hwmgr))
56 return smum_send_msg_to_smc(hwmgr->smumgr,
57 PPSMC_MSG_VCEPowerOFF);
58 return 0;
59}
60
61int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
62{
63 if (phm_cf_want_vce_power_gating(hwmgr))
64 return smum_send_msg_to_smc(hwmgr->smumgr,
65 PPSMC_MSG_VCEPowerON);
66 return 0;
67}
68
69int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
70{
71 int ret = 0;
72
73 switch (block) {
74 case PHM_AsicBlock_UVD_MVC:
75 case PHM_AsicBlock_UVD:
76 case PHM_AsicBlock_UVD_HD:
77 case PHM_AsicBlock_UVD_SD:
78 if (gating == PHM_ClockGateSetting_StaticOff)
79 ret = tonga_phm_powerdown_uvd(hwmgr);
80 else
81 ret = tonga_phm_powerup_uvd(hwmgr);
82 break;
83 case PHM_AsicBlock_GFX:
84 default:
85 break;
86 }
87
88 return ret;
89}
90
91int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
92{
93 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
94
95 data->uvd_power_gated = false;
96 data->vce_power_gated = false;
97
98 tonga_phm_powerup_uvd(hwmgr);
99 tonga_phm_powerup_vce(hwmgr);
100
101 return 0;
102}
103
104int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
105{
106 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
107
108 if (data->uvd_power_gated == bgate)
109 return 0;
110
111 data->uvd_power_gated = bgate;
112
113 if (bgate) {
114 cgs_set_clockgating_state(hwmgr->device,
115 AMD_IP_BLOCK_TYPE_UVD,
116 AMD_CG_STATE_UNGATE);
117 cgs_set_powergating_state(hwmgr->device,
118 AMD_IP_BLOCK_TYPE_UVD,
119 AMD_PG_STATE_GATE);
120 tonga_update_uvd_dpm(hwmgr, true);
121 tonga_phm_powerdown_uvd(hwmgr);
122 } else {
123 tonga_phm_powerup_uvd(hwmgr);
124 cgs_set_powergating_state(hwmgr->device,
125 AMD_IP_BLOCK_TYPE_UVD,
126 AMD_PG_STATE_UNGATE);
127 cgs_set_clockgating_state(hwmgr->device,
128 AMD_IP_BLOCK_TYPE_UVD,
129 AMD_PG_STATE_GATE);
130
131 tonga_update_uvd_dpm(hwmgr, false);
132 }
133
134 return 0;
135}
136
137int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
138{
139 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
140 struct phm_set_power_state_input states;
141 const struct pp_power_state *pcurrent;
142 struct pp_power_state *requested;
143
144 pcurrent = hwmgr->current_ps;
145 requested = hwmgr->request_ps;
146
147 states.pcurrent_state = &(pcurrent->hardware);
148 states.pnew_state = &(requested->hardware);
149
150 if (phm_cf_want_vce_power_gating(hwmgr)) {
151 if (data->vce_power_gated != bgate) {
152 if (bgate) {
153 cgs_set_clockgating_state(
154 hwmgr->device,
155 AMD_IP_BLOCK_TYPE_VCE,
156 AMD_CG_STATE_UNGATE);
157 cgs_set_powergating_state(
158 hwmgr->device,
159 AMD_IP_BLOCK_TYPE_VCE,
160 AMD_PG_STATE_GATE);
161 tonga_enable_disable_vce_dpm(hwmgr, false);
162 data->vce_power_gated = true;
163 } else {
164 tonga_phm_powerup_vce(hwmgr);
165 data->vce_power_gated = false;
166 cgs_set_powergating_state(
167 hwmgr->device,
168 AMD_IP_BLOCK_TYPE_VCE,
169 AMD_PG_STATE_UNGATE);
170 cgs_set_clockgating_state(
171 hwmgr->device,
172 AMD_IP_BLOCK_TYPE_VCE,
173 AMD_PG_STATE_GATE);
174
175 tonga_update_vce_dpm(hwmgr, &states);
176 tonga_enable_disable_vce_dpm(hwmgr, true);
177 return 0;
178 }
179 }
180 } else {
181 tonga_update_vce_dpm(hwmgr, &states);
182 tonga_enable_disable_vce_dpm(hwmgr, true);
183 return 0;
184 }
185
186 if (!data->vce_power_gated)
187 tonga_update_vce_dpm(hwmgr, &states);
188
189 return 0;
190}
191
192int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
193 const uint32_t *msg_id)
194{
195 PPSMC_Msg msg;
196 uint32_t value;
197
198 switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
199 case PP_GROUP_GFX:
200 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
201 case PP_BLOCK_GFX_CG:
202 if (PP_STATE_SUPPORT_CG & *msg_id) {
203 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
204 ? PPSMC_MSG_EnableClockGatingFeature
205 : PPSMC_MSG_DisableClockGatingFeature;
206 value = CG_GFX_CGCG_MASK;
207
208 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
209 return -1;
210 }
211 if (PP_STATE_SUPPORT_LS & *msg_id) {
212 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
213 ? PPSMC_MSG_EnableClockGatingFeature
214 : PPSMC_MSG_DisableClockGatingFeature;
215 value = CG_GFX_CGLS_MASK;
216
217 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
218 return -1;
219 }
220 break;
221
222 case PP_BLOCK_GFX_MG:
223 /* For GFX MGCG, there are three different ones;
224 * CPF, RLC, and all others. CPF MGCG will not be used for Tonga.
225 * For GFX MGLS, Tonga will not support it.
226 * */
227 if (PP_STATE_SUPPORT_CG & *msg_id) {
228 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
229 ? PPSMC_MSG_EnableClockGatingFeature
230 : PPSMC_MSG_DisableClockGatingFeature;
231 value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
232
233 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
234 return -1;
235 }
236 break;
237
238 default:
239 return -1;
240 }
241 break;
242
243 case PP_GROUP_SYS:
244 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
245 case PP_BLOCK_SYS_BIF:
246 if (PP_STATE_SUPPORT_LS & *msg_id) {
247 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
248 ? PPSMC_MSG_EnableClockGatingFeature
249 : PPSMC_MSG_DisableClockGatingFeature;
250 value = CG_SYS_BIF_MGLS_MASK;
251
252 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
253 return -1;
254 }
255 break;
256
257 case PP_BLOCK_SYS_MC:
258 if (PP_STATE_SUPPORT_CG & *msg_id) {
259 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
260 ? PPSMC_MSG_EnableClockGatingFeature
261 : PPSMC_MSG_DisableClockGatingFeature;
262 value = CG_SYS_MC_MGCG_MASK;
263
264 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
265 return -1;
266 }
267
268 if (PP_STATE_SUPPORT_LS & *msg_id) {
269 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
270 ? PPSMC_MSG_EnableClockGatingFeature
271 : PPSMC_MSG_DisableClockGatingFeature;
272 value = CG_SYS_MC_MGLS_MASK;
273
274 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
275 return -1;
276
277 }
278 break;
279
280 case PP_BLOCK_SYS_HDP:
281 if (PP_STATE_SUPPORT_CG & *msg_id) {
282 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
283 ? PPSMC_MSG_EnableClockGatingFeature
284 : PPSMC_MSG_DisableClockGatingFeature;
285 value = CG_SYS_HDP_MGCG_MASK;
286
287 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
288 return -1;
289 }
290
291 if (PP_STATE_SUPPORT_LS & *msg_id) {
292 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
293 ? PPSMC_MSG_EnableClockGatingFeature
294 : PPSMC_MSG_DisableClockGatingFeature;
295
296 value = CG_SYS_HDP_MGLS_MASK;
297
298 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
299 return -1;
300 }
301 break;
302
303 case PP_BLOCK_SYS_SDMA:
304 if (PP_STATE_SUPPORT_CG & *msg_id) {
305 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
306 ? PPSMC_MSG_EnableClockGatingFeature
307 : PPSMC_MSG_DisableClockGatingFeature;
308 value = CG_SYS_SDMA_MGCG_MASK;
309
310 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
311 return -1;
312 }
313
314 if (PP_STATE_SUPPORT_LS & *msg_id) {
315 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
316 ? PPSMC_MSG_EnableClockGatingFeature
317 : PPSMC_MSG_DisableClockGatingFeature;
318
319 value = CG_SYS_SDMA_MGLS_MASK;
320
321 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
322 return -1;
323 }
324 break;
325
326 case PP_BLOCK_SYS_ROM:
327 if (PP_STATE_SUPPORT_CG & *msg_id) {
328 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
329 ? PPSMC_MSG_EnableClockGatingFeature
330 : PPSMC_MSG_DisableClockGatingFeature;
331 value = CG_SYS_ROM_MASK;
332
333 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
334 return -1;
335 }
336 break;
337
338 default:
339 return -1;
340
341 }
342 break;
343
344 default:
345 return -1;
346
347 }
348
349 return 0;
350}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
deleted file mode 100644
index 8bc38cb17b7f..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _TONGA_CLOCK_POWER_GATING_H_
25#define _TONGA_CLOCK_POWER_GATING_H_
26
27#include "tonga_hwmgr.h"
28#include "pp_asicblocks.h"
29
30extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
31extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
32extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
33extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
34extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
35extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
36#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
deleted file mode 100644
index 080d69d77f04..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef TONGA_DYN_DEFAULTS_H
24#define TONGA_DYN_DEFAULTS_H
25
26
27/** \file
28 * Volcanic Islands Dynamic default parameters.
29 */
30
31enum TONGAdpm_TrendDetection {
32 TONGAdpm_TrendDetection_AUTO,
33 TONGAdpm_TrendDetection_UP,
34 TONGAdpm_TrendDetection_DOWN
35};
36typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
37
38/* Bit vector representing same fields as hardware register. */
39#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */
40/* HDP_busy */
41/* IH_busy */
42/* DRM_busy */
43/* DRMDMA_busy */
44/* UVD_busy */
45/* VCE_busy */
46/* ACP_busy */
47/* SAMU_busy */
48/* AVP_busy */
49/* SDMA enabled */
50#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
51/* SH_Gfx_busy */
52/* RB_Gfx_busy */
53/* VCE_busy */
54
55#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
56/* FE_Gfx_busy */
57/* RB_Gfx_busy */
58/* ACP_busy */
59
60#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
61/* FE_Gfx_busy */
62/* SH_Gfx_busy */
63/* UVD_busy */
64
65#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */
66/* VCE_busy */
67/* ACP_busy */
68/* SAMU_busy */
69
70#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */
71#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */
72#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */
73
74
75/* thermal protection counter (units).*/
76#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
77
78/* static screen threshold unit */
79#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0
80
81/* static screen threshold */
82#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8
83
84/* gfx idle clock stop threshold */
85#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
86
87/* Fixed reference divider to use when building baby stepping tables. */
88#define PPTONGA_REFERENCEDIVIDER_DFLT 4
89
90/*
91 * ULV voltage change delay time
92 * Used to be delay_vreg in N.I. split for S.I.
93 * Using N.I. delay_vreg value as default
94 * ReferenceClock = 2700
95 * VoltageResponseTime = 1000
96 * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
97 */
98
99#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687
100
101#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035
102#define PPTONGA_CGULVCONTROL_DFLT 0x00007450
103#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */
104#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */
105
106#endif
107
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
deleted file mode 100644
index 3110bf0eeacc..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ /dev/null
@@ -1,6371 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27#include "pp_acpi.h"
28#include "hwmgr.h"
29#include <atombios.h>
30#include "tonga_hwmgr.h"
31#include "pptable.h"
32#include "processpptables.h"
33#include "process_pptables_v1_0.h"
34#include "pptable_v1_0.h"
35#include "pp_debug.h"
36#include "tonga_ppsmc.h"
37#include "cgs_common.h"
38#include "pppcielanes.h"
39#include "tonga_dyn_defaults.h"
40#include "smumgr.h"
41#include "tonga_smumgr.h"
42#include "tonga_clockpowergating.h"
43#include "tonga_thermal.h"
44
45#include "smu/smu_7_1_2_d.h"
46#include "smu/smu_7_1_2_sh_mask.h"
47
48#include "gmc/gmc_8_1_d.h"
49#include "gmc/gmc_8_1_sh_mask.h"
50
51#include "bif/bif_5_0_d.h"
52#include "bif/bif_5_0_sh_mask.h"
53
54#include "dce/dce_10_0_d.h"
55#include "dce/dce_10_0_sh_mask.h"
56
57#include "cgs_linux.h"
58#include "eventmgr.h"
59#include "amd_pcie_helpers.h"
60
61#define MC_CG_ARB_FREQ_F0 0x0a
62#define MC_CG_ARB_FREQ_F1 0x0b
63#define MC_CG_ARB_FREQ_F2 0x0c
64#define MC_CG_ARB_FREQ_F3 0x0d
65
66#define MC_CG_SEQ_DRAMCONF_S0 0x05
67#define MC_CG_SEQ_DRAMCONF_S1 0x06
68#define MC_CG_SEQ_YCLK_SUSPEND 0x04
69#define MC_CG_SEQ_YCLK_RESUME 0x0a
70
71#define PCIE_BUS_CLK 10000
72#define TCLK (PCIE_BUS_CLK / 10)
73
74#define SMC_RAM_END 0x40000
75#define SMC_CG_IND_START 0xc0030000
76#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/
77
78#define VOLTAGE_SCALE 4
79#define VOLTAGE_VID_OFFSET_SCALE1 625
80#define VOLTAGE_VID_OFFSET_SCALE2 100
81
82#define VDDC_VDDCI_DELTA 200
83#define VDDC_VDDGFX_DELTA 300
84
85#define MC_SEQ_MISC0_GDDR5_SHIFT 28
86#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
87#define MC_SEQ_MISC0_GDDR5_VALUE 5
88
89typedef uint32_t PECI_RegistryValue;
90
91/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
92static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
93 {600, 1050, 3, 0},
94 {600, 1050, 6, 1} };
95
96/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
97static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
98 { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
99 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
100
101/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
102static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
103 {0, 1, 3, 2, 4, 5},
104 {0, 2, 4, 5, 6, 5} };
105
106/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
107enum DPM_EVENT_SRC {
108 DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
109 DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
110 DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
111 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
112 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
113};
114typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
115
116static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
117
118struct tonga_power_state *cast_phw_tonga_power_state(
119 struct pp_hw_power_state *hw_ps)
120{
121 if (hw_ps == NULL)
122 return NULL;
123
124 PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
125 "Invalid Powerstate Type!",
126 return NULL);
127
128 return (struct tonga_power_state *)hw_ps;
129}
130
131const struct tonga_power_state *cast_const_phw_tonga_power_state(
132 const struct pp_hw_power_state *hw_ps)
133{
134 if (hw_ps == NULL)
135 return NULL;
136
137 PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
138 "Invalid Powerstate Type!",
139 return NULL);
140
141 return (const struct tonga_power_state *)hw_ps;
142}
143
144int tonga_add_voltage(struct pp_hwmgr *hwmgr,
145 phm_ppt_v1_voltage_lookup_table *look_up_table,
146 phm_ppt_v1_voltage_lookup_record *record)
147{
148 uint32_t i;
149 PP_ASSERT_WITH_CODE((NULL != look_up_table),
150 "Lookup Table empty.", return -1;);
151 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
152 "Lookup Table empty.", return -1;);
153 PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count),
154 "Lookup Table is full.", return -1;);
155
156 /* This is to avoid entering duplicate calculated records. */
157 for (i = 0; i < look_up_table->count; i++) {
158 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
159 if (look_up_table->entries[i].us_calculated == 1)
160 return 0;
161 else
162 break;
163 }
164 }
165
166 look_up_table->entries[i].us_calculated = 1;
167 look_up_table->entries[i].us_vdd = record->us_vdd;
168 look_up_table->entries[i].us_cac_low = record->us_cac_low;
169 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
170 look_up_table->entries[i].us_cac_high = record->us_cac_high;
171 /* Only increment the count when we're appending, not replacing duplicate entry. */
172 if (i == look_up_table->count)
173 look_up_table->count++;
174
175 return 0;
176}
177
178int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
179{
180 PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
181
182 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
183}
184
185uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
186 uint32_t voltage)
187{
188 uint8_t count = (uint8_t) (voltage_table->count);
189 uint8_t i = 0;
190
191 PP_ASSERT_WITH_CODE((NULL != voltage_table),
192 "Voltage Table empty.", return 0;);
193 PP_ASSERT_WITH_CODE((0 != count),
194 "Voltage Table empty.", return 0;);
195
196 for (i = 0; i < count; i++) {
197 /* find first voltage bigger than requested */
198 if (voltage_table->entries[i].value >= voltage)
199 return i;
200 }
201
202 /* voltage is bigger than max voltage in the table */
203 return i - 1;
204}
205
206
207/**
208 * @brief PhwTonga_GetVoltageOrder
209 * Returns index of requested voltage record in lookup(table)
210 * @param hwmgr - pointer to hardware manager
211 * @param lookupTable - lookup list to search in
212 * @param voltage - voltage to look for
213 * @return 0 on success
214 */
215uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
216 uint16_t voltage)
217{
218 uint8_t count = (uint8_t) (look_up_table->count);
219 uint8_t i;
220
221 PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
222 PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
223
224 for (i = 0; i < count; i++) {
225 /* find first voltage equal or bigger than requested */
226 if (look_up_table->entries[i].us_vdd >= voltage)
227 return i;
228 }
229
230 /* voltage is bigger than max voltage in the table */
231 return i-1;
232}
233
234static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
235{
236 /*
237 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
238 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
239 * whereas voltage control is a fundemental change that will not be disabled
240 */
241
242 return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
243 FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
244}
245
246/**
247 * Re-generate the DPM level mask value
248 * @param hwmgr the address of the hardware manager
249 */
250static uint32_t tonga_get_dpm_level_enable_mask_value(
251 struct tonga_single_dpm_table * dpm_table)
252{
253 uint32_t i;
254 uint32_t mask_value = 0;
255
256 for (i = dpm_table->count; i > 0; i--) {
257 mask_value = mask_value << 1;
258
259 if (dpm_table->dpm_levels[i-1].enabled)
260 mask_value |= 0x1;
261 else
262 mask_value &= 0xFFFFFFFE;
263 }
264 return mask_value;
265}
266
267/**
268 * Retrieve DPM default values from registry (if available)
269 *
270 * @param hwmgr the address of the powerplay hardware manager.
271 */
272void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
273{
274 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
275 phw_tonga_ulv_parm *ulv = &(data->ulv);
276 uint32_t tmp;
277
278 ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT;
279 data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0;
280 data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1;
281 data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2;
282 data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3;
283 data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4;
284 data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5;
285 data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6;
286 data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7;
287
288 data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT;
289 data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT;
290
291 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
292 PHM_PlatformCaps_ABM);
293 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
294 PHM_PlatformCaps_NonABMSupportInPPLib);
295
296 tmp = 0;
297 if (tmp == 0)
298 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
299 PHM_PlatformCaps_DynamicACTiming);
300
301 tmp = 0;
302 if (0 != tmp)
303 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
304 PHM_PlatformCaps_DisableMemoryTransition);
305
306 tonga_initialize_power_tune_defaults(hwmgr);
307
308 data->mclk_strobe_mode_threshold = 40000;
309 data->mclk_stutter_mode_threshold = 30000;
310 data->mclk_edc_enable_threshold = 40000;
311 data->mclk_edc_wr_enable_threshold = 40000;
312
313 tmp = 0;
314 if (tmp != 0)
315 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
316 PHM_PlatformCaps_DisableMCLS);
317
318 data->pcie_gen_performance.max = PP_PCIEGen1;
319 data->pcie_gen_performance.min = PP_PCIEGen3;
320 data->pcie_gen_power_saving.max = PP_PCIEGen1;
321 data->pcie_gen_power_saving.min = PP_PCIEGen3;
322
323 data->pcie_lane_performance.max = 0;
324 data->pcie_lane_performance.min = 16;
325 data->pcie_lane_power_saving.max = 0;
326 data->pcie_lane_power_saving.min = 16;
327
328 tmp = 0;
329
330 if (tmp)
331 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
332 PHM_PlatformCaps_SclkThrottleLowNotification);
333
334 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
335 PHM_PlatformCaps_DynamicUVDState);
336
337}
338
339static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
340{
341 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
342
343 int result = 0;
344 uint32_t low_sclk_interrupt_threshold = 0;
345
346 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
347 PHM_PlatformCaps_SclkThrottleLowNotification)
348 && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
349 data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
350 low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
351
352 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
353
354 result = tonga_copy_bytes_to_smc(
355 hwmgr->smumgr,
356 data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable,
357 LowSclkInterruptThreshold),
358 (uint8_t *)&low_sclk_interrupt_threshold,
359 sizeof(uint32_t),
360 data->sram_end
361 );
362 }
363
364 return result;
365}
366
367/**
368 * Find SCLK value that is associated with specified virtual_voltage_Id.
369 *
370 * @param hwmgr the address of the powerplay hardware manager.
371 * @param virtual_voltage_Id voltageId to look for.
372 * @param sclk output value .
373 * @return always 0 if success and 2 if association not found
374 */
375static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
376 phm_ppt_v1_voltage_lookup_table *lookup_table,
377 uint16_t virtual_voltage_id, uint32_t *sclk)
378{
379 uint8_t entryId;
380 uint8_t voltageId;
381 struct phm_ppt_v1_information *pptable_info =
382 (struct phm_ppt_v1_information *)(hwmgr->pptable);
383
384 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1);
385
386 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
387 for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) {
388 voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd;
389 if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
390 break;
391 }
392
393 PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count,
394 "Can't find requested voltage id in vdd_dep_on_sclk table!",
395 return -1;
396 );
397
398 *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk;
399
400 return 0;
401}
402
403/**
404 * Get Leakage VDDC based on leakage ID.
405 *
406 * @param hwmgr the address of the powerplay hardware manager.
407 * @return 2 if vddgfx returned is greater than 2V or if BIOS
408 */
409int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
410{
411 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
412 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
413 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
414 uint16_t virtual_voltage_id;
415 uint16_t vddc = 0;
416 uint16_t vddgfx = 0;
417 uint16_t i, j;
418 uint32_t sclk = 0;
419
420 /* retrieve voltage for leakage ID (0xff01 + i) */
421 for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) {
422 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
423
424 /* in split mode we should have only vddgfx EVV leakages */
425 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
426 if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
427 pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) {
428 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
429 PHM_PlatformCaps_ClockStretcher)) {
430 for (j = 1; j < sclk_table->count; j++) {
431 if (sclk_table->entries[j].clk == sclk &&
432 sclk_table->entries[j].cks_enable == 0) {
433 sclk += 5000;
434 break;
435 }
436 }
437 }
438 if (0 == atomctrl_get_voltage_evv_on_sclk
439 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
440 virtual_voltage_id, &vddgfx)) {
441 /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
442 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
443
444 /* the voltage should not be zero nor equal to leakage ID */
445 if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
446 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
447 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
448 data->vddcgfx_leakage.count++;
449 }
450 } else {
451 printk("Error retrieving EVV voltage value!\n");
452 }
453 }
454 } else {
455 /* in merged mode we have only vddc EVV leakages */
456 if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
457 pptable_info->vddc_lookup_table,
458 virtual_voltage_id, &sclk)) {
459 if (0 == atomctrl_get_voltage_evv_on_sclk
460 (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
461 virtual_voltage_id, &vddc)) {
462 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
463 PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
464
465 /* the voltage should not be zero nor equal to leakage ID */
466 if (vddc != 0 && vddc != virtual_voltage_id) {
467 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
468 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
469 data->vddc_leakage.count++;
470 }
471 } else {
472 printk("Error retrieving EVV voltage value!\n");
473 }
474 }
475 }
476 }
477
478 return 0;
479}
480
481int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
482{
483 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
484
485 /* enable SCLK dpm */
486 if (0 == data->sclk_dpm_key_disabled) {
487 PP_ASSERT_WITH_CODE(
488 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
489 PPSMC_MSG_DPM_Enable)),
490 "Failed to enable SCLK DPM during DPM Start Function!",
491 return -1);
492 }
493
494 /* enable MCLK dpm */
495 if (0 == data->mclk_dpm_key_disabled) {
496 PP_ASSERT_WITH_CODE(
497 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
498 PPSMC_MSG_MCLKDPM_Enable)),
499 "Failed to enable MCLK DPM during DPM Start Function!",
500 return -1);
501
502 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
503
504 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
505 ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
506 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
507 ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
508 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
509 ixLCAC_CPL_CNTL, 0x100005);/*Read */
510
511 udelay(10);
512
513 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
514 ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
515 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
516 ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
517 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
518 ixLCAC_CPL_CNTL, 0x500005);/* write */
519
520 }
521
522 return 0;
523}
524
525int tonga_start_dpm(struct pp_hwmgr *hwmgr)
526{
527 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
528
529 /* enable general power management */
530 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
531 /* enable sclk deep sleep */
532 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);
533
534 /* prepare for PCIE DPM */
535 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
536 offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000);
537
538 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);
539
540 PP_ASSERT_WITH_CODE(
541 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
542 PPSMC_MSG_Voltage_Cntl_Enable)),
543 "Failed to enable voltage DPM during DPM Start Function!",
544 return -1);
545
546 if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) {
547 PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
548 }
549
550 /* enable PCIE dpm */
551 if (0 == data->pcie_dpm_key_disabled) {
552 PP_ASSERT_WITH_CODE(
553 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
554 PPSMC_MSG_PCIeDPM_Enable)),
555 "Failed to enable pcie DPM during DPM Start Function!",
556 return -1
557 );
558 }
559
560 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
561 PHM_PlatformCaps_Falcon_QuickTransition)) {
562 smum_send_msg_to_smc(hwmgr->smumgr,
563 PPSMC_MSG_EnableACDCGPIOInterrupt);
564 }
565
566 return 0;
567}
568
569int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
570{
571 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
572
573 /* disable SCLK dpm */
574 if (0 == data->sclk_dpm_key_disabled) {
575 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
576 PP_ASSERT_WITH_CODE(
577 !tonga_is_dpm_running(hwmgr),
578 "Trying to Disable SCLK DPM when DPM is disabled",
579 return -1
580 );
581
582 PP_ASSERT_WITH_CODE(
583 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
584 PPSMC_MSG_DPM_Disable)),
585 "Failed to disable SCLK DPM during DPM stop Function!",
586 return -1);
587 }
588
589 /* disable MCLK dpm */
590 if (0 == data->mclk_dpm_key_disabled) {
591 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
592 PP_ASSERT_WITH_CODE(
593 !tonga_is_dpm_running(hwmgr),
594 "Trying to Disable MCLK DPM when DPM is disabled",
595 return -1
596 );
597
598 PP_ASSERT_WITH_CODE(
599 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
600 PPSMC_MSG_MCLKDPM_Disable)),
601 "Failed to Disable MCLK DPM during DPM stop Function!",
602 return -1);
603 }
604
605 return 0;
606}
607
608int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
609{
610 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
611
612 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0);
613 /* disable sclk deep sleep*/
614 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0);
615
616 /* disable PCIE dpm */
617 if (0 == data->pcie_dpm_key_disabled) {
618 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
619 PP_ASSERT_WITH_CODE(
620 !tonga_is_dpm_running(hwmgr),
621 "Trying to Disable PCIE DPM when DPM is disabled",
622 return -1
623 );
624 PP_ASSERT_WITH_CODE(
625 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
626 PPSMC_MSG_PCIeDPM_Disable)),
627 "Failed to disable pcie DPM during DPM stop Function!",
628 return -1);
629 }
630
631 if (0 != tonga_disable_sclk_mclk_dpm(hwmgr))
632 PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1);
633
634 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
635 PP_ASSERT_WITH_CODE(
636 !tonga_is_dpm_running(hwmgr),
637 "Trying to Disable Voltage CNTL when DPM is disabled",
638 return -1
639 );
640
641 PP_ASSERT_WITH_CODE(
642 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
643 PPSMC_MSG_Voltage_Cntl_Disable)),
644 "Failed to disable voltage DPM during DPM stop Function!",
645 return -1);
646
647 return 0;
648}
649
/**
 * Enable SCLK power management by clearing the SCLK_PWRMGT_OFF bit
 * in SCLK_PWRMGT_CNTL.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);

	return 0;
}
656
657/**
658 * Send a message to the SMC and return a parameter
659 *
660 * @param hwmgr: the address of the powerplay hardware manager.
661 * @param msg: the message to send.
662 * @param parameter: pointer to the received parameter
663 * @return The response that came from the SMC.
664 */
665PPSMC_Result tonga_send_msg_to_smc_return_parameter(
666 struct pp_hwmgr *hwmgr,
667 PPSMC_Msg msg,
668 uint32_t *parameter)
669{
670 int result;
671
672 result = smum_send_msg_to_smc(hwmgr->smumgr, msg);
673
674 if ((0 == result) && parameter) {
675 *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
676 }
677
678 return result;
679}
680
681/**
682 * force DPM power State
683 *
684 * @param hwmgr: the address of the powerplay hardware manager.
685 * @param n : DPM level
686 * @return The response that came from the SMC.
687 */
688int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
689{
690 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
691 uint32_t level_mask = 1 << n;
692
693 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
694 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
695 "Trying to force SCLK when DPM is disabled",
696 return -1;);
697 if (0 == data->sclk_dpm_key_disabled)
698 return (0 == smum_send_msg_to_smc_with_parameter(
699 hwmgr->smumgr,
700 (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask),
701 level_mask) ? 0 : 1);
702
703 return 0;
704}
705
706/**
707 * force DPM power State
708 *
709 * @param hwmgr: the address of the powerplay hardware manager.
710 * @param n : DPM level
711 * @return The response that came from the SMC.
712 */
713int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
714{
715 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
716 uint32_t level_mask = 1 << n;
717
718 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
719 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
720 "Trying to Force MCLK when DPM is disabled",
721 return -1;);
722 if (0 == data->mclk_dpm_key_disabled)
723 return (0 == smum_send_msg_to_smc_with_parameter(
724 hwmgr->smumgr,
725 (PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask),
726 level_mask) ? 0 : 1);
727
728 return 0;
729}
730
731/**
732 * force DPM power State
733 *
734 * @param hwmgr: the address of the powerplay hardware manager.
735 * @param n : DPM level
736 * @return The response that came from the SMC.
737 */
738int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
739{
740 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
741
742 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
743 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
744 "Trying to Force PCIE level when DPM is disabled",
745 return -1;);
746 if (0 == data->pcie_dpm_key_disabled)
747 return (0 == smum_send_msg_to_smc_with_parameter(
748 hwmgr->smumgr,
749 (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel),
750 n) ? 0 : 1);
751
752 return 0;
753}
754
755/**
756 * Set the initial state by calling SMC to switch to this state directly
757 *
758 * @param hwmgr the address of the powerplay hardware manager.
759 * @return always 0
760 */
761int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
762{
763 /*
764 * SMC only stores one state that SW will ask to switch too,
765 * so we switch the the just uploaded one
766 */
767 return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1;
768}
769
770/**
771 * Get the location of various tables inside the FW image.
772 *
773 * @param hwmgr the address of the powerplay hardware manager.
774 * @return always 0
775 */
776static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
777{
778 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
779 struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
780
781 uint32_t tmp;
782 int result;
783 bool error = false;
784
785 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
786 SMU72_FIRMWARE_HEADER_LOCATION +
787 offsetof(SMU72_Firmware_Header, DpmTable),
788 &tmp, data->sram_end);
789
790 if (0 == result) {
791 data->dpm_table_start = tmp;
792 }
793
794 error |= (0 != result);
795
796 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
797 SMU72_FIRMWARE_HEADER_LOCATION +
798 offsetof(SMU72_Firmware_Header, SoftRegisters),
799 &tmp, data->sram_end);
800
801 if (0 == result) {
802 data->soft_regs_start = tmp;
803 tonga_smu->soft_regs_start = tmp;
804 }
805
806 error |= (0 != result);
807
808
809 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
810 SMU72_FIRMWARE_HEADER_LOCATION +
811 offsetof(SMU72_Firmware_Header, mcRegisterTable),
812 &tmp, data->sram_end);
813
814 if (0 == result) {
815 data->mc_reg_table_start = tmp;
816 }
817
818 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
819 SMU72_FIRMWARE_HEADER_LOCATION +
820 offsetof(SMU72_Firmware_Header, FanTable),
821 &tmp, data->sram_end);
822
823 if (0 == result) {
824 data->fan_table_start = tmp;
825 }
826
827 error |= (0 != result);
828
829 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
830 SMU72_FIRMWARE_HEADER_LOCATION +
831 offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
832 &tmp, data->sram_end);
833
834 if (0 == result) {
835 data->arb_table_start = tmp;
836 }
837
838 error |= (0 != result);
839
840
841 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
842 SMU72_FIRMWARE_HEADER_LOCATION +
843 offsetof(SMU72_Firmware_Header, Version),
844 &tmp, data->sram_end);
845
846 if (0 == result) {
847 hwmgr->microcode_version_info.SMC = tmp;
848 }
849
850 error |= (0 != result);
851
852 return error ? 1 : 0;
853}
854
855/**
856 * Read clock related registers.
857 *
858 * @param hwmgr the address of the powerplay hardware manager.
859 * @return always 0
860 */
861int tonga_read_clock_registers(struct pp_hwmgr *hwmgr)
862{
863 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
864
865 data->clock_registers.vCG_SPLL_FUNC_CNTL =
866 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
867 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
868 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
869 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
870 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
871 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
872 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
873 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
874 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
875 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
876 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
877 data->clock_registers.vDLL_CNTL =
878 cgs_read_register(hwmgr->device, mmDLL_CNTL);
879 data->clock_registers.vMCLK_PWRMGT_CNTL =
880 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
881 data->clock_registers.vMPLL_AD_FUNC_CNTL =
882 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
883 data->clock_registers.vMPLL_DQ_FUNC_CNTL =
884 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
885 data->clock_registers.vMPLL_FUNC_CNTL =
886 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
887 data->clock_registers.vMPLL_FUNC_CNTL_1 =
888 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
889 data->clock_registers.vMPLL_FUNC_CNTL_2 =
890 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
891 data->clock_registers.vMPLL_SS1 =
892 cgs_read_register(hwmgr->device, mmMPLL_SS1);
893 data->clock_registers.vMPLL_SS2 =
894 cgs_read_register(hwmgr->device, mmMPLL_SS2);
895
896 return 0;
897}
898
899/**
900 * Find out if memory is GDDR5.
901 *
902 * @param hwmgr the address of the powerplay hardware manager.
903 * @return always 0
904 */
905int tonga_get_memory_type(struct pp_hwmgr *hwmgr)
906{
907 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
908 uint32_t temp;
909
910 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
911
912 data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
913 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
914 MC_SEQ_MISC0_GDDR5_SHIFT));
915
916 return 0;
917}
918
919/**
920 * Enables Dynamic Power Management by SMC
921 *
922 * @param hwmgr the address of the powerplay hardware manager.
923 * @return always 0
924 */
925int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
926{
927 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
928
929 return 0;
930}
931
932/**
933 * Initialize PowerGating States for different engines
934 *
935 * @param hwmgr the address of the powerplay hardware manager.
936 * @return always 0
937 */
938int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
939{
940 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
941
942 data->uvd_power_gated = false;
943 data->vce_power_gated = false;
944 data->samu_power_gated = false;
945 data->acp_power_gated = false;
946 data->pg_acp_init = true;
947
948 return 0;
949}
950
951/**
952 * Checks if DPM is enabled
953 *
954 * @param hwmgr the address of the powerplay hardware manager.
955 * @return always 0
956 */
957int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
958{
959 /*
960 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
961 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
962 * whereas voltage control is a fundemental change that will not be disabled
963 */
964 return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
965}
966
967/**
968 * Checks if DPM is stopped
969 *
970 * @param hwmgr the address of the powerplay hardware manager.
971 * @return always 0
972 */
973int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
974{
975 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
976
977 if (tonga_is_dpm_running(hwmgr)) {
978 /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
979 if (!data->dpm_table_start) {
980 return 1;
981 }
982 }
983
984 return 0;
985}
986
987/**
988 * Remove repeated voltage values and create table with unique values.
989 *
990 * @param hwmgr the address of the powerplay hardware manager.
991 * @param voltage_table the pointer to changing voltage table
992 * @return 1 in success
993 */
994
995static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
996 pp_atomctrl_voltage_table *voltage_table)
997{
998 uint32_t table_size, i, j;
999 uint16_t vvalue;
1000 bool bVoltageFound = false;
1001 pp_atomctrl_voltage_table *table;
1002
1003 PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
1004 table_size = sizeof(pp_atomctrl_voltage_table);
1005 table = kzalloc(table_size, GFP_KERNEL);
1006
1007 if (NULL == table)
1008 return -ENOMEM;
1009
1010 memset(table, 0x00, table_size);
1011 table->mask_low = voltage_table->mask_low;
1012 table->phase_delay = voltage_table->phase_delay;
1013
1014 for (i = 0; i < voltage_table->count; i++) {
1015 vvalue = voltage_table->entries[i].value;
1016 bVoltageFound = false;
1017
1018 for (j = 0; j < table->count; j++) {
1019 if (vvalue == table->entries[j].value) {
1020 bVoltageFound = true;
1021 break;
1022 }
1023 }
1024
1025 if (!bVoltageFound) {
1026 table->entries[table->count].value = vvalue;
1027 table->entries[table->count].smio_low =
1028 voltage_table->entries[i].smio_low;
1029 table->count++;
1030 }
1031 }
1032
1033 memcpy(table, voltage_table, sizeof(pp_atomctrl_voltage_table));
1034
1035 kfree(table);
1036
1037 return 0;
1038}
1039
1040static int tonga_get_svi2_vdd_ci_voltage_table(
1041 struct pp_hwmgr *hwmgr,
1042 phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
1043{
1044 uint32_t i;
1045 int result;
1046 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1047 pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);
1048
1049 PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
1050 "Voltage Dependency Table empty.", return -1;);
1051
1052 vddci_voltage_table->mask_low = 0;
1053 vddci_voltage_table->phase_delay = 0;
1054 vddci_voltage_table->count = voltage_dependency_table->count;
1055
1056 for (i = 0; i < voltage_dependency_table->count; i++) {
1057 vddci_voltage_table->entries[i].value =
1058 voltage_dependency_table->entries[i].vddci;
1059 vddci_voltage_table->entries[i].smio_low = 0;
1060 }
1061
1062 result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
1063 PP_ASSERT_WITH_CODE((0 == result),
1064 "Failed to trim VDDCI table.", return result;);
1065
1066 return 0;
1067}
1068
1069
1070
1071static int tonga_get_svi2_vdd_voltage_table(
1072 struct pp_hwmgr *hwmgr,
1073 phm_ppt_v1_voltage_lookup_table *look_up_table,
1074 pp_atomctrl_voltage_table *voltage_table)
1075{
1076 uint8_t i = 0;
1077
1078 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1079 "Voltage Lookup Table empty.", return -1;);
1080
1081 voltage_table->mask_low = 0;
1082 voltage_table->phase_delay = 0;
1083
1084 voltage_table->count = look_up_table->count;
1085
1086 for (i = 0; i < voltage_table->count; i++) {
1087 voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
1088 voltage_table->entries[i].smio_low = 0;
1089 }
1090
1091 return 0;
1092}
1093
1094/*
1095 * -------------------------------------------------------- Voltage Tables --------------------------------------------------------------------------
1096 * If the voltage table would be bigger than what will fit into the state table on the SMC keep only the higher entries.
1097 */
1098
1099static void tonga_trim_voltage_table_to_fit_state_table(
1100 struct pp_hwmgr *hwmgr,
1101 uint32_t max_voltage_steps,
1102 pp_atomctrl_voltage_table *voltage_table)
1103{
1104 unsigned int i, diff;
1105
1106 if (voltage_table->count <= max_voltage_steps) {
1107 return;
1108 }
1109
1110 diff = voltage_table->count - max_voltage_steps;
1111
1112 for (i = 0; i < max_voltage_steps; i++) {
1113 voltage_table->entries[i] = voltage_table->entries[i + diff];
1114 }
1115
1116 voltage_table->count = max_voltage_steps;
1117
1118 return;
1119}
1120
1121/**
1122 * Create Voltage Tables.
1123 *
1124 * @param hwmgr the address of the powerplay hardware manager.
1125 * @return always 0
1126 */
1127int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1128{
1129 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1130 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1131 int result;
1132
1133 /* MVDD has only GPIO voltage control */
1134 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1135 result = atomctrl_get_voltage_table_v3(hwmgr,
1136 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
1137 PP_ASSERT_WITH_CODE((0 == result),
1138 "Failed to retrieve MVDD table.", return result;);
1139 }
1140
1141 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
1142 /* GPIO voltage */
1143 result = atomctrl_get_voltage_table_v3(hwmgr,
1144 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
1145 PP_ASSERT_WITH_CODE((0 == result),
1146 "Failed to retrieve VDDCI table.", return result;);
1147 } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
1148 /* SVI2 voltage */
1149 result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr,
1150 pptable_info->vdd_dep_on_mclk);
1151 PP_ASSERT_WITH_CODE((0 == result),
1152 "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
1153 }
1154
1155 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1156 /* VDDGFX has only SVI2 voltage control */
1157 result = tonga_get_svi2_vdd_voltage_table(hwmgr,
1158 pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table));
1159 PP_ASSERT_WITH_CODE((0 == result),
1160 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
1161 }
1162
1163 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1164 /* VDDC has only SVI2 voltage control */
1165 result = tonga_get_svi2_vdd_voltage_table(hwmgr,
1166 pptable_info->vddc_lookup_table, &(data->vddc_voltage_table));
1167 PP_ASSERT_WITH_CODE((0 == result),
1168 "Failed to retrieve SVI2 VDDC table from lookup table.", return result;);
1169 }
1170
1171 PP_ASSERT_WITH_CODE(
1172 (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)),
1173 "Too many voltage values for VDDC. Trimming to fit state table.",
1174 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1175 SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
1176 );
1177
1178 PP_ASSERT_WITH_CODE(
1179 (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)),
1180 "Too many voltage values for VDDGFX. Trimming to fit state table.",
1181 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1182 SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table));
1183 );
1184
1185 PP_ASSERT_WITH_CODE(
1186 (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)),
1187 "Too many voltage values for VDDCI. Trimming to fit state table.",
1188 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1189 SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
1190 );
1191
1192 PP_ASSERT_WITH_CODE(
1193 (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)),
1194 "Too many voltage values for MVDD. Trimming to fit state table.",
1195 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1196 SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
1197 );
1198
1199 return 0;
1200}
1201
1202/**
1203 * Vddc table preparation for SMC.
1204 *
1205 * @param hwmgr the address of the hardware manager
1206 * @param table the SMC DPM table structure to be populated
1207 * @return always 0
1208 */
1209static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
1210 SMU72_Discrete_DpmTable *table)
1211{
1212 unsigned int count;
1213 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1214
1215 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1216 table->VddcLevelCount = data->vddc_voltage_table.count;
1217 for (count = 0; count < table->VddcLevelCount; count++) {
1218 table->VddcTable[count] =
1219 PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
1220 }
1221 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
1222 }
1223 return 0;
1224}
1225
1226/**
1227 * VddGfx table preparation for SMC.
1228 *
1229 * @param hwmgr the address of the hardware manager
1230 * @param table the SMC DPM table structure to be populated
1231 * @return always 0
1232 */
1233static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
1234 SMU72_Discrete_DpmTable *table)
1235{
1236 unsigned int count;
1237 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1238
1239 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1240 table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
1241 for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
1242 table->VddGfxTable[count] =
1243 PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
1244 }
1245 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
1246 }
1247 return 0;
1248}
1249
1250/**
1251 * Vddci table preparation for SMC.
1252 *
1253 * @param *hwmgr The address of the hardware manager.
1254 * @param *table The SMC DPM table structure to be populated.
1255 * @return 0
1256 */
1257static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
1258 SMU72_Discrete_DpmTable *table)
1259{
1260 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1261 uint32_t count;
1262
1263 table->VddciLevelCount = data->vddci_voltage_table.count;
1264 for (count = 0; count < table->VddciLevelCount; count++) {
1265 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
1266 table->VddciTable[count] =
1267 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1268 } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
1269 table->SmioTable1.Pattern[count].Voltage =
1270 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1271 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
1272 table->SmioTable1.Pattern[count].Smio =
1273 (uint8_t) count;
1274 table->Smio[count] |=
1275 data->vddci_voltage_table.entries[count].smio_low;
1276 table->VddciTable[count] =
1277 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1278 }
1279 }
1280
1281 table->SmioMask1 = data->vddci_voltage_table.mask_low;
1282 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
1283
1284 return 0;
1285}
1286
1287/**
1288 * Mvdd table preparation for SMC.
1289 *
1290 * @param *hwmgr The address of the hardware manager.
1291 * @param *table The SMC DPM table structure to be populated.
1292 * @return 0
1293 */
1294static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
1295 SMU72_Discrete_DpmTable *table)
1296{
1297 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1298 uint32_t count;
1299
1300 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1301 table->MvddLevelCount = data->mvdd_voltage_table.count;
1302 for (count = 0; count < table->MvddLevelCount; count++) {
1303 table->SmioTable2.Pattern[count].Voltage =
1304 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
1305 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
1306 table->SmioTable2.Pattern[count].Smio =
1307 (uint8_t) count;
1308 table->Smio[count] |=
1309 data->mvdd_voltage_table.entries[count].smio_low;
1310 }
1311 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
1312
1313 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
1314 }
1315
1316 return 0;
1317}
1318
1319/**
1320 * Preparation of vddc and vddgfx CAC tables for SMC.
1321 *
1322 * @param hwmgr the address of the hardware manager
1323 * @param table the SMC DPM table structure to be populated
1324 * @return always 0
1325 */
1326static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
1327 SMU72_Discrete_DpmTable *table)
1328{
1329 uint32_t count;
1330 uint8_t index;
1331 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1332 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1333 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
1334 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table;
1335
1336 /* pTables is already swapped, so in order to use the value from it, we need to swap it back. */
1337 uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
1338 uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
1339
1340 for (count = 0; count < vddcLevelCount; count++) {
1341 /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
1342 index = tonga_get_voltage_index(vddc_lookup_table,
1343 data->vddc_voltage_table.entries[count].value);
1344 table->BapmVddcVidLoSidd[count] =
1345 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
1346 table->BapmVddcVidHiSidd[count] =
1347 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
1348 table->BapmVddcVidHiSidd2[count] =
1349 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
1350 }
1351
1352 if ((data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2)) {
1353 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
1354 for (count = 0; count < vddgfxLevelCount; count++) {
1355 index = tonga_get_voltage_index(vddgfx_lookup_table,
1356 data->vddgfx_voltage_table.entries[count].value);
1357 table->BapmVddGfxVidLoSidd[count] =
1358 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
1359 table->BapmVddGfxVidHiSidd[count] =
1360 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
1361 table->BapmVddGfxVidHiSidd2[count] =
1362 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
1363 }
1364 } else {
1365 for (count = 0; count < vddcLevelCount; count++) {
1366 index = tonga_get_voltage_index(vddc_lookup_table,
1367 data->vddc_voltage_table.entries[count].value);
1368 table->BapmVddGfxVidLoSidd[count] =
1369 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
1370 table->BapmVddGfxVidHiSidd[count] =
1371 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
1372 table->BapmVddGfxVidHiSidd2[count] =
1373 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
1374 }
1375 }
1376
1377 return 0;
1378}
1379
1380
1381/**
1382 * Preparation of voltage tables for SMC.
1383 *
1384 * @param hwmgr the address of the hardware manager
1385 * @param table the SMC DPM table structure to be populated
1386 * @return always 0
1387 */
1388
1389int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
1390 SMU72_Discrete_DpmTable *table)
1391{
1392 int result;
1393
1394 result = tonga_populate_smc_vddc_table(hwmgr, table);
1395 PP_ASSERT_WITH_CODE(0 == result,
1396 "can not populate VDDC voltage table to SMC", return -1);
1397
1398 result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
1399 PP_ASSERT_WITH_CODE(0 == result,
1400 "can not populate VDDCI voltage table to SMC", return -1);
1401
1402 result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
1403 PP_ASSERT_WITH_CODE(0 == result,
1404 "can not populate VDDGFX voltage table to SMC", return -1);
1405
1406 result = tonga_populate_smc_mvdd_table(hwmgr, table);
1407 PP_ASSERT_WITH_CODE(0 == result,
1408 "can not populate MVDD voltage table to SMC", return -1);
1409
1410 result = tonga_populate_cac_tables(hwmgr, table);
1411 PP_ASSERT_WITH_CODE(0 == result,
1412 "can not populate CAC voltage tables to SMC", return -1);
1413
1414 return 0;
1415}
1416
1417/**
1418 * Populates the SMC VRConfig field in DPM table.
1419 *
1420 * @param hwmgr the address of the hardware manager
1421 * @param table the SMC DPM table structure to be populated
1422 * @return always 0
1423 */
1424static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
1425 SMU72_Discrete_DpmTable *table)
1426{
1427 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1428 uint16_t config;
1429
1430 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1431 /* Splitted mode */
1432 config = VR_SVI2_PLANE_1;
1433 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1434
1435 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1436 config = VR_SVI2_PLANE_2;
1437 table->VRConfig |= config;
1438 } else {
1439 printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should be both on SVI2 control in splitted mode! \n");
1440 }
1441 } else {
1442 /* Merged mode */
1443 config = VR_MERGED_WITH_VDDC;
1444 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1445
1446 /* Set Vddc Voltage Controller */
1447 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1448 config = VR_SVI2_PLANE_1;
1449 table->VRConfig |= config;
1450 } else {
1451 printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode! \n");
1452 }
1453 }
1454
1455 /* Set Vddci Voltage Controller */
1456 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
1457 config = VR_SVI2_PLANE_2; /* only in merged mode */
1458 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1459 } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
1460 config = VR_SMIO_PATTERN_1;
1461 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1462 }
1463
1464 /* Set Mvdd Voltage Controller */
1465 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1466 config = VR_SMIO_PATTERN_2;
1467 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1468 }
1469
1470 return 0;
1471}
1472
1473static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
1474 phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
1475 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1476{
1477 uint32_t i = 0;
1478 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1479 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1480
1481 /* clock - voltage dependency table is empty table */
1482 if (allowed_clock_voltage_table->count == 0)
1483 return -1;
1484
1485 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1486 /* find first sclk bigger than request */
1487 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1488 voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1489 allowed_clock_voltage_table->entries[i].vddgfx);
1490
1491 voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1492 allowed_clock_voltage_table->entries[i].vddc);
1493
1494 if (allowed_clock_voltage_table->entries[i].vddci) {
1495 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1496 allowed_clock_voltage_table->entries[i].vddci);
1497 } else {
1498 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1499 allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
1500 }
1501
1502 if (allowed_clock_voltage_table->entries[i].mvdd) {
1503 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
1504 }
1505
1506 voltage->Phases = 1;
1507 return 0;
1508 }
1509 }
1510
1511 /* sclk is bigger than max sclk in the dependence table */
1512 voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1513 allowed_clock_voltage_table->entries[i-1].vddgfx);
1514 voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1515 allowed_clock_voltage_table->entries[i-1].vddc);
1516
1517 if (allowed_clock_voltage_table->entries[i-1].vddci) {
1518 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1519 allowed_clock_voltage_table->entries[i-1].vddci);
1520 }
1521 if (allowed_clock_voltage_table->entries[i-1].mvdd) {
1522 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
1523 }
1524
1525 return 0;
1526}
1527
1528/**
1529 * Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
1530 *
1531 * @param hwmgr the address of the powerplay hardware manager.
1532 * @return always 0
1533 */
1534int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
1535{
1536 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1;
1537}
1538
1539int tonga_populate_memory_timing_parameters(
1540 struct pp_hwmgr *hwmgr,
1541 uint32_t engine_clock,
1542 uint32_t memory_clock,
1543 struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
1544 )
1545{
1546 uint32_t dramTiming;
1547 uint32_t dramTiming2;
1548 uint32_t burstTime;
1549 int result;
1550
1551 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1552 engine_clock, memory_clock);
1553
1554 PP_ASSERT_WITH_CODE(result == 0,
1555 "Error calling VBIOS to set DRAM_TIMING.", return result);
1556
1557 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1558 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1559 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1560
1561 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1562 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1563 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1564
1565 return 0;
1566}
1567
1568/**
1569 * Setup parameters for the MC ARB.
1570 *
1571 * @param hwmgr the address of the powerplay hardware manager.
1572 * @return always 0
1573 * This function is to be called from the SetPowerState table.
1574 */
1575int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1576{
1577 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1578 int result = 0;
1579 SMU72_Discrete_MCArbDramTimingTable arb_regs;
1580 uint32_t i, j;
1581
1582 memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
1583
1584 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1585 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1586 result = tonga_populate_memory_timing_parameters
1587 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1588 data->dpm_table.mclk_table.dpm_levels[j].value,
1589 &arb_regs.entries[i][j]);
1590
1591 if (0 != result) {
1592 break;
1593 }
1594 }
1595 }
1596
1597 if (0 == result) {
1598 result = tonga_copy_bytes_to_smc(
1599 hwmgr->smumgr,
1600 data->arb_table_start,
1601 (uint8_t *)&arb_regs,
1602 sizeof(SMU72_Discrete_MCArbDramTimingTable),
1603 data->sram_end
1604 );
1605 }
1606
1607 return result;
1608}
1609
1610static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
1611{
1612 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1613 struct tonga_dpm_table *dpm_table = &data->dpm_table;
1614 uint32_t i;
1615
1616 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
1617 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
1618 table->LinkLevel[i].PcieGenSpeed =
1619 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
1620 table->LinkLevel[i].PcieLaneCount =
1621 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
1622 table->LinkLevel[i].EnabledForActivity =
1623 1;
1624 table->LinkLevel[i].SPC =
1625 (uint8_t)(data->pcie_spc_cap & 0xff);
1626 table->LinkLevel[i].DownThreshold =
1627 PP_HOST_TO_SMC_UL(5);
1628 table->LinkLevel[i].UpThreshold =
1629 PP_HOST_TO_SMC_UL(30);
1630 }
1631
1632 data->smc_state_table.LinkLevelCount =
1633 (uint8_t)dpm_table->pcie_speed_table.count;
1634 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
1635 tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
1636
1637 return 0;
1638}
1639
1640static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1641 SMU72_Discrete_DpmTable *table)
1642{
1643 int result = 0;
1644
1645 uint8_t count;
1646 pp_atomctrl_clock_dividers_vi dividers;
1647 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1648 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1649 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1650
1651 table->UvdLevelCount = (uint8_t) (mm_table->count);
1652 table->UvdBootLevel = 0;
1653
1654 for (count = 0; count < table->UvdLevelCount; count++) {
1655 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1656 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1657 table->UvdLevel[count].MinVoltage.Vddc =
1658 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1659 mm_table->entries[count].vddc);
1660 table->UvdLevel[count].MinVoltage.VddGfx =
1661 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1662 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1663 mm_table->entries[count].vddgfx) : 0;
1664 table->UvdLevel[count].MinVoltage.Vddci =
1665 tonga_get_voltage_id(&data->vddci_voltage_table,
1666 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1667 table->UvdLevel[count].MinVoltage.Phases = 1;
1668
1669 /* retrieve divider value for VBIOS */
1670 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1671 table->UvdLevel[count].VclkFrequency, &dividers);
1672 PP_ASSERT_WITH_CODE((0 == result),
1673 "can not find divide id for Vclk clock", return result);
1674
1675 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1676
1677 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1678 table->UvdLevel[count].DclkFrequency, &dividers);
1679 PP_ASSERT_WITH_CODE((0 == result),
1680 "can not find divide id for Dclk clock", return result);
1681
1682 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1683
1684 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1685 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1686 //CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage);
1687 }
1688
1689 return result;
1690
1691}
1692
1693static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1694 SMU72_Discrete_DpmTable *table)
1695{
1696 int result = 0;
1697
1698 uint8_t count;
1699 pp_atomctrl_clock_dividers_vi dividers;
1700 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1701 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1702 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1703
1704 table->VceLevelCount = (uint8_t) (mm_table->count);
1705 table->VceBootLevel = 0;
1706
1707 for (count = 0; count < table->VceLevelCount; count++) {
1708 table->VceLevel[count].Frequency =
1709 mm_table->entries[count].eclk;
1710 table->VceLevel[count].MinVoltage.Vddc =
1711 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1712 mm_table->entries[count].vddc);
1713 table->VceLevel[count].MinVoltage.VddGfx =
1714 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1715 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1716 mm_table->entries[count].vddgfx) : 0;
1717 table->VceLevel[count].MinVoltage.Vddci =
1718 tonga_get_voltage_id(&data->vddci_voltage_table,
1719 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1720 table->VceLevel[count].MinVoltage.Phases = 1;
1721
1722 /* retrieve divider value for VBIOS */
1723 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1724 table->VceLevel[count].Frequency, &dividers);
1725 PP_ASSERT_WITH_CODE((0 == result),
1726 "can not find divide id for VCE engine clock", return result);
1727
1728 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1729
1730 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1731 }
1732
1733 return result;
1734}
1735
1736static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1737 SMU72_Discrete_DpmTable *table)
1738{
1739 int result = 0;
1740 uint8_t count;
1741 pp_atomctrl_clock_dividers_vi dividers;
1742 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1743 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1744 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1745
1746 table->AcpLevelCount = (uint8_t) (mm_table->count);
1747 table->AcpBootLevel = 0;
1748
1749 for (count = 0; count < table->AcpLevelCount; count++) {
1750 table->AcpLevel[count].Frequency =
1751 pptable_info->mm_dep_table->entries[count].aclk;
1752 table->AcpLevel[count].MinVoltage.Vddc =
1753 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1754 mm_table->entries[count].vddc);
1755 table->AcpLevel[count].MinVoltage.VddGfx =
1756 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1757 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1758 mm_table->entries[count].vddgfx) : 0;
1759 table->AcpLevel[count].MinVoltage.Vddci =
1760 tonga_get_voltage_id(&data->vddci_voltage_table,
1761 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1762 table->AcpLevel[count].MinVoltage.Phases = 1;
1763
1764 /* retrieve divider value for VBIOS */
1765 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1766 table->AcpLevel[count].Frequency, &dividers);
1767 PP_ASSERT_WITH_CODE((0 == result),
1768 "can not find divide id for engine clock", return result);
1769
1770 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1771
1772 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1773 }
1774
1775 return result;
1776}
1777
1778static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1779 SMU72_Discrete_DpmTable *table)
1780{
1781 int result = 0;
1782 uint8_t count;
1783 pp_atomctrl_clock_dividers_vi dividers;
1784 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1785 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1786 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1787
1788 table->SamuBootLevel = 0;
1789 table->SamuLevelCount = (uint8_t) (mm_table->count);
1790
1791 for (count = 0; count < table->SamuLevelCount; count++) {
1792 /* not sure whether we need evclk or not */
1793 table->SamuLevel[count].Frequency =
1794 pptable_info->mm_dep_table->entries[count].samclock;
1795 table->SamuLevel[count].MinVoltage.Vddc =
1796 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1797 mm_table->entries[count].vddc);
1798 table->SamuLevel[count].MinVoltage.VddGfx =
1799 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1800 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1801 mm_table->entries[count].vddgfx) : 0;
1802 table->SamuLevel[count].MinVoltage.Vddci =
1803 tonga_get_voltage_id(&data->vddci_voltage_table,
1804 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1805 table->SamuLevel[count].MinVoltage.Phases = 1;
1806
1807 /* retrieve divider value for VBIOS */
1808 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1809 table->SamuLevel[count].Frequency, &dividers);
1810 PP_ASSERT_WITH_CODE((0 == result),
1811 "can not find divide id for samu clock", return result);
1812
1813 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1814
1815 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1816 }
1817
1818 return result;
1819}
1820
1821/**
1822 * Populates the SMC MCLK structure using the provided memory clock
1823 *
1824 * @param hwmgr the address of the hardware manager
1825 * @param memory_clock the memory clock to use to populate the structure
1826 * @param sclk the SMC SCLK structure to be populated
1827 */
1828static int tonga_calculate_mclk_params(
1829 struct pp_hwmgr *hwmgr,
1830 uint32_t memory_clock,
1831 SMU72_Discrete_MemoryLevel *mclk,
1832 bool strobe_mode,
1833 bool dllStateOn
1834 )
1835{
1836 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1837 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1838 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1839 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1840 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1841 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1842 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1843 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1844 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1845 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1846
1847 pp_atomctrl_memory_clock_param mpll_param;
1848 int result;
1849
1850 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1851 memory_clock, &mpll_param, strobe_mode);
1852 PP_ASSERT_WITH_CODE(0 == result,
1853 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1854
1855 /* MPLL_FUNC_CNTL setup*/
1856 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1857
1858 /* MPLL_FUNC_CNTL_1 setup*/
1859 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1860 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1861 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1862 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1863 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1864 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1865
1866 /* MPLL_AD_FUNC_CNTL setup*/
1867 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1868 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1869
1870 if (data->is_memory_GDDR5) {
1871 /* MPLL_DQ_FUNC_CNTL setup*/
1872 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1873 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1874 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1875 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1876 }
1877
1878 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1879 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1880 /*
1881 ************************************
1882 Fref = Reference Frequency
1883 NF = Feedback divider ratio
1884 NR = Reference divider ratio
1885 Fnom = Nominal VCO output frequency = Fref * NF / NR
1886 Fs = Spreading Rate
1887 D = Percentage down-spread / 2
1888 Fint = Reference input frequency to PFD = Fref / NR
1889 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
1890 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
1891 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
1892 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
1893 *************************************
1894 */
1895 pp_atomctrl_internal_ss_info ss_info;
1896 uint32_t freq_nom;
1897 uint32_t tmp;
1898 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1899
1900 /* for GDDR5 for all modes and DDR3 */
1901 if (1 == mpll_param.qdr)
1902 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1903 else
1904 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
1905
1906 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1907 tmp = (freq_nom / reference_clock);
1908 tmp = tmp * tmp;
1909
1910 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1911 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
1912 /* ss.Info.speed_spectrum_rate -- in unit of khz */
1913 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
1914 /* = reference_clock * 5 / speed_spectrum_rate */
1915 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1916
1917 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
1918 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
1919 uint32_t clkv =
1920 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1921 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1922
1923 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1924 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1925 }
1926 }
1927
1928 /* MCLK_PWRMGT_CNTL setup */
1929 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1930 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1931 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1932 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1933 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1934 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1935
1936
1937 /* Save the result data to outpupt memory level structure */
1938 mclk->MclkFrequency = memory_clock;
1939 mclk->MpllFuncCntl = mpll_func_cntl;
1940 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1941 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1942 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1943 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1944 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1945 mclk->DllCntl = dll_cntl;
1946 mclk->MpllSs1 = mpll_ss1;
1947 mclk->MpllSs2 = mpll_ss2;
1948
1949 return 0;
1950}
1951
/* Map a memory clock to its 4-bit frequency-ratio index.  The clamp
 * range and step size depend on whether strobe mode is in use. */
static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint32_t low, high, base, step;

	if (strobe_mode) {
		low = 12500;
		high = 47500;
		base = 10000;
		step = 2500;
	} else {
		low = 65000;
		high = 135000;
		base = 60000;
		step = 5000;
	}

	if (memory_clock < low)
		return 0x00;
	if (memory_clock > high)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
1977
/* Map a DDR3 memory clock to its 4-bit frequency-ratio index,
 * clamped to [0, 0x0f]. */
static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
1992
/**
 * Populate one SMC memory (MCLK) level for the given memory clock.
 *
 * Resolves minimum voltages from the MCLK dependency table, decides the
 * stutter/strobe/EDC modes and DLL power state, computes the MPLL
 * register values, and converts the result for the SMC.
 *
 * @param hwmgr the address of the hardware manager
 * @param memory_clock the memory clock for this level
 * @param memory_level the SMC memory level structure to be populated
 * @return 0 on success, otherwise the first failing sub-step's error code
 */
static int tonga_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *memory_level
		)
{
	uint32_t minMvdd = 0;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	bool dllStateOn;
	struct cgs_display_info info = {0};


	if (NULL != pptable_info->vdd_dep_on_mclk) {
		result = tonga_get_dependecy_volt_by_clk(hwmgr,
			pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	/* Without MVDD control, fall back to the VBIOS boot-up MVDD. */
	if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) {
		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
	} else {
		memory_level->MinMvdd = minMvdd;
	}
	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* Stutter is only enabled below the configured threshold, with UVD
	 * disabled, stutter allowed in the display controller, and with
	 * one or two active displays. */
	if ((data->mclk_stutter_mode_threshold != 0) &&
	    (memory_clock <= data->mclk_stutter_mode_threshold) &&
	    (!data->is_uvd_enabled)
	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	    && (data->display_timing.num_existing_displays <= 2)
	    && (data->display_timing.num_existing_displays != 0))
		memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= data->mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_GDDR5) {
		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((data->mclk_edc_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((data->mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			/* Pick the DLL state from MC_SEQ_MISC5 or MISC6 bit 1,
			 * depending on how the strobe ratio compares with
			 * MC_SEQ_MISC7[19:16]. */
			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
				((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dllStateOn = data->dll_defaule_on;
		}
	} else {
		memory_level->StrobeRatio =
			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
		dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = tonga_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);

	if (0 == result) {
		/* Convert every multi-byte field to the SMC representation. */
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
2105
2106/**
2107 * Populates the SMC MVDD structure using the provided memory clock.
2108 *
2109 * @param hwmgr the address of the hardware manager
2110 * @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
2111 * @param voltage the SMC VOLTAGE structure to be populated
2112 */
2113int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern)
2114{
2115 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2116 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2117 uint32_t i = 0;
2118
2119 if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
2120 /* find mvdd value which clock is more than request */
2121 for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) {
2122 if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) {
2123 /* Always round to higher voltage. */
2124 smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
2125 break;
2126 }
2127 }
2128
2129 PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
2130 "MVDD Voltage is outside the supported range.", return -1);
2131
2132 } else {
2133 return -1;
2134 }
2135
2136 return 0;
2137}
2138
2139
/**
 * Populate the SCLK and MCLK ACPI (lowest-power) levels of the DPM table.
 *
 * The ACPI SCLK level runs off the reference clock with the SPLL held in
 * reset, and the ACPI MCLK level has the memory DLLs reset, powered down
 * and bypassed.
 *
 * @param hwmgr the address of the hardware manager
 * @param table the SMC DPM table structure to be populated
 * @return 0 on success, otherwise the VBIOS divider lookup's error code
 */
static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr,
	SMU72_Discrete_DpmTable *table)
{
	int result = 0;
	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	SMIO_Pattern voltage_level;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;

	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Reuse the lowest graphics level's voltage for the ACPI level. */
	table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;

	/* Run the ACPI SCLK off the reference clock. */
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset for the ACPI state. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
		CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;

	/* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/

	/* MVDD for mclk 0; fall back to 0 when MVDD is not controlled. */
	if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
2266
2267static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
2268{
2269 int result = 0;
2270 uint32_t i;
2271
2272 for (i = 0; i < table->count; i++) {
2273 if (value == table->dpm_levels[i].value) {
2274 *boot_level = i;
2275 result = 0;
2276 }
2277 }
2278 return result;
2279}
2280
2281static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
2282 SMU72_Discrete_DpmTable *table)
2283{
2284 int result = 0;
2285 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2286
2287 table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */
2288 table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */
2289
2290 /* find boot level from dpm table*/
2291 result = tonga_find_boot_level(&(data->dpm_table.sclk_table),
2292 data->vbios_boot_state.sclk_bootup_value,
2293 (uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
2294
2295 if (0 != result) {
2296 data->smc_state_table.GraphicsBootLevel = 0;
2297 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
2298 in dependency table. Using Graphics DPM level 0!");
2299 result = 0;
2300 }
2301
2302 result = tonga_find_boot_level(&(data->dpm_table.mclk_table),
2303 data->vbios_boot_state.mclk_bootup_value,
2304 (uint32_t *)&(data->smc_state_table.MemoryBootLevel));
2305
2306 if (0 != result) {
2307 data->smc_state_table.MemoryBootLevel = 0;
2308 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
2309 in dependency table. Using Memory DPM level 0!");
2310 result = 0;
2311 }
2312
2313 table->BootVoltage.Vddc =
2314 tonga_get_voltage_id(&(data->vddc_voltage_table),
2315 data->vbios_boot_state.vddc_bootup_value);
2316 table->BootVoltage.VddGfx =
2317 tonga_get_voltage_id(&(data->vddgfx_voltage_table),
2318 data->vbios_boot_state.vddgfx_bootup_value);
2319 table->BootVoltage.Vddci =
2320 tonga_get_voltage_id(&(data->vddci_voltage_table),
2321 data->vbios_boot_state.vddci_bootup_value);
2322 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
2323
2324 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
2325
2326 return result;
2327}
2328
2329
2330/**
2331 * Calculates the SCLK dividers using the provided engine clock
2332 *
2333 * @param hwmgr the address of the hardware manager
2334 * @param engine_clock the engine clock to use to populate the structure
2335 * @param sclk the SMC SCLK structure to be populated
2336 */
int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
{
	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	/* Start from the register values captured at init time. */
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t    reference_clock;
	uint32_t reference_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
	reference_clock = atomctrl_get_reference_clock(hwmgr);

	reference_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider*/
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup*/
	/* NOTE(review): spll_func_cntl is updated here but never written into
	 * the output structure below (no sclk->CgSpllFuncCntl assignment) —
	 * presumably the SMC keeps its default; confirm before relying on
	 * these two field writes. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;

		/* Spread spectrum is looked up by VCO frequency, not SCLK. */
		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 */
			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);

			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);

			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 =
				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
		}
	}

	/* Fill the output graphics level (values still in host byte order;
	 * callers do the host-to-SMC conversion). */
	sclk->SclkFrequency        = engine_clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}
2414
2415static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
2416 uint32_t min_engine_clock_in_sr)
2417{
2418 uint32_t i, temp;
2419 uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
2420
2421 PP_ASSERT_WITH_CODE((engine_clock >= min),
2422 "Engine clock can't satisfy stutter requirement!", return 0);
2423
2424 for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
2425 temp = engine_clock >> i;
2426
2427 if(temp >= min || i == 0)
2428 break;
2429 }
2430 return (uint8_t)i;
2431}
2432
2433/**
2434 * Populates single SMC SCLK structure using the provided engine clock
2435 *
2436 * @param hwmgr the address of the hardware manager
2437 * @param engine_clock the engine clock to use to populate the structure
2438 * @param sclk the SMC SCLK structure to be populated
2439 */
2440static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level)
2441{
2442 int result;
2443 uint32_t threshold;
2444 uint32_t mvdd;
2445 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2446 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2447
2448 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
2449
2450
2451 /* populate graphics levels*/
2452 result = tonga_get_dependecy_volt_by_clk(hwmgr,
2453 pptable_info->vdd_dep_on_sclk, engine_clock,
2454 &graphic_level->MinVoltage, &mvdd);
2455 PP_ASSERT_WITH_CODE((0 == result),
2456 "can not find VDDC voltage value for VDDC \
2457 engine clock dependency table", return result);
2458
2459 /* SCLK frequency in units of 10KHz*/
2460 graphic_level->SclkFrequency = engine_clock;
2461
2462 /* Indicates maximum activity level for this performance level. 50% for now*/
2463 graphic_level->ActivityLevel = sclk_activity_level_threshold;
2464
2465 graphic_level->CcPwrDynRm = 0;
2466 graphic_level->CcPwrDynRm1 = 0;
2467 /* this level can be used if activity is high enough.*/
2468 graphic_level->EnabledForActivity = 0;
2469 /* this level can be used for throttling.*/
2470 graphic_level->EnabledForThrottle = 1;
2471 graphic_level->UpHyst = 0;
2472 graphic_level->DownHyst = 0;
2473 graphic_level->VoltageDownHyst = 0;
2474 graphic_level->PowerThrottle = 0;
2475
2476 threshold = engine_clock * data->fast_watermark_threshold / 100;
2477/*
2478 *get the DAL clock. do it in funture.
2479 PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
2480 data->display_timing.min_clock_insr = minClocks.engineClockInSR;
2481*/
2482 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2483 PHM_PlatformCaps_SclkDeepSleep))
2484 graphic_level->DeepSleepDivId =
2485 tonga_get_sleep_divider_id_from_clock(engine_clock,
2486 data->display_timing.min_clock_insr);
2487
2488 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
2489 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2490
2491 if (0 == result) {
2492 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
2493 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
2494 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
2495 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
2496 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
2497 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
2498 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
2499 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
2500 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
2501 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
2502 }
2503
2504 return result;
2505}
2506
2507/**
2508 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
2509 *
2510 * @param hwmgr the address of the hardware manager
2511 */
2512static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
2513{
2514 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2515 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2516 struct tonga_dpm_table *dpm_table = &data->dpm_table;
2517 phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
2518 uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
2519 int result = 0;
2520 uint32_t level_array_adress = data->dpm_table_start +
2521 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
2522 uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
2523 SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/
2524 SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
2525 uint32_t i, maxEntry;
2526 uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
2527 PECI_RegistryValue reg_value;
2528 memset(levels, 0x00, level_array_size);
2529
2530 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2531 result = tonga_populate_single_graphic_level(hwmgr,
2532 dpm_table->sclk_table.dpm_levels[i].value,
2533 (uint16_t)data->activity_target[i],
2534 &(data->smc_state_table.GraphicsLevel[i]));
2535
2536 if (0 != result)
2537 return result;
2538
2539 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
2540 if (i > 1)
2541 data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
2542
2543 if (0 == i) {
2544 reg_value = 0;
2545 if (reg_value != 0)
2546 data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value;
2547 }
2548
2549 if (1 == i) {
2550 reg_value = 0;
2551 if (reg_value != 0)
2552 data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value;
2553 }
2554 }
2555
2556 /* Only enable level 0 for now. */
2557 data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
2558
2559 /* set highest level watermark to high */
2560 if (dpm_table->sclk_table.count > 1)
2561 data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
2562 PPSMC_DISPLAY_WATERMARK_HIGH;
2563
2564 data->smc_state_table.GraphicsDpmLevelCount =
2565 (uint8_t)dpm_table->sclk_table.count;
2566 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
2567 tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2568
2569 if (pcie_table != NULL) {
2570 PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
2571 "There must be 1 or more PCIE levels defined in PPTable.", return -1);
2572 maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
2573 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2574 data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
2575 (uint8_t) ((i < maxEntry) ? i : maxEntry);
2576 }
2577 } else {
2578 if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
2579 printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!");
2580
2581 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2582 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2583 (1<<(highest_pcie_level_enabled+1))) != 0)) {
2584 highest_pcie_level_enabled++;
2585 }
2586
2587 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2588 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2589 (1<<lowest_pcie_level_enabled)) == 0)) {
2590 lowest_pcie_level_enabled++;
2591 }
2592
2593 while ((count < highest_pcie_level_enabled) &&
2594 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2595 (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
2596 count++;
2597 }
2598 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
2599 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
2600
2601
2602 /* set pcieDpmLevel to highest_pcie_level_enabled*/
2603 for (i = 2; i < dpm_table->sclk_table.count; i++) {
2604 data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
2605 }
2606
2607 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
2608 data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
2609
2610 /* set pcieDpmLevel to mid_pcie_level_enabled*/
2611 data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
2612 }
2613 /* level count will send to smc once at init smc table and never change*/
2614 result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2615
2616 if (0 != result)
2617 return result;
2618
2619 return 0;
2620}
2621
2622/**
2623 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
2624 *
2625 * @param hwmgr the address of the hardware manager
2626 */
2627
2628static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
2629{
2630 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2631 struct tonga_dpm_table *dpm_table = &data->dpm_table;
2632 int result;
2633 /* populate MCLK dpm table to SMU7 */
2634 uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
2635 uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
2636 SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
2637 uint32_t i;
2638
2639 memset(levels, 0x00, level_array_size);
2640
2641 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2642 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
2643 "can not populate memory level as memory clock is zero", return -1);
2644 result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
2645 &(data->smc_state_table.MemoryLevel[i]));
2646 if (0 != result) {
2647 return result;
2648 }
2649 }
2650
2651 /* Only enable level 0 for now.*/
2652 data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
2653
2654 /*
2655 * in order to prevent MC activity from stutter mode to push DPM up.
2656 * the UVD change complements this by putting the MCLK in a higher state
2657 * by default such that we are not effected by up threshold or and MCLK DPM latency.
2658 */
2659 data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
2660 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
2661
2662 data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
2663 data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2664 /* set highest level watermark to high*/
2665 data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
2666
2667 /* level count will send to smc once at init smc table and never change*/
2668 result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
2669 level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2670
2671 if (0 != result) {
2672 return result;
2673 }
2674
2675 return 0;
2676}
2677
/* Maps a memory data-rate range [Min, Max] to the desired DLL_SPEED value. */
struct TONGA_DLL_SPEED_SETTING {
	uint16_t            Min;           /* Minimum Data Rate*/
	uint16_t            Max;           /* Maximum Data Rate*/
	uint32_t            dll_speed;     /* The desired DLL_SPEED setting*/
};
2683
/*
 * Stub: clock-stretcher data table population is not implemented for Tonga;
 * report success so the SMC init sequence continues unaffected.
 */
static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	return 0;
}
2688
2689/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
2690
2691
2692static int tonga_reset_single_dpm_table(
2693 struct pp_hwmgr *hwmgr,
2694 struct tonga_single_dpm_table *dpm_table,
2695 uint32_t count)
2696{
2697 uint32_t i;
2698 if (!(count <= MAX_REGULAR_DPM_NUMBER))
2699 printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \
2700 table entries to exceed max number! \n");
2701
2702 dpm_table->count = count;
2703 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
2704 dpm_table->dpm_levels[i].enabled = false;
2705 }
2706
2707 return 0;
2708}
2709
2710static void tonga_setup_pcie_table_entry(
2711 struct tonga_single_dpm_table *dpm_table,
2712 uint32_t index, uint32_t pcie_gen,
2713 uint32_t pcie_lanes)
2714{
2715 dpm_table->dpm_levels[index].value = pcie_gen;
2716 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2717 dpm_table->dpm_levels[index].enabled = true;
2718}
2719
2720static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
2721{
2722 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2723 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2724 phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
2725 uint32_t i, maxEntry;
2726
2727 if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
2728 data->pcie_gen_power_saving = data->pcie_gen_performance;
2729 data->pcie_lane_power_saving = data->pcie_lane_performance;
2730 } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
2731 data->pcie_gen_performance = data->pcie_gen_power_saving;
2732 data->pcie_lane_performance = data->pcie_lane_power_saving;
2733 }
2734
2735 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK);
2736
2737 if (pcie_table != NULL) {
2738 /*
2739 * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue).
2740 * If PCIE table from PPTable have ULV entry + 8 entries, then ignore the last entry.
2741 */
2742 maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ?
2743 SMU72_MAX_LEVELS_LINK : pcie_table->count;
2744 for (i = 1; i < maxEntry; i++) {
2745 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1,
2746 get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed),
2747 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2748 }
2749 data->dpm_table.pcie_speed_table.count = maxEntry - 1;
2750 } else {
2751 /* Hardcode Pcie Table */
2752 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
2753 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2754 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2755 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
2756 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2757 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2758 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
2759 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2760 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2761 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
2762 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2763 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2764 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
2765 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2766 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2767 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
2768 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2769 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2770 data->dpm_table.pcie_speed_table.count = 6;
2771 }
2772 /* Populate last level for boot PCIE level, but do not increment count. */
2773 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
2774 data->dpm_table.pcie_speed_table.count,
2775 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2776 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2777
2778 return 0;
2779
2780}
2781
2782/*
2783 * This function is to initalize all DPM state tables for SMU7 based on the dependency table.
2784 * Dynamic state patching function will then trim these state tables to the allowed range based
2785 * on the power policy or external client requests, such as UVD request, etc.
2786 */
2787static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
2788{
2789 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2790 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2791 uint32_t i;
2792
2793 phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table =
2794 pptable_info->vdd_dep_on_sclk;
2795 phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table =
2796 pptable_info->vdd_dep_on_mclk;
2797
2798 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
2799 "SCLK dependency table is missing. This table is mandatory", return -1);
2800 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
2801 "SCLK dependency table has to have is missing. This table is mandatory", return -1);
2802
2803 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
2804 "MCLK dependency table is missing. This table is mandatory", return -1);
2805 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
2806 "VMCLK dependency table has to have is missing. This table is mandatory", return -1);
2807
2808 /* clear the state table to reset everything to default */
2809 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
2810 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS);
2811 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY);
2812 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */
2813 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/
2814 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/
2815 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/
2816
2817 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
2818 "SCLK dependency table is missing. This table is mandatory", return -1);
2819 /* Initialize Sclk DPM table based on allow Sclk values*/
2820 data->dpm_table.sclk_table.count = 0;
2821
2822 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
2823 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
2824 allowed_vdd_sclk_table->entries[i].clk) {
2825 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
2826 allowed_vdd_sclk_table->entries[i].clk;
2827 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
2828 data->dpm_table.sclk_table.count++;
2829 }
2830 }
2831
2832 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
2833 "MCLK dependency table is missing. This table is mandatory", return -1);
2834 /* Initialize Mclk DPM table based on allow Mclk values */
2835 data->dpm_table.mclk_table.count = 0;
2836 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
2837 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
2838 allowed_vdd_mclk_table->entries[i].clk) {
2839 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
2840 allowed_vdd_mclk_table->entries[i].clk;
2841 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
2842 data->dpm_table.mclk_table.count++;
2843 }
2844 }
2845
2846 /* setup PCIE gen speed levels*/
2847 tonga_setup_default_pcie_tables(hwmgr);
2848
2849 /* save a copy of the default DPM table*/
2850 memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table));
2851
2852 return 0;
2853}
2854
2855int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
2856 const struct tonga_power_state *bootState)
2857{
2858 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2859 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2860 uint8_t count, level;
2861
2862 count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count);
2863 for (level = 0; level < count; level++) {
2864 if (pptable_info->vdd_dep_on_sclk->entries[level].clk >=
2865 bootState->performance_levels[0].engine_clock) {
2866 data->smc_state_table.GraphicsBootLevel = level;
2867 break;
2868 }
2869 }
2870
2871 count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count);
2872 for (level = 0; level < count; level++) {
2873 if (pptable_info->vdd_dep_on_mclk->entries[level].clk >=
2874 bootState->performance_levels[0].memory_clock) {
2875 data->smc_state_table.MemoryBootLevel = level;
2876 break;
2877 }
2878 }
2879
2880 return 0;
2881}
2882
2883/**
2884 * Initializes the SMC table and uploads it
2885 *
2886 * @param hwmgr the address of the powerplay hardware manager.
2887 * @param pInput the pointer to input data (PowerState)
2888 * @return always 0
2889 */
2890static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2891{
2892 int result;
2893 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2894 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2895 SMU72_Discrete_DpmTable *table = &(data->smc_state_table);
2896 const phw_tonga_ulv_parm *ulv = &(data->ulv);
2897 uint8_t i;
2898 PECI_RegistryValue reg_value;
2899 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2900
2901 result = tonga_setup_default_dpm_tables(hwmgr);
2902 PP_ASSERT_WITH_CODE(0 == result,
2903 "Failed to setup default DPM tables!", return result;);
2904 memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
2905 if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) {
2906 tonga_populate_smc_voltage_tables(hwmgr, table);
2907 }
2908
2909 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2910 PHM_PlatformCaps_AutomaticDCTransition)) {
2911 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2912 }
2913
2914 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2915 PHM_PlatformCaps_StepVddc)) {
2916 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2917 }
2918
2919 if (data->is_memory_GDDR5) {
2920 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2921 }
2922
2923 i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
2924
2925 if (i == 1 || i == 0) {
2926 table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL;
2927 }
2928
2929 if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) {
2930 PP_ASSERT_WITH_CODE(0 == result,
2931 "Failed to initialize ULV state!", return result;);
2932
2933 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2934 ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
2935 }
2936
2937 result = tonga_populate_smc_link_level(hwmgr, table);
2938 PP_ASSERT_WITH_CODE(0 == result,
2939 "Failed to initialize Link Level!", return result;);
2940
2941 result = tonga_populate_all_graphic_levels(hwmgr);
2942 PP_ASSERT_WITH_CODE(0 == result,
2943 "Failed to initialize Graphics Level!", return result;);
2944
2945 result = tonga_populate_all_memory_levels(hwmgr);
2946 PP_ASSERT_WITH_CODE(0 == result,
2947 "Failed to initialize Memory Level!", return result;);
2948
2949 result = tonga_populate_smv_acpi_level(hwmgr, table);
2950 PP_ASSERT_WITH_CODE(0 == result,
2951 "Failed to initialize ACPI Level!", return result;);
2952
2953 result = tonga_populate_smc_vce_level(hwmgr, table);
2954 PP_ASSERT_WITH_CODE(0 == result,
2955 "Failed to initialize VCE Level!", return result;);
2956
2957 result = tonga_populate_smc_acp_level(hwmgr, table);
2958 PP_ASSERT_WITH_CODE(0 == result,
2959 "Failed to initialize ACP Level!", return result;);
2960
2961 result = tonga_populate_smc_samu_level(hwmgr, table);
2962 PP_ASSERT_WITH_CODE(0 == result,
2963 "Failed to initialize SAMU Level!", return result;);
2964
2965 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2966 /* need to populate the ARB settings for the initial state. */
2967 result = tonga_program_memory_timing_parameters(hwmgr);
2968 PP_ASSERT_WITH_CODE(0 == result,
2969 "Failed to Write ARB settings for the initial state.", return result;);
2970
2971 result = tonga_populate_smc_uvd_level(hwmgr, table);
2972 PP_ASSERT_WITH_CODE(0 == result,
2973 "Failed to initialize UVD Level!", return result;);
2974
2975 result = tonga_populate_smc_boot_level(hwmgr, table);
2976 PP_ASSERT_WITH_CODE(0 == result,
2977 "Failed to initialize Boot Level!", return result;);
2978
2979 result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
2980 PP_ASSERT_WITH_CODE(result == 0,
2981 "Failed to populate BAPM Parameters!", return result);
2982
2983 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2984 PHM_PlatformCaps_ClockStretcher)) {
2985 result = tonga_populate_clock_stretcher_data_table(hwmgr);
2986 PP_ASSERT_WITH_CODE(0 == result,
2987 "Failed to populate Clock Stretcher Data Table!", return result;);
2988 }
2989 table->GraphicsVoltageChangeEnable = 1;
2990 table->GraphicsThermThrottleEnable = 1;
2991 table->GraphicsInterval = 1;
2992 table->VoltageInterval = 1;
2993 table->ThermalInterval = 1;
2994 table->TemperatureLimitHigh =
2995 pptable_info->cac_dtp_table->usTargetOperatingTemp *
2996 TONGA_Q88_FORMAT_CONVERSION_UNIT;
2997 table->TemperatureLimitLow =
2998 (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2999 TONGA_Q88_FORMAT_CONVERSION_UNIT;
3000 table->MemoryVoltageChangeEnable = 1;
3001 table->MemoryInterval = 1;
3002 table->VoltageResponseTime = 0;
3003 table->PhaseResponseTime = 0;
3004 table->MemoryThermThrottleEnable = 1;
3005
3006 /*
3007 * Cail reads current link status and reports it as cap (we cannot change this due to some previous issues we had)
3008 * SMC drops the link status to lowest level after enabling DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
3009 * but this time Cail reads current link status which was set to low by SMC and reports it as cap to powerplay
3010 * To avoid it, we set PCIeBootLinkLevel to highest dpm level
3011 */
3012 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
3013 "There must be 1 or more PCIE levels defined in PPTable.",
3014 return -1);
3015
3016 table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
3017
3018 table->PCIeGenInterval = 1;
3019
3020 result = tonga_populate_vr_config(hwmgr, table);
3021 PP_ASSERT_WITH_CODE(0 == result,
3022 "Failed to populate VRConfig setting!", return result);
3023
3024 table->ThermGpio = 17;
3025 table->SclkStepSize = 0x4000;
3026
3027 reg_value = 0;
3028 if ((0 == reg_value) &&
3029 (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
3030 &gpio_pin_assignment))) {
3031 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3032 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3033 PHM_PlatformCaps_RegulatorHot);
3034 } else {
3035 table->VRHotGpio = TONGA_UNUSED_GPIO_PIN;
3036 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3037 PHM_PlatformCaps_RegulatorHot);
3038 }
3039
3040 /* ACDC Switch GPIO */
3041 reg_value = 0;
3042 if ((0 == reg_value) &&
3043 (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
3044 &gpio_pin_assignment))) {
3045 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3046 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3047 PHM_PlatformCaps_AutomaticDCTransition);
3048 } else {
3049 table->AcDcGpio = TONGA_UNUSED_GPIO_PIN;
3050 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3051 PHM_PlatformCaps_AutomaticDCTransition);
3052 }
3053
3054 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3055 PHM_PlatformCaps_Falcon_QuickTransition);
3056
3057 reg_value = 0;
3058 if (1 == reg_value) {
3059 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3060 PHM_PlatformCaps_AutomaticDCTransition);
3061 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3062 PHM_PlatformCaps_Falcon_QuickTransition);
3063 }
3064
3065 reg_value = 0;
3066 if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
3067 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
3068 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3069 PHM_PlatformCaps_ThermalOutGPIO);
3070
3071 table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3072
3073 table->ThermOutPolarity =
3074 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
3075 (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0;
3076
3077 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
3078
3079 /* if required, combine VRHot/PCC with thermal out GPIO*/
3080 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3081 PHM_PlatformCaps_RegulatorHot) &&
3082 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3083 PHM_PlatformCaps_CombinePCCWithThermalSignal)){
3084 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
3085 }
3086 } else {
3087 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3088 PHM_PlatformCaps_ThermalOutGPIO);
3089
3090 table->ThermOutGpio = 17;
3091 table->ThermOutPolarity = 1;
3092 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
3093 }
3094
3095 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) {
3096 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
3097 }
3098 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
3099 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
3100 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
3101 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
3102 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
3103 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
3104 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
3105 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
3106 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
3107
3108 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
3109 result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
3110 offsetof(SMU72_Discrete_DpmTable, SystemFlags),
3111 (uint8_t *)&(table->SystemFlags),
3112 sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController),
3113 data->sram_end);
3114
3115 PP_ASSERT_WITH_CODE(0 == result,
3116 "Failed to upload dpm data to SMC memory!", return result;);
3117
3118 return result;
3119}
3120
/*
 * Look up the voltage based on DAL's requested level and then send the
 * requested VDDC voltage to the SMC.
 *
 * NOTE(review): currently a stub — the body intentionally does nothing,
 * so DAL minimum-voltage requests are not yet honoured on Tonga.
 */
static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
{
	return;
}
3126
3127int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3128{
3129 PPSMC_Result result;
3130 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3131
3132 /* Apply minimum voltage based on DAL's request level */
3133 tonga_apply_dal_minimum_voltage_request(hwmgr);
3134
3135 if (0 == data->sclk_dpm_key_disabled) {
3136 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3137 if (tonga_is_dpm_running(hwmgr))
3138 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3139
3140 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3141 result = smum_send_msg_to_smc_with_parameter(
3142 hwmgr->smumgr,
3143 (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
3144 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3145 PP_ASSERT_WITH_CODE((0 == result),
3146 "Set Sclk Dpm enable Mask failed", return -1);
3147 }
3148 }
3149
3150 if (0 == data->mclk_dpm_key_disabled) {
3151 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3152 if (tonga_is_dpm_running(hwmgr))
3153 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3154
3155 if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3156 result = smum_send_msg_to_smc_with_parameter(
3157 hwmgr->smumgr,
3158 (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
3159 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3160 PP_ASSERT_WITH_CODE((0 == result),
3161 "Set Mclk Dpm enable Mask failed", return -1);
3162 }
3163 }
3164
3165 return 0;
3166}
3167
3168
3169int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr)
3170{
3171 uint32_t level, tmp;
3172 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3173
3174 if (0 == data->pcie_dpm_key_disabled) {
3175 /* PCIE */
3176 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
3177 level = 0;
3178 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3179 while (tmp >>= 1)
3180 level++ ;
3181
3182 if (0 != level) {
3183 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
3184 "force highest pcie dpm state failed!", return -1);
3185 }
3186 }
3187 }
3188
3189 if (0 == data->sclk_dpm_key_disabled) {
3190 /* SCLK */
3191 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
3192 level = 0;
3193 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3194 while (tmp >>= 1)
3195 level++ ;
3196
3197 if (0 != level) {
3198 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
3199 "force highest sclk dpm state failed!", return -1);
3200 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
3201 CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
3202 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3203 Curr_Sclk_Index does not match the level \n");
3204
3205 }
3206 }
3207 }
3208
3209 if (0 == data->mclk_dpm_key_disabled) {
3210 /* MCLK */
3211 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
3212 level = 0;
3213 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3214 while (tmp >>= 1)
3215 level++ ;
3216
3217 if (0 != level) {
3218 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
3219 "force highest mclk dpm state failed!", return -1);
3220 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3221 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
3222 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3223 Curr_Mclk_Index does not match the level \n");
3224 }
3225 }
3226 }
3227
3228 return 0;
3229}
3230
/**
 * Find the MC microcode version and store it in the HwMgr struct.
 *
 * Writes debug-index 0x9F to MC_SEQ_IO_DEBUG_INDEX (the slot that exposes
 * the memory-controller firmware version) and reads the version back from
 * MC_SEQ_IO_DEBUG_DATA into hwmgr->microcode_version_info.MC.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}
3245
3246/**
3247 * Initialize Dynamic State Adjustment Rule Settings
3248 *
3249 * @param hwmgr the address of the powerplay hardware manager.
3250 */
3251int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
3252{
3253 uint32_t table_size;
3254 struct phm_clock_voltage_dependency_table *table_clk_vlt;
3255 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3256
3257 hwmgr->dyn_state.mclk_sclk_ratio = 4;
3258 hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
3259 hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
3260
3261 /* initialize vddc_dep_on_dal_pwrl table */
3262 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
3263 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
3264
3265 if (NULL == table_clk_vlt) {
3266 printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
3267 return -ENOMEM;
3268 } else {
3269 table_clk_vlt->count = 4;
3270 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
3271 table_clk_vlt->entries[0].v = 0;
3272 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
3273 table_clk_vlt->entries[1].v = 720;
3274 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
3275 table_clk_vlt->entries[2].v = 810;
3276 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
3277 table_clk_vlt->entries[3].v = 900;
3278 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
3279 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
3280 }
3281
3282 return 0;
3283}
3284
3285static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
3286{
3287 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3288 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3289
3290 phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
3291 pptable_info->vdd_dep_on_sclk;
3292 phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
3293 pptable_info->vdd_dep_on_mclk;
3294
3295 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
3296 "VDD dependency on SCLK table is missing. \
3297 This table is mandatory", return -1);
3298 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
3299 "VDD dependency on SCLK table has to have is missing. \
3300 This table is mandatory", return -1);
3301
3302 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
3303 "VDD dependency on MCLK table is missing. \
3304 This table is mandatory", return -1);
3305 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
3306 "VDD dependency on MCLK table has to have is missing. \
3307 This table is mandatory", return -1);
3308
3309 data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
3310 data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3311
3312 pptable_info->max_clock_voltage_on_ac.sclk =
3313 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
3314 pptable_info->max_clock_voltage_on_ac.mclk =
3315 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
3316 pptable_info->max_clock_voltage_on_ac.vddc =
3317 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3318 pptable_info->max_clock_voltage_on_ac.vddci =
3319 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
3320
3321 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
3322 pptable_info->max_clock_voltage_on_ac.sclk;
3323 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
3324 pptable_info->max_clock_voltage_on_ac.mclk;
3325 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
3326 pptable_info->max_clock_voltage_on_ac.vddc;
3327 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
3328 pptable_info->max_clock_voltage_on_ac.vddci;
3329
3330 return 0;
3331}
3332
3333int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3334{
3335 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3336 int result = 1;
3337
3338 PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
3339 "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
3340 return result);
3341
3342 if (0 == data->pcie_dpm_key_disabled) {
3343 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
3344 hwmgr->smumgr,
3345 PPSMC_MSG_PCIeDPM_UnForceLevel)),
3346 "unforce pcie level failed!",
3347 return -1);
3348 }
3349
3350 result = tonga_upload_dpm_level_enable_mask(hwmgr);
3351
3352 return result;
3353}
3354
/*
 * Return the index of the lowest set bit in @level_mask.
 *
 * @param hwmgr      unused (kept for signature compatibility).
 * @param level_mask bitmask of enabled DPM levels.
 * @return bit index of the lowest enabled level; 0 if the mask is empty.
 *
 * Fix: the original scanned unconditionally, so a zero mask shifted past
 * the width of the type (undefined behaviour / endless scan).  Guard the
 * empty-mask case explicitly.
 */
static uint32_t tonga_get_lowest_enable_level(
		struct pp_hwmgr *hwmgr, uint32_t level_mask)
{
	uint32_t level = 0;

	if (level_mask == 0)
		return 0;

	while (0 == (level_mask & (1 << level)))
		level++;

	return level;
}
3365
3366static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3367{
3368 uint32_t level;
3369 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3370
3371 if (0 == data->pcie_dpm_key_disabled) {
3372 /* PCIE */
3373 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
3374 level = tonga_get_lowest_enable_level(hwmgr,
3375 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3376 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
3377 "force lowest pcie dpm state failed!", return -1);
3378 }
3379 }
3380
3381 if (0 == data->sclk_dpm_key_disabled) {
3382 /* SCLK */
3383 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3384 level = tonga_get_lowest_enable_level(hwmgr,
3385 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3386
3387 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
3388 "force sclk dpm state failed!", return -1);
3389
3390 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
3391 CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
3392 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3393 Curr_Sclk_Index does not match the level \n");
3394 }
3395 }
3396
3397 if (0 == data->mclk_dpm_key_disabled) {
3398 /* MCLK */
3399 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
3400 level = tonga_get_lowest_enable_level(hwmgr,
3401 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3402 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
3403 "force lowest mclk dpm state failed!", return -1);
3404 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3405 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
3406 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3407 Curr_Mclk_Index does not match the level \n");
3408 }
3409 }
3410
3411 return 0;
3412}
3413
3414static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr)
3415{
3416 uint8_t entryId;
3417 uint8_t voltageId;
3418 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3419 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3420
3421 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
3422 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
3423 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
3424
3425 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3426 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
3427 voltageId = sclk_table->entries[entryId].vddInd;
3428 sclk_table->entries[entryId].vddgfx =
3429 pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd;
3430 }
3431 } else {
3432 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
3433 voltageId = sclk_table->entries[entryId].vddInd;
3434 sclk_table->entries[entryId].vddc =
3435 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3436 }
3437 }
3438
3439 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
3440 voltageId = mclk_table->entries[entryId].vddInd;
3441 mclk_table->entries[entryId].vddc =
3442 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3443 }
3444
3445 for (entryId = 0; entryId < mm_table->count; ++entryId) {
3446 voltageId = mm_table->entries[entryId].vddcInd;
3447 mm_table->entries[entryId].vddc =
3448 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3449 }
3450
3451 return 0;
3452
3453}
3454
/*
 * Derive the complementary voltage rail for each SCLK/MCLK dependency
 * entry when VDDGFX is on a separate SVID2 regulator, and feed the
 * derived voltages into the matching lookup table.
 *
 * For SCLK entries: vddc = vddgfx + vdd_offset; for MCLK entries:
 * vddgfx = vddc + vdd_offset.  Bit 15 of vdd_offset flags a negative
 * offset (subtracting 0xFFFF converts the 16-bit encoding back to a
 * negative delta — presumably a one's-complement-style encoding from the
 * pptable; confirm against the pptable spec).
 *
 * No-op when vdd_gfx_control is not SVID2.  Returns always 0.
 */
static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entryId;
	phm_ppt_v1_voltage_lookup_record v_record;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
		for (entryId = 0; entryId < sclk_table->count; ++entryId) {
			/* bit 15 set => negative offset encoding */
			if (sclk_table->entries[entryId].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
					sclk_table->entries[entryId].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
					sclk_table->entries[entryId].vdd_offset;

			/* Store the derived VDDC and seed all CAC levels with it. */
			sclk_table->entries[entryId].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}

		for (entryId = 0; entryId < mclk_table->count; ++entryId) {
			/* bit 15 set => negative offset encoding */
			if (mclk_table->entries[entryId].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entryId].vddc +
					mclk_table->entries[entryId].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entryId].vddc +
					mclk_table->entries[entryId].vdd_offset;

			/* Store the derived VDDGFX and seed all CAC levels with it. */
			mclk_table->entries[entryId].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}

	return 0;

}
3498
/*
 * Derive VDDGFX for each multimedia (MM) dependency entry when VDDGFX is
 * on a separate SVID2 regulator: vddgfx = vddc + vddgfx_offset, with
 * bit 15 of the offset flagging a negative delta (subtracting 0xFFFF
 * reverses the 16-bit encoding — presumably the same pptable offset
 * encoding used for the SCLK/MCLK tables; confirm against the spec).
 *
 * Each derived voltage is appended to the VDDGFX lookup table.
 * No-op when vdd_gfx_control is not SVID2.  Returns always 0.
 */
static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	uint32_t entryId;
	phm_ppt_v1_voltage_lookup_record v_record;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;

	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
		for (entryId = 0; entryId < mm_table->count; entryId++) {
			/* bit 15 set => negative offset encoding */
			if (mm_table->entries[entryId].vddgfx_offset & (1 << 15))
				v_record.us_vdd = mm_table->entries[entryId].vddc +
					mm_table->entries[entryId].vddgfx_offset - 0xFFFF;
			else
				v_record.us_vdd = mm_table->entries[entryId].vddc +
					mm_table->entries[entryId].vddgfx_offset;

			/* Add the calculated VDDGFX to the VDDGFX lookup table */
			mm_table->entries[entryId].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
3524
3525
3526/**
3527 * Change virtual leakage voltage to actual value.
3528 *
3529 * @param hwmgr the address of the powerplay hardware manager.
3530 * @param pointer to changing voltage
3531 * @param pointer to leakage table
3532 */
3533static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
3534 uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable)
3535{
3536 uint32_t leakage_index;
3537
3538 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
3539 for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) {
3540 /* if this voltage matches a leakage voltage ID */
3541 /* patch with actual leakage voltage */
3542 if (pLeakageTable->leakage_id[leakage_index] == *voltage) {
3543 *voltage = pLeakageTable->actual_voltage[leakage_index];
3544 break;
3545 }
3546 }
3547
3548 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
3549 printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n");
3550}
3551
3552/**
3553 * Patch voltage lookup table by EVV leakages.
3554 *
3555 * @param hwmgr the address of the powerplay hardware manager.
3556 * @param pointer to voltage lookup table
3557 * @param pointer to leakage table
3558 * @return always 0
3559 */
3560static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
3561 phm_ppt_v1_voltage_lookup_table *lookup_table,
3562 phw_tonga_leakage_voltage *pLeakageTable)
3563{
3564 uint32_t i;
3565
3566 for (i = 0; i < lookup_table->count; i++) {
3567 tonga_patch_with_vdd_leakage(hwmgr,
3568 &lookup_table->entries[i].us_vdd, pLeakageTable);
3569 }
3570
3571 return 0;
3572}
3573
3574static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr,
3575 phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc)
3576{
3577 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3578
3579 tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable);
3580 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
3581 pptable_info->max_clock_voltage_on_dc.vddc;
3582
3583 return 0;
3584}
3585
3586static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage(
3587 struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable,
3588 uint16_t *Vddgfx)
3589{
3590 tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable);
3591 return 0;
3592}
3593
3594int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr,
3595 phm_ppt_v1_voltage_lookup_table *lookup_table)
3596{
3597 uint32_t table_size, i, j;
3598 phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
3599 table_size = lookup_table->count;
3600
3601 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
3602 "Lookup table is empty", return -1);
3603
3604 /* Sorting voltages */
3605 for (i = 0; i < table_size - 1; i++) {
3606 for (j = i + 1; j > 0; j--) {
3607 if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) {
3608 tmp_voltage_lookup_record = lookup_table->entries[j-1];
3609 lookup_table->entries[j-1] = lookup_table->entries[j];
3610 lookup_table->entries[j] = tmp_voltage_lookup_record;
3611 }
3612 }
3613 }
3614
3615 return 0;
3616}
3617
/*
 * Finalize all voltage dependency/lookup tables before use.
 *
 * Pipeline: patch the relevant lookup table (VDDGFX when on SVID2,
 * otherwise VDDC) and the matching DC clock-voltage limit for leakage,
 * resolve lookup indices into real voltages, derive complementary-rail
 * voltages for the SCLK/MCLK and MM tables, then sort both lookup tables
 * by voltage.
 *
 * Error handling is accumulate-and-continue: every step runs; the last
 * non-zero step result is returned (0 if all steps succeeded).
 */
static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result;
	tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
		/* Separate VDDGFX rail: patch the VDDGFX side for leakage. */
		tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
			pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
		if (tmp_result != 0)
			result = tmp_result;

		tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr,
			&(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx);
		if (tmp_result != 0)
			result = tmp_result;
	} else {
		/* Single rail: patch the VDDC side for leakage. */
		tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
			pptable_info->vddc_lookup_table, &(data->vddc_leakage));
		if (tmp_result != 0)
			result = tmp_result;

		tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr,
			&(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc);
		if (tmp_result != 0)
			result = tmp_result;
	}

	tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result != 0)
		result = tmp_result;

	tmp_result = tonga_calc_voltage_dependency_tables(hwmgr);
	if (tmp_result != 0)
		result = tmp_result;

	tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr);
	if (tmp_result != 0)
		result = tmp_result;

	tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table);
	if (tmp_result != 0)
		result = tmp_result;

	tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table);
	if (tmp_result != 0)
		result = tmp_result;

	return result;
}
3669
3670int tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr)
3671{
3672 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3673 data->low_sclk_interrupt_threshold = 0;
3674
3675 return 0;
3676}
3677
/*
 * One-time ASIC setup: read clock registers, detect the memory type,
 * enable ACPI power management, initialize the power-gate state, read the
 * MC microcode version and reset the SCLK interrupt threshold.
 *
 * Error handling is accumulate-and-continue: every step is attempted and
 * the last failing step's code is returned (0 if all succeeded).
 */
int tonga_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = tonga_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to read clock registers!", result = tmp_result);

	tmp_result = tonga_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to get memory type!", result = tmp_result);

	tmp_result = tonga_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = tonga_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to init power gate state!", result = tmp_result);

	tmp_result = tonga_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = tonga_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
3708
/**
 * Enable voltage control.
 *
 * Sets the VOLT_PWRMGT_EN bit in the SMC's GENERAL_PWRMGT register via
 * the indirect register interface.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
3722
3723/**
3724 * Checks if we want to support voltage control
3725 *
3726 * @param hwmgr the address of the powerplay hardware manager.
3727 */
3728bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr)
3729{
3730 const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3731
3732 return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control);
3733}
3734
3735/*---------------------------MC----------------------------*/
3736
/*
 * Return the memory module index: bits [23:16] of the BIOS_SCRATCH_4
 * register.  (Name typo "modile" is kept — callers reference it.)
 */
uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
{
	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
}
3741
3742bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
3743{
3744 bool result = true;
3745
3746 switch (inReg) {
3747 case mmMC_SEQ_RAS_TIMING:
3748 *outReg = mmMC_SEQ_RAS_TIMING_LP;
3749 break;
3750
3751 case mmMC_SEQ_DLL_STBY:
3752 *outReg = mmMC_SEQ_DLL_STBY_LP;
3753 break;
3754
3755 case mmMC_SEQ_G5PDX_CMD0:
3756 *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
3757 break;
3758
3759 case mmMC_SEQ_G5PDX_CMD1:
3760 *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
3761 break;
3762
3763 case mmMC_SEQ_G5PDX_CTRL:
3764 *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
3765 break;
3766
3767 case mmMC_SEQ_CAS_TIMING:
3768 *outReg = mmMC_SEQ_CAS_TIMING_LP;
3769 break;
3770
3771 case mmMC_SEQ_MISC_TIMING:
3772 *outReg = mmMC_SEQ_MISC_TIMING_LP;
3773 break;
3774
3775 case mmMC_SEQ_MISC_TIMING2:
3776 *outReg = mmMC_SEQ_MISC_TIMING2_LP;
3777 break;
3778
3779 case mmMC_SEQ_PMG_DVS_CMD:
3780 *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
3781 break;
3782
3783 case mmMC_SEQ_PMG_DVS_CTL:
3784 *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
3785 break;
3786
3787 case mmMC_SEQ_RD_CTL_D0:
3788 *outReg = mmMC_SEQ_RD_CTL_D0_LP;
3789 break;
3790
3791 case mmMC_SEQ_RD_CTL_D1:
3792 *outReg = mmMC_SEQ_RD_CTL_D1_LP;
3793 break;
3794
3795 case mmMC_SEQ_WR_CTL_D0:
3796 *outReg = mmMC_SEQ_WR_CTL_D0_LP;
3797 break;
3798
3799 case mmMC_SEQ_WR_CTL_D1:
3800 *outReg = mmMC_SEQ_WR_CTL_D1_LP;
3801 break;
3802
3803 case mmMC_PMG_CMD_EMRS:
3804 *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
3805 break;
3806
3807 case mmMC_PMG_CMD_MRS:
3808 *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
3809 break;
3810
3811 case mmMC_PMG_CMD_MRS1:
3812 *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
3813 break;
3814
3815 case mmMC_SEQ_PMG_TIMING:
3816 *outReg = mmMC_SEQ_PMG_TIMING_LP;
3817 break;
3818
3819 case mmMC_PMG_CMD_MRS2:
3820 *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
3821 break;
3822
3823 case mmMC_SEQ_WR_CTL_2:
3824 *outReg = mmMC_SEQ_WR_CTL_2_LP;
3825 break;
3826
3827 default:
3828 result = false;
3829 break;
3830 }
3831
3832 return result;
3833}
3834
3835int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table)
3836{
3837 uint32_t i;
3838 uint16_t address;
3839
3840 for (i = 0; i < table->last; i++) {
3841 table->mc_reg_address[i].s0 =
3842 tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
3843 ? address : table->mc_reg_address[i].s1;
3844 }
3845 return 0;
3846}
3847
3848int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table)
3849{
3850 uint8_t i, j;
3851
3852 PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3853 "Invalid VramInfo table.", return -1);
3854 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
3855 "Invalid VramInfo table.", return -1);
3856
3857 for (i = 0; i < table->last; i++) {
3858 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3859 }
3860 ni_table->last = table->last;
3861
3862 for (i = 0; i < table->num_entries; i++) {
3863 ni_table->mc_reg_table_entry[i].mclk_max =
3864 table->mc_reg_table_entry[i].mclk_max;
3865 for (j = 0; j < table->last; j++) {
3866 ni_table->mc_reg_table_entry[i].mc_data[j] =
3867 table->mc_reg_table_entry[i].mc_data[j];
3868 }
3869 }
3870
3871 ni_table->num_entries = table->num_entries;
3872
3873 return 0;
3874}
3875
3876/**
3877 * VBIOS omits some information to reduce size, we need to recover them here.
3878 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
3879 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
3880 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
3881 * 3. need to set these data for each clock range
3882 *
3883 * @param hwmgr the address of the powerplay hardware manager.
3884 * @param table the address of MCRegTable
3885 * @return always 0
3886 */
3887int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table)
3888{
3889 uint8_t i, j, k;
3890 uint32_t temp_reg;
3891 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3892
3893 for (i = 0, j = table->last; i < table->last; i++) {
3894 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3895 "Invalid VramInfo table.", return -1);
3896 switch (table->mc_reg_address[i].s1) {
3897 /*
3898 * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
3899 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
3900 */
3901 case mmMC_SEQ_MISC1:
3902 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
3903 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
3904 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
3905 for (k = 0; k < table->num_entries; k++) {
3906 table->mc_reg_table_entry[k].mc_data[j] =
3907 ((temp_reg & 0xffff0000)) |
3908 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3909 }
3910 j++;
3911 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3912 "Invalid VramInfo table.", return -1);
3913
3914 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
3915 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
3916 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
3917 for (k = 0; k < table->num_entries; k++) {
3918 table->mc_reg_table_entry[k].mc_data[j] =
3919 (temp_reg & 0xffff0000) |
3920 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3921
3922 if (!data->is_memory_GDDR5) {
3923 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3924 }
3925 }
3926 j++;
3927 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3928 "Invalid VramInfo table.", return -1);
3929
3930 if (!data->is_memory_GDDR5) {
3931 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
3932 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
3933 for (k = 0; k < table->num_entries; k++) {
3934 table->mc_reg_table_entry[k].mc_data[j] =
3935 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3936 }
3937 j++;
3938 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3939 "Invalid VramInfo table.", return -1);
3940 }
3941
3942 break;
3943
3944 case mmMC_SEQ_RESERVE_M:
3945 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
3946 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
3947 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
3948 for (k = 0; k < table->num_entries; k++) {
3949 table->mc_reg_table_entry[k].mc_data[j] =
3950 (temp_reg & 0xffff0000) |
3951 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3952 }
3953 j++;
3954 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3955 "Invalid VramInfo table.", return -1);
3956 break;
3957
3958 default:
3959 break;
3960 }
3961
3962 }
3963
3964 table->last = j;
3965
3966 return 0;
3967}
3968
3969int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
3970{
3971 uint8_t i, j;
3972 for (i = 0; i < table->last; i++) {
3973 for (j = 1; j < table->num_entries; j++) {
3974 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3975 table->mc_reg_table_entry[j].mc_data[i]) {
3976 table->validflag |= (1<<i);
3977 break;
3978 }
3979 }
3980 }
3981
3982 return 0;
3983}
3984
3985static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
3986{
3987 int result;
3988 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3989 pp_atomctrl_mc_reg_table *table;
3990 phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
3991 uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
3992
3993 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
3994
3995 if (NULL == table)
3996 return -ENOMEM;
3997
3998 /* Program additional LP registers that are no longer programmed by VBIOS */
3999 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
4000 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
4001 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
4002 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
4003 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
4004 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
4005 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
4006 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
4007 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
4008 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
4009 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
4010 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
4011 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
4012 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
4013 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
4014 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
4015 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
4016 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
4017 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
4018 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
4019
4020 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
4021
4022 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
4023
4024 if (0 == result)
4025 result = tonga_copy_vbios_smc_reg_table(table, ni_table);
4026
4027 if (0 == result) {
4028 tonga_set_s0_mc_reg_index(ni_table);
4029 result = tonga_set_mc_special_registers(hwmgr, ni_table);
4030 }
4031
4032 if (0 == result)
4033 tonga_set_valid_flag(ni_table);
4034
4035 kfree(table);
4036 return result;
4037}
4038
4039/*
4040* Copy one arb setting to another and then switch the active set.
4041* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants.
4042*/
4043int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
4044 uint32_t arbFreqSrc, uint32_t arbFreqDest)
4045{
4046 uint32_t mc_arb_dram_timing;
4047 uint32_t mc_arb_dram_timing2;
4048 uint32_t burst_time;
4049 uint32_t mc_cg_config;
4050
4051 switch (arbFreqSrc) {
4052 case MC_CG_ARB_FREQ_F0:
4053 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
4054 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
4055 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
4056 break;
4057
4058 case MC_CG_ARB_FREQ_F1:
4059 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
4060 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
4061 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
4062 break;
4063
4064 default:
4065 return -1;
4066 }
4067
4068 switch (arbFreqDest) {
4069 case MC_CG_ARB_FREQ_F0:
4070 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
4071 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
4072 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
4073 break;
4074
4075 case MC_CG_ARB_FREQ_F1:
4076 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
4077 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
4078 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
4079 break;
4080
4081 default:
4082 return -1;
4083 }
4084
4085 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
4086 mc_cg_config |= 0x0000000F;
4087 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
4088 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
4089
4090 return 0;
4091}
4092
4093/**
4094 * Initial switch from ARB F0->F1
4095 *
4096 * @param hwmgr the address of the powerplay hardware manager.
4097 * @return always 0
4098 * This function is to be called from the SetPowerState table.
4099 */
4100int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
4101{
4102 return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
4103}
4104
4105/**
4106 * Initialize the ARB DRAM timing table's index field.
4107 *
4108 * @param hwmgr the address of the powerplay hardware manager.
4109 * @return always 0
4110 */
4111int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
4112{
4113 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4114 uint32_t tmp;
4115 int result;
4116
4117 /*
4118 * This is a read-modify-write on the first byte of the ARB table.
4119 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'.
4120 * This solution is ugly, but we never write the whole table only individual fields in it.
4121 * In reality this field should not be in that structure but in a soft register.
4122 */
4123 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
4124 data->arb_table_start, &tmp, data->sram_end);
4125
4126 if (0 != result)
4127 return result;
4128
4129 tmp &= 0x00FFFFFF;
4130 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
4131
4132 return tonga_write_smc_sram_dword(hwmgr->smumgr,
4133 data->arb_table_start, tmp, data->sram_end);
4134}
4135
4136int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table)
4137{
4138 const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4139
4140 uint32_t i, j;
4141
4142 for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) {
4143 if (data->tonga_mc_reg_table.validflag & 1<<j) {
4144 PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
4145 "Index of mc_reg_table->address[] array out of boundary", return -1);
4146 mc_reg_table->address[i].s0 =
4147 PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0);
4148 mc_reg_table->address[i].s1 =
4149 PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1);
4150 i++;
4151 }
4152 }
4153
4154 mc_reg_table->last = (uint8_t)i;
4155
4156 return 0;
4157}
4158
4159/*convert register values from driver to SMC format */
4160void tonga_convert_mc_registers(
4161 const phw_tonga_mc_reg_entry * pEntry,
4162 SMU72_Discrete_MCRegisterSet *pData,
4163 uint32_t numEntries, uint32_t validflag)
4164{
4165 uint32_t i, j;
4166
4167 for (i = 0, j = 0; j < numEntries; j++) {
4168 if (validflag & 1<<j) {
4169 pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
4170 i++;
4171 }
4172 }
4173}
4174
4175/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */
4176int tonga_convert_mc_reg_table_entry_to_smc(
4177 struct pp_hwmgr *hwmgr,
4178 const uint32_t memory_clock,
4179 SMU72_Discrete_MCRegisterSet *mc_reg_table_data
4180 )
4181{
4182 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4183 uint32_t i = 0;
4184
4185 for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) {
4186 if (memory_clock <=
4187 data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
4188 break;
4189 }
4190 }
4191
4192 if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0))
4193 --i;
4194
4195 tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i],
4196 mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag);
4197
4198 return 0;
4199}
4200
4201int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
4202 SMU72_Discrete_MCRegisters *mc_reg_table)
4203{
4204 int result = 0;
4205 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4206 int res;
4207 uint32_t i;
4208
4209 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
4210 res = tonga_convert_mc_reg_table_entry_to_smc(
4211 hwmgr,
4212 data->dpm_table.mclk_table.dpm_levels[i].value,
4213 &mc_reg_table->data[i]
4214 );
4215
4216 if (0 != res)
4217 result = res;
4218 }
4219
4220 return result;
4221}
4222
4223int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
4224{
4225 int result;
4226 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4227
4228 memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters));
4229 result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
4230 PP_ASSERT_WITH_CODE(0 == result,
4231 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
4232
4233 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
4234 PP_ASSERT_WITH_CODE(0 == result,
4235 "Failed to initialize MCRegTable for driver state!", return result;);
4236
4237 return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
4238 (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end);
4239}
4240
4241/**
4242 * Programs static screed detection parameters
4243 *
4244 * @param hwmgr the address of the powerplay hardware manager.
4245 * @return always 0
4246 */
4247int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
4248{
4249 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
4250
4251 /* Set static screen threshold unit*/
4252 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
4253 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
4254 data->static_screen_threshold_unit);
4255 /* Set static screen threshold*/
4256 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
4257 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
4258 data->static_screen_threshold);
4259
4260 return 0;
4261}
4262
4263/**
4264 * Setup display gap for glitch free memory clock switching.
4265 *
4266 * @param hwmgr the address of the powerplay hardware manager.
4267 * @return always 0
4268 */
4269int tonga_enable_display_gap(struct pp_hwmgr *hwmgr)
4270{
4271 uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
4272 CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4273
4274 display_gap = PHM_SET_FIELD(display_gap,
4275 CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
4276
4277 display_gap = PHM_SET_FIELD(display_gap,
4278 CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
4279
4280 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4281 ixCG_DISPLAY_GAP_CNTL, display_gap);
4282
4283 return 0;
4284}
4285
4286/**
4287 * Programs activity state transition voting clients
4288 *
4289 * @param hwmgr the address of the powerplay hardware manager.
4290 * @return always 0
4291 */
4292int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
4293{
4294 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
4295
4296 /* Clear reset for voting clients before enabling DPM */
4297 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4298 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
4299 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4300 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
4301
4302 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4303 ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
4304 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4305 ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
4306 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4307 ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
4308 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4309 ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
4310 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4311 ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
4312 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4313 ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
4314 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4315 ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
4316 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4317 ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
4318
4319 return 0;
4320}
4321
/*
 * Configure the thermal-protection event source from the bitmask of
 * active auto-throttle sources.  Unknown combinations log an error and
 * fall through to "no protection".
 */
static void tonga_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		printk(KERN_ERR "Unknown throttling event sources.");
		/* fall through */
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source. */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		/* Protection is disabled when no thermal controller is present. */
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}
4361
4362static int tonga_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
4363 PHM_AutoThrottleSource source)
4364{
4365 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4366
4367 if (!(data->active_auto_throttle_sources & (1 << source))) {
4368 data->active_auto_throttle_sources |= 1 << source;
4369 tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
4370 }
4371 return 0;
4372}
4373
4374static int tonga_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
4375{
4376 return tonga_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
4377}
4378
4379static int tonga_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
4380 PHM_AutoThrottleSource source)
4381{
4382 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4383
4384 if (data->active_auto_throttle_sources & (1 << source)) {
4385 data->active_auto_throttle_sources &= ~(1 << source);
4386 tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
4387 }
4388 return 0;
4389}
4390
4391static int tonga_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
4392{
4393 return tonga_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
4394}
4395
/*
 * Bring up DPM: voltage control, tables, ARB setup, SMC programming,
 * clock control, CAC/power containment and thermal auto-throttling.
 *
 * Each step logs on failure and records the error in 'result', but the
 * sequence deliberately continues; the last failing step's code is
 * returned (0 if all steps succeeded).
 */
int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* NOTE(review): this result is never checked before being
	 * overwritten below — confirm whether a failure here should
	 * abort or at least be logged. */
	tmp_result = tonga_check_for_dpm_stopped(hwmgr);

	if (cf_tonga_voltage_control(hwmgr)) {
		tmp_result = tonga_enable_voltage_control(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable voltage control!", result = tmp_result);

		tmp_result = tonga_construct_voltage_tables(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to contruct voltage tables!", result = tmp_result);
	}

	tmp_result = tonga_initialize_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to initialize MC reg table!", result = tmp_result);

	tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to program static screen threshold parameters!", result = tmp_result);

	tmp_result = tonga_enable_display_gap(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable display gap!", result = tmp_result);

	tmp_result = tonga_program_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to program voting clients!", result = tmp_result);

	tmp_result = tonga_process_firmware_header(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to process firmware header!", result = tmp_result);

	tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to initialize switch from ArbF0 to F1!", result = tmp_result);

	tmp_result = tonga_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to initialize SMC table!", result = tmp_result);

	tmp_result = tonga_init_arb_table_index(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to initialize ARB table index!", result = tmp_result);

	tmp_result = tonga_populate_pm_fuses(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
		"Failed to populate PM fuses!", result = tmp_result);

	tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to populate initialize MC Reg table!", result = tmp_result);

	tmp_result = tonga_notify_smc_display_change(hwmgr, false);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to notify no display!", result = tmp_result);

	/* enable SCLK control */
	tmp_result = tonga_enable_sclk_control(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable SCLK control!", result = tmp_result);

	/* enable DPM */
	tmp_result = tonga_start_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to start DPM!", result = tmp_result);

	tmp_result = tonga_enable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
		"Failed to enable SMC CAC!", result = tmp_result);

	tmp_result = tonga_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
		"Failed to enable power containment!", result = tmp_result);

	tmp_result = tonga_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
		"Failed to power control set level!", result = tmp_result);

	tmp_result = tonga_enable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable thermal auto throttle!", result = tmp_result);

	return result;
}
4484
/*
 * Tear down DPM: disable thermal auto-throttle, stop DPM and reset the
 * SMC to defaults.  Returns the last failing step's code, 0 on success.
 */
int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* NOTE(review): a non-zero check result returns 0 (success) here —
	 * presumably "SMC not running" means there is nothing to disable;
	 * confirm against tonga_check_for_dpm_running()'s contract. */
	tmp_result = tonga_check_for_dpm_running(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"SMC is still running!", return 0);

	tmp_result = tonga_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
		"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = tonga_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to stop DPM!", result = tmp_result);

	tmp_result = tonga_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to reset to default!", result = tmp_result);

	return result;
}
4507
4508int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
4509{
4510 int result;
4511
4512 result = tonga_set_boot_state(hwmgr);
4513 if (0 != result)
4514 printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n");
4515
4516 return result;
4517}
4518
/* Tear down the hwmgr backend; delegates entirely to the common phm helper. */
int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
4523
4524/**
4525 * Initializes the Volcanic Islands Hardware Manager
4526 *
4527 * @param hwmgr the address of the powerplay hardware manager.
4528 * @return 1 if success; otherwise appropriate error code.
4529 */
4530int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4531{
4532 int result = 0;
4533 SMU72_Discrete_DpmTable *table = NULL;
4534 tonga_hwmgr *data;
4535 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
4536 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4537 phw_tonga_ulv_parm *ulv;
4538 struct cgs_system_info sys_info = {0};
4539
4540 PP_ASSERT_WITH_CODE((NULL != hwmgr),
4541 "Invalid Parameter!", return -1;);
4542
4543 data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
4544 if (data == NULL)
4545 return -ENOMEM;
4546
4547 hwmgr->backend = data;
4548
4549 data->dll_defaule_on = false;
4550 data->sram_end = SMC_RAM_END;
4551
4552 data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
4553 data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT;
4554 data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT;
4555 data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT;
4556 data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT;
4557 data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT;
4558 data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT;
4559 data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT;
4560
4561 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
4562 data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA;
4563 data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT;
4564
4565 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4566 PHM_PlatformCaps_DisableVoltageIsland);
4567
4568 data->sclk_dpm_key_disabled = 0;
4569 data->mclk_dpm_key_disabled = 0;
4570 data->pcie_dpm_key_disabled = 0;
4571 data->pcc_monitor_enabled = 0;
4572
4573 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4574 PHM_PlatformCaps_UnTabledHardwareInterface);
4575
4576 data->gpio_debug = 0;
4577 data->engine_clock_data = 0;
4578 data->memory_clock_data = 0;
4579 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4580 PHM_PlatformCaps_DynamicPatchPowerState);
4581
4582 /* need to set voltage control types before EVV patching*/
4583 data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE;
4584 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
4585 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
4586 data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
4587 data->force_pcie_gen = PP_PCIEGenInvalid;
4588
4589 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4590 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
4591 data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4592 }
4593
4594 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4595 PHM_PlatformCaps_ControlVDDGFX)) {
4596 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4597 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
4598 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4599 }
4600 }
4601
4602 if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
4603 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4604 PHM_PlatformCaps_ControlVDDGFX);
4605 }
4606
4607 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4608 PHM_PlatformCaps_EnableMVDDControl)) {
4609 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4610 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) {
4611 data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
4612 }
4613 }
4614
4615 if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) {
4616 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4617 PHM_PlatformCaps_EnableMVDDControl);
4618 }
4619
4620 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4621 PHM_PlatformCaps_ControlVDDCI)) {
4622 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4623 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
4624 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
4625 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4626 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
4627 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4628 }
4629
4630 if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
4631 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4632 PHM_PlatformCaps_ControlVDDCI);
4633
4634 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4635 PHM_PlatformCaps_TablelessHardwareInterface);
4636
4637 if (pptable_info->cac_dtp_table->usClockStretchAmount != 0)
4638 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4639 PHM_PlatformCaps_ClockStretcher);
4640
4641 /* Initializes DPM default values*/
4642 tonga_initialize_dpm_defaults(hwmgr);
4643
4644 /* Get leakage voltage based on leakage ID.*/
4645 PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)),
4646 "Get EVV Voltage Failed. Abort Driver loading!", return -1);
4647
4648 tonga_complete_dependency_tables(hwmgr);
4649
4650 /* Parse pptable data read from VBIOS*/
4651 tonga_set_private_var_based_on_pptale(hwmgr);
4652
4653 /* ULV Support*/
4654 ulv = &(data->ulv);
4655 ulv->ulv_supported = false;
4656
4657 /* Initalize Dynamic State Adjustment Rule Settings*/
4658 result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
4659 if (result)
4660 printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
4661 data->uvd_enabled = false;
4662
4663 table = &(data->smc_state_table);
4664
4665 /*
4666 * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
4667 * Peak Current Control feature is enabled and we should program PCC HW register
4668 */
4669 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
4670 uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
4671 CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
4672
4673 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
4674 case 0:
4675 temp_reg = PHM_SET_FIELD(temp_reg,
4676 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
4677 break;
4678 case 1:
4679 temp_reg = PHM_SET_FIELD(temp_reg,
4680 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
4681 break;
4682 case 2:
4683 temp_reg = PHM_SET_FIELD(temp_reg,
4684 CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
4685 break;
4686 case 3:
4687 temp_reg = PHM_SET_FIELD(temp_reg,
4688 CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
4689 break;
4690 case 4:
4691 temp_reg = PHM_SET_FIELD(temp_reg,
4692 CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
4693 break;
4694 default:
4695 printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \
4696 Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! \n");
4697 break;
4698 }
4699 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4700 ixCNB_PWRMGT_CNTL, temp_reg);
4701 }
4702
4703 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4704 PHM_PlatformCaps_EnableSMU7ThermalManagement);
4705 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4706 PHM_PlatformCaps_SMU7);
4707
4708 data->vddc_phase_shed_control = false;
4709
4710 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4711 PHM_PlatformCaps_UVDPowerGating);
4712 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4713 PHM_PlatformCaps_VCEPowerGating);
4714 sys_info.size = sizeof(struct cgs_system_info);
4715 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
4716 result = cgs_query_system_info(hwmgr->device, &sys_info);
4717 if (!result) {
4718 if (sys_info.value & AMD_PG_SUPPORT_UVD)
4719 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4720 PHM_PlatformCaps_UVDPowerGating);
4721 if (sys_info.value & AMD_PG_SUPPORT_VCE)
4722 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4723 PHM_PlatformCaps_VCEPowerGating);
4724 }
4725
4726 if (0 == result) {
4727 data->is_tlu_enabled = false;
4728 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
4729 TONGA_MAX_HARDWARE_POWERLEVELS;
4730 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
4731 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
4732
4733 sys_info.size = sizeof(struct cgs_system_info);
4734 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
4735 result = cgs_query_system_info(hwmgr->device, &sys_info);
4736 if (result)
4737 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4738 else
4739 data->pcie_gen_cap = (uint32_t)sys_info.value;
4740 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
4741 data->pcie_spc_cap = 20;
4742 sys_info.size = sizeof(struct cgs_system_info);
4743 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
4744 result = cgs_query_system_info(hwmgr->device, &sys_info);
4745 if (result)
4746 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4747 else
4748 data->pcie_lane_cap = (uint32_t)sys_info.value;
4749 } else {
4750 /* Ignore return value in here, we are cleaning up a mess. */
4751 tonga_hwmgr_backend_fini(hwmgr);
4752 }
4753
4754 return result;
4755}
4756
4757static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr,
4758 enum amd_dpm_forced_level level)
4759{
4760 int ret = 0;
4761
4762 switch (level) {
4763 case AMD_DPM_FORCED_LEVEL_HIGH:
4764 ret = tonga_force_dpm_highest(hwmgr);
4765 if (ret)
4766 return ret;
4767 break;
4768 case AMD_DPM_FORCED_LEVEL_LOW:
4769 ret = tonga_force_dpm_lowest(hwmgr);
4770 if (ret)
4771 return ret;
4772 break;
4773 case AMD_DPM_FORCED_LEVEL_AUTO:
4774 ret = tonga_unforce_dpm_levels(hwmgr);
4775 if (ret)
4776 return ret;
4777 break;
4778 default:
4779 break;
4780 }
4781
4782 hwmgr->dpm_level = level;
4783 return ret;
4784}
4785
/*
 * Adjust a requested power state before it is programmed:
 *  - clamps every level to the DC limits when running on DC power,
 *  - overrides clocks with the "stable pstate" values when
 *    PHM_PlatformCaps_StablePState is set,
 *  - applies sclk/mclk overdrive requests from the gfx arbiter,
 *  - disables MCLK switching for multi-display / frame-lock cases and
 *    keeps the two performance levels consistent (level 1 >= level 0).
 */
static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
		struct pp_power_state *prequest_ps,
		const struct pp_power_state *pcurrent_ps)
{
	struct tonga_power_state *tonga_ps =
			cast_phw_tonga_power_state(&prequest_ps->hardware);

	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	struct cgs_display_info info = {0};
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2,
		"VI should always have 2 performance levels",
		);

	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* On battery the requested clocks may exceed what DC power can
	 * sustain; clamp every performance level to the DC limits. */
	if (PP_PowerSource_DC == hwmgr->power_source) {
		for (i = 0; i < tonga_ps->performance_level_count; i++) {
			if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk)
				tonga_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk)
				tonga_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
	tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;

	tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/

	/* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {

		/* Stable pstate sclk is 75% of the AC limit, snapped down to
		 * the nearest SCLK dependency-table entry. */
		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		/* No entry at or below the target: use the lowest entry. */
		if (count < 0)
			stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	/* The gfx arbiter's minimums override anything lower. */
	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;

	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;

	tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;

	/* Overdrive: raise the high level up to the (validated) overdrive
	 * clock requested through the gfx arbiter. */
	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
			"Overdrive sclk exceeds limit",
			hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);

		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
			tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
	}

	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
			"Overdrive mclk exceeds limit",
			hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);

		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
			tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
			hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);

	/* MCLK switching is disabled with more than one active display. */
	disable_mclk_switching = (1 < info.display_count) ||
				disable_mclk_switching_for_frame_lock;

	sclk = tonga_ps->performance_levels[0].engine_clock;
	mclk = tonga_ps->performance_levels[0].memory_clock;

	if (disable_mclk_switching)
		mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock;

	/* Raise level 0 to the minimum clocks, but never above the max limits. */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;

	tonga_ps->performance_levels[0].engine_clock = sclk;
	tonga_ps->performance_levels[0].memory_clock = mclk;

	/* Keep level 1 at least as fast as the (possibly raised) level 0. */
	tonga_ps->performance_levels[1].engine_clock =
		(tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ?
			tonga_ps->performance_levels[1].engine_clock :
			tonga_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		if (mclk < tonga_ps->performance_levels[1].memory_clock)
			mclk = tonga_ps->performance_levels[1].memory_clock;

		/* Pin both levels to one MCLK so no switch ever happens. */
		tonga_ps->performance_levels[0].memory_clock = mclk;
		tonga_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock)
			tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
		for (i=0; i < tonga_ps->performance_level_count; i++) {
			tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/* NOTE(review): pcie_lane is assigned from
			 * pcie_gen_performance.max, not pcie_lane_performance.max —
			 * looks like a copy/paste slip; confirm before changing. */
			tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}

	return 0;
}
4933
4934int tonga_get_power_state_size(struct pp_hwmgr *hwmgr)
4935{
4936 return sizeof(struct tonga_power_state);
4937}
4938
4939static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
4940{
4941 struct pp_power_state *ps;
4942 struct tonga_power_state *tonga_ps;
4943
4944 if (hwmgr == NULL)
4945 return -EINVAL;
4946
4947 ps = hwmgr->request_ps;
4948
4949 if (ps == NULL)
4950 return -EINVAL;
4951
4952 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
4953
4954 if (low)
4955 return tonga_ps->performance_levels[0].memory_clock;
4956 else
4957 return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
4958}
4959
4960static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
4961{
4962 struct pp_power_state *ps;
4963 struct tonga_power_state *tonga_ps;
4964
4965 if (hwmgr == NULL)
4966 return -EINVAL;
4967
4968 ps = hwmgr->request_ps;
4969
4970 if (ps == NULL)
4971 return -EINVAL;
4972
4973 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
4974
4975 if (low)
4976 return tonga_ps->performance_levels[0].engine_clock;
4977 else
4978 return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
4979}
4980
4981static uint16_t tonga_get_current_pcie_speed(
4982 struct pp_hwmgr *hwmgr)
4983{
4984 uint32_t speed_cntl = 0;
4985
4986 speed_cntl = cgs_read_ind_register(hwmgr->device,
4987 CGS_IND_REG__PCIE,
4988 ixPCIE_LC_SPEED_CNTL);
4989 return((uint16_t)PHM_GET_FIELD(speed_cntl,
4990 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
4991}
4992
4993static int tonga_get_current_pcie_lane_number(
4994 struct pp_hwmgr *hwmgr)
4995{
4996 uint32_t link_width;
4997
4998 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
4999 CGS_IND_REG__PCIE,
5000 PCIE_LC_LINK_WIDTH_CNTL,
5001 LC_LINK_WIDTH_RD);
5002
5003 PP_ASSERT_WITH_CODE((7 >= link_width),
5004 "Invalid PCIe lane width!", return 0);
5005
5006 return decode_pcie_lane_width(link_width);
5007}
5008
/*
 * Patch @hw_ps with the VBIOS boot-up clocks/voltages read from the
 * ATOM firmware info table, and cache those values in
 * data->vbios_boot_state.  Returns 0 even when the table is absent
 * (e.g. under test), leaving the state untouched.
 */
static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state.  Table fields are little-endian; the PCIe
	 * gen/lane values come from the live link registers instead. */
	data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr);
	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)tonga_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
5047
5048static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
5049 void *state, struct pp_power_state *power_state,
5050 void *pp_table, uint32_t classification_flag)
5051{
5052 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5053
5054 struct tonga_power_state *tonga_ps =
5055 (struct tonga_power_state *)(&(power_state->hardware));
5056
5057 struct tonga_performance_level *performance_level;
5058
5059 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
5060
5061 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
5062 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
5063
5064 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
5065 (ATOM_Tonga_SCLK_Dependency_Table *)
5066 (((unsigned long)powerplay_table) +
5067 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
5068
5069 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
5070 (ATOM_Tonga_MCLK_Dependency_Table *)
5071 (((unsigned long)powerplay_table) +
5072 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
5073
5074 /* The following fields are not initialized here: id orderedList allStatesList */
5075 power_state->classification.ui_label =
5076 (le16_to_cpu(state_entry->usClassification) &
5077 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
5078 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
5079 power_state->classification.flags = classification_flag;
5080 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
5081
5082 power_state->classification.temporary_state = false;
5083 power_state->classification.to_be_deleted = false;
5084
5085 power_state->validation.disallowOnDC =
5086 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC));
5087
5088 power_state->pcie.lanes = 0;
5089
5090 power_state->display.disableFrameModulation = false;
5091 power_state->display.limitRefreshrate = false;
5092 power_state->display.enableVariBright =
5093 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT));
5094
5095 power_state->validation.supportedPowerLevels = 0;
5096 power_state->uvd_clocks.VCLK = 0;
5097 power_state->uvd_clocks.DCLK = 0;
5098 power_state->temperatures.min = 0;
5099 power_state->temperatures.max = 0;
5100
5101 performance_level = &(tonga_ps->performance_levels
5102 [tonga_ps->performance_level_count++]);
5103
5104 PP_ASSERT_WITH_CODE(
5105 (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS),
5106 "Performance levels exceeds SMC limit!",
5107 return -1);
5108
5109 PP_ASSERT_WITH_CODE(
5110 (tonga_ps->performance_level_count <=
5111 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
5112 "Performance levels exceeds Driver limit!",
5113 return -1);
5114
5115 /* Performance levels are arranged from low to high. */
5116 performance_level->memory_clock =
5117 le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk);
5118
5119 performance_level->engine_clock =
5120 le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk);
5121
5122 performance_level->pcie_gen = get_pcie_gen_support(
5123 data->pcie_gen_cap,
5124 state_entry->ucPCIEGenLow);
5125
5126 performance_level->pcie_lane = get_pcie_lane_support(
5127 data->pcie_lane_cap,
5128 state_entry->ucPCIELaneHigh);
5129
5130 performance_level =
5131 &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]);
5132
5133 performance_level->memory_clock =
5134 le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk);
5135
5136 performance_level->engine_clock =
5137 le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk);
5138
5139 performance_level->pcie_gen = get_pcie_gen_support(
5140 data->pcie_gen_cap,
5141 state_entry->ucPCIEGenHigh);
5142
5143 performance_level->pcie_lane = get_pcie_lane_support(
5144 data->pcie_lane_cap,
5145 state_entry->ucPCIELaneHigh);
5146
5147 return 0;
5148}
5149
5150static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
5151 unsigned long entry_index, struct pp_power_state *ps)
5152{
5153 int result;
5154 struct tonga_power_state *tonga_ps;
5155 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5156
5157 struct phm_ppt_v1_information *table_info =
5158 (struct phm_ppt_v1_information *)(hwmgr->pptable);
5159
5160 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5161 table_info->vdd_dep_on_mclk;
5162
5163 ps->hardware.magic = PhwTonga_Magic;
5164
5165 tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));
5166
5167 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, ps,
5168 tonga_get_pp_table_entry_callback_func);
5169
5170 /* This is the earliest time we have all the dependency table and the VBIOS boot state
5171 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
5172 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
5173 */
5174 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
5175 if (dep_mclk_table->entries[0].clk !=
5176 data->vbios_boot_state.mclk_bootup_value)
5177 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
5178 "does not match VBIOS boot MCLK level");
5179 if (dep_mclk_table->entries[0].vddci !=
5180 data->vbios_boot_state.vddci_bootup_value)
5181 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
5182 "does not match VBIOS boot VDDCI level");
5183 }
5184
5185 /* set DC compatible flag if this state supports DC */
5186 if (!ps->validation.disallowOnDC)
5187 tonga_ps->dc_compatible = true;
5188
5189 if (ps->classification.flags & PP_StateClassificationFlag_ACPI)
5190 data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen;
5191 else if (ps->classification.flags & PP_StateClassificationFlag_Boot) {
5192 if (data->bacos.best_match == 0xffff) {
5193 /* For V.I. use boot state as base BACO state */
5194 data->bacos.best_match = PP_StateClassificationFlag_Boot;
5195 data->bacos.performance_level = tonga_ps->performance_levels[0];
5196 }
5197 }
5198
5199 tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK;
5200 tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK;
5201
5202 if (!result) {
5203 uint32_t i;
5204
5205 switch (ps->classification.ui_label) {
5206 case PP_StateUILabel_Performance:
5207 data->use_pcie_performance_levels = true;
5208
5209 for (i = 0; i < tonga_ps->performance_level_count; i++) {
5210 if (data->pcie_gen_performance.max <
5211 tonga_ps->performance_levels[i].pcie_gen)
5212 data->pcie_gen_performance.max =
5213 tonga_ps->performance_levels[i].pcie_gen;
5214
5215 if (data->pcie_gen_performance.min >
5216 tonga_ps->performance_levels[i].pcie_gen)
5217 data->pcie_gen_performance.min =
5218 tonga_ps->performance_levels[i].pcie_gen;
5219
5220 if (data->pcie_lane_performance.max <
5221 tonga_ps->performance_levels[i].pcie_lane)
5222 data->pcie_lane_performance.max =
5223 tonga_ps->performance_levels[i].pcie_lane;
5224
5225 if (data->pcie_lane_performance.min >
5226 tonga_ps->performance_levels[i].pcie_lane)
5227 data->pcie_lane_performance.min =
5228 tonga_ps->performance_levels[i].pcie_lane;
5229 }
5230 break;
5231 case PP_StateUILabel_Battery:
5232 data->use_pcie_power_saving_levels = true;
5233
5234 for (i = 0; i < tonga_ps->performance_level_count; i++) {
5235 if (data->pcie_gen_power_saving.max <
5236 tonga_ps->performance_levels[i].pcie_gen)
5237 data->pcie_gen_power_saving.max =
5238 tonga_ps->performance_levels[i].pcie_gen;
5239
5240 if (data->pcie_gen_power_saving.min >
5241 tonga_ps->performance_levels[i].pcie_gen)
5242 data->pcie_gen_power_saving.min =
5243 tonga_ps->performance_levels[i].pcie_gen;
5244
5245 if (data->pcie_lane_power_saving.max <
5246 tonga_ps->performance_levels[i].pcie_lane)
5247 data->pcie_lane_power_saving.max =
5248 tonga_ps->performance_levels[i].pcie_lane;
5249
5250 if (data->pcie_lane_power_saving.min >
5251 tonga_ps->performance_levels[i].pcie_lane)
5252 data->pcie_lane_power_saving.min =
5253 tonga_ps->performance_levels[i].pcie_lane;
5254 }
5255 break;
5256 default:
5257 break;
5258 }
5259 }
5260 return 0;
5261}
5262
/*
 * debugfs helper: print the current sclk/mclk, average GPU load and
 * UVD/VCE power-gating status into @m.  Each clock is obtained by
 * sending a query message to the SMC and then reading the reply from
 * the SMC_MSG_ARG_0 mailbox register, so the send/read pairs below
 * must stay in this exact order.
 */
static void
tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));

	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));

	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);

	/* AverageGraphicsActivity is an 8.8 fixed-point percentage in SMC
	 * soft registers: add 0x80 to round, shift 8 to get the integer. */
	offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	activity_percent += 0x80;
	activity_percent >>= 8;

	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);

	seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");

	seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
}
5290
5291static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
5292{
5293 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5294 const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
5295 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5296 struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
5297 uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
5298 struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
5299 uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
5300 struct PP_Clocks min_clocks = {0};
5301 uint32_t i;
5302 struct cgs_display_info info = {0};
5303
5304 data->need_update_smu7_dpm_table = 0;
5305
5306 for (i = 0; i < psclk_table->count; i++) {
5307 if (sclk == psclk_table->dpm_levels[i].value)
5308 break;
5309 }
5310
5311 if (i >= psclk_table->count)
5312 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
5313 else {
5314 /* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/
5315 if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
5316 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
5317 }
5318
5319 for (i=0; i < pmclk_table->count; i++) {
5320 if (mclk == pmclk_table->dpm_levels[i].value)
5321 break;
5322 }
5323
5324 if (i >= pmclk_table->count)
5325 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
5326
5327 cgs_get_active_displays_info(hwmgr->device, &info);
5328
5329 if (data->display_timing.num_existing_displays != info.display_count)
5330 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
5331
5332 return 0;
5333}
5334
5335static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps)
5336{
5337 uint32_t i;
5338 uint32_t sclk, max_sclk = 0;
5339 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5340 struct tonga_dpm_table *pdpm_table = &data->dpm_table;
5341
5342 for (i = 0; i < hw_ps->performance_level_count; i++) {
5343 sclk = hw_ps->performance_levels[i].engine_clock;
5344 if (max_sclk < sclk)
5345 max_sclk = sclk;
5346 }
5347
5348 for (i = 0; i < pdpm_table->sclk_table.count; i++) {
5349 if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk)
5350 return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ?
5351 pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value :
5352 pdpm_table->pcie_speed_table.dpm_levels[i].value);
5353 }
5354
5355 return 0;
5356}
5357
/*
 * If the new state needs a faster PCIe link than the current one, ask
 * the platform (ACPI PSPP) for the higher speed now, falling back one
 * generation at a time when a request is denied; record any forced
 * generation in data->force_pcie_gen.  A slower target is only noted
 * (pspp_notify_required) and handled after the state change.
 */
static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
	const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);

	uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch(target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 denied: force Gen2 and, unless the link is
			 * already at Gen2, retry via the case below. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through */
		default:
			/* All requests denied: pin to whatever the link runs now. */
			data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
5397
5398static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5399{
5400 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5401
5402 if (0 == data->need_update_smu7_dpm_table)
5403 return 0;
5404
5405 if ((0 == data->sclk_dpm_key_disabled) &&
5406 (data->need_update_smu7_dpm_table &
5407 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5408 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5409 "Trying to freeze SCLK DPM when DPM is disabled",
5410 );
5411 PP_ASSERT_WITH_CODE(
5412 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5413 PPSMC_MSG_SCLKDPM_FreezeLevel),
5414 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
5415 return -1);
5416 }
5417
5418 if ((0 == data->mclk_dpm_key_disabled) &&
5419 (data->need_update_smu7_dpm_table &
5420 DPMTABLE_OD_UPDATE_MCLK)) {
5421 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5422 "Trying to freeze MCLK DPM when DPM is disabled",
5423 );
5424 PP_ASSERT_WITH_CODE(
5425 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5426 PPSMC_MSG_MCLKDPM_FreezeLevel),
5427 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
5428 return -1);
5429 }
5430
5431 return 0;
5432}
5433
/*
 * Re-populate the SCLK/MCLK DPM tables for the new power state.  For
 * overdrive updates the top level takes the requested clock and, when
 * OD6Plus is supported, the intermediate levels are rescaled
 * proportionally against the golden (default) table.  Any table that
 * changed is then re-uploaded to the SMC.
 */
static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;

	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
	uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
	struct tonga_dpm_table *pdpm_table = &data->dpm_table;

	struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		/* Overdrive: the highest level carries the requested sclk. */
		pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
		/* Need to do calculation based on the golden DPM table
		 * as the Heatmap GPU Clock axis is also based on the default values
		 */
			PP_ASSERT_WITH_CODE(
				(pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
				"Divide by 0!",
				return -1);
			dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
			/* Scale each intermediate level by the percentage the top
			 * level moved relative to the golden top level. */
			for (i = dpm_count; i > 1; i--) {
				if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
					clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;

					pdpm_table->sclk_table.dpm_levels[i].value =
							pgolden_dpm_table->sclk_table.dpm_levels[i].value +
							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;

				/* NOTE(review): this branch indexes the golden table with
				 * pdpm_table->sclk_table.count-1 while the sibling branches
				 * use pgolden_dpm_table->sclk_table.count-1; harmless only
				 * if both tables always have the same count — verify. */
				} else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
					clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;

					pdpm_table->sclk_table.dpm_levels[i].value =
							pgolden_dpm_table->sclk_table.dpm_levels[i].value -
							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
				} else
					pdpm_table->sclk_table.dpm_levels[i].value =
							pgolden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
				(pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
				"Divide by 0!",
				return -1);
			dpm_count = pdpm_table->mclk_table.count < 2? 0 : pdpm_table->mclk_table.count-2;
			for (i = dpm_count; i > 1; i--) {
				if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
					clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
							pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;

					pdpm_table->mclk_table.dpm_levels[i].value =
							pgolden_dpm_table->mclk_table.dpm_levels[i].value +
							(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;

				/* NOTE(review): same golden/current count mix-up as the
				 * SCLK branch above — verify the tables always match. */
				} else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
					clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
							pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;

					pdpm_table->mclk_table.dpm_levels[i].value =
							pgolden_dpm_table->mclk_table.dpm_levels[i].value -
							(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
				} else
					pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = tonga_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
			return result);
	}

	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/*populate MCLK dpm table to SMU7 */
		result = tonga_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
			return result);
	}

	return result;
}
5538
5539static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
5540 struct tonga_single_dpm_table * pdpm_table,
5541 uint32_t low_limit, uint32_t high_limit)
5542{
5543 uint32_t i;
5544
5545 for (i = 0; i < pdpm_table->count; i++) {
5546 if ((pdpm_table->dpm_levels[i].value < low_limit) ||
5547 (pdpm_table->dpm_levels[i].value > high_limit))
5548 pdpm_table->dpm_levels[i].enabled = false;
5549 else
5550 pdpm_table->dpm_levels[i].enabled = true;
5551 }
5552 return 0;
5553}
5554
5555static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
5556{
5557 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5558 uint32_t high_limit_count;
5559
5560 PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
5561 "power state did not have any performance level",
5562 return -1);
5563
5564 high_limit_count = (1 == hw_state->performance_level_count) ? 0: 1;
5565
5566 tonga_trim_single_dpm_states(hwmgr,
5567 &(data->dpm_table.sclk_table),
5568 hw_state->performance_levels[0].engine_clock,
5569 hw_state->performance_levels[high_limit_count].engine_clock);
5570
5571 tonga_trim_single_dpm_states(hwmgr,
5572 &(data->dpm_table.mclk_table),
5573 hw_state->performance_levels[0].memory_clock,
5574 hw_state->performance_levels[high_limit_count].memory_clock);
5575
5576 return 0;
5577}
5578
5579static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
5580{
5581 int result;
5582 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5583 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5584 const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
5585
5586 result = tonga_trim_dpm_states(hwmgr, tonga_ps);
5587 if (0 != result)
5588 return result;
5589
5590 data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
5591 data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
5592 data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
5593 if (data->uvd_enabled)
5594 data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
5595
5596 data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
5597
5598 return 0;
5599}
5600
5601int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
5602{
5603 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
5604 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable :
5605 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable);
5606}
5607
5608int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
5609{
5610 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
5611 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
5612 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
5613}
5614
5615int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
5616{
5617 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5618 uint32_t mm_boot_level_offset, mm_boot_level_value;
5619 struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
5620
5621 if (!bgate) {
5622 data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1);
5623 mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
5624 mm_boot_level_offset /= 4;
5625 mm_boot_level_offset *= 4;
5626 mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
5627 mm_boot_level_value &= 0x00FFFFFF;
5628 mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
5629 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
5630
5631 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) ||
5632 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
5633 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5634 PPSMC_MSG_UVDDPM_SetEnabledMask,
5635 (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
5636 }
5637
5638 return tonga_enable_disable_uvd_dpm(hwmgr, !bgate);
5639}
5640
5641int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
5642{
5643 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5644 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5645 const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
5646 const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
5647
5648 uint32_t mm_boot_level_offset, mm_boot_level_value;
5649 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
5650
5651 if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) {
5652 data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1);
5653
5654 mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
5655 mm_boot_level_offset /= 4;
5656 mm_boot_level_offset *= 4;
5657 mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
5658 mm_boot_level_value &= 0xFF00FFFF;
5659 mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
5660 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
5661
5662 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
5663 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5664 PPSMC_MSG_VCEDPM_SetEnabledMask,
5665 (uint32_t)(1 << data->smc_state_table.VceBootLevel));
5666
5667 tonga_enable_disable_vce_dpm(hwmgr, true);
5668 } else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0)
5669 tonga_enable_disable_vce_dpm(hwmgr, false);
5670
5671 return 0;
5672}
5673
5674static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
5675{
5676 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5677
5678 uint32_t address;
5679 int32_t result;
5680
5681 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
5682 return 0;
5683
5684
5685 memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters));
5686
5687 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
5688
5689 if(result != 0)
5690 return result;
5691
5692
5693 address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
5694
5695 return tonga_copy_bytes_to_smc(hwmgr->smumgr, address,
5696 (uint8_t *)&data->mc_reg_table.data[0],
5697 sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
5698 data->sram_end);
5699}
5700
5701static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
5702{
5703 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5704
5705 if (data->need_update_smu7_dpm_table &
5706 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
5707 return tonga_program_memory_timing_parameters(hwmgr);
5708
5709 return 0;
5710}
5711
5712static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5713{
5714 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5715
5716 if (0 == data->need_update_smu7_dpm_table)
5717 return 0;
5718
5719 if ((0 == data->sclk_dpm_key_disabled) &&
5720 (data->need_update_smu7_dpm_table &
5721 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5722
5723 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5724 "Trying to Unfreeze SCLK DPM when DPM is disabled",
5725 );
5726 PP_ASSERT_WITH_CODE(
5727 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5728 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
5729 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
5730 return -1);
5731 }
5732
5733 if ((0 == data->mclk_dpm_key_disabled) &&
5734 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
5735
5736 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5737 "Trying to Unfreeze MCLK DPM when DPM is disabled",
5738 );
5739 PP_ASSERT_WITH_CODE(
5740 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5741 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
5742 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
5743 return -1);
5744 }
5745
5746 data->need_update_smu7_dpm_table = 0;
5747
5748 return 0;
5749}
5750
5751static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
5752{
5753 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5754 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5755 const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
5756 uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps);
5757 uint8_t request;
5758
5759 if (data->pspp_notify_required ||
5760 data->pcie_performance_request) {
5761 if (target_link_speed == PP_PCIEGen3)
5762 request = PCIE_PERF_REQ_GEN3;
5763 else if (target_link_speed == PP_PCIEGen2)
5764 request = PCIE_PERF_REQ_GEN2;
5765 else
5766 request = PCIE_PERF_REQ_GEN1;
5767
5768 if(request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) {
5769 data->pcie_performance_request = false;
5770 return 0;
5771 }
5772
5773 if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
5774 if (PP_PCIEGen2 == target_link_speed)
5775 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
5776 else
5777 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
5778 }
5779 }
5780
5781 data->pcie_performance_request = false;
5782 return 0;
5783}
5784
5785static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
5786{
5787 int tmp_result, result = 0;
5788
5789 tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
5790 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);
5791
5792 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
5793 tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input);
5794 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
5795 }
5796
5797 tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr);
5798 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
5799
5800 tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
5801 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);
5802
5803 tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input);
5804 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);
5805
5806 tmp_result = tonga_update_vce_dpm(hwmgr, input);
5807 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);
5808
5809 tmp_result = tonga_update_sclk_threshold(hwmgr);
5810 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);
5811
5812 tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr);
5813 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);
5814
5815 tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr);
5816 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);
5817
5818 tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr);
5819 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);
5820
5821 tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr);
5822 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);
5823
5824 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
5825 tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input);
5826 PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
5827 }
5828
5829 return result;
5830}
5831
5832/**
5833* Set maximum target operating fan output PWM
5834*
5835* @param pHwMgr: the address of the powerplay hardware manager.
5836* @param usMaxFanPwm: max operating fan PWM in percents
5837* @return The response that came from the SMC.
5838*/
5839static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5840{
5841 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
5842
5843 if (phm_is_hw_access_blocked(hwmgr))
5844 return 0;
5845
5846 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
5847}
5848
5849int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
5850{
5851 uint32_t num_active_displays = 0;
5852 struct cgs_display_info info = {0};
5853 info.mode_info = NULL;
5854
5855 cgs_get_active_displays_info(hwmgr->device, &info);
5856
5857 num_active_displays = info.display_count;
5858
5859 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
5860 tonga_notify_smc_display_change(hwmgr, false);
5861 else
5862 tonga_notify_smc_display_change(hwmgr, true);
5863
5864 return 0;
5865}
5866
5867/**
5868* Programs the display gap
5869*
5870* @param hwmgr the address of the powerplay hardware manager.
5871* @return always OK
5872*/
5873int tonga_program_display_gap(struct pp_hwmgr *hwmgr)
5874{
5875 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5876 uint32_t num_active_displays = 0;
5877 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
5878 uint32_t display_gap2;
5879 uint32_t pre_vbi_time_in_us;
5880 uint32_t frame_time_in_us;
5881 uint32_t ref_clock;
5882 uint32_t refresh_rate = 0;
5883 struct cgs_display_info info = {0};
5884 struct cgs_mode_info mode_info;
5885
5886 info.mode_info = &mode_info;
5887
5888 cgs_get_active_displays_info(hwmgr->device, &info);
5889 num_active_displays = info.display_count;
5890
5891 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
5892 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
5893
5894 ref_clock = mode_info.ref_clock;
5895 refresh_rate = mode_info.refresh_rate;
5896
5897 if(0 == refresh_rate)
5898 refresh_rate = 60;
5899
5900 frame_time_in_us = 1000000 / refresh_rate;
5901
5902 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
5903 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
5904
5905 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
5906
5907 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64);
5908
5909 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
5910
5911 if (num_active_displays == 1)
5912 tonga_notify_smc_display_change(hwmgr, true);
5913
5914 return 0;
5915}
5916
int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	/* Only the display gap needs re-programming for now;
	 * the CAC display-configuration update is still a TODO. */
	tonga_program_display_gap(hwmgr);

	return 0;
}
5925
5926/**
5927* Set maximum target operating fan output RPM
5928*
5929* @param pHwMgr: the address of the powerplay hardware manager.
5930* @param usMaxFanRpm: max operating fan RPM value.
5931* @return The response that came from the SMC.
5932*/
5933static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5934{
5935 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm;
5936
5937 if (phm_is_hw_access_blocked(hwmgr))
5938 return 0;
5939
5940 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1);
5941}
5942
5943uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
5944{
5945 uint32_t reference_clock;
5946 uint32_t tc;
5947 uint32_t divide;
5948
5949 ATOM_FIRMWARE_INFO *fw_info;
5950 uint16_t size;
5951 uint8_t frev, crev;
5952 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5953
5954 tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
5955
5956 if (tc)
5957 return TCLK;
5958
5959 fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
5960 &size, &frev, &crev);
5961
5962 if (!fw_info)
5963 return 0;
5964
5965 reference_clock = le16_to_cpu(fw_info->usReferenceClock);
5966
5967 divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
5968
5969 if (0 != divide)
5970 return reference_clock / 4;
5971
5972 return reference_clock;
5973}
5974
5975int tonga_dpm_set_interrupt_state(void *private_data,
5976 unsigned src_id, unsigned type,
5977 int enabled)
5978{
5979 uint32_t cg_thermal_int;
5980 struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
5981
5982 if (hwmgr == NULL)
5983 return -EINVAL;
5984
5985 switch (type) {
5986 case AMD_THERMAL_IRQ_LOW_TO_HIGH:
5987 if (enabled) {
5988 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5989 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5990 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5991 } else {
5992 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5993 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5994 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5995 }
5996 break;
5997
5998 case AMD_THERMAL_IRQ_HIGH_TO_LOW:
5999 if (enabled) {
6000 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
6001 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6002 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
6003 } else {
6004 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
6005 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6006 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
6007 }
6008 break;
6009 default:
6010 break;
6011 }
6012 return 0;
6013}
6014
6015int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
6016 const void *thermal_interrupt_info)
6017{
6018 int result;
6019 const struct pp_interrupt_registration_info *info =
6020 (const struct pp_interrupt_registration_info *)thermal_interrupt_info;
6021
6022 if (info == NULL)
6023 return -EINVAL;
6024
6025 result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
6026 tonga_dpm_set_interrupt_state,
6027 info->call_back, info->context);
6028
6029 if (result)
6030 return -EINVAL;
6031
6032 result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
6033 tonga_dpm_set_interrupt_state,
6034 info->call_back, info->context);
6035
6036 if (result)
6037 return -EINVAL;
6038
6039 return 0;
6040}
6041
6042bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
6043{
6044 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6045 bool is_update_required = false;
6046 struct cgs_display_info info = {0,0,NULL};
6047
6048 cgs_get_active_displays_info(hwmgr->device, &info);
6049
6050 if (data->display_timing.num_existing_displays != info.display_count)
6051 is_update_required = true;
6052/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
6053 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
6054 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
6055 if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
6056 is_update_required = true;
6057*/
6058 return is_update_required;
6059}
6060
6061static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1,
6062 const struct tonga_performance_level *pl2)
6063{
6064 return ((pl1->memory_clock == pl2->memory_clock) &&
6065 (pl1->engine_clock == pl2->engine_clock) &&
6066 (pl1->pcie_gen == pl2->pcie_gen) &&
6067 (pl1->pcie_lane == pl2->pcie_lane));
6068}
6069
6070int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
6071{
6072 const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1);
6073 const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2);
6074 int i;
6075
6076 if (equal == NULL || psa == NULL || psb == NULL)
6077 return -EINVAL;
6078
6079 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
6080 if (psa->performance_level_count != psb->performance_level_count) {
6081 *equal = false;
6082 return 0;
6083 }
6084
6085 for (i = 0; i < psa->performance_level_count; i++) {
6086 if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
6087 /* If we have found even one performance level pair that is different the states are different. */
6088 *equal = false;
6089 return 0;
6090 }
6091 }
6092
6093 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
6094 *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
6095 *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
6096 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
6097 *equal &= (psa->acp_clk == psb->acp_clk);
6098
6099 return 0;
6100}
6101
6102static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
6103{
6104 if (mode) {
6105 /* stop auto-manage */
6106 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
6107 PHM_PlatformCaps_MicrocodeFanControl))
6108 tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
6109 tonga_fan_ctrl_set_static_mode(hwmgr, mode);
6110 } else
6111 /* restart auto-manage */
6112 tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr);
6113
6114 return 0;
6115}
6116
6117static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
6118{
6119 if (hwmgr->fan_ctrl_is_in_default_mode)
6120 return hwmgr->fan_ctrl_default_mode;
6121 else
6122 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
6123 CG_FDO_CTRL2, FDO_PWM_MODE);
6124}
6125
6126static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
6127 enum pp_clock_type type, uint32_t mask)
6128{
6129 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6130
6131 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
6132 return -EINVAL;
6133
6134 switch (type) {
6135 case PP_SCLK:
6136 if (!data->sclk_dpm_key_disabled)
6137 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
6138 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6139 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6140 break;
6141 case PP_MCLK:
6142 if (!data->mclk_dpm_key_disabled)
6143 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
6144 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6145 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6146 break;
6147 case PP_PCIE:
6148 {
6149 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
6150 uint32_t level = 0;
6151
6152 while (tmp >>= 1)
6153 level++;
6154
6155 if (!data->pcie_dpm_key_disabled)
6156 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
6157 PPSMC_MSG_PCIeDPM_ForceLevel,
6158 level);
6159 break;
6160 }
6161 default:
6162 break;
6163 }
6164
6165 return 0;
6166}
6167
6168static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
6169 enum pp_clock_type type, char *buf)
6170{
6171 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6172 struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
6173 struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
6174 struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
6175 int i, now, size = 0;
6176 uint32_t clock, pcie_speed;
6177
6178 switch (type) {
6179 case PP_SCLK:
6180 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
6181 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
6182
6183 for (i = 0; i < sclk_table->count; i++) {
6184 if (clock > sclk_table->dpm_levels[i].value)
6185 continue;
6186 break;
6187 }
6188 now = i;
6189
6190 for (i = 0; i < sclk_table->count; i++)
6191 size += sprintf(buf + size, "%d: %uMhz %s\n",
6192 i, sclk_table->dpm_levels[i].value / 100,
6193 (i == now) ? "*" : "");
6194 break;
6195 case PP_MCLK:
6196 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
6197 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
6198
6199 for (i = 0; i < mclk_table->count; i++) {
6200 if (clock > mclk_table->dpm_levels[i].value)
6201 continue;
6202 break;
6203 }
6204 now = i;
6205
6206 for (i = 0; i < mclk_table->count; i++)
6207 size += sprintf(buf + size, "%d: %uMhz %s\n",
6208 i, mclk_table->dpm_levels[i].value / 100,
6209 (i == now) ? "*" : "");
6210 break;
6211 case PP_PCIE:
6212 pcie_speed = tonga_get_current_pcie_speed(hwmgr);
6213 for (i = 0; i < pcie_table->count; i++) {
6214 if (pcie_speed != pcie_table->dpm_levels[i].value)
6215 continue;
6216 break;
6217 }
6218 now = i;
6219
6220 for (i = 0; i < pcie_table->count; i++)
6221 size += sprintf(buf + size, "%d: %s %s\n", i,
6222 (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
6223 (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
6224 (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
6225 (i == now) ? "*" : "");
6226 break;
6227 default:
6228 break;
6229 }
6230 return size;
6231}
6232
6233static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
6234{
6235 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6236 struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
6237 struct tonga_single_dpm_table *golden_sclk_table =
6238 &(data->golden_dpm_table.sclk_table);
6239 int value;
6240
6241 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6242 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6243 100 /
6244 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6245
6246 return value;
6247}
6248
6249static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
6250{
6251 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6252 struct tonga_single_dpm_table *golden_sclk_table =
6253 &(data->golden_dpm_table.sclk_table);
6254 struct pp_power_state *ps;
6255 struct tonga_power_state *tonga_ps;
6256
6257 if (value > 20)
6258 value = 20;
6259
6260 ps = hwmgr->request_ps;
6261
6262 if (ps == NULL)
6263 return -EINVAL;
6264
6265 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
6266
6267 tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
6268 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6269 value / 100 +
6270 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6271
6272 return 0;
6273}
6274
6275static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
6276{
6277 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6278 struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
6279 struct tonga_single_dpm_table *golden_mclk_table =
6280 &(data->golden_dpm_table.mclk_table);
6281 int value;
6282
6283 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6284 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6285 100 /
6286 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6287
6288 return value;
6289}
6290
6291static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
6292{
6293 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6294 struct tonga_single_dpm_table *golden_mclk_table =
6295 &(data->golden_dpm_table.mclk_table);
6296 struct pp_power_state *ps;
6297 struct tonga_power_state *tonga_ps;
6298
6299 if (value > 20)
6300 value = 20;
6301
6302 ps = hwmgr->request_ps;
6303
6304 if (ps == NULL)
6305 return -EINVAL;
6306
6307 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
6308
6309 tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
6310 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6311 value / 100 +
6312 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6313
6314 return 0;
6315}
6316
6317static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
6318 .backend_init = &tonga_hwmgr_backend_init,
6319 .backend_fini = &tonga_hwmgr_backend_fini,
6320 .asic_setup = &tonga_setup_asic_task,
6321 .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
6322 .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
6323 .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
6324 .force_dpm_level = &tonga_force_dpm_level,
6325 .power_state_set = tonga_set_power_state_tasks,
6326 .get_power_state_size = tonga_get_power_state_size,
6327 .get_mclk = tonga_dpm_get_mclk,
6328 .get_sclk = tonga_dpm_get_sclk,
6329 .patch_boot_state = tonga_dpm_patch_boot_state,
6330 .get_pp_table_entry = tonga_get_pp_table_entry,
6331 .get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
6332 .print_current_perforce_level = tonga_print_current_perforce_level,
6333 .powerdown_uvd = tonga_phm_powerdown_uvd,
6334 .powergate_uvd = tonga_phm_powergate_uvd,
6335 .powergate_vce = tonga_phm_powergate_vce,
6336 .disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
6337 .update_clock_gatings = tonga_phm_update_clock_gatings,
6338 .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
6339 .display_config_changed = tonga_display_configuration_changed_task,
6340 .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
6341 .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output,
6342 .get_temperature = tonga_thermal_get_temperature,
6343 .stop_thermal_controller = tonga_thermal_stop_thermal_controller,
6344 .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info,
6345 .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent,
6346 .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent,
6347 .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default,
6348 .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm,
6349 .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm,
6350 .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller,
6351 .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt,
6352 .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration,
6353 .check_states_equal = tonga_check_states_equal,
6354 .set_fan_control_mode = tonga_set_fan_control_mode,
6355 .get_fan_control_mode = tonga_get_fan_control_mode,
6356 .force_clock_level = tonga_force_clock_level,
6357 .print_clock_levels = tonga_print_clock_levels,
6358 .get_sclk_od = tonga_get_sclk_od,
6359 .set_sclk_od = tonga_set_sclk_od,
6360 .get_mclk_od = tonga_get_mclk_od,
6361 .set_mclk_od = tonga_set_mclk_od,
6362};
6363
6364int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
6365{
6366 hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
6367 hwmgr->pptable_func = &pptable_v1_0_funcs;
6368 pp_tonga_thermal_initialize(hwmgr);
6369 return 0;
6370}
6371
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
deleted file mode 100644
index fcad9426d3c1..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ /dev/null
@@ -1,402 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef TONGA_HWMGR_H
24#define TONGA_HWMGR_H
25
26#include "hwmgr.h"
27#include "smu72_discrete.h"
28#include "ppatomctrl.h"
29#include "ppinterrupt.h"
30#include "tonga_powertune.h"
31#include "pp_endian.h"
32
33#define TONGA_MAX_HARDWARE_POWERLEVELS 2
34#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
35
36struct tonga_performance_level {
37 uint32_t memory_clock;
38 uint32_t engine_clock;
39 uint16_t pcie_gen;
40 uint16_t pcie_lane;
41};
42
43struct _phw_tonga_bacos {
44 uint32_t best_match;
45 uint32_t baco_flags;
46 struct tonga_performance_level performance_level;
47};
48typedef struct _phw_tonga_bacos phw_tonga_bacos;
49
50struct _phw_tonga_uvd_clocks {
51 uint32_t VCLK;
52 uint32_t DCLK;
53};
54
55typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;
56
57struct _phw_tonga_vce_clocks {
58 uint32_t EVCLK;
59 uint32_t ECCLK;
60};
61
62typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
63
64struct tonga_power_state {
65 uint32_t magic;
66 phw_tonga_uvd_clocks uvd_clocks;
67 phw_tonga_vce_clocks vce_clocks;
68 uint32_t sam_clk;
69 uint32_t acp_clk;
70 uint16_t performance_level_count;
71 bool dc_compatible;
72 uint32_t sclk_threshold;
73 struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
74};
75
76struct _phw_tonga_dpm_level {
77 bool enabled;
78 uint32_t value;
79 uint32_t param1;
80};
81typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;
82
83#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
84#define MAX_REGULAR_DPM_NUMBER 8
85#define TONGA_MINIMUM_ENGINE_CLOCK 2500
86
87struct tonga_single_dpm_table {
88 uint32_t count;
89 phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
90};
91
92struct tonga_dpm_table {
93 struct tonga_single_dpm_table sclk_table;
94 struct tonga_single_dpm_table mclk_table;
95 struct tonga_single_dpm_table pcie_speed_table;
96 struct tonga_single_dpm_table vddc_table;
97 struct tonga_single_dpm_table vdd_gfx_table;
98 struct tonga_single_dpm_table vdd_ci_table;
99 struct tonga_single_dpm_table mvdd_table;
100};
101typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table;
102
103
104struct _phw_tonga_clock_regisiters {
105 uint32_t vCG_SPLL_FUNC_CNTL;
106 uint32_t vCG_SPLL_FUNC_CNTL_2;
107 uint32_t vCG_SPLL_FUNC_CNTL_3;
108 uint32_t vCG_SPLL_FUNC_CNTL_4;
109 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
110 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
111 uint32_t vDLL_CNTL;
112 uint32_t vMCLK_PWRMGT_CNTL;
113 uint32_t vMPLL_AD_FUNC_CNTL;
114 uint32_t vMPLL_DQ_FUNC_CNTL;
115 uint32_t vMPLL_FUNC_CNTL;
116 uint32_t vMPLL_FUNC_CNTL_1;
117 uint32_t vMPLL_FUNC_CNTL_2;
118 uint32_t vMPLL_SS1;
119 uint32_t vMPLL_SS2;
120};
121typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers;
122
123struct _phw_tonga_voltage_smio_registers {
124 uint32_t vs0_vid_lower_smio_cntl;
125};
126typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
127
128
129struct _phw_tonga_mc_reg_entry {
130 uint32_t mclk_max;
131 uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
132};
133typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;
134
135struct _phw_tonga_mc_reg_table {
136 uint8_t last; /* number of registers*/
137 uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
138 uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
139 phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
140 SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
141};
142typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;
143
144#define DISABLE_MC_LOADMICROCODE 1
145#define DISABLE_MC_CFGPROGRAMMING 2
146
147/*Ultra Low Voltage parameter structure */
148struct _phw_tonga_ulv_parm{
149 bool ulv_supported;
150 uint32_t ch_ulv_parameter;
151 uint32_t ulv_volt_change_delay;
152 struct tonga_performance_level ulv_power_level;
153};
154typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
155
156#define TONGA_MAX_LEAKAGE_COUNT 8
157
158struct _phw_tonga_leakage_voltage {
159 uint16_t count;
160 uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT];
161 uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
162};
163typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
164
165struct _phw_tonga_display_timing {
166 uint32_t min_clock_insr;
167 uint32_t num_existing_displays;
168};
169typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
170
171struct _phw_tonga_dpmlevel_enable_mask {
172 uint32_t uvd_dpm_enable_mask;
173 uint32_t vce_dpm_enable_mask;
174 uint32_t acp_dpm_enable_mask;
175 uint32_t samu_dpm_enable_mask;
176 uint32_t sclk_dpm_enable_mask;
177 uint32_t mclk_dpm_enable_mask;
178 uint32_t pcie_dpm_enable_mask;
179};
180typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;
181
182struct _phw_tonga_pcie_perf_range {
183 uint16_t max;
184 uint16_t min;
185};
186typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
187
188struct _phw_tonga_vbios_boot_state {
189 uint16_t mvdd_bootup_value;
190 uint16_t vddc_bootup_value;
191 uint16_t vddci_bootup_value;
192 uint16_t vddgfx_bootup_value;
193 uint32_t sclk_bootup_value;
194 uint32_t mclk_bootup_value;
195 uint16_t pcie_gen_bootup_value;
196 uint16_t pcie_lane_bootup_value;
197};
198typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;
199
200#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
201#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
202#define DPMTABLE_UPDATE_SCLK 0x00000004
203#define DPMTABLE_UPDATE_MCLK 0x00000008
204
205/* We need to review which fields are needed. */
206/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
207struct tonga_hwmgr {
208 struct tonga_dpm_table dpm_table;
209 struct tonga_dpm_table golden_dpm_table;
210
211 uint32_t voting_rights_clients0;
212 uint32_t voting_rights_clients1;
213 uint32_t voting_rights_clients2;
214 uint32_t voting_rights_clients3;
215 uint32_t voting_rights_clients4;
216 uint32_t voting_rights_clients5;
217 uint32_t voting_rights_clients6;
218 uint32_t voting_rights_clients7;
219 uint32_t static_screen_threshold_unit;
220 uint32_t static_screen_threshold;
221 uint32_t voltage_control;
222 uint32_t vdd_gfx_control;
223
224 uint32_t vddc_vddci_delta;
225 uint32_t vddc_vddgfx_delta;
226
227 struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
228 struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
229 struct pp_interrupt_registration_info smc_to_host_interrupt_info;
230 uint32_t active_auto_throttle_sources;
231
232 struct pp_interrupt_registration_info external_throttle_interrupt;
233 irq_handler_func_t external_throttle_callback;
234 void *external_throttle_context;
235
236 struct pp_interrupt_registration_info ctf_interrupt_info;
237 irq_handler_func_t ctf_callback;
238 void *ctf_context;
239
240 phw_tonga_clock_registers clock_registers;
241 phw_tonga_voltage_smio_registers voltage_smio_registers;
242
243 bool is_memory_GDDR5;
244 uint16_t acpi_vddc;
245 bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
246 uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
247 uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
248 uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
249 uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
250 uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
251 phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
252 phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
253 phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
254
255 uint32_t mvdd_control;
256 uint32_t vddc_mask_low;
257 uint32_t mvdd_mask_low;
258 uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
259 uint16_t min_vddc_in_pp_table;
260 uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
261 uint16_t min_vddci_in_pp_table;
262 uint32_t mclk_strobe_mode_threshold;
263 uint32_t mclk_stutter_mode_threshold;
264 uint32_t mclk_edc_enable_threshold;
265 uint32_t mclk_edc_wr_enable_threshold;
266 bool is_uvd_enabled;
267 bool is_xdma_enabled;
268 phw_tonga_vbios_boot_state vbios_boot_state;
269
270 bool battery_state;
271 bool is_tlu_enabled;
272 bool pcie_performance_request;
273
274 /* -------------- SMC SRAM Address of firmware header tables ----------------*/
275 uint32_t sram_end; /* The first address after the SMC SRAM. */
276 uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
277 uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
278 uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
279 uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
280 uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
281 SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
282 SMU72_Discrete_MCRegisters mc_reg_table;
283 SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */
284 /* -------------- Stuff originally coming from Evergreen --------------------*/
285 phw_tonga_mc_reg_table tonga_mc_reg_table;
286 uint32_t vdd_ci_control;
287 pp_atomctrl_voltage_table vddc_voltage_table;
288 pp_atomctrl_voltage_table vddci_voltage_table;
289 pp_atomctrl_voltage_table vddgfx_voltage_table;
290 pp_atomctrl_voltage_table mvdd_voltage_table;
291
292 uint32_t mgcg_cgtt_local2;
293 uint32_t mgcg_cgtt_local3;
294 uint32_t gpio_debug;
295 uint32_t mc_micro_code_feature;
296 uint32_t highest_mclk;
297 uint16_t acpi_vdd_ci;
298 uint8_t mvdd_high_index;
299 uint8_t mvdd_low_index;
300 bool dll_defaule_on;
301 bool performance_request_registered;
302
303
304 /* ----------------- Low Power Features ---------------------*/
305 phw_tonga_bacos bacos;
306 phw_tonga_ulv_parm ulv;
307 /* ----------------- CAC Stuff ---------------------*/
308 uint32_t cac_table_start;
309 bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
310 bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
311 bool cac_enabled;
312 /* ----------------- DPM2 Parameters ---------------------*/
313 uint32_t power_containment_features;
314 bool enable_bapm_feature;
315 bool enable_tdc_limit_feature;
316 bool enable_pkg_pwr_tracking_feature;
317 bool disable_uvd_power_tune_feature;
318 struct tonga_pt_defaults *power_tune_defaults;
319 SMU72_Discrete_PmFuses power_tune_table;
320 uint32_t dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
321 uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
322
323
324 bool enable_dte_feature;
325
326
327 /* ----------------- Phase Shedding ---------------------*/
328 bool vddc_phase_shed_control;
329 /* --------------------- DI/DT --------------------------*/
330 phw_tonga_display_timing display_timing;
331 /* --------- ReadRegistry data for memory and engine clock margins ---- */
332 uint32_t engine_clock_data;
333 uint32_t memory_clock_data;
334 /* -------- Thermal Temperature Setting --------------*/
335 phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask;
336 uint32_t need_update_smu7_dpm_table;
337 uint32_t sclk_dpm_key_disabled;
338 uint32_t mclk_dpm_key_disabled;
339 uint32_t pcie_dpm_key_disabled;
340 uint32_t min_engine_clocks; /* used to store the previous dal min sclock */
341 phw_tonga_pcie_perf_range pcie_gen_performance;
342 phw_tonga_pcie_perf_range pcie_lane_performance;
343 phw_tonga_pcie_perf_range pcie_gen_power_saving;
344 phw_tonga_pcie_perf_range pcie_lane_power_saving;
345 bool use_pcie_performance_levels;
346 bool use_pcie_power_saving_levels;
347 uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
348 uint32_t mclk_activity_target;
349 uint32_t low_sclk_interrupt_threshold;
350 uint32_t last_mclk_dpm_enable_mask;
351 bool uvd_enabled;
352 uint32_t pcc_monitor_enabled;
353
354 /* --------- Power Gating States ------------*/
355 bool uvd_power_gated; /* 1: gated, 0:not gated */
356 bool vce_power_gated; /* 1: gated, 0:not gated */
357 bool samu_power_gated; /* 1: gated, 0:not gated */
358 bool acp_power_gated; /* 1: gated, 0:not gated */
359 bool pg_acp_init;
360};
361
362typedef struct tonga_hwmgr tonga_hwmgr;
363
364#define TONGA_DPM2_NEAR_TDP_DEC 10
365#define TONGA_DPM2_ABOVE_SAFE_INC 5
366#define TONGA_DPM2_BELOW_SAFE_INC 20
367
368#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. */
369
370#define TONGA_DPM2_LTS_TRUNCATE 0
371
372#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */
373
374#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */
375#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */
376
377#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50
378
379#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
380#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12
381#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
382#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
383#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
384
385#define TONGA_VOLTAGE_CONTROL_NONE 0x0
386#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1
387#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2
388#define TONGA_VOLTAGE_CONTROL_MERGED 0x3
389
390#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */
391
392#define TONGA_UNUSED_GPIO_PIN 0x7F
393
394int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
395int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
396int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
397int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
398int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
399uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
400
401#endif
402
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
deleted file mode 100644
index 24d9a05e7997..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
+++ /dev/null
@@ -1,495 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "smumgr.h"
26#include "tonga_hwmgr.h"
27#include "tonga_powertune.h"
28#include "tonga_smumgr.h"
29#include "smu72_discrete.h"
30#include "pp_debug.h"
31#include "tonga_ppsmc.h"
32
33#define VOLTAGE_SCALE 4
34#define POWERTUNE_DEFAULT_SET_MAX 1
35
36struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
37/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
38 {1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
39 {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
40 {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
41};
42
43void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
44{
45 struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend);
46 struct phm_ppt_v1_information *table_info =
47 (struct phm_ppt_v1_information *)(hwmgr->pptable);
48 uint32_t tmp = 0;
49
50 if (table_info &&
51 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
52 table_info->cac_dtp_table->usPowerTuneDataSetID)
53 tonga_hwmgr->power_tune_defaults =
54 &tonga_power_tune_data_set_array
55 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
56 else
57 tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0];
58
59 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
60 PHM_PlatformCaps_CAC);
61 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
62 PHM_PlatformCaps_SQRamping);
63 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
64 PHM_PlatformCaps_DBRamping);
65 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
66 PHM_PlatformCaps_TDRamping);
67 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
68 PHM_PlatformCaps_TCPRamping);
69
70 tonga_hwmgr->dte_tj_offset = tmp;
71
72 if (!tmp) {
73 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
74 PHM_PlatformCaps_CAC);
75
76 tonga_hwmgr->fast_watermark_threshold = 100;
77
78 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
79 PHM_PlatformCaps_PowerContainment)) {
80 tmp = 1;
81 tonga_hwmgr->enable_dte_feature = tmp ? false : true;
82 tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
83 tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
84 }
85 }
86}
87
88
89int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
90{
91 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
92 struct tonga_pt_defaults *defaults = data->power_tune_defaults;
93 SMU72_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
94 struct phm_ppt_v1_information *table_info =
95 (struct phm_ppt_v1_information *)(hwmgr->pptable);
96 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
97 int i, j, k;
98 uint16_t *pdef1;
99 uint16_t *pdef2;
100
101
102 /* TDP number of fraction bits are changed from 8 to 7 for Fiji
103 * as requested by SMC team
104 */
105 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
106 (uint16_t)(cac_dtp_table->usTDP * 256));
107 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
108 (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
109
110 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
111 "Target Operating Temp is out of Range!",
112 );
113
114 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
115 dpm_table->GpuTjHyst = 8;
116
117 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
118
119 dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
120 pdef1 = defaults->bapmti_r;
121 pdef2 = defaults->bapmti_rc;
122
123 for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
124 for (j = 0; j < SMU72_DTE_SOURCES; j++) {
125 for (k = 0; k < SMU72_DTE_SINKS; k++) {
126 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
127 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
128 pdef1++;
129 pdef2++;
130 }
131 }
132 }
133
134 return 0;
135}
136
137static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
138{
139 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
140 const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
141
142 data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
143 data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
144 data->power_tune_table.SviLoadLineTrimVddC = 3;
145 data->power_tune_table.SviLoadLineOffsetVddC = 0;
146
147 return 0;
148}
149
150static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
151{
152 uint16_t tdc_limit;
153 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
154 struct phm_ppt_v1_information *table_info =
155 (struct phm_ppt_v1_information *)(hwmgr->pptable);
156 const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
157
158 /* TDC number of fraction bits are changed from 8 to 7
159 * for Fiji as requested by SMC team
160 */
161 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
162 data->power_tune_table.TDC_VDDC_PkgLimit =
163 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
164 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
165 defaults->tdc_vddc_throttle_release_limit_perc;
166 data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
167
168 return 0;
169}
170
171static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
172{
173 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
174 const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
175 uint32_t temp;
176
177 if (tonga_read_smc_sram_dword(hwmgr->smumgr,
178 fuse_table_offset +
179 offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
180 (uint32_t *)&temp, data->sram_end))
181 PP_ASSERT_WITH_CODE(false,
182 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
183 return -EINVAL);
184 else
185 data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
186
187 return 0;
188}
189
190static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
191{
192 int i;
193 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
194
195 /* Currently not used. Set all to zero. */
196 for (i = 0; i < 16; i++)
197 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
198
199 return 0;
200}
201
202static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
203{
204 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
205
206 if ((hwmgr->thermal_controller.advanceFanControlParameters.
207 usFanOutputSensitivity & (1 << 15)) ||
208 (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
209 hwmgr->thermal_controller.advanceFanControlParameters.
210 usFanOutputSensitivity = hwmgr->thermal_controller.
211 advanceFanControlParameters.usDefaultFanOutputSensitivity;
212
213 data->power_tune_table.FuzzyFan_PwmSetDelta =
214 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
215 advanceFanControlParameters.usFanOutputSensitivity);
216 return 0;
217}
218
219static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
220{
221 int i;
222 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
223
224 /* Currently not used. Set all to zero. */
225 for (i = 0; i < 16; i++)
226 data->power_tune_table.GnbLPML[i] = 0;
227
228 return 0;
229}
230
231static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
232{
233 return 0;
234}
235
236static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
237{
238 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
239 struct phm_ppt_v1_information *table_info =
240 (struct phm_ppt_v1_information *)(hwmgr->pptable);
241 uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
242 uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
243 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
244
245 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
246 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
247
248 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
249 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
250 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
251 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
252
253 return 0;
254}
255
256int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
257{
258 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
259 uint32_t pm_fuse_table_offset;
260
261 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
262 PHM_PlatformCaps_PowerContainment)) {
263 if (tonga_read_smc_sram_dword(hwmgr->smumgr,
264 SMU72_FIRMWARE_HEADER_LOCATION +
265 offsetof(SMU72_Firmware_Header, PmFuseTable),
266 &pm_fuse_table_offset, data->sram_end))
267 PP_ASSERT_WITH_CODE(false,
268 "Attempt to get pm_fuse_table_offset Failed!",
269 return -EINVAL);
270
271 /* DW6 */
272 if (tonga_populate_svi_load_line(hwmgr))
273 PP_ASSERT_WITH_CODE(false,
274 "Attempt to populate SviLoadLine Failed!",
275 return -EINVAL);
276 /* DW7 */
277 if (tonga_populate_tdc_limit(hwmgr))
278 PP_ASSERT_WITH_CODE(false,
279 "Attempt to populate TDCLimit Failed!", return -EINVAL);
280 /* DW8 */
281 if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
282 PP_ASSERT_WITH_CODE(false,
283 "Attempt to populate TdcWaterfallCtl Failed !",
284 return -EINVAL);
285
286 /* DW9-DW12 */
287 if (tonga_populate_temperature_scaler(hwmgr) != 0)
288 PP_ASSERT_WITH_CODE(false,
289 "Attempt to populate LPMLTemperatureScaler Failed!",
290 return -EINVAL);
291
292 /* DW13-DW14 */
293 if (tonga_populate_fuzzy_fan(hwmgr))
294 PP_ASSERT_WITH_CODE(false,
295 "Attempt to populate Fuzzy Fan Control parameters Failed!",
296 return -EINVAL);
297
298 /* DW15-DW18 */
299 if (tonga_populate_gnb_lpml(hwmgr))
300 PP_ASSERT_WITH_CODE(false,
301 "Attempt to populate GnbLPML Failed!",
302 return -EINVAL);
303
304 /* DW19 */
305 if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
306 PP_ASSERT_WITH_CODE(false,
307 "Attempt to populate GnbLPML Min and Max Vid Failed!",
308 return -EINVAL);
309
310 /* DW20 */
311 if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
312 PP_ASSERT_WITH_CODE(false,
313 "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
314 return -EINVAL);
315
316 if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
317 (uint8_t *)&data->power_tune_table,
318 sizeof(struct SMU72_Discrete_PmFuses), data->sram_end))
319 PP_ASSERT_WITH_CODE(false,
320 "Attempt to download PmFuseTable Failed!",
321 return -EINVAL);
322 }
323 return 0;
324}
325
326int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr)
327{
328 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
329 int result = 0;
330
331 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
332 PHM_PlatformCaps_CAC)) {
333 int smc_result;
334
335 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
336 (uint16_t)(PPSMC_MSG_EnableCac));
337 PP_ASSERT_WITH_CODE((smc_result == 0),
338 "Failed to enable CAC in SMC.", result = -1);
339
340 data->cac_enabled = (smc_result == 0) ? true : false;
341 }
342 return result;
343}
344
345int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr)
346{
347 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
348 int result = 0;
349
350 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
351 PHM_PlatformCaps_CAC) && data->cac_enabled) {
352 int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
353 (uint16_t)(PPSMC_MSG_DisableCac));
354 PP_ASSERT_WITH_CODE((smc_result == 0),
355 "Failed to disable CAC in SMC.", result = -1);
356
357 data->cac_enabled = false;
358 }
359 return result;
360}
361
362int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
363{
364 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
365
366 if (data->power_containment_features &
367 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
368 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
369 PPSMC_MSG_PkgPwrSetLimit, n);
370 return 0;
371}
372
373static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
374{
375 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
376 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
377}
378
379int tonga_enable_power_containment(struct pp_hwmgr *hwmgr)
380{
381 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
382 struct phm_ppt_v1_information *table_info =
383 (struct phm_ppt_v1_information *)(hwmgr->pptable);
384 int smc_result;
385 int result = 0;
386
387 data->power_containment_features = 0;
388 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
389 PHM_PlatformCaps_PowerContainment)) {
390 if (data->enable_dte_feature) {
391 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
392 (uint16_t)(PPSMC_MSG_EnableDTE));
393 PP_ASSERT_WITH_CODE((smc_result == 0),
394 "Failed to enable DTE in SMC.", result = -1;);
395 if (smc_result == 0)
396 data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
397 }
398
399 if (data->enable_tdc_limit_feature) {
400 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
401 (uint16_t)(PPSMC_MSG_TDCLimitEnable));
402 PP_ASSERT_WITH_CODE((smc_result == 0),
403 "Failed to enable TDCLimit in SMC.", result = -1;);
404 if (smc_result == 0)
405 data->power_containment_features |=
406 POWERCONTAINMENT_FEATURE_TDCLimit;
407 }
408
409 if (data->enable_pkg_pwr_tracking_feature) {
410 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
411 (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
412 PP_ASSERT_WITH_CODE((smc_result == 0),
413 "Failed to enable PkgPwrTracking in SMC.", result = -1;);
414 if (smc_result == 0) {
415 struct phm_cac_tdp_table *cac_table =
416 table_info->cac_dtp_table;
417 uint32_t default_limit =
418 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
419
420 data->power_containment_features |=
421 POWERCONTAINMENT_FEATURE_PkgPwrLimit;
422
423 if (tonga_set_power_limit(hwmgr, default_limit))
424 printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
425 }
426 }
427 }
428 return result;
429}
430
431int tonga_disable_power_containment(struct pp_hwmgr *hwmgr)
432{
433 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
434 int result = 0;
435
436 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
437 PHM_PlatformCaps_PowerContainment) &&
438 data->power_containment_features) {
439 int smc_result;
440
441 if (data->power_containment_features &
442 POWERCONTAINMENT_FEATURE_TDCLimit) {
443 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
444 (uint16_t)(PPSMC_MSG_TDCLimitDisable));
445 PP_ASSERT_WITH_CODE((smc_result == 0),
446 "Failed to disable TDCLimit in SMC.",
447 result = smc_result);
448 }
449
450 if (data->power_containment_features &
451 POWERCONTAINMENT_FEATURE_DTE) {
452 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
453 (uint16_t)(PPSMC_MSG_DisableDTE));
454 PP_ASSERT_WITH_CODE((smc_result == 0),
455 "Failed to disable DTE in SMC.",
456 result = smc_result);
457 }
458
459 if (data->power_containment_features &
460 POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
461 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
462 (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
463 PP_ASSERT_WITH_CODE((smc_result == 0),
464 "Failed to disable PkgPwrTracking in SMC.",
465 result = smc_result);
466 }
467 data->power_containment_features = 0;
468 }
469
470 return result;
471}
472
473int tonga_power_control_set_level(struct pp_hwmgr *hwmgr)
474{
475 struct phm_ppt_v1_information *table_info =
476 (struct phm_ppt_v1_information *)(hwmgr->pptable);
477 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
478 int adjust_percent, target_tdp;
479 int result = 0;
480
481 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
482 PHM_PlatformCaps_PowerContainment)) {
483 /* adjustment percentage has already been validated */
484 adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
485 hwmgr->platform_descriptor.TDPAdjustment :
486 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
487 /* SMC requested that target_tdp to be 7 bit fraction in DPM table
488 * but message to be 8 bit fraction for messages
489 */
490 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
491 result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
492 }
493
494 return result;
495}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
deleted file mode 100644
index c8bdb92d81f4..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_POWERTUNE_H
25#define TONGA_POWERTUNE_H
26
27enum _phw_tonga_ptc_config_reg_type {
28 TONGA_CONFIGREG_MMR = 0,
29 TONGA_CONFIGREG_SMC_IND,
30 TONGA_CONFIGREG_DIDT_IND,
31 TONGA_CONFIGREG_CACHE,
32
33 TONGA_CONFIGREG_MAX
34};
35typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
36
37/* PowerContainment Features */
38#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
39
40
41/* PowerContainment Features */
42#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
43#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
44#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
45
46struct tonga_pt_config_reg {
47 uint32_t Offset;
48 uint32_t Mask;
49 uint32_t Shift;
50 uint32_t Value;
51 phw_tonga_ptc_config_reg_type Type;
52};
53
54struct tonga_pt_defaults {
55 uint8_t svi_load_line_en;
56 uint8_t svi_load_line_vddC;
57 uint8_t tdc_vddc_throttle_release_limit_perc;
58 uint8_t tdc_mawt;
59 uint8_t tdc_waterfall_ctl;
60 uint8_t dte_ambient_temp_base;
61 uint32_t display_cac;
62 uint32_t bamp_temp_gradient;
63 uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
64 uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
65};
66
67
68
69void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
70int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
71int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr);
72int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr);
73int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr);
74int tonga_enable_power_containment(struct pp_hwmgr *hwmgr);
75int tonga_disable_power_containment(struct pp_hwmgr *hwmgr);
76int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
77int tonga_power_control_set_level(struct pp_hwmgr *hwmgr);
78
79#endif
80
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
deleted file mode 100644
index 47ef1ca2d78b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+++ /dev/null
@@ -1,590 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <asm/div64.h>
24#include "tonga_thermal.h"
25#include "tonga_hwmgr.h"
26#include "tonga_smumgr.h"
27#include "tonga_ppsmc.h"
28#include "smu/smu_7_1_2_d.h"
29#include "smu/smu_7_1_2_sh_mask.h"
30
31/**
32* Get Fan Speed Control Parameters.
33* @param hwmgr the address of the powerplay hardware manager.
34* @param pSpeed is the address of the structure where the result is to be placed.
35* @exception Always succeeds except if we cannot zero out the output structure.
36*/
37int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
38{
39
40 if (hwmgr->thermal_controller.fanInfo.bNoFan)
41 return 0;
42
43 fan_speed_info->supports_percent_read = true;
44 fan_speed_info->supports_percent_write = true;
45 fan_speed_info->min_percent = 0;
46 fan_speed_info->max_percent = 100;
47
48 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
49 fan_speed_info->supports_rpm_read = true;
50 fan_speed_info->supports_rpm_write = true;
51 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
52 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
53 } else {
54 fan_speed_info->min_rpm = 0;
55 fan_speed_info->max_rpm = 0;
56 }
57
58 return 0;
59}
60
61/**
62* Get Fan Speed in percent.
63* @param hwmgr the address of the powerplay hardware manager.
64* @param pSpeed is the address of the structure where the result is to be placed.
65* @exception Fails is the 100% setting appears to be 0.
66*/
67int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
68{
69 uint32_t duty100;
70 uint32_t duty;
71 uint64_t tmp64;
72
73 if (hwmgr->thermal_controller.fanInfo.bNoFan)
74 return 0;
75
76 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
77 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
78
79 if (0 == duty100)
80 return -EINVAL;
81
82
83 tmp64 = (uint64_t)duty * 100;
84 do_div(tmp64, duty100);
85 *speed = (uint32_t)tmp64;
86
87 if (*speed > 100)
88 *speed = 100;
89
90 return 0;
91}
92
93/**
94* Get Fan Speed in RPM.
95* @param hwmgr the address of the powerplay hardware manager.
96* @param speed is the address of the structure where the result is to be placed.
97* @exception Returns not supported if no fan is found or if pulses per revolution are not set
98*/
99int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
100{
101 return 0;
102}
103
104/**
105* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
106* @param hwmgr the address of the powerplay hardware manager.
107* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
108* @exception Should always succeed.
109*/
110int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
111{
112
113 if (hwmgr->fan_ctrl_is_in_default_mode) {
114 hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
115 hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
116 hwmgr->fan_ctrl_is_in_default_mode = false;
117 }
118
119 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
120 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
121
122 return 0;
123}
124
125/**
126* Reset Fan Speed Control to default mode.
127* @param hwmgr the address of the powerplay hardware manager.
128* @exception Should always succeed.
129*/
130int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
131{
132 if (!hwmgr->fan_ctrl_is_in_default_mode) {
133 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
134 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
135 hwmgr->fan_ctrl_is_in_default_mode = true;
136 }
137
138 return 0;
139}
140
141int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
142{
143 int result;
144
145 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
146 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
147 result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
148/*
149 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
150 hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
151 else
152 hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
153*/
154 } else {
155 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
156 result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
157 }
158/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
159 if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
160 result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
161 hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
162*/
163 return result;
164}
165
166
167int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
168{
169 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
170}
171
172/**
173* Set Fan Speed in percent.
174* @param hwmgr the address of the powerplay hardware manager.
175* @param speed is the percentage value (0% - 100%) to be set.
176* @exception Fails is the 100% setting appears to be 0.
177*/
178int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
179{
180 uint32_t duty100;
181 uint32_t duty;
182 uint64_t tmp64;
183
184 if (hwmgr->thermal_controller.fanInfo.bNoFan)
185 return -EINVAL;
186
187 if (speed > 100)
188 speed = 100;
189
190 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
191 tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
192
193 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
194
195 if (0 == duty100)
196 return -EINVAL;
197
198 tmp64 = (uint64_t)speed * duty100;
199 do_div(tmp64, 100);
200 duty = (uint32_t)tmp64;
201
202 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
203
204 return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
205}
206
207/**
208* Reset Fan Speed to default.
209* @param hwmgr the address of the powerplay hardware manager.
210* @exception Always succeeds.
211*/
212int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
213{
214 int result;
215
216 if (hwmgr->thermal_controller.fanInfo.bNoFan)
217 return 0;
218
219 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
220 result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
221 if (0 == result)
222 result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
223 } else
224 result = tonga_fan_ctrl_set_default_mode(hwmgr);
225
226 return result;
227}
228
229/**
230* Set Fan Speed in RPM.
231* @param hwmgr the address of the powerplay hardware manager.
232* @param speed is the percentage value (min - max) to be set.
233* @exception Fails is the speed not lie between min and max.
234*/
235int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
236{
237 return 0;
238}
239
240/**
241* Reads the remote temperature from the SIslands thermal controller.
242*
243* @param hwmgr The address of the hardware manager.
244*/
245int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
246{
247 int temp;
248
249 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
250
251/* Bit 9 means the reading is lower than the lowest usable value. */
252 if (0 != (0x200 & temp))
253 temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
254 else
255 temp = (temp & 0x1ff);
256
257 temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
258
259 return temp;
260}
261
262/**
263* Set the requested temperature range for high and low alert signals
264*
265* @param hwmgr The address of the hardware manager.
266* @param range Temperature range to be programmed for high and low alert signals
267* @exception PP_Result_BadInput if the input data is not valid.
268*/
269static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
270{
271 uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
272 uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
273
274 if (low < low_temp)
275 low = low_temp;
276 if (high > high_temp)
277 high = high_temp;
278
279 if (low > high)
280 return -EINVAL;
281
282 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
283 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
284 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
285
286 return 0;
287}
288
289/**
290* Programs thermal controller one-time setting registers
291*
292* @param hwmgr The address of the hardware manager.
293*/
294static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
295{
296 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
297 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
298 CG_TACH_CTRL, EDGE_PER_REV,
299 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
300
301 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
302
303 return 0;
304}
305
306/**
307* Enable thermal alerts on the RV770 thermal controller.
308*
309* @param hwmgr The address of the hardware manager.
310*/
311static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
312{
313 uint32_t alert;
314
315 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
316 alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
317 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
318
319 /* send message to SMU to enable internal thermal interrupts */
320 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
321}
322
323/**
324* Disable thermal alerts on the RV770 thermal controller.
325* @param hwmgr The address of the hardware manager.
326*/
327static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
328{
329 uint32_t alert;
330
331 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
332 alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
333 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
334
335 /* send message to SMU to disable internal thermal interrupts */
336 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
337}
338
339/**
340* Uninitialize the thermal controller.
341* Currently just disables alerts.
342* @param hwmgr The address of the hardware manager.
343*/
344int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
345{
346 int result = tonga_thermal_disable_alert(hwmgr);
347
348 if (hwmgr->thermal_controller.fanInfo.bNoFan)
349 tonga_fan_ctrl_set_default_mode(hwmgr);
350
351 return result;
352}
353
354/**
355* Set up the fan table to control the fan using the SMC.
356* @param hwmgr the address of the powerplay hardware manager.
357* @param pInput the pointer to input data
358* @param pOutput the pointer to output data
359* @param pStorage the pointer to temporary storage
360* @param Result the last failure code
361* @return result from set temperature range routine
362*/
363int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
364{
365 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
366 SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
367 uint32_t duty100;
368 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
369 uint16_t fdo_min, slope1, slope2;
370 uint32_t reference_clock;
371 int res;
372 uint64_t tmp64;
373
374 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
375 return 0;
376
377 if (0 == data->fan_table_start) {
378 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
379 return 0;
380 }
381
382 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
383
384 if (0 == duty100) {
385 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
386 return 0;
387 }
388
389 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
390 do_div(tmp64, 10000);
391 fdo_min = (uint16_t)tmp64;
392
393 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
394 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
395
396 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
397 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
398
399 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
400 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
401
402 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
403 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
404 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
405
406 fan_table.Slope1 = cpu_to_be16(slope1);
407 fan_table.Slope2 = cpu_to_be16(slope2);
408
409 fan_table.FdoMin = cpu_to_be16(fdo_min);
410
411 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
412
413 fan_table.HystUp = cpu_to_be16(1);
414
415 fan_table.HystSlope = cpu_to_be16(1);
416
417 fan_table.TempRespLim = cpu_to_be16(5);
418
419 reference_clock = tonga_get_xclk(hwmgr);
420
421 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
422
423 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
424
425 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
426
427 fan_table.FanControl_GL_Flag = 1;
428
429 res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
430/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
431 if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
432 res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
433 hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
434
435 if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
436 res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
437 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
438
439 if (0 != res)
440 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
441*/
442 return 0;
443}
444
445/**
446* Start the fan control on the SMC.
447* @param hwmgr the address of the powerplay hardware manager.
448* @param pInput the pointer to input data
449* @param pOutput the pointer to output data
450* @param pStorage the pointer to temporary storage
451* @param Result the last failure code
452* @return result from set temperature range routine
453*/
454int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
455{
456/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
457 * Make sure that we still think controlling the fan is OK.
458*/
459 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
460 tonga_fan_ctrl_start_smc_fan_control(hwmgr);
461 tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
462 }
463
464 return 0;
465}
466
467/**
468* Set temperature range for high and low alerts
469* @param hwmgr the address of the powerplay hardware manager.
470* @param pInput the pointer to input data
471* @param pOutput the pointer to output data
472* @param pStorage the pointer to temporary storage
473* @param Result the last failure code
474* @return result from set temperature range routine
475*/
476int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
477{
478 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
479
480 if (range == NULL)
481 return -EINVAL;
482
483 return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
484}
485
486/**
487* Programs one-time setting registers
488* @param hwmgr the address of the powerplay hardware manager.
489* @param pInput the pointer to input data
490* @param pOutput the pointer to output data
491* @param pStorage the pointer to temporary storage
492* @param Result the last failure code
493* @return result from initialize thermal controller routine
494*/
495int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
496{
497 return tonga_thermal_initialize(hwmgr);
498}
499
500/**
501* Enable high and low alerts
502* @param hwmgr the address of the powerplay hardware manager.
503* @param pInput the pointer to input data
504* @param pOutput the pointer to output data
505* @param pStorage the pointer to temporary storage
506* @param Result the last failure code
507* @return result from enable alert routine
508*/
509int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
510{
511 return tonga_thermal_enable_alert(hwmgr);
512}
513
514/**
515* Disable high and low alerts
516* @param hwmgr the address of the powerplay hardware manager.
517* @param pInput the pointer to input data
518* @param pOutput the pointer to output data
519* @param pStorage the pointer to temporary storage
520* @param Result the last failure code
521* @return result from disable alert routine
522*/
523static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
524{
525 return tonga_thermal_disable_alert(hwmgr);
526}
527
528static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
529 { NULL, tf_tonga_thermal_initialize },
530 { NULL, tf_tonga_thermal_set_temperature_range },
531 { NULL, tf_tonga_thermal_enable_alert },
532/* We should restrict performance levels to low before we halt the SMC.
533 * On the other hand we are still in boot state when we do this so it would be pointless.
534 * If this assumption changes we have to revisit this table.
535 */
536 { NULL, tf_tonga_thermal_setup_fan_table},
537 { NULL, tf_tonga_thermal_start_smc_fan_control},
538 { NULL, NULL }
539};
540
541static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
542 0,
543 PHM_MasterTableFlag_None,
544 tonga_thermal_start_thermal_controller_master_list
545};
546
547static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
548 { NULL, tf_tonga_thermal_disable_alert},
549 { NULL, tf_tonga_thermal_set_temperature_range},
550 { NULL, tf_tonga_thermal_enable_alert},
551 { NULL, NULL }
552};
553
554static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
555 0,
556 PHM_MasterTableFlag_None,
557 tonga_thermal_set_temperature_range_master_list
558};
559
560int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
561{
562 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
563 tonga_fan_ctrl_set_default_mode(hwmgr);
564 return 0;
565}
566
567/**
568* Initializes the thermal controller related functions in the Hardware Manager structure.
569* @param hwmgr The address of the hardware manager.
570* @exception Any error code from the low-level communication.
571*/
572int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
573{
574 int result;
575
576 result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
577
578 if (0 == result) {
579 result = phm_construct_table(hwmgr,
580 &tonga_thermal_start_thermal_controller_master,
581 &(hwmgr->start_thermal_controller));
582 if (0 != result)
583 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
584 }
585
586 if (0 == result)
587 hwmgr->fan_ctrl_is_in_default_mode = true;
588 return result;
589}
590
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
deleted file mode 100644
index aa335f267e25..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_THERMAL_H
25#define TONGA_THERMAL_H
26
27#include "hwmgr.h"
28
29#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1
30#define TONGA_THERMAL_LOW_ALERT_MASK 0x2
31
32#define TONGA_THERMAL_MINIMUM_TEMP_READING -256
33#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255
34
35#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0
36#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255
37
38#define FDO_PWM_MODE_STATIC 1
39#define FDO_PWM_MODE_STATIC_RPM 5
40
41
42extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
43extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
44extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
45
46extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
47extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
48extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
49extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
50extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
51extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
52extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
53extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
54extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
55extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
56extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
57extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
58extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
59
60#endif
61