aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd
diff options
context:
space:
mode:
authorRex Zhu <Rex.Zhu@amd.com>2018-03-12 07:52:23 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-03-15 10:57:50 -0400
commitb905090d2bae2e6189511714a7b88691b439c5a1 (patch)
tree5e76fdd63bf4d6d2c9ca8ccfa881bc9950ffbc9e /drivers/gpu/drm/amd
parent5b2a3d2c153eb49343091e5394201d4f7e5554ed (diff)
drm/amdgpu: Remove wrapper layer of smu ip functions
1. delete amdgpu_powerplay.c used for wrapping smu ip functions 2. delete struct pp_instance, 3. make struct hwmgr as the smu hw handle. Reviewed-by: Alex Deucher <alexander.deucher@amd.com> Reviewed-by: Evan Quan <evan.quan@amd.com> Signed-off-by: Rex Zhu <Rex.Zhu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c285
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c13
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c611
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c50
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h16
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_instance.h36
19 files changed, 336 insertions, 804 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 8522c2ea1f3e..2ca2b5154d52 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -87,8 +87,7 @@ amdgpu-y += \
87 87
88# add SMC block 88# add SMC block
89amdgpu-y += \ 89amdgpu-y += \
90 amdgpu_dpm.o \ 90 amdgpu_dpm.o
91 amdgpu_powerplay.o
92 91
93# add DCE block 92# add DCE block
94amdgpu-y += \ 93amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dd8a6661fbeb..f44a83ab2bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1393,9 +1393,7 @@ enum amd_hw_ip_block_type {
1393#define HWIP_MAX_INSTANCE 6 1393#define HWIP_MAX_INSTANCE 6
1394 1394
1395struct amd_powerplay { 1395struct amd_powerplay {
1396 struct cgs_device *cgs_device;
1397 void *pp_handle; 1396 void *pp_handle;
1398 const struct amd_ip_funcs *ip_funcs;
1399 const struct amd_pm_funcs *pp_funcs; 1397 const struct amd_pm_funcs *pp_funcs;
1400}; 1398};
1401 1399
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 632b18670098..361975cf45a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1154,7 +1154,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1154 umode_t effective_mode = attr->mode; 1154 umode_t effective_mode = attr->mode;
1155 1155
1156 /* handle non-powerplay limitations */ 1156 /* handle non-powerplay limitations */
1157 if (!adev->powerplay.cgs_device) { 1157 if (!adev->powerplay.pp_handle) {
1158 /* Skip fan attributes if fan is not present */ 1158 /* Skip fan attributes if fan is not present */
1159 if (adev->pm.no_fan && 1159 if (adev->pm.no_fan &&
1160 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 1160 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
deleted file mode 100644
index 825c9b90ebf5..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ /dev/null
@@ -1,285 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "atom.h"
26#include "amdgpu.h"
27#include "amd_shared.h"
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include "amdgpu_pm.h"
31#include <drm/amdgpu_drm.h>
32#include "amdgpu_powerplay.h"
33#include "si_dpm.h"
34#include "cik_dpm.h"
35#include "vi_dpm.h"
36
37static int amdgpu_pp_early_init(void *handle)
38{
39 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
40 struct amd_powerplay *amd_pp;
41 int ret = 0;
42
43 amd_pp = &(adev->powerplay);
44 amd_pp->pp_handle = (void *)adev;
45
46 switch (adev->asic_type) {
47 case CHIP_POLARIS11:
48 case CHIP_POLARIS10:
49 case CHIP_POLARIS12:
50 case CHIP_TONGA:
51 case CHIP_FIJI:
52 case CHIP_TOPAZ:
53 case CHIP_CARRIZO:
54 case CHIP_STONEY:
55 case CHIP_VEGA10:
56 case CHIP_RAVEN:
57 amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
58 amd_pp->ip_funcs = &pp_ip_funcs;
59 amd_pp->pp_funcs = &pp_dpm_funcs;
60 break;
 61 /* These chips don't have powerplay implementations */
62#ifdef CONFIG_DRM_AMDGPU_SI
63 case CHIP_TAHITI:
64 case CHIP_PITCAIRN:
65 case CHIP_VERDE:
66 case CHIP_OLAND:
67 case CHIP_HAINAN:
68 amd_pp->ip_funcs = &si_dpm_ip_funcs;
69 amd_pp->pp_funcs = &si_dpm_funcs;
70 break;
71#endif
72#ifdef CONFIG_DRM_AMDGPU_CIK
73 case CHIP_BONAIRE:
74 case CHIP_HAWAII:
75 if (amdgpu_dpm == -1) {
76 amd_pp->ip_funcs = &ci_dpm_ip_funcs;
77 amd_pp->pp_funcs = &ci_dpm_funcs;
78 } else {
79 amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
80 amd_pp->ip_funcs = &pp_ip_funcs;
81 amd_pp->pp_funcs = &pp_dpm_funcs;
82 }
83 break;
84 case CHIP_KABINI:
85 case CHIP_MULLINS:
86 case CHIP_KAVERI:
87 amd_pp->ip_funcs = &kv_dpm_ip_funcs;
88 amd_pp->pp_funcs = &kv_dpm_funcs;
89 break;
90#endif
91 default:
92 ret = -EINVAL;
93 break;
94 }
95
96 if (adev->powerplay.ip_funcs->early_init)
97 ret = adev->powerplay.ip_funcs->early_init(adev);
98
99 return ret;
100}
101
102
103static int amdgpu_pp_late_init(void *handle)
104{
105 int ret = 0;
106 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
107
108 if (adev->powerplay.ip_funcs->late_init)
109 ret = adev->powerplay.ip_funcs->late_init(
110 adev->powerplay.pp_handle);
111
112 return ret;
113}
114
115static int amdgpu_pp_sw_init(void *handle)
116{
117 int ret = 0;
118 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
119
120 if (adev->powerplay.ip_funcs->sw_init)
121 ret = adev->powerplay.ip_funcs->sw_init(
122 adev->powerplay.pp_handle);
123
124 return ret;
125}
126
127static int amdgpu_pp_sw_fini(void *handle)
128{
129 int ret = 0;
130 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
131
132 if (adev->powerplay.ip_funcs->sw_fini)
133 ret = adev->powerplay.ip_funcs->sw_fini(
134 adev->powerplay.pp_handle);
135 if (ret)
136 return ret;
137
138 return ret;
139}
140
141static int amdgpu_pp_hw_init(void *handle)
142{
143 int ret = 0;
144 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
145
146 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
147 amdgpu_ucode_init_bo(adev);
148
149 if (adev->powerplay.ip_funcs->hw_init)
150 ret = adev->powerplay.ip_funcs->hw_init(
151 adev->powerplay.pp_handle);
152
153 return ret;
154}
155
156static int amdgpu_pp_hw_fini(void *handle)
157{
158 int ret = 0;
159 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
160
161 if (adev->powerplay.ip_funcs->hw_fini)
162 ret = adev->powerplay.ip_funcs->hw_fini(
163 adev->powerplay.pp_handle);
164
165 return ret;
166}
167
168static void amdgpu_pp_late_fini(void *handle)
169{
170 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
171
172 if (adev->powerplay.ip_funcs->late_fini)
173 adev->powerplay.ip_funcs->late_fini(
174 adev->powerplay.pp_handle);
175
176 if (adev->powerplay.cgs_device)
177 amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
178}
179
180static int amdgpu_pp_suspend(void *handle)
181{
182 int ret = 0;
183 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
184
185 if (adev->powerplay.ip_funcs->suspend)
186 ret = adev->powerplay.ip_funcs->suspend(
187 adev->powerplay.pp_handle);
188 return ret;
189}
190
191static int amdgpu_pp_resume(void *handle)
192{
193 int ret = 0;
194 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
195
196 if (adev->powerplay.ip_funcs->resume)
197 ret = adev->powerplay.ip_funcs->resume(
198 adev->powerplay.pp_handle);
199 return ret;
200}
201
202static int amdgpu_pp_set_clockgating_state(void *handle,
203 enum amd_clockgating_state state)
204{
205 int ret = 0;
206 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
207
208 if (adev->powerplay.ip_funcs->set_clockgating_state)
209 ret = adev->powerplay.ip_funcs->set_clockgating_state(
210 adev->powerplay.pp_handle, state);
211 return ret;
212}
213
214static int amdgpu_pp_set_powergating_state(void *handle,
215 enum amd_powergating_state state)
216{
217 int ret = 0;
218 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
219
220 if (adev->powerplay.ip_funcs->set_powergating_state)
221 ret = adev->powerplay.ip_funcs->set_powergating_state(
222 adev->powerplay.pp_handle, state);
223 return ret;
224}
225
226
227static bool amdgpu_pp_is_idle(void *handle)
228{
229 bool ret = true;
230 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
231
232 if (adev->powerplay.ip_funcs->is_idle)
233 ret = adev->powerplay.ip_funcs->is_idle(
234 adev->powerplay.pp_handle);
235 return ret;
236}
237
238static int amdgpu_pp_wait_for_idle(void *handle)
239{
240 int ret = 0;
241 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
242
243 if (adev->powerplay.ip_funcs->wait_for_idle)
244 ret = adev->powerplay.ip_funcs->wait_for_idle(
245 adev->powerplay.pp_handle);
246 return ret;
247}
248
249static int amdgpu_pp_soft_reset(void *handle)
250{
251 int ret = 0;
252 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
253
254 if (adev->powerplay.ip_funcs->soft_reset)
255 ret = adev->powerplay.ip_funcs->soft_reset(
256 adev->powerplay.pp_handle);
257 return ret;
258}
259
260static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
261 .name = "amdgpu_powerplay",
262 .early_init = amdgpu_pp_early_init,
263 .late_init = amdgpu_pp_late_init,
264 .sw_init = amdgpu_pp_sw_init,
265 .sw_fini = amdgpu_pp_sw_fini,
266 .hw_init = amdgpu_pp_hw_init,
267 .hw_fini = amdgpu_pp_hw_fini,
268 .late_fini = amdgpu_pp_late_fini,
269 .suspend = amdgpu_pp_suspend,
270 .resume = amdgpu_pp_resume,
271 .is_idle = amdgpu_pp_is_idle,
272 .wait_for_idle = amdgpu_pp_wait_for_idle,
273 .soft_reset = amdgpu_pp_soft_reset,
274 .set_clockgating_state = amdgpu_pp_set_clockgating_state,
275 .set_powergating_state = amdgpu_pp_set_powergating_state,
276};
277
278const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
279{
280 .type = AMD_IP_BLOCK_TYPE_SMC,
281 .major = 1,
282 .minor = 0,
283 .rev = 0,
284 .funcs = &amdgpu_pp_ip_funcs,
285};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
deleted file mode 100644
index c0c4bfdcdb14..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __AMDGPU_POWERPLAY_H__
27#define __AMDGPU_POWERPLAY_H__
28
29#include "amd_shared.h"
30
31extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
32
33#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ddb814f7e952..98d1dd253596 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -65,6 +65,8 @@ MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
65#define VOLTAGE_VID_OFFSET_SCALE1 625 65#define VOLTAGE_VID_OFFSET_SCALE1 625
66#define VOLTAGE_VID_OFFSET_SCALE2 100 66#define VOLTAGE_VID_OFFSET_SCALE2 100
67 67
68static const struct amd_pm_funcs ci_dpm_funcs;
69
68static const struct ci_pt_defaults defaults_hawaii_xt = 70static const struct ci_pt_defaults defaults_hawaii_xt =
69{ 71{
70 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, 72 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
@@ -6241,6 +6243,7 @@ static int ci_dpm_early_init(void *handle)
6241{ 6243{
6242 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6244 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6243 6245
6246 adev->powerplay.pp_funcs = &ci_dpm_funcs;
6244 ci_dpm_set_irq_funcs(adev); 6247 ci_dpm_set_irq_funcs(adev);
6245 6248
6246 return 0; 6249 return 0;
@@ -6760,7 +6763,7 @@ static int ci_dpm_read_sensor(void *handle, int idx,
6760 } 6763 }
6761} 6764}
6762 6765
6763const struct amd_ip_funcs ci_dpm_ip_funcs = { 6766static const struct amd_ip_funcs ci_dpm_ip_funcs = {
6764 .name = "ci_dpm", 6767 .name = "ci_dpm",
6765 .early_init = ci_dpm_early_init, 6768 .early_init = ci_dpm_early_init,
6766 .late_init = ci_dpm_late_init, 6769 .late_init = ci_dpm_late_init,
@@ -6777,7 +6780,16 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
6777 .set_powergating_state = ci_dpm_set_powergating_state, 6780 .set_powergating_state = ci_dpm_set_powergating_state,
6778}; 6781};
6779 6782
6780const struct amd_pm_funcs ci_dpm_funcs = { 6783const struct amdgpu_ip_block_version ci_smu_ip_block =
6784{
6785 .type = AMD_IP_BLOCK_TYPE_SMC,
6786 .major = 7,
6787 .minor = 0,
6788 .rev = 0,
6789 .funcs = &ci_dpm_ip_funcs,
6790};
6791
6792static const struct amd_pm_funcs ci_dpm_funcs = {
6781 .pre_set_power_state = &ci_dpm_pre_set_power_state, 6793 .pre_set_power_state = &ci_dpm_pre_set_power_state,
6782 .set_power_state = &ci_dpm_set_power_state, 6794 .set_power_state = &ci_dpm_set_power_state,
6783 .post_set_power_state = &ci_dpm_post_set_power_state, 6795 .post_set_power_state = &ci_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 71b35623b32d..0df22030e713 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,7 +67,6 @@
67 67
68#include "amdgpu_dm.h" 68#include "amdgpu_dm.h"
69#include "amdgpu_amdkfd.h" 69#include "amdgpu_amdkfd.h"
70#include "amdgpu_powerplay.h"
71#include "dce_virtual.h" 70#include "dce_virtual.h"
72 71
73/* 72/*
@@ -1996,7 +1995,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
1996 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 1995 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
1997 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 1996 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
1998 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 1997 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
1999 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1998 if (amdgpu_dpm == -1)
1999 amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
2000 else
2001 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2000 if (adev->enable_virtual_display) 2002 if (adev->enable_virtual_display)
2001 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2003 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2002#if defined(CONFIG_DRM_AMD_DC) 2004#if defined(CONFIG_DRM_AMD_DC)
@@ -2014,7 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2014 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2016 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2015 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2017 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2016 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2018 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2017 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2019 if (amdgpu_dpm == -1)
2020 amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
2021 else
2022 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2018 if (adev->enable_virtual_display) 2023 if (adev->enable_virtual_display)
2019 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2024 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2020#if defined(CONFIG_DRM_AMD_DC) 2025#if defined(CONFIG_DRM_AMD_DC)
@@ -2032,7 +2037,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2032 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2037 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2033 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2038 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2034 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2039 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2035 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2040 amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
2036 if (adev->enable_virtual_display) 2041 if (adev->enable_virtual_display)
2037 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2042 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2038#if defined(CONFIG_DRM_AMD_DC) 2043#if defined(CONFIG_DRM_AMD_DC)
@@ -2051,7 +2056,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2051 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2056 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2052 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2057 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2053 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2058 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2054 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2059 amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
2055 if (adev->enable_virtual_display) 2060 if (adev->enable_virtual_display)
2056 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2061 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2057#if defined(CONFIG_DRM_AMD_DC) 2062#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index c7b4349f6319..2a086610f74d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -24,8 +24,7 @@
24#ifndef __CIK_DPM_H__ 24#ifndef __CIK_DPM_H__
25#define __CIK_DPM_H__ 25#define __CIK_DPM_H__
26 26
27extern const struct amd_ip_funcs ci_dpm_ip_funcs; 27extern const struct amdgpu_ip_block_version ci_smu_ip_block;
28extern const struct amd_ip_funcs kv_dpm_ip_funcs; 28extern const struct amdgpu_ip_block_version kv_smu_ip_block;
29extern const struct amd_pm_funcs ci_dpm_funcs; 29
30extern const struct amd_pm_funcs kv_dpm_funcs;
31#endif 30#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 8766681cfd3f..81babe026529 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,6 +42,8 @@
42#define KV_MINIMUM_ENGINE_CLOCK 800 42#define KV_MINIMUM_ENGINE_CLOCK 800
43#define SMC_RAM_END 0x40000 43#define SMC_RAM_END 0x40000
44 44
45static const struct amd_pm_funcs kv_dpm_funcs;
46
45static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 47static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
46static int kv_enable_nb_dpm(struct amdgpu_device *adev, 48static int kv_enable_nb_dpm(struct amdgpu_device *adev,
47 bool enable); 49 bool enable);
@@ -2960,6 +2962,7 @@ static int kv_dpm_early_init(void *handle)
2960{ 2962{
2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2963 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2962 2964
2965 adev->powerplay.pp_funcs = &kv_dpm_funcs;
2963 kv_dpm_set_irq_funcs(adev); 2966 kv_dpm_set_irq_funcs(adev);
2964 2967
2965 return 0; 2968 return 0;
@@ -3301,7 +3304,7 @@ static int kv_dpm_read_sensor(void *handle, int idx,
3301 } 3304 }
3302} 3305}
3303 3306
3304const struct amd_ip_funcs kv_dpm_ip_funcs = { 3307static const struct amd_ip_funcs kv_dpm_ip_funcs = {
3305 .name = "kv_dpm", 3308 .name = "kv_dpm",
3306 .early_init = kv_dpm_early_init, 3309 .early_init = kv_dpm_early_init,
3307 .late_init = kv_dpm_late_init, 3310 .late_init = kv_dpm_late_init,
@@ -3318,7 +3321,16 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
3318 .set_powergating_state = kv_dpm_set_powergating_state, 3321 .set_powergating_state = kv_dpm_set_powergating_state,
3319}; 3322};
3320 3323
3321const struct amd_pm_funcs kv_dpm_funcs = { 3324const struct amdgpu_ip_block_version kv_smu_ip_block =
3325{
3326 .type = AMD_IP_BLOCK_TYPE_SMC,
3327 .major = 1,
3328 .minor = 0,
3329 .rev = 0,
3330 .funcs = &kv_dpm_ip_funcs,
3331};
3332
3333static const struct amd_pm_funcs kv_dpm_funcs = {
3322 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3334 .pre_set_power_state = &kv_dpm_pre_set_power_state,
3323 .set_power_state = &kv_dpm_set_power_state, 3335 .set_power_state = &kv_dpm_set_power_state,
3324 .post_set_power_state = &kv_dpm_post_set_power_state, 3336 .post_set_power_state = &kv_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 6e61b56bfbfc..b154667a8fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -32,7 +32,7 @@
32#include "amdgpu_vce.h" 32#include "amdgpu_vce.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h" 34#include "amd_pcie.h"
35#include "amdgpu_powerplay.h" 35#include "si_dpm.h"
36#include "sid.h" 36#include "sid.h"
37#include "si_ih.h" 37#include "si_ih.h"
38#include "gfx_v6_0.h" 38#include "gfx_v6_0.h"
@@ -1983,7 +1983,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
1983 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1983 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
1984 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1984 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
1985 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 1985 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
1986 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1986 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
1987 if (adev->enable_virtual_display) 1987 if (adev->enable_virtual_display)
1988 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1988 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1989 else 1989 else
@@ -1997,7 +1997,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
1997 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1997 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
1998 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1998 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
1999 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 1999 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2000 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2000 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2001 if (adev->enable_virtual_display) 2001 if (adev->enable_virtual_display)
2002 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2002 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2003 else 2003 else
@@ -2011,7 +2011,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
2011 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 2011 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2012 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 2012 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2013 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 2013 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2014 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2014 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2015 if (adev->enable_virtual_display) 2015 if (adev->enable_virtual_display)
2016 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2016 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2017 amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); 2017 amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8137c02fd16a..3bfcf0d257ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -67,6 +67,8 @@ MODULE_FIRMWARE("radeon/hainan_smc.bin");
67MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 67MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
68MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 68MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
69 69
70static const struct amd_pm_funcs si_dpm_funcs;
71
70union power_info { 72union power_info {
71 struct _ATOM_POWERPLAY_INFO info; 73 struct _ATOM_POWERPLAY_INFO info;
72 struct _ATOM_POWERPLAY_INFO_V2 info_2; 74 struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -7914,6 +7916,7 @@ static int si_dpm_early_init(void *handle)
7914 7916
7915 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7917 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7916 7918
7919 adev->powerplay.pp_funcs = &si_dpm_funcs;
7917 si_dpm_set_irq_funcs(adev); 7920 si_dpm_set_irq_funcs(adev);
7918 return 0; 7921 return 0;
7919} 7922}
@@ -8014,7 +8017,7 @@ static int si_dpm_read_sensor(void *handle, int idx,
8014 } 8017 }
8015} 8018}
8016 8019
8017const struct amd_ip_funcs si_dpm_ip_funcs = { 8020static const struct amd_ip_funcs si_dpm_ip_funcs = {
8018 .name = "si_dpm", 8021 .name = "si_dpm",
8019 .early_init = si_dpm_early_init, 8022 .early_init = si_dpm_early_init,
8020 .late_init = si_dpm_late_init, 8023 .late_init = si_dpm_late_init,
@@ -8031,7 +8034,16 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
8031 .set_powergating_state = si_dpm_set_powergating_state, 8034 .set_powergating_state = si_dpm_set_powergating_state,
8032}; 8035};
8033 8036
8034const struct amd_pm_funcs si_dpm_funcs = { 8037const struct amdgpu_ip_block_version si_smu_ip_block =
8038{
8039 .type = AMD_IP_BLOCK_TYPE_SMC,
8040 .major = 6,
8041 .minor = 0,
8042 .rev = 0,
8043 .funcs = &si_dpm_ip_funcs,
8044};
8045
8046static const struct amd_pm_funcs si_dpm_funcs = {
8035 .pre_set_power_state = &si_dpm_pre_set_power_state, 8047 .pre_set_power_state = &si_dpm_pre_set_power_state,
8036 .set_power_state = &si_dpm_set_power_state, 8048 .set_power_state = &si_dpm_set_power_state,
8037 .post_set_power_state = &si_dpm_post_set_power_state, 8049 .post_set_power_state = &si_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 9fe343de3477..6b7d292b919f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -245,8 +245,7 @@ enum si_display_gap
245 SI_PM_DISPLAY_GAP_IGNORE = 3, 245 SI_PM_DISPLAY_GAP_IGNORE = 3,
246}; 246};
247 247
248extern const struct amd_ip_funcs si_dpm_ip_funcs; 248extern const struct amdgpu_ip_block_version si_smu_ip_block;
249extern const struct amd_pm_funcs si_dpm_funcs;
250 249
251struct ni_leakage_coeffients 250struct ni_leakage_coeffients
252{ 251{
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 28b0dbf85986..c6e857325b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -57,7 +57,6 @@
57#include "uvd_v7_0.h" 57#include "uvd_v7_0.h"
58#include "vce_v4_0.h" 58#include "vce_v4_0.h"
59#include "vcn_v1_0.h" 59#include "vcn_v1_0.h"
60#include "amdgpu_powerplay.h"
61#include "dce_virtual.h" 60#include "dce_virtual.h"
62#include "mxgpu_ai.h" 61#include "mxgpu_ai.h"
63 62
@@ -533,7 +532,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
533 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 532 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
534 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 533 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
535 if (!amdgpu_sriov_vf(adev)) 534 if (!amdgpu_sriov_vf(adev))
536 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 535 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
537 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 536 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
538 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 537 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
539#if defined(CONFIG_DRM_AMD_DC) 538#if defined(CONFIG_DRM_AMD_DC)
@@ -552,7 +551,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
552 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 551 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
553 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 552 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
554 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 553 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
555 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 554 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
556 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 555 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
557 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 556 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
558#if defined(CONFIG_DRM_AMD_DC) 557#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index e7cf752c3c87..e7fb165cc9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -71,7 +71,6 @@
71#include "uvd_v5_0.h" 71#include "uvd_v5_0.h"
72#include "uvd_v6_0.h" 72#include "uvd_v6_0.h"
73#include "vce_v3_0.h" 73#include "vce_v3_0.h"
74#include "amdgpu_powerplay.h"
75#if defined(CONFIG_DRM_AMD_ACP) 74#if defined(CONFIG_DRM_AMD_ACP)
76#include "amdgpu_acp.h" 75#include "amdgpu_acp.h"
77#endif 76#endif
@@ -1511,7 +1510,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1511 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1510 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1512 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block); 1511 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
1513 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block); 1512 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
1514 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1513 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1515 if (adev->enable_virtual_display) 1514 if (adev->enable_virtual_display)
1516 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1515 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1517 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); 1516 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1521,7 +1520,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1521 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1520 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1522 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block); 1521 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
1523 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1522 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1524 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1523 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1525 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1524 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1526 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1525 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1527#if defined(CONFIG_DRM_AMD_DC) 1526#if defined(CONFIG_DRM_AMD_DC)
@@ -1541,7 +1540,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1541 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1540 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1542 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1541 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1543 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1542 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1544 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1543 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1545 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1544 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1546 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1545 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1547#if defined(CONFIG_DRM_AMD_DC) 1546#if defined(CONFIG_DRM_AMD_DC)
@@ -1563,7 +1562,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1563 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1562 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1564 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); 1563 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1565 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1564 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1566 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1565 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1567 if (adev->enable_virtual_display) 1566 if (adev->enable_virtual_display)
1568 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1567 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1569#if defined(CONFIG_DRM_AMD_DC) 1568#if defined(CONFIG_DRM_AMD_DC)
@@ -1581,7 +1580,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1581 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1580 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1582 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1581 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1583 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1582 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1584 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1583 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1585 if (adev->enable_virtual_display) 1584 if (adev->enable_virtual_display)
1586 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1585 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1587#if defined(CONFIG_DRM_AMD_DC) 1586#if defined(CONFIG_DRM_AMD_DC)
@@ -1602,7 +1601,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1602 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1601 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1603 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1602 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1604 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1603 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1605 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1604 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1606 if (adev->enable_virtual_display) 1605 if (adev->enable_virtual_display)
1607 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1606 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1608#if defined(CONFIG_DRM_AMD_DC) 1607#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 15bd0f9acf73..5c840c022b52 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -24,8 +24,7 @@
24#ifndef __KGD_PP_INTERFACE_H__ 24#ifndef __KGD_PP_INTERFACE_H__
25#define __KGD_PP_INTERFACE_H__ 25#define __KGD_PP_INTERFACE_H__
26 26
27extern const struct amd_ip_funcs pp_ip_funcs; 27extern const struct amdgpu_ip_block_version pp_smu_ip_block;
28extern const struct amd_pm_funcs pp_dpm_funcs;
29 28
30struct amd_vce_state { 29struct amd_vce_state {
31 /* vce clocks */ 30 /* vce clocks */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index b989bf3542d6..a5bc52cdc40d 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -27,7 +27,6 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include "amd_shared.h" 28#include "amd_shared.h"
29#include "amd_powerplay.h" 29#include "amd_powerplay.h"
30#include "pp_instance.h"
31#include "power_state.h" 30#include "power_state.h"
32#include "amdgpu.h" 31#include "amdgpu.h"
33#include "hwmgr.h" 32#include "hwmgr.h"
@@ -37,18 +36,14 @@
37static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, 36static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
38 enum amd_pm_state_type *user_state); 37 enum amd_pm_state_type *user_state);
39 38
40static inline int pp_check(struct pp_instance *handle) 39static const struct amd_pm_funcs pp_dpm_funcs;
41{
42 if (handle == NULL)
43 return -EINVAL;
44 40
45 if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL) 41static inline int pp_check(struct pp_hwmgr *hwmgr)
42{
43 if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
46 return -EINVAL; 44 return -EINVAL;
47 45
48 if (handle->pm_en == 0) 46 if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
49 return PP_DPM_DISABLED;
50
51 if (handle->hwmgr->hwmgr_func == NULL)
52 return PP_DPM_DISABLED; 47 return PP_DPM_DISABLED;
53 48
54 return 0; 49 return 0;
@@ -56,54 +51,52 @@ static inline int pp_check(struct pp_instance *handle)
56 51
57static int amd_powerplay_create(struct amdgpu_device *adev) 52static int amd_powerplay_create(struct amdgpu_device *adev)
58{ 53{
59 struct pp_instance *instance; 54 struct pp_hwmgr *hwmgr;
60 55
61 if (adev == NULL) 56 if (adev == NULL)
62 return -EINVAL; 57 return -EINVAL;
63 58
64 instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL); 59 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
65 if (instance == NULL) 60 if (hwmgr == NULL)
66 return -ENOMEM; 61 return -ENOMEM;
67 62
68 instance->parent = adev; 63 hwmgr->adev = adev;
69 instance->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false; 64 hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
70 instance->device = adev->powerplay.cgs_device; 65 hwmgr->device = amdgpu_cgs_create_device(adev);
71 mutex_init(&instance->pp_lock); 66 mutex_init(&hwmgr->smu_lock);
72 adev->powerplay.pp_handle = instance; 67 hwmgr->chip_family = adev->family;
73 68 hwmgr->chip_id = adev->asic_type;
69 hwmgr->feature_mask = amdgpu_pp_feature_mask;
70 adev->powerplay.pp_handle = hwmgr;
71 adev->powerplay.pp_funcs = &pp_dpm_funcs;
74 return 0; 72 return 0;
75} 73}
76 74
77 75
78static int amd_powerplay_destroy(void *handle) 76static int amd_powerplay_destroy(struct amdgpu_device *adev)
79{ 77{
80 struct pp_instance *instance = (struct pp_instance *)handle; 78 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
81 79
82 kfree(instance->hwmgr->hardcode_pp_table); 80 kfree(hwmgr->hardcode_pp_table);
83 instance->hwmgr->hardcode_pp_table = NULL; 81 hwmgr->hardcode_pp_table = NULL;
84 82
85 kfree(instance->hwmgr); 83 kfree(hwmgr);
86 instance->hwmgr = NULL; 84 hwmgr = NULL;
87 85
88 kfree(instance);
89 instance = NULL;
90 return 0; 86 return 0;
91} 87}
92 88
93static int pp_early_init(void *handle) 89static int pp_early_init(void *handle)
94{ 90{
95 int ret; 91 int ret;
96 struct pp_instance *pp_handle = NULL; 92 struct amdgpu_device *adev = handle;
97 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
98 93
99 ret = amd_powerplay_create(adev); 94 ret = amd_powerplay_create(adev);
100 95
101 if (ret != 0) 96 if (ret != 0)
102 return ret; 97 return ret;
103 98
104 pp_handle = adev->powerplay.pp_handle; 99 ret = hwmgr_early_init(adev->powerplay.pp_handle);
105
106 ret = hwmgr_early_init(pp_handle);
107 if (ret) 100 if (ret)
108 return -EINVAL; 101 return -EINVAL;
109 102
@@ -112,15 +105,13 @@ static int pp_early_init(void *handle)
112 105
113static int pp_sw_init(void *handle) 106static int pp_sw_init(void *handle)
114{ 107{
115 struct pp_hwmgr *hwmgr; 108 struct amdgpu_device *adev = handle;
109 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
116 int ret = 0; 110 int ret = 0;
117 struct pp_instance *pp_handle = (struct pp_instance *)handle;
118 111
119 ret = pp_check(pp_handle); 112 ret = pp_check(hwmgr);
120 113
121 if (ret >= 0) { 114 if (ret >= 0) {
122 hwmgr = pp_handle->hwmgr;
123
124 if (hwmgr->smumgr_funcs->smu_init == NULL) 115 if (hwmgr->smumgr_funcs->smu_init == NULL)
125 return -EINVAL; 116 return -EINVAL;
126 117
@@ -128,55 +119,53 @@ static int pp_sw_init(void *handle)
128 119
129 pr_debug("amdgpu: powerplay sw initialized\n"); 120 pr_debug("amdgpu: powerplay sw initialized\n");
130 } 121 }
122
131 return ret; 123 return ret;
132} 124}
133 125
134static int pp_sw_fini(void *handle) 126static int pp_sw_fini(void *handle)
135{ 127{
136 struct pp_hwmgr *hwmgr; 128 struct amdgpu_device *adev = handle;
129 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
137 int ret = 0; 130 int ret = 0;
138 struct pp_instance *pp_handle = (struct pp_instance *)handle;
139 131
140 ret = pp_check(pp_handle); 132 ret = pp_check(hwmgr);
141 if (ret >= 0) { 133 if (ret >= 0) {
142 hwmgr = pp_handle->hwmgr; 134 if (hwmgr->smumgr_funcs->smu_fini != NULL)
143 135 hwmgr->smumgr_funcs->smu_fini(hwmgr);
144 if (hwmgr->smumgr_funcs->smu_fini == NULL)
145 return -EINVAL;
146
147 ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
148 } 136 }
149 return ret; 137 return 0;
150} 138}
151 139
152static int pp_hw_init(void *handle) 140static int pp_hw_init(void *handle)
153{ 141{
154 int ret = 0; 142 int ret = 0;
155 struct pp_instance *pp_handle = (struct pp_instance *)handle; 143 struct amdgpu_device *adev = handle;
156 struct pp_hwmgr *hwmgr; 144 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
157 145
158 ret = pp_check(pp_handle); 146 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
147 amdgpu_ucode_init_bo(adev);
159 148
160 if (ret >= 0) { 149 ret = pp_check(hwmgr);
161 hwmgr = pp_handle->hwmgr;
162 150
151 if (ret >= 0) {
163 if (hwmgr->smumgr_funcs->start_smu == NULL) 152 if (hwmgr->smumgr_funcs->start_smu == NULL)
164 return -EINVAL; 153 return -EINVAL;
165 154
166 if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 155 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
167 pr_err("smc start failed\n"); 156 pr_err("smc start failed\n");
168 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 157 hwmgr->smumgr_funcs->smu_fini(hwmgr);
169 return -EINVAL; 158 return -EINVAL;
170 } 159 }
171 if (ret == PP_DPM_DISABLED) 160 if (ret == PP_DPM_DISABLED)
172 goto exit; 161 goto exit;
173 ret = hwmgr_hw_init(pp_handle); 162 ret = hwmgr_hw_init(hwmgr);
174 if (ret) 163 if (ret)
175 goto exit; 164 goto exit;
176 } 165 }
177 return ret; 166 return ret;
178exit: 167exit:
179 pp_handle->pm_en = 0; 168 hwmgr->pm_en = 0;
180 cgs_notify_dpm_enabled(hwmgr->device, false); 169 cgs_notify_dpm_enabled(hwmgr->device, false);
181 return 0; 170 return 0;
182 171
@@ -184,24 +173,27 @@ exit:
184 173
185static int pp_hw_fini(void *handle) 174static int pp_hw_fini(void *handle)
186{ 175{
187 struct pp_instance *pp_handle = (struct pp_instance *)handle; 176 struct amdgpu_device *adev = handle;
177 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
188 int ret = 0; 178 int ret = 0;
189 179
190 ret = pp_check(pp_handle); 180 ret = pp_check(hwmgr);
191 if (ret == 0) 181 if (ret == 0)
192 hwmgr_hw_fini(pp_handle); 182 hwmgr_hw_fini(hwmgr);
193 183
194 return 0; 184 return 0;
195} 185}
196 186
197static int pp_late_init(void *handle) 187static int pp_late_init(void *handle)
198{ 188{
199 struct pp_instance *pp_handle = (struct pp_instance *)handle; 189 struct amdgpu_device *adev = handle;
190 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
200 int ret = 0; 191 int ret = 0;
201 192
202 ret = pp_check(pp_handle); 193 ret = pp_check(hwmgr);
194
203 if (ret == 0) 195 if (ret == 0)
204 pp_dpm_dispatch_tasks(pp_handle, 196 pp_dpm_dispatch_tasks(hwmgr,
205 AMD_PP_TASK_COMPLETE_INIT, NULL); 197 AMD_PP_TASK_COMPLETE_INIT, NULL);
206 198
207 return 0; 199 return 0;
@@ -231,17 +223,15 @@ static int pp_sw_reset(void *handle)
231static int pp_set_powergating_state(void *handle, 223static int pp_set_powergating_state(void *handle,
232 enum amd_powergating_state state) 224 enum amd_powergating_state state)
233{ 225{
234 struct pp_hwmgr *hwmgr; 226 struct amdgpu_device *adev = handle;
235 struct pp_instance *pp_handle = (struct pp_instance *)handle; 227 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
236 int ret = 0; 228 int ret = 0;
237 229
238 ret = pp_check(pp_handle); 230 ret = pp_check(hwmgr);
239 231
240 if (ret) 232 if (ret)
241 return ret; 233 return ret;
242 234
243 hwmgr = pp_handle->hwmgr;
244
245 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { 235 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
246 pr_info("%s was not implemented.\n", __func__); 236 pr_info("%s was not implemented.\n", __func__);
247 return 0; 237 return 0;
@@ -254,44 +244,43 @@ static int pp_set_powergating_state(void *handle,
254 244
255static int pp_suspend(void *handle) 245static int pp_suspend(void *handle)
256{ 246{
257 struct pp_instance *pp_handle = (struct pp_instance *)handle; 247 struct amdgpu_device *adev = handle;
248 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
258 int ret = 0; 249 int ret = 0;
259 250
260 ret = pp_check(pp_handle); 251 ret = pp_check(hwmgr);
261 if (ret == 0) 252 if (ret == 0)
262 hwmgr_hw_suspend(pp_handle); 253 hwmgr_hw_suspend(hwmgr);
263 return 0; 254 return 0;
264} 255}
265 256
266static int pp_resume(void *handle) 257static int pp_resume(void *handle)
267{ 258{
268 struct pp_hwmgr *hwmgr; 259 struct amdgpu_device *adev = handle;
260 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
269 int ret; 261 int ret;
270 struct pp_instance *pp_handle = (struct pp_instance *)handle;
271 262
272 ret = pp_check(pp_handle); 263 ret = pp_check(hwmgr);
273 264
274 if (ret < 0) 265 if (ret < 0)
275 return ret; 266 return ret;
276 267
277 hwmgr = pp_handle->hwmgr;
278
279 if (hwmgr->smumgr_funcs->start_smu == NULL) 268 if (hwmgr->smumgr_funcs->start_smu == NULL)
280 return -EINVAL; 269 return -EINVAL;
281 270
282 if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 271 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
283 pr_err("smc start failed\n"); 272 pr_err("smc start failed\n");
284 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 273 hwmgr->smumgr_funcs->smu_fini(hwmgr);
285 return -EINVAL; 274 return -EINVAL;
286 } 275 }
287 276
288 if (ret == PP_DPM_DISABLED) 277 if (ret == PP_DPM_DISABLED)
289 return 0; 278 return 0;
290 279
291 return hwmgr_hw_resume(pp_handle); 280 return hwmgr_hw_resume(hwmgr);
292} 281}
293 282
294const struct amd_ip_funcs pp_ip_funcs = { 283static const struct amd_ip_funcs pp_ip_funcs = {
295 .name = "powerplay", 284 .name = "powerplay",
296 .early_init = pp_early_init, 285 .early_init = pp_early_init,
297 .late_init = pp_late_init, 286 .late_init = pp_late_init,
@@ -309,6 +298,15 @@ const struct amd_ip_funcs pp_ip_funcs = {
309 .set_powergating_state = pp_set_powergating_state, 298 .set_powergating_state = pp_set_powergating_state,
310}; 299};
311 300
301const struct amdgpu_ip_block_version pp_smu_ip_block =
302{
303 .type = AMD_IP_BLOCK_TYPE_SMC,
304 .major = 1,
305 .minor = 0,
306 .rev = 0,
307 .funcs = &pp_ip_funcs,
308};
309
312static int pp_dpm_load_fw(void *handle) 310static int pp_dpm_load_fw(void *handle)
313{ 311{
314 return 0; 312 return 0;
@@ -321,17 +319,14 @@ static int pp_dpm_fw_loading_complete(void *handle)
321 319
322static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) 320static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
323{ 321{
324 struct pp_hwmgr *hwmgr; 322 struct pp_hwmgr *hwmgr = handle;
325 struct pp_instance *pp_handle = (struct pp_instance *)handle;
326 int ret = 0; 323 int ret = 0;
327 324
328 ret = pp_check(pp_handle); 325 ret = pp_check(hwmgr);
329 326
330 if (ret) 327 if (ret)
331 return ret; 328 return ret;
332 329
333 hwmgr = pp_handle->hwmgr;
334
335 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { 330 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
336 pr_info("%s was not implemented.\n", __func__); 331 pr_info("%s was not implemented.\n", __func__);
337 return 0; 332 return 0;
@@ -379,25 +374,22 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
379static int pp_dpm_force_performance_level(void *handle, 374static int pp_dpm_force_performance_level(void *handle,
380 enum amd_dpm_forced_level level) 375 enum amd_dpm_forced_level level)
381{ 376{
382 struct pp_hwmgr *hwmgr; 377 struct pp_hwmgr *hwmgr = handle;
383 struct pp_instance *pp_handle = (struct pp_instance *)handle;
384 int ret = 0; 378 int ret = 0;
385 379
386 ret = pp_check(pp_handle); 380 ret = pp_check(hwmgr);
387 381
388 if (ret) 382 if (ret)
389 return ret; 383 return ret;
390 384
391 hwmgr = pp_handle->hwmgr;
392
393 if (level == hwmgr->dpm_level) 385 if (level == hwmgr->dpm_level)
394 return 0; 386 return 0;
395 387
396 mutex_lock(&pp_handle->pp_lock); 388 mutex_lock(&hwmgr->smu_lock);
397 pp_dpm_en_umd_pstate(hwmgr, &level); 389 pp_dpm_en_umd_pstate(hwmgr, &level);
398 hwmgr->request_dpm_level = level; 390 hwmgr->request_dpm_level = level;
399 hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 391 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
400 mutex_unlock(&pp_handle->pp_lock); 392 mutex_unlock(&hwmgr->smu_lock);
401 393
402 return 0; 394 return 0;
403} 395}
@@ -405,152 +397,135 @@ static int pp_dpm_force_performance_level(void *handle,
405static enum amd_dpm_forced_level pp_dpm_get_performance_level( 397static enum amd_dpm_forced_level pp_dpm_get_performance_level(
406 void *handle) 398 void *handle)
407{ 399{
408 struct pp_hwmgr *hwmgr; 400 struct pp_hwmgr *hwmgr = handle;
409 struct pp_instance *pp_handle = (struct pp_instance *)handle;
410 int ret = 0; 401 int ret = 0;
411 enum amd_dpm_forced_level level; 402 enum amd_dpm_forced_level level;
412 403
413 ret = pp_check(pp_handle); 404 ret = pp_check(hwmgr);
414 405
415 if (ret) 406 if (ret)
416 return ret; 407 return ret;
417 408
418 hwmgr = pp_handle->hwmgr; 409 mutex_lock(&hwmgr->smu_lock);
419 mutex_lock(&pp_handle->pp_lock);
420 level = hwmgr->dpm_level; 410 level = hwmgr->dpm_level;
421 mutex_unlock(&pp_handle->pp_lock); 411 mutex_unlock(&hwmgr->smu_lock);
422 return level; 412 return level;
423} 413}
424 414
425static uint32_t pp_dpm_get_sclk(void *handle, bool low) 415static uint32_t pp_dpm_get_sclk(void *handle, bool low)
426{ 416{
427 struct pp_hwmgr *hwmgr; 417 struct pp_hwmgr *hwmgr = handle;
428 struct pp_instance *pp_handle = (struct pp_instance *)handle;
429 int ret = 0; 418 int ret = 0;
430 uint32_t clk = 0; 419 uint32_t clk = 0;
431 420
432 ret = pp_check(pp_handle); 421 ret = pp_check(hwmgr);
433 422
434 if (ret) 423 if (ret)
435 return ret; 424 return ret;
436 425
437 hwmgr = pp_handle->hwmgr;
438
439 if (hwmgr->hwmgr_func->get_sclk == NULL) { 426 if (hwmgr->hwmgr_func->get_sclk == NULL) {
440 pr_info("%s was not implemented.\n", __func__); 427 pr_info("%s was not implemented.\n", __func__);
441 return 0; 428 return 0;
442 } 429 }
443 mutex_lock(&pp_handle->pp_lock); 430 mutex_lock(&hwmgr->smu_lock);
444 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low); 431 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
445 mutex_unlock(&pp_handle->pp_lock); 432 mutex_unlock(&hwmgr->smu_lock);
446 return clk; 433 return clk;
447} 434}
448 435
449static uint32_t pp_dpm_get_mclk(void *handle, bool low) 436static uint32_t pp_dpm_get_mclk(void *handle, bool low)
450{ 437{
451 struct pp_hwmgr *hwmgr; 438 struct pp_hwmgr *hwmgr = handle;
452 struct pp_instance *pp_handle = (struct pp_instance *)handle;
453 int ret = 0; 439 int ret = 0;
454 uint32_t clk = 0; 440 uint32_t clk = 0;
455 441
456 ret = pp_check(pp_handle); 442 ret = pp_check(hwmgr);
457 443
458 if (ret) 444 if (ret)
459 return ret; 445 return ret;
460 446
461 hwmgr = pp_handle->hwmgr;
462
463 if (hwmgr->hwmgr_func->get_mclk == NULL) { 447 if (hwmgr->hwmgr_func->get_mclk == NULL) {
464 pr_info("%s was not implemented.\n", __func__); 448 pr_info("%s was not implemented.\n", __func__);
465 return 0; 449 return 0;
466 } 450 }
467 mutex_lock(&pp_handle->pp_lock); 451 mutex_lock(&hwmgr->smu_lock);
468 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low); 452 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
469 mutex_unlock(&pp_handle->pp_lock); 453 mutex_unlock(&hwmgr->smu_lock);
470 return clk; 454 return clk;
471} 455}
472 456
473static void pp_dpm_powergate_vce(void *handle, bool gate) 457static void pp_dpm_powergate_vce(void *handle, bool gate)
474{ 458{
475 struct pp_hwmgr *hwmgr; 459 struct pp_hwmgr *hwmgr = handle;
476 struct pp_instance *pp_handle = (struct pp_instance *)handle;
477 int ret = 0; 460 int ret = 0;
478 461
479 ret = pp_check(pp_handle); 462 ret = pp_check(hwmgr);
480 463
481 if (ret) 464 if (ret)
482 return; 465 return;
483 466
484 hwmgr = pp_handle->hwmgr;
485
486 if (hwmgr->hwmgr_func->powergate_vce == NULL) { 467 if (hwmgr->hwmgr_func->powergate_vce == NULL) {
487 pr_info("%s was not implemented.\n", __func__); 468 pr_info("%s was not implemented.\n", __func__);
488 return; 469 return;
489 } 470 }
490 mutex_lock(&pp_handle->pp_lock); 471 mutex_lock(&hwmgr->smu_lock);
491 hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); 472 hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
492 mutex_unlock(&pp_handle->pp_lock); 473 mutex_unlock(&hwmgr->smu_lock);
493} 474}
494 475
495static void pp_dpm_powergate_uvd(void *handle, bool gate) 476static void pp_dpm_powergate_uvd(void *handle, bool gate)
496{ 477{
497 struct pp_hwmgr *hwmgr; 478 struct pp_hwmgr *hwmgr = handle;
498 struct pp_instance *pp_handle = (struct pp_instance *)handle;
499 int ret = 0; 479 int ret = 0;
500 480
501 ret = pp_check(pp_handle); 481 ret = pp_check(hwmgr);
502 482
503 if (ret) 483 if (ret)
504 return; 484 return;
505 485
506 hwmgr = pp_handle->hwmgr;
507
508 if (hwmgr->hwmgr_func->powergate_uvd == NULL) { 486 if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
509 pr_info("%s was not implemented.\n", __func__); 487 pr_info("%s was not implemented.\n", __func__);
510 return; 488 return;
511 } 489 }
512 mutex_lock(&pp_handle->pp_lock); 490 mutex_lock(&hwmgr->smu_lock);
513 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); 491 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
514 mutex_unlock(&pp_handle->pp_lock); 492 mutex_unlock(&hwmgr->smu_lock);
515} 493}
516 494
517static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, 495static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
518 enum amd_pm_state_type *user_state) 496 enum amd_pm_state_type *user_state)
519{ 497{
520 int ret = 0; 498 int ret = 0;
521 struct pp_instance *pp_handle = (struct pp_instance *)handle; 499 struct pp_hwmgr *hwmgr = handle;
522 500
523 ret = pp_check(pp_handle); 501 ret = pp_check(hwmgr);
524 502
525 if (ret) 503 if (ret)
526 return ret; 504 return ret;
527 505
528 mutex_lock(&pp_handle->pp_lock); 506 mutex_lock(&hwmgr->smu_lock);
529 ret = hwmgr_handle_task(pp_handle, task_id, user_state); 507 ret = hwmgr_handle_task(hwmgr, task_id, user_state);
530 mutex_unlock(&pp_handle->pp_lock); 508 mutex_unlock(&hwmgr->smu_lock);
531 509
532 return ret; 510 return ret;
533} 511}
534 512
535static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) 513static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
536{ 514{
537 struct pp_hwmgr *hwmgr; 515 struct pp_hwmgr *hwmgr = handle;
538 struct pp_power_state *state; 516 struct pp_power_state *state;
539 struct pp_instance *pp_handle = (struct pp_instance *)handle;
540 int ret = 0; 517 int ret = 0;
541 enum amd_pm_state_type pm_type; 518 enum amd_pm_state_type pm_type;
542 519
543 ret = pp_check(pp_handle); 520 ret = pp_check(hwmgr);
544 521
545 if (ret) 522 if (ret)
546 return ret; 523 return ret;
547 524
548 hwmgr = pp_handle->hwmgr;
549
550 if (hwmgr->current_ps == NULL) 525 if (hwmgr->current_ps == NULL)
551 return -EINVAL; 526 return -EINVAL;
552 527
553 mutex_lock(&pp_handle->pp_lock); 528 mutex_lock(&hwmgr->smu_lock);
554 529
555 state = hwmgr->current_ps; 530 state = hwmgr->current_ps;
556 531
@@ -571,147 +546,129 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
571 pm_type = POWER_STATE_TYPE_DEFAULT; 546 pm_type = POWER_STATE_TYPE_DEFAULT;
572 break; 547 break;
573 } 548 }
574 mutex_unlock(&pp_handle->pp_lock); 549 mutex_unlock(&hwmgr->smu_lock);
575 550
576 return pm_type; 551 return pm_type;
577} 552}
578 553
579static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) 554static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
580{ 555{
581 struct pp_hwmgr *hwmgr; 556 struct pp_hwmgr *hwmgr = handle;
582 struct pp_instance *pp_handle = (struct pp_instance *)handle;
583 int ret = 0; 557 int ret = 0;
584 558
585 ret = pp_check(pp_handle); 559 ret = pp_check(hwmgr);
586 560
587 if (ret) 561 if (ret)
588 return; 562 return;
589 563
590 hwmgr = pp_handle->hwmgr;
591
592 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { 564 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
593 pr_info("%s was not implemented.\n", __func__); 565 pr_info("%s was not implemented.\n", __func__);
594 return; 566 return;
595 } 567 }
596 mutex_lock(&pp_handle->pp_lock); 568 mutex_lock(&hwmgr->smu_lock);
597 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); 569 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
598 mutex_unlock(&pp_handle->pp_lock); 570 mutex_unlock(&hwmgr->smu_lock);
599} 571}
600 572
601static uint32_t pp_dpm_get_fan_control_mode(void *handle) 573static uint32_t pp_dpm_get_fan_control_mode(void *handle)
602{ 574{
603 struct pp_hwmgr *hwmgr; 575 struct pp_hwmgr *hwmgr = handle;
604 struct pp_instance *pp_handle = (struct pp_instance *)handle;
605 int ret = 0; 576 int ret = 0;
606 uint32_t mode = 0; 577 uint32_t mode = 0;
607 578
608 ret = pp_check(pp_handle); 579 ret = pp_check(hwmgr);
609 580
610 if (ret) 581 if (ret)
611 return ret; 582 return ret;
612 583
613 hwmgr = pp_handle->hwmgr;
614
615 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { 584 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
616 pr_info("%s was not implemented.\n", __func__); 585 pr_info("%s was not implemented.\n", __func__);
617 return 0; 586 return 0;
618 } 587 }
619 mutex_lock(&pp_handle->pp_lock); 588 mutex_lock(&hwmgr->smu_lock);
620 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); 589 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
621 mutex_unlock(&pp_handle->pp_lock); 590 mutex_unlock(&hwmgr->smu_lock);
622 return mode; 591 return mode;
623} 592}
624 593
625static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) 594static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
626{ 595{
627 struct pp_hwmgr *hwmgr; 596 struct pp_hwmgr *hwmgr = handle;
628 struct pp_instance *pp_handle = (struct pp_instance *)handle;
629 int ret = 0; 597 int ret = 0;
630 598
631 ret = pp_check(pp_handle); 599 ret = pp_check(hwmgr);
632 600
633 if (ret) 601 if (ret)
634 return ret; 602 return ret;
635 603
636 hwmgr = pp_handle->hwmgr;
637
638 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { 604 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
639 pr_info("%s was not implemented.\n", __func__); 605 pr_info("%s was not implemented.\n", __func__);
640 return 0; 606 return 0;
641 } 607 }
642 mutex_lock(&pp_handle->pp_lock); 608 mutex_lock(&hwmgr->smu_lock);
643 ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent); 609 ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
644 mutex_unlock(&pp_handle->pp_lock); 610 mutex_unlock(&hwmgr->smu_lock);
645 return ret; 611 return ret;
646} 612}
647 613
648static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) 614static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
649{ 615{
650 struct pp_hwmgr *hwmgr; 616 struct pp_hwmgr *hwmgr = handle;
651 struct pp_instance *pp_handle = (struct pp_instance *)handle;
652 int ret = 0; 617 int ret = 0;
653 618
654 ret = pp_check(pp_handle); 619 ret = pp_check(hwmgr);
655 620
656 if (ret) 621 if (ret)
657 return ret; 622 return ret;
658 623
659 hwmgr = pp_handle->hwmgr;
660
661 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { 624 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
662 pr_info("%s was not implemented.\n", __func__); 625 pr_info("%s was not implemented.\n", __func__);
663 return 0; 626 return 0;
664 } 627 }
665 628
666 mutex_lock(&pp_handle->pp_lock); 629 mutex_lock(&hwmgr->smu_lock);
667 ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed); 630 ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
668 mutex_unlock(&pp_handle->pp_lock); 631 mutex_unlock(&hwmgr->smu_lock);
669 return ret; 632 return ret;
670} 633}
671 634
672static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) 635static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
673{ 636{
674 struct pp_hwmgr *hwmgr; 637 struct pp_hwmgr *hwmgr = handle;
675 struct pp_instance *pp_handle = (struct pp_instance *)handle;
676 int ret = 0; 638 int ret = 0;
677 639
678 ret = pp_check(pp_handle); 640 ret = pp_check(hwmgr);
679 641
680 if (ret) 642 if (ret)
681 return ret; 643 return ret;
682 644
683 hwmgr = pp_handle->hwmgr;
684
685 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) 645 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
686 return -EINVAL; 646 return -EINVAL;
687 647
688 mutex_lock(&pp_handle->pp_lock); 648 mutex_lock(&hwmgr->smu_lock);
689 ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm); 649 ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
690 mutex_unlock(&pp_handle->pp_lock); 650 mutex_unlock(&hwmgr->smu_lock);
691 return ret; 651 return ret;
692} 652}
693 653
694static int pp_dpm_get_pp_num_states(void *handle, 654static int pp_dpm_get_pp_num_states(void *handle,
695 struct pp_states_info *data) 655 struct pp_states_info *data)
696{ 656{
697 struct pp_hwmgr *hwmgr; 657 struct pp_hwmgr *hwmgr = handle;
698 int i; 658 int i;
699 struct pp_instance *pp_handle = (struct pp_instance *)handle;
700 int ret = 0; 659 int ret = 0;
701 660
702 memset(data, 0, sizeof(*data)); 661 memset(data, 0, sizeof(*data));
703 662
704 ret = pp_check(pp_handle); 663 ret = pp_check(hwmgr);
705 664
706 if (ret) 665 if (ret)
707 return ret; 666 return ret;
708 667
709 hwmgr = pp_handle->hwmgr;
710
711 if (hwmgr->ps == NULL) 668 if (hwmgr->ps == NULL)
712 return -EINVAL; 669 return -EINVAL;
713 670
714 mutex_lock(&pp_handle->pp_lock); 671 mutex_lock(&hwmgr->smu_lock);
715 672
716 data->nums = hwmgr->num_ps; 673 data->nums = hwmgr->num_ps;
717 674
@@ -735,73 +692,68 @@ static int pp_dpm_get_pp_num_states(void *handle,
735 data->states[i] = POWER_STATE_TYPE_DEFAULT; 692 data->states[i] = POWER_STATE_TYPE_DEFAULT;
736 } 693 }
737 } 694 }
738 mutex_unlock(&pp_handle->pp_lock); 695 mutex_unlock(&hwmgr->smu_lock);
739 return 0; 696 return 0;
740} 697}
741 698
742static int pp_dpm_get_pp_table(void *handle, char **table) 699static int pp_dpm_get_pp_table(void *handle, char **table)
743{ 700{
744 struct pp_hwmgr *hwmgr; 701 struct pp_hwmgr *hwmgr = handle;
745 struct pp_instance *pp_handle = (struct pp_instance *)handle;
746 int ret = 0; 702 int ret = 0;
747 int size = 0; 703 int size = 0;
748 704
749 ret = pp_check(pp_handle); 705 ret = pp_check(hwmgr);
750 706
751 if (ret) 707 if (ret)
752 return ret; 708 return ret;
753 709
754 hwmgr = pp_handle->hwmgr;
755
756 if (!hwmgr->soft_pp_table) 710 if (!hwmgr->soft_pp_table)
757 return -EINVAL; 711 return -EINVAL;
758 712
759 mutex_lock(&pp_handle->pp_lock); 713 mutex_lock(&hwmgr->smu_lock);
760 *table = (char *)hwmgr->soft_pp_table; 714 *table = (char *)hwmgr->soft_pp_table;
761 size = hwmgr->soft_pp_table_size; 715 size = hwmgr->soft_pp_table_size;
762 mutex_unlock(&pp_handle->pp_lock); 716 mutex_unlock(&hwmgr->smu_lock);
763 return size; 717 return size;
764} 718}
765 719
766static int amd_powerplay_reset(void *handle) 720static int amd_powerplay_reset(void *handle)
767{ 721{
768 struct pp_instance *instance = (struct pp_instance *)handle; 722 struct pp_hwmgr *hwmgr = handle;
769 int ret; 723 int ret;
770 724
771 ret = pp_check(instance); 725 ret = pp_check(hwmgr);
772 if (ret) 726 if (ret)
773 return ret; 727 return ret;
774 728
775 ret = pp_hw_fini(instance); 729 ret = pp_hw_fini(hwmgr);
776 if (ret) 730 if (ret)
777 return ret; 731 return ret;
778 732
779 ret = hwmgr_hw_init(instance); 733 ret = hwmgr_hw_init(hwmgr);
780 if (ret) 734 if (ret)
781 return ret; 735 return ret;
782 736
783 return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL); 737 return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
784} 738}
785 739
786static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) 740static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
787{ 741{
788 struct pp_hwmgr *hwmgr; 742 struct pp_hwmgr *hwmgr = handle;
789 struct pp_instance *pp_handle = (struct pp_instance *)handle;
790 int ret = 0; 743 int ret = 0;
791 744
792 ret = pp_check(pp_handle); 745 ret = pp_check(hwmgr);
793 746
794 if (ret) 747 if (ret)
795 return ret; 748 return ret;
796 749
797 hwmgr = pp_handle->hwmgr; 750 mutex_lock(&hwmgr->smu_lock);
798 mutex_lock(&pp_handle->pp_lock);
799 if (!hwmgr->hardcode_pp_table) { 751 if (!hwmgr->hardcode_pp_table) {
800 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, 752 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
801 hwmgr->soft_pp_table_size, 753 hwmgr->soft_pp_table_size,
802 GFP_KERNEL); 754 GFP_KERNEL);
803 if (!hwmgr->hardcode_pp_table) { 755 if (!hwmgr->hardcode_pp_table) {
804 mutex_unlock(&pp_handle->pp_lock); 756 mutex_unlock(&hwmgr->smu_lock);
805 return -ENOMEM; 757 return -ENOMEM;
806 } 758 }
807 } 759 }
@@ -809,7 +761,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
809 memcpy(hwmgr->hardcode_pp_table, buf, size); 761 memcpy(hwmgr->hardcode_pp_table, buf, size);
810 762
811 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; 763 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
812 mutex_unlock(&pp_handle->pp_lock); 764 mutex_unlock(&hwmgr->smu_lock);
813 765
814 ret = amd_powerplay_reset(handle); 766 ret = amd_powerplay_reset(handle);
815 if (ret) 767 if (ret)
@@ -827,163 +779,142 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
827static int pp_dpm_force_clock_level(void *handle, 779static int pp_dpm_force_clock_level(void *handle,
828 enum pp_clock_type type, uint32_t mask) 780 enum pp_clock_type type, uint32_t mask)
829{ 781{
830 struct pp_hwmgr *hwmgr; 782 struct pp_hwmgr *hwmgr = handle;
831 struct pp_instance *pp_handle = (struct pp_instance *)handle;
832 int ret = 0; 783 int ret = 0;
833 784
834 ret = pp_check(pp_handle); 785 ret = pp_check(hwmgr);
835 786
836 if (ret) 787 if (ret)
837 return ret; 788 return ret;
838 789
839 hwmgr = pp_handle->hwmgr;
840
841 if (hwmgr->hwmgr_func->force_clock_level == NULL) { 790 if (hwmgr->hwmgr_func->force_clock_level == NULL) {
842 pr_info("%s was not implemented.\n", __func__); 791 pr_info("%s was not implemented.\n", __func__);
843 return 0; 792 return 0;
844 } 793 }
845 mutex_lock(&pp_handle->pp_lock); 794 mutex_lock(&hwmgr->smu_lock);
846 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) 795 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
847 ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); 796 ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
848 else 797 else
849 ret = -EINVAL; 798 ret = -EINVAL;
850 mutex_unlock(&pp_handle->pp_lock); 799 mutex_unlock(&hwmgr->smu_lock);
851 return ret; 800 return ret;
852} 801}
853 802
854static int pp_dpm_print_clock_levels(void *handle, 803static int pp_dpm_print_clock_levels(void *handle,
855 enum pp_clock_type type, char *buf) 804 enum pp_clock_type type, char *buf)
856{ 805{
857 struct pp_hwmgr *hwmgr; 806 struct pp_hwmgr *hwmgr = handle;
858 struct pp_instance *pp_handle = (struct pp_instance *)handle;
859 int ret = 0; 807 int ret = 0;
860 808
861 ret = pp_check(pp_handle); 809 ret = pp_check(hwmgr);
862 810
863 if (ret) 811 if (ret)
864 return ret; 812 return ret;
865 813
866 hwmgr = pp_handle->hwmgr;
867
868 if (hwmgr->hwmgr_func->print_clock_levels == NULL) { 814 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
869 pr_info("%s was not implemented.\n", __func__); 815 pr_info("%s was not implemented.\n", __func__);
870 return 0; 816 return 0;
871 } 817 }
872 mutex_lock(&pp_handle->pp_lock); 818 mutex_lock(&hwmgr->smu_lock);
873 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); 819 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
874 mutex_unlock(&pp_handle->pp_lock); 820 mutex_unlock(&hwmgr->smu_lock);
875 return ret; 821 return ret;
876} 822}
877 823
878static int pp_dpm_get_sclk_od(void *handle) 824static int pp_dpm_get_sclk_od(void *handle)
879{ 825{
880 struct pp_hwmgr *hwmgr; 826 struct pp_hwmgr *hwmgr = handle;
881 struct pp_instance *pp_handle = (struct pp_instance *)handle;
882 int ret = 0; 827 int ret = 0;
883 828
884 ret = pp_check(pp_handle); 829 ret = pp_check(hwmgr);
885 830
886 if (ret) 831 if (ret)
887 return ret; 832 return ret;
888 833
889 hwmgr = pp_handle->hwmgr;
890
891 if (hwmgr->hwmgr_func->get_sclk_od == NULL) { 834 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
892 pr_info("%s was not implemented.\n", __func__); 835 pr_info("%s was not implemented.\n", __func__);
893 return 0; 836 return 0;
894 } 837 }
895 mutex_lock(&pp_handle->pp_lock); 838 mutex_lock(&hwmgr->smu_lock);
896 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr); 839 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
897 mutex_unlock(&pp_handle->pp_lock); 840 mutex_unlock(&hwmgr->smu_lock);
898 return ret; 841 return ret;
899} 842}
900 843
901static int pp_dpm_set_sclk_od(void *handle, uint32_t value) 844static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
902{ 845{
903 struct pp_hwmgr *hwmgr; 846 struct pp_hwmgr *hwmgr = handle;
904 struct pp_instance *pp_handle = (struct pp_instance *)handle;
905 int ret = 0; 847 int ret = 0;
906 848
907 ret = pp_check(pp_handle); 849 ret = pp_check(hwmgr);
908 850
909 if (ret) 851 if (ret)
910 return ret; 852 return ret;
911 853
912 hwmgr = pp_handle->hwmgr;
913
914 if (hwmgr->hwmgr_func->set_sclk_od == NULL) { 854 if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
915 pr_info("%s was not implemented.\n", __func__); 855 pr_info("%s was not implemented.\n", __func__);
916 return 0; 856 return 0;
917 } 857 }
918 858
919 mutex_lock(&pp_handle->pp_lock); 859 mutex_lock(&hwmgr->smu_lock);
920 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); 860 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
921 mutex_unlock(&pp_handle->pp_lock); 861 mutex_unlock(&hwmgr->smu_lock);
922 return ret; 862 return ret;
923} 863}
924 864
925static int pp_dpm_get_mclk_od(void *handle) 865static int pp_dpm_get_mclk_od(void *handle)
926{ 866{
927 struct pp_hwmgr *hwmgr; 867 struct pp_hwmgr *hwmgr = handle;
928 struct pp_instance *pp_handle = (struct pp_instance *)handle;
929 int ret = 0; 868 int ret = 0;
930 869
931 ret = pp_check(pp_handle); 870 ret = pp_check(hwmgr);
932 871
933 if (ret) 872 if (ret)
934 return ret; 873 return ret;
935 874
936 hwmgr = pp_handle->hwmgr;
937
938 if (hwmgr->hwmgr_func->get_mclk_od == NULL) { 875 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
939 pr_info("%s was not implemented.\n", __func__); 876 pr_info("%s was not implemented.\n", __func__);
940 return 0; 877 return 0;
941 } 878 }
942 mutex_lock(&pp_handle->pp_lock); 879 mutex_lock(&hwmgr->smu_lock);
943 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr); 880 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
944 mutex_unlock(&pp_handle->pp_lock); 881 mutex_unlock(&hwmgr->smu_lock);
945 return ret; 882 return ret;
946} 883}
947 884
948static int pp_dpm_set_mclk_od(void *handle, uint32_t value) 885static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
949{ 886{
950 struct pp_hwmgr *hwmgr; 887 struct pp_hwmgr *hwmgr = handle;
951 struct pp_instance *pp_handle = (struct pp_instance *)handle;
952 int ret = 0; 888 int ret = 0;
953 889
954 ret = pp_check(pp_handle); 890 ret = pp_check(hwmgr);
955 891
956 if (ret) 892 if (ret)
957 return ret; 893 return ret;
958 894
959 hwmgr = pp_handle->hwmgr;
960
961 if (hwmgr->hwmgr_func->set_mclk_od == NULL) { 895 if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
962 pr_info("%s was not implemented.\n", __func__); 896 pr_info("%s was not implemented.\n", __func__);
963 return 0; 897 return 0;
964 } 898 }
965 mutex_lock(&pp_handle->pp_lock); 899 mutex_lock(&hwmgr->smu_lock);
966 ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); 900 ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
967 mutex_unlock(&pp_handle->pp_lock); 901 mutex_unlock(&hwmgr->smu_lock);
968 return ret; 902 return ret;
969} 903}
970 904
971static int pp_dpm_read_sensor(void *handle, int idx, 905static int pp_dpm_read_sensor(void *handle, int idx,
972 void *value, int *size) 906 void *value, int *size)
973{ 907{
974 struct pp_hwmgr *hwmgr; 908 struct pp_hwmgr *hwmgr = handle;
975 struct pp_instance *pp_handle = (struct pp_instance *)handle;
976 int ret = 0; 909 int ret = 0;
977 910
978 ret = pp_check(pp_handle); 911 ret = pp_check(hwmgr);
979 if (ret) 912 if (ret)
980 return ret; 913 return ret;
981 914
982 if (value == NULL) 915 if (value == NULL)
983 return -EINVAL; 916 return -EINVAL;
984 917
985 hwmgr = pp_handle->hwmgr;
986
987 switch (idx) { 918 switch (idx) {
988 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 919 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
989 *((uint32_t *)value) = hwmgr->pstate_sclk; 920 *((uint32_t *)value) = hwmgr->pstate_sclk;
@@ -992,9 +923,9 @@ static int pp_dpm_read_sensor(void *handle, int idx,
992 *((uint32_t *)value) = hwmgr->pstate_mclk; 923 *((uint32_t *)value) = hwmgr->pstate_mclk;
993 return 0; 924 return 0;
994 default: 925 default:
995 mutex_lock(&pp_handle->pp_lock); 926 mutex_lock(&hwmgr->smu_lock);
996 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size); 927 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
997 mutex_unlock(&pp_handle->pp_lock); 928 mutex_unlock(&hwmgr->smu_lock);
998 return ret; 929 return ret;
999 } 930 }
1000} 931}
@@ -1002,17 +933,14 @@ static int pp_dpm_read_sensor(void *handle, int idx,
1002static struct amd_vce_state* 933static struct amd_vce_state*
1003pp_dpm_get_vce_clock_state(void *handle, unsigned idx) 934pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
1004{ 935{
1005 struct pp_hwmgr *hwmgr; 936 struct pp_hwmgr *hwmgr = handle;
1006 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1007 int ret = 0; 937 int ret = 0;
1008 938
1009 ret = pp_check(pp_handle); 939 ret = pp_check(hwmgr);
1010 940
1011 if (ret) 941 if (ret)
1012 return NULL; 942 return NULL;
1013 943
1014 hwmgr = pp_handle->hwmgr;
1015
1016 if (hwmgr && idx < hwmgr->num_vce_state_tables) 944 if (hwmgr && idx < hwmgr->num_vce_state_tables)
1017 return &hwmgr->vce_states[idx]; 945 return &hwmgr->vce_states[idx];
1018 return NULL; 946 return NULL;
@@ -1020,14 +948,11 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
1020 948
1021static int pp_get_power_profile_mode(void *handle, char *buf) 949static int pp_get_power_profile_mode(void *handle, char *buf)
1022{ 950{
1023 struct pp_hwmgr *hwmgr; 951 struct pp_hwmgr *hwmgr = handle;
1024 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1025 952
1026 if (!buf || pp_check(pp_handle)) 953 if (!buf || pp_check(hwmgr))
1027 return -EINVAL; 954 return -EINVAL;
1028 955
1029 hwmgr = pp_handle->hwmgr;
1030
1031 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { 956 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
1032 pr_info("%s was not implemented.\n", __func__); 957 pr_info("%s was not implemented.\n", __func__);
1033 return snprintf(buf, PAGE_SIZE, "\n"); 958 return snprintf(buf, PAGE_SIZE, "\n");
@@ -1038,36 +963,30 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
1038 963
1039static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) 964static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
1040{ 965{
1041 struct pp_hwmgr *hwmgr; 966 struct pp_hwmgr *hwmgr = handle;
1042 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1043 int ret = -EINVAL; 967 int ret = -EINVAL;
1044 968
1045 if (pp_check(pp_handle)) 969 if (pp_check(hwmgr))
1046 return -EINVAL; 970 return -EINVAL;
1047 971
1048 hwmgr = pp_handle->hwmgr;
1049
1050 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { 972 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
1051 pr_info("%s was not implemented.\n", __func__); 973 pr_info("%s was not implemented.\n", __func__);
1052 return -EINVAL; 974 return -EINVAL;
1053 } 975 }
1054 mutex_lock(&pp_handle->pp_lock); 976 mutex_lock(&hwmgr->smu_lock);
1055 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) 977 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
1056 ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size); 978 ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
1057 mutex_unlock(&pp_handle->pp_lock); 979 mutex_unlock(&hwmgr->smu_lock);
1058 return ret; 980 return ret;
1059} 981}
1060 982
1061static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size) 983static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
1062{ 984{
1063 struct pp_hwmgr *hwmgr; 985 struct pp_hwmgr *hwmgr = handle;
1064 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1065 986
1066 if (pp_check(pp_handle)) 987 if (pp_check(hwmgr))
1067 return -EINVAL; 988 return -EINVAL;
1068 989
1069 hwmgr = pp_handle->hwmgr;
1070
1071 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { 990 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
1072 pr_info("%s was not implemented.\n", __func__); 991 pr_info("%s was not implemented.\n", __func__);
1073 return -EINVAL; 992 return -EINVAL;
@@ -1079,16 +998,13 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
1079static int pp_dpm_switch_power_profile(void *handle, 998static int pp_dpm_switch_power_profile(void *handle,
1080 enum PP_SMC_POWER_PROFILE type, bool en) 999 enum PP_SMC_POWER_PROFILE type, bool en)
1081{ 1000{
1082 struct pp_hwmgr *hwmgr; 1001 struct pp_hwmgr *hwmgr = handle;
1083 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1084 long workload; 1002 long workload;
1085 uint32_t index; 1003 uint32_t index;
1086 1004
1087 if (pp_check(pp_handle)) 1005 if (pp_check(hwmgr))
1088 return -EINVAL; 1006 return -EINVAL;
1089 1007
1090 hwmgr = pp_handle->hwmgr;
1091
1092 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { 1008 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
1093 pr_info("%s was not implemented.\n", __func__); 1009 pr_info("%s was not implemented.\n", __func__);
1094 return -EINVAL; 1010 return -EINVAL;
@@ -1097,7 +1013,7 @@ static int pp_dpm_switch_power_profile(void *handle,
1097 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1013 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1098 return -EINVAL; 1014 return -EINVAL;
1099 1015
1100 mutex_lock(&pp_handle->pp_lock); 1016 mutex_lock(&hwmgr->smu_lock);
1101 1017
1102 if (!en) { 1018 if (!en) {
1103 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]); 1019 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
@@ -1113,7 +1029,7 @@ static int pp_dpm_switch_power_profile(void *handle,
1113 1029
1114 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 1030 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1115 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); 1031 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
1116 mutex_unlock(&pp_handle->pp_lock); 1032 mutex_unlock(&hwmgr->smu_lock);
1117 1033
1118 return 0; 1034 return 0;
1119} 1035}
@@ -1125,46 +1041,40 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
1125 uint32_t mc_addr_hi, 1041 uint32_t mc_addr_hi,
1126 uint32_t size) 1042 uint32_t size)
1127{ 1043{
1128 struct pp_hwmgr *hwmgr; 1044 struct pp_hwmgr *hwmgr = handle;
1129 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1130 int ret = 0; 1045 int ret = 0;
1131 1046
1132 ret = pp_check(pp_handle); 1047 ret = pp_check(hwmgr);
1133 1048
1134 if (ret) 1049 if (ret)
1135 return ret; 1050 return ret;
1136 1051
1137 hwmgr = pp_handle->hwmgr;
1138
1139 if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) { 1052 if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
1140 pr_info("%s was not implemented.\n", __func__); 1053 pr_info("%s was not implemented.\n", __func__);
1141 return -EINVAL; 1054 return -EINVAL;
1142 } 1055 }
1143 1056
1144 mutex_lock(&pp_handle->pp_lock); 1057 mutex_lock(&hwmgr->smu_lock);
1145 1058
1146 ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low, 1059 ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
1147 virtual_addr_hi, mc_addr_low, mc_addr_hi, 1060 virtual_addr_hi, mc_addr_low, mc_addr_hi,
1148 size); 1061 size);
1149 1062
1150 mutex_unlock(&pp_handle->pp_lock); 1063 mutex_unlock(&hwmgr->smu_lock);
1151 1064
1152 return ret; 1065 return ret;
1153} 1066}
1154 1067
1155static int pp_set_power_limit(void *handle, uint32_t limit) 1068static int pp_set_power_limit(void *handle, uint32_t limit)
1156{ 1069{
1157 struct pp_hwmgr *hwmgr; 1070 struct pp_hwmgr *hwmgr = handle;
1158 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1159 int ret = 0; 1071 int ret = 0;
1160 1072
1161 ret = pp_check(pp_handle); 1073 ret = pp_check(hwmgr);
1162 1074
1163 if (ret) 1075 if (ret)
1164 return ret; 1076 return ret;
1165 1077
1166 hwmgr = pp_handle->hwmgr;
1167
1168 if (hwmgr->hwmgr_func->set_power_limit == NULL) { 1078 if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1169 pr_info("%s was not implemented.\n", __func__); 1079 pr_info("%s was not implemented.\n", __func__);
1170 return -EINVAL; 1080 return -EINVAL;
@@ -1176,20 +1086,19 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
1176 if (limit > hwmgr->default_power_limit) 1086 if (limit > hwmgr->default_power_limit)
1177 return -EINVAL; 1087 return -EINVAL;
1178 1088
1179 mutex_lock(&pp_handle->pp_lock); 1089 mutex_lock(&hwmgr->smu_lock);
1180 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit); 1090 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1181 hwmgr->power_limit = limit; 1091 hwmgr->power_limit = limit;
1182 mutex_unlock(&pp_handle->pp_lock); 1092 mutex_unlock(&hwmgr->smu_lock);
1183 return ret; 1093 return ret;
1184} 1094}
1185 1095
1186static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) 1096static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1187{ 1097{
1188 struct pp_hwmgr *hwmgr; 1098 struct pp_hwmgr *hwmgr = handle;
1189 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1190 int ret = 0; 1099 int ret = 0;
1191 1100
1192 ret = pp_check(pp_handle); 1101 ret = pp_check(hwmgr);
1193 1102
1194 if (ret) 1103 if (ret)
1195 return ret; 1104 return ret;
@@ -1197,16 +1106,14 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1197 if (limit == NULL) 1106 if (limit == NULL)
1198 return -EINVAL; 1107 return -EINVAL;
1199 1108
1200 hwmgr = pp_handle->hwmgr; 1109 mutex_lock(&hwmgr->smu_lock);
1201
1202 mutex_lock(&pp_handle->pp_lock);
1203 1110
1204 if (default_limit) 1111 if (default_limit)
1205 *limit = hwmgr->default_power_limit; 1112 *limit = hwmgr->default_power_limit;
1206 else 1113 else
1207 *limit = hwmgr->power_limit; 1114 *limit = hwmgr->power_limit;
1208 1115
1209 mutex_unlock(&pp_handle->pp_lock); 1116 mutex_unlock(&hwmgr->smu_lock);
1210 1117
1211 return ret; 1118 return ret;
1212} 1119}
@@ -1214,42 +1121,37 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1214static int pp_display_configuration_change(void *handle, 1121static int pp_display_configuration_change(void *handle,
1215 const struct amd_pp_display_configuration *display_config) 1122 const struct amd_pp_display_configuration *display_config)
1216{ 1123{
1217 struct pp_hwmgr *hwmgr; 1124 struct pp_hwmgr *hwmgr = handle;
1218 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1219 int ret = 0; 1125 int ret = 0;
1220 1126
1221 ret = pp_check(pp_handle); 1127 ret = pp_check(hwmgr);
1222 1128
1223 if (ret) 1129 if (ret)
1224 return ret; 1130 return ret;
1225 1131
1226 hwmgr = pp_handle->hwmgr; 1132 mutex_lock(&hwmgr->smu_lock);
1227 mutex_lock(&pp_handle->pp_lock);
1228 phm_store_dal_configuration_data(hwmgr, display_config); 1133 phm_store_dal_configuration_data(hwmgr, display_config);
1229 mutex_unlock(&pp_handle->pp_lock); 1134 mutex_unlock(&hwmgr->smu_lock);
1230 return 0; 1135 return 0;
1231} 1136}
1232 1137
1233static int pp_get_display_power_level(void *handle, 1138static int pp_get_display_power_level(void *handle,
1234 struct amd_pp_simple_clock_info *output) 1139 struct amd_pp_simple_clock_info *output)
1235{ 1140{
1236 struct pp_hwmgr *hwmgr; 1141 struct pp_hwmgr *hwmgr = handle;
1237 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1238 int ret = 0; 1142 int ret = 0;
1239 1143
1240 ret = pp_check(pp_handle); 1144 ret = pp_check(hwmgr);
1241 1145
1242 if (ret) 1146 if (ret)
1243 return ret; 1147 return ret;
1244 1148
1245 hwmgr = pp_handle->hwmgr;
1246
1247 if (output == NULL) 1149 if (output == NULL)
1248 return -EINVAL; 1150 return -EINVAL;
1249 1151
1250 mutex_lock(&pp_handle->pp_lock); 1152 mutex_lock(&hwmgr->smu_lock);
1251 ret = phm_get_dal_power_level(hwmgr, output); 1153 ret = phm_get_dal_power_level(hwmgr, output);
1252 mutex_unlock(&pp_handle->pp_lock); 1154 mutex_unlock(&hwmgr->smu_lock);
1253 return ret; 1155 return ret;
1254} 1156}
1255 1157
@@ -1258,18 +1160,15 @@ static int pp_get_current_clocks(void *handle,
1258{ 1160{
1259 struct amd_pp_simple_clock_info simple_clocks; 1161 struct amd_pp_simple_clock_info simple_clocks;
1260 struct pp_clock_info hw_clocks; 1162 struct pp_clock_info hw_clocks;
1261 struct pp_hwmgr *hwmgr; 1163 struct pp_hwmgr *hwmgr = handle;
1262 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1263 int ret = 0; 1164 int ret = 0;
1264 1165
1265 ret = pp_check(pp_handle); 1166 ret = pp_check(hwmgr);
1266 1167
1267 if (ret) 1168 if (ret)
1268 return ret; 1169 return ret;
1269 1170
1270 hwmgr = pp_handle->hwmgr; 1171 mutex_lock(&hwmgr->smu_lock);
1271
1272 mutex_lock(&pp_handle->pp_lock);
1273 1172
1274 phm_get_dal_power_level(hwmgr, &simple_clocks); 1173 phm_get_dal_power_level(hwmgr, &simple_clocks);
1275 1174
@@ -1283,7 +1182,7 @@ static int pp_get_current_clocks(void *handle,
1283 1182
1284 if (ret) { 1183 if (ret) {
1285 pr_info("Error in phm_get_clock_info \n"); 1184 pr_info("Error in phm_get_clock_info \n");
1286 mutex_unlock(&pp_handle->pp_lock); 1185 mutex_unlock(&hwmgr->smu_lock);
1287 return -EINVAL; 1186 return -EINVAL;
1288 } 1187 }
1289 1188
@@ -1303,29 +1202,26 @@ static int pp_get_current_clocks(void *handle,
1303 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 1202 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1304 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 1203 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1305 } 1204 }
1306 mutex_unlock(&pp_handle->pp_lock); 1205 mutex_unlock(&hwmgr->smu_lock);
1307 return 0; 1206 return 0;
1308} 1207}
1309 1208
1310static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) 1209static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1311{ 1210{
1312 struct pp_hwmgr *hwmgr; 1211 struct pp_hwmgr *hwmgr = handle;
1313 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1314 int ret = 0; 1212 int ret = 0;
1315 1213
1316 ret = pp_check(pp_handle); 1214 ret = pp_check(hwmgr);
1317 1215
1318 if (ret) 1216 if (ret)
1319 return ret; 1217 return ret;
1320 1218
1321 hwmgr = pp_handle->hwmgr;
1322
1323 if (clocks == NULL) 1219 if (clocks == NULL)
1324 return -EINVAL; 1220 return -EINVAL;
1325 1221
1326 mutex_lock(&pp_handle->pp_lock); 1222 mutex_lock(&hwmgr->smu_lock);
1327 ret = phm_get_clock_by_type(hwmgr, type, clocks); 1223 ret = phm_get_clock_by_type(hwmgr, type, clocks);
1328 mutex_unlock(&pp_handle->pp_lock); 1224 mutex_unlock(&hwmgr->smu_lock);
1329 return ret; 1225 return ret;
1330} 1226}
1331 1227
@@ -1333,21 +1229,19 @@ static int pp_get_clock_by_type_with_latency(void *handle,
1333 enum amd_pp_clock_type type, 1229 enum amd_pp_clock_type type,
1334 struct pp_clock_levels_with_latency *clocks) 1230 struct pp_clock_levels_with_latency *clocks)
1335{ 1231{
1336 struct pp_hwmgr *hwmgr; 1232 struct pp_hwmgr *hwmgr = handle;
1337 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1338 int ret = 0; 1233 int ret = 0;
1339 1234
1340 ret = pp_check(pp_handle); 1235 ret = pp_check(hwmgr);
1341 if (ret) 1236 if (ret)
1342 return ret; 1237 return ret;
1343 1238
1344 if (!clocks) 1239 if (!clocks)
1345 return -EINVAL; 1240 return -EINVAL;
1346 1241
1347 mutex_lock(&pp_handle->pp_lock); 1242 mutex_lock(&hwmgr->smu_lock);
1348 hwmgr = ((struct pp_instance *)handle)->hwmgr;
1349 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks); 1243 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1350 mutex_unlock(&pp_handle->pp_lock); 1244 mutex_unlock(&hwmgr->smu_lock);
1351 return ret; 1245 return ret;
1352} 1246}
1353 1247
@@ -1355,47 +1249,41 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
1355 enum amd_pp_clock_type type, 1249 enum amd_pp_clock_type type,
1356 struct pp_clock_levels_with_voltage *clocks) 1250 struct pp_clock_levels_with_voltage *clocks)
1357{ 1251{
1358 struct pp_hwmgr *hwmgr; 1252 struct pp_hwmgr *hwmgr = handle;
1359 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1360 int ret = 0; 1253 int ret = 0;
1361 1254
1362 ret = pp_check(pp_handle); 1255 ret = pp_check(hwmgr);
1363 if (ret) 1256 if (ret)
1364 return ret; 1257 return ret;
1365 1258
1366 if (!clocks) 1259 if (!clocks)
1367 return -EINVAL; 1260 return -EINVAL;
1368 1261
1369 hwmgr = ((struct pp_instance *)handle)->hwmgr; 1262 mutex_lock(&hwmgr->smu_lock);
1370
1371 mutex_lock(&pp_handle->pp_lock);
1372 1263
1373 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks); 1264 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1374 1265
1375 mutex_unlock(&pp_handle->pp_lock); 1266 mutex_unlock(&hwmgr->smu_lock);
1376 return ret; 1267 return ret;
1377} 1268}
1378 1269
1379static int pp_set_watermarks_for_clocks_ranges(void *handle, 1270static int pp_set_watermarks_for_clocks_ranges(void *handle,
1380 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) 1271 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
1381{ 1272{
1382 struct pp_hwmgr *hwmgr; 1273 struct pp_hwmgr *hwmgr = handle;
1383 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1384 int ret = 0; 1274 int ret = 0;
1385 1275
1386 ret = pp_check(pp_handle); 1276 ret = pp_check(hwmgr);
1387 if (ret) 1277 if (ret)
1388 return ret; 1278 return ret;
1389 1279
1390 if (!wm_with_clock_ranges) 1280 if (!wm_with_clock_ranges)
1391 return -EINVAL; 1281 return -EINVAL;
1392 1282
1393 hwmgr = ((struct pp_instance *)handle)->hwmgr; 1283 mutex_lock(&hwmgr->smu_lock);
1394
1395 mutex_lock(&pp_handle->pp_lock);
1396 ret = phm_set_watermarks_for_clocks_ranges(hwmgr, 1284 ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1397 wm_with_clock_ranges); 1285 wm_with_clock_ranges);
1398 mutex_unlock(&pp_handle->pp_lock); 1286 mutex_unlock(&hwmgr->smu_lock);
1399 1287
1400 return ret; 1288 return ret;
1401} 1289}
@@ -1403,22 +1291,19 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
1403static int pp_display_clock_voltage_request(void *handle, 1291static int pp_display_clock_voltage_request(void *handle,
1404 struct pp_display_clock_request *clock) 1292 struct pp_display_clock_request *clock)
1405{ 1293{
1406 struct pp_hwmgr *hwmgr; 1294 struct pp_hwmgr *hwmgr = handle;
1407 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1408 int ret = 0; 1295 int ret = 0;
1409 1296
1410 ret = pp_check(pp_handle); 1297 ret = pp_check(hwmgr);
1411 if (ret) 1298 if (ret)
1412 return ret; 1299 return ret;
1413 1300
1414 if (!clock) 1301 if (!clock)
1415 return -EINVAL; 1302 return -EINVAL;
1416 1303
1417 hwmgr = ((struct pp_instance *)handle)->hwmgr; 1304 mutex_lock(&hwmgr->smu_lock);
1418
1419 mutex_lock(&pp_handle->pp_lock);
1420 ret = phm_display_clock_voltage_request(hwmgr, clock); 1305 ret = phm_display_clock_voltage_request(hwmgr, clock);
1421 mutex_unlock(&pp_handle->pp_lock); 1306 mutex_unlock(&hwmgr->smu_lock);
1422 1307
1423 return ret; 1308 return ret;
1424} 1309}
@@ -1426,42 +1311,36 @@ static int pp_display_clock_voltage_request(void *handle,
1426static int pp_get_display_mode_validation_clocks(void *handle, 1311static int pp_get_display_mode_validation_clocks(void *handle,
1427 struct amd_pp_simple_clock_info *clocks) 1312 struct amd_pp_simple_clock_info *clocks)
1428{ 1313{
1429 struct pp_hwmgr *hwmgr; 1314 struct pp_hwmgr *hwmgr = handle;
1430 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1431 int ret = 0; 1315 int ret = 0;
1432 1316
1433 ret = pp_check(pp_handle); 1317 ret = pp_check(hwmgr);
1434 1318
1435 if (ret) 1319 if (ret)
1436 return ret; 1320 return ret;
1437 1321
1438 hwmgr = pp_handle->hwmgr;
1439
1440 if (clocks == NULL) 1322 if (clocks == NULL)
1441 return -EINVAL; 1323 return -EINVAL;
1442 1324
1443 mutex_lock(&pp_handle->pp_lock); 1325 mutex_lock(&hwmgr->smu_lock);
1444 1326
1445 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) 1327 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1446 ret = phm_get_max_high_clocks(hwmgr, clocks); 1328 ret = phm_get_max_high_clocks(hwmgr, clocks);
1447 1329
1448 mutex_unlock(&pp_handle->pp_lock); 1330 mutex_unlock(&hwmgr->smu_lock);
1449 return ret; 1331 return ret;
1450} 1332}
1451 1333
1452static int pp_set_mmhub_powergating_by_smu(void *handle) 1334static int pp_set_mmhub_powergating_by_smu(void *handle)
1453{ 1335{
1454 struct pp_hwmgr *hwmgr; 1336 struct pp_hwmgr *hwmgr = handle;
1455 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1456 int ret = 0; 1337 int ret = 0;
1457 1338
1458 ret = pp_check(pp_handle); 1339 ret = pp_check(hwmgr);
1459 1340
1460 if (ret) 1341 if (ret)
1461 return ret; 1342 return ret;
1462 1343
1463 hwmgr = pp_handle->hwmgr;
1464
1465 if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) { 1344 if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
1466 pr_info("%s was not implemented.\n", __func__); 1345 pr_info("%s was not implemented.\n", __func__);
1467 return 0; 1346 return 0;
@@ -1470,7 +1349,7 @@ static int pp_set_mmhub_powergating_by_smu(void *handle)
1470 return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr); 1349 return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
1471} 1350}
1472 1351
1473const struct amd_pm_funcs pp_dpm_funcs = { 1352static const struct amd_pm_funcs pp_dpm_funcs = {
1474 .load_firmware = pp_dpm_load_fw, 1353 .load_firmware = pp_dpm_load_fw,
1475 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, 1354 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1476 .force_performance_level = pp_dpm_force_performance_level, 1355 .force_performance_level = pp_dpm_force_performance_level,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 5563b6502c4d..238dd59caf63 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -116,23 +116,11 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
116 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; 116 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
117} 117}
118 118
119int hwmgr_early_init(struct pp_instance *handle) 119int hwmgr_early_init(struct pp_hwmgr *hwmgr)
120{ 120{
121 struct pp_hwmgr *hwmgr; 121 if (hwmgr == NULL)
122
123 if (handle == NULL)
124 return -EINVAL; 122 return -EINVAL;
125 123
126 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
127 if (hwmgr == NULL)
128 return -ENOMEM;
129
130 handle->hwmgr = hwmgr;
131 hwmgr->adev = handle->parent;
132 hwmgr->device = handle->device;
133 hwmgr->chip_family = ((struct amdgpu_device *)handle->parent)->family;
134 hwmgr->chip_id = ((struct amdgpu_device *)handle->parent)->asic_type;
135 hwmgr->feature_mask = amdgpu_pp_feature_mask;
136 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; 124 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
137 hwmgr->power_source = PP_PowerSource_AC; 125 hwmgr->power_source = PP_PowerSource_AC;
138 hwmgr->pp_table_version = PP_TABLE_V1; 126 hwmgr->pp_table_version = PP_TABLE_V1;
@@ -220,16 +208,13 @@ int hwmgr_early_init(struct pp_instance *handle)
220 return 0; 208 return 0;
221} 209}
222 210
223int hwmgr_hw_init(struct pp_instance *handle) 211int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
224{ 212{
225 struct pp_hwmgr *hwmgr;
226 int ret = 0; 213 int ret = 0;
227 214
228 if (handle == NULL) 215 if (hwmgr == NULL)
229 return -EINVAL; 216 return -EINVAL;
230 217
231 hwmgr = handle->hwmgr;
232
233 if (hwmgr->pptable_func == NULL || 218 if (hwmgr->pptable_func == NULL ||
234 hwmgr->pptable_func->pptable_init == NULL || 219 hwmgr->pptable_func->pptable_init == NULL ||
235 hwmgr->hwmgr_func->backend_init == NULL) 220 hwmgr->hwmgr_func->backend_init == NULL)
@@ -275,15 +260,11 @@ err:
275 return ret; 260 return ret;
276} 261}
277 262
278int hwmgr_hw_fini(struct pp_instance *handle) 263int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
279{ 264{
280 struct pp_hwmgr *hwmgr; 265 if (hwmgr == NULL)
281
282 if (handle == NULL || handle->hwmgr == NULL)
283 return -EINVAL; 266 return -EINVAL;
284 267
285 hwmgr = handle->hwmgr;
286
287 phm_stop_thermal_controller(hwmgr); 268 phm_stop_thermal_controller(hwmgr);
288 psm_set_boot_states(hwmgr); 269 psm_set_boot_states(hwmgr);
289 psm_adjust_power_state_dynamic(hwmgr, false, NULL); 270 psm_adjust_power_state_dynamic(hwmgr, false, NULL);
@@ -297,15 +278,13 @@ int hwmgr_hw_fini(struct pp_instance *handle)
297 return psm_fini_power_state_table(hwmgr); 278 return psm_fini_power_state_table(hwmgr);
298} 279}
299 280
300int hwmgr_hw_suspend(struct pp_instance *handle) 281int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
301{ 282{
302 struct pp_hwmgr *hwmgr;
303 int ret = 0; 283 int ret = 0;
304 284
305 if (handle == NULL || handle->hwmgr == NULL) 285 if (hwmgr == NULL)
306 return -EINVAL; 286 return -EINVAL;
307 287
308 hwmgr = handle->hwmgr;
309 phm_disable_smc_firmware_ctf(hwmgr); 288 phm_disable_smc_firmware_ctf(hwmgr);
310 ret = psm_set_boot_states(hwmgr); 289 ret = psm_set_boot_states(hwmgr);
311 if (ret) 290 if (ret)
@@ -318,15 +297,13 @@ int hwmgr_hw_suspend(struct pp_instance *handle)
318 return ret; 297 return ret;
319} 298}
320 299
321int hwmgr_hw_resume(struct pp_instance *handle) 300int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
322{ 301{
323 struct pp_hwmgr *hwmgr;
324 int ret = 0; 302 int ret = 0;
325 303
326 if (handle == NULL || handle->hwmgr == NULL) 304 if (hwmgr == NULL)
327 return -EINVAL; 305 return -EINVAL;
328 306
329 hwmgr = handle->hwmgr;
330 ret = phm_setup_asic(hwmgr); 307 ret = phm_setup_asic(hwmgr);
331 if (ret) 308 if (ret)
332 return ret; 309 return ret;
@@ -361,17 +338,14 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
361 } 338 }
362} 339}
363 340
364int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, 341int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
365 enum amd_pm_state_type *user_state) 342 enum amd_pm_state_type *user_state)
366{ 343{
367 int ret = 0; 344 int ret = 0;
368 struct pp_hwmgr *hwmgr;
369 345
370 if (handle == NULL || handle->hwmgr == NULL) 346 if (hwmgr == NULL)
371 return -EINVAL; 347 return -EINVAL;
372 348
373 hwmgr = handle->hwmgr;
374
375 switch (task_id) { 349 switch (task_id) {
376 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 350 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
377 ret = phm_set_cpu_power_state(hwmgr); 351 ret = phm_set_cpu_power_state(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 2e2e4d062134..85b46ad68546 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -25,7 +25,6 @@
25 25
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include "amd_powerplay.h" 27#include "amd_powerplay.h"
28#include "pp_instance.h"
29#include "hardwaremanager.h" 28#include "hardwaremanager.h"
30#include "pp_power_source.h" 29#include "pp_power_source.h"
31#include "hwmgr_ppt.h" 30#include "hwmgr_ppt.h"
@@ -34,7 +33,6 @@
34#include "power_state.h" 33#include "power_state.h"
35#include "smu_helper.h" 34#include "smu_helper.h"
36 35
37struct pp_instance;
38struct pp_hwmgr; 36struct pp_hwmgr;
39struct phm_fan_speed_info; 37struct phm_fan_speed_info;
40struct pp_atomctrl_voltage_table; 38struct pp_atomctrl_voltage_table;
@@ -703,6 +701,8 @@ struct pp_hwmgr {
703 uint32_t chip_family; 701 uint32_t chip_family;
704 uint32_t chip_id; 702 uint32_t chip_id;
705 uint32_t smu_version; 703 uint32_t smu_version;
704 bool pm_en;
705 struct mutex smu_lock;
706 706
707 uint32_t pp_table_version; 707 uint32_t pp_table_version;
708 void *device; 708 void *device;
@@ -769,12 +769,12 @@ struct cgs_irq_src_funcs {
769 cgs_irq_handler_func_t handler; 769 cgs_irq_handler_func_t handler;
770}; 770};
771 771
772extern int hwmgr_early_init(struct pp_instance *handle); 772extern int hwmgr_early_init(struct pp_hwmgr *hwmgr);
773extern int hwmgr_hw_init(struct pp_instance *handle); 773extern int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
774extern int hwmgr_hw_fini(struct pp_instance *handle); 774extern int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
775extern int hwmgr_hw_suspend(struct pp_instance *handle); 775extern int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
776extern int hwmgr_hw_resume(struct pp_instance *handle); 776extern int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
777extern int hwmgr_handle_task(struct pp_instance *handle, 777extern int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
778 enum amd_pp_task task_id, 778 enum amd_pp_task task_id,
779 enum amd_pm_state_type *user_state); 779 enum amd_pm_state_type *user_state);
780 780
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
deleted file mode 100644
index 6c2fa33bd63a..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _PP_INSTANCE_H_
24#define _PP_INSTANCE_H_
25
26struct pp_hwmgr;
27
28struct pp_instance {
29 void *parent; /* e.g. amdgpu_device */
30 void *device; /* e.g. cgs_device */
31 bool pm_en;
32 struct pp_hwmgr *hwmgr;
33 struct mutex pp_lock;
34};
35
36#endif