aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/cz_smc.c
diff options
context:
space:
mode:
authorAlex Deucher <alexander.deucher@amd.com>2015-04-20 17:31:14 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-06-03 21:03:17 -0400
commitaaa36a976bbb9b02a54c087ff390c0bad1d18e3e (patch)
tree105be3c06ef33c39e6934801d386847950d4ebf9 /drivers/gpu/drm/amd/amdgpu/cz_smc.c
parenta2e73f56fa6282481927ec43aa9362c03c2e2104 (diff)
drm/amdgpu: Add initial VI support
This adds initial support for VI asics. This includes Iceland, Tonga, and Carrizo. Our initial focus has been Carrizo, so there are still gaps in support for Tonga and Iceland, notably power management. Acked-by: Christian König <christian.koenig@amd.com> Acked-by: Jammy Zhou <Jammy.Zhou@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/cz_smc.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smc.c962
1 files changed, 962 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
new file mode 100644
index 000000000000..a72ffc7d6c26
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -0,0 +1,962 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include "drmP.h"
25#include "amdgpu.h"
26#include "smu8.h"
27#include "smu8_fusion.h"
28#include "cz_ppsmc.h"
29#include "cz_smumgr.h"
30#include "smu_ucode_xfer_cz.h"
31#include "amdgpu_ucode.h"
32
33#include "smu/smu_8_0_d.h"
34#include "smu/smu_8_0_sh_mask.h"
35#include "gca/gfx_8_0_d.h"
36#include "gca/gfx_8_0_sh_mask.h"
37
38uint32_t cz_get_argument(struct amdgpu_device *adev)
39{
40 return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
41}
42
43static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
44{
45 struct cz_smu_private_data *priv =
46 (struct cz_smu_private_data *)(adev->smu.priv);
47
48 return priv;
49}
50
51int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
52{
53 int i;
54 u32 content = 0, tmp;
55
56 for (i = 0; i < adev->usec_timeout; i++) {
57 tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
58 SMU_MP1_SRBM2P_RESP_0, CONTENT);
59 if (content != tmp)
60 break;
61 udelay(1);
62 }
63
64 /* timeout means wrong logic*/
65 if (i == adev->usec_timeout)
66 return -EINVAL;
67
68 WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
69 WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
70
71 return 0;
72}
73
74int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
75{
76 int i;
77 u32 content = 0, tmp = 0;
78
79 if (cz_send_msg_to_smc_async(adev, msg))
80 return -EINVAL;
81
82 for (i = 0; i < adev->usec_timeout; i++) {
83 tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
84 SMU_MP1_SRBM2P_RESP_0, CONTENT);
85 if (content != tmp)
86 break;
87 udelay(1);
88 }
89
90 /* timeout means wrong logic*/
91 if (i == adev->usec_timeout)
92 return -EINVAL;
93
94 if (PPSMC_Result_OK != tmp) {
95 dev_err(adev->dev, "SMC Failed to send Message.\n");
96 return -EINVAL;
97 }
98
99 return 0;
100}
101
/*
 * Write @parameter into the SMU argument register, then post @msg
 * without waiting for the SMU to process it.
 * Returns 0 on success, -EINVAL if the SMU was not ready for a message.
 */
int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc_async(adev, msg);
}
108
/*
 * Write @parameter into the SMU argument register, then send @msg and
 * wait for the SMU to acknowledge it.
 * Returns 0 on success, -EINVAL on timeout or a non-OK SMU result.
 */
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc(adev, msg);
}
115
116static int cz_set_smc_sram_address(struct amdgpu_device *adev,
117 u32 smc_address, u32 limit)
118{
119 if (smc_address & 3)
120 return -EINVAL;
121 if ((smc_address + 3) > limit)
122 return -EINVAL;
123
124 WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
125
126 return 0;
127}
128
129int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
130 u32 *value, u32 limit)
131{
132 int ret;
133
134 ret = cz_set_smc_sram_address(adev, smc_address, limit);
135 if (ret)
136 return ret;
137
138 *value = RREG32(mmMP0PUB_IND_DATA_0);
139
140 return 0;
141}
142
143int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
144 u32 value, u32 limit)
145{
146 int ret;
147
148 ret = cz_set_smc_sram_address(adev, smc_address, limit);
149 if (ret)
150 return ret;
151
152 WREG32(mmMP0PUB_IND_DATA_0, value);
153
154 return 0;
155}
156
/*
 * Kick off SMU-managed firmware loading.
 *
 * Clears the UcodeLoadStatus word in the SMU firmware header, hands the
 * TOC buffer address to the SMU and then executes the ARAM-save,
 * power-profiling and bootup job lists built during cz_smu_init().
 * Always returns 0; the individual SMC messages are not checked here —
 * completion is verified separately via cz_smu_check_fw_load_finish().
 */
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/* reset the load-status word the SMU updates as firmwares load */
	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/*prepare toc buffers*/
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrHi,
				priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrLo,
				priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/*execute jobs*/
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_aram);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_power_profiling_index);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_initialize_index);

	return 0;
}
190
191/*
192 *Check if the FW has been loaded, SMU will not return if loading
193 *has not finished.
194 */
195static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
196 uint32_t fw_mask)
197{
198 int i;
199 uint32_t index = SMN_MP1_SRAM_START_ADDR +
200 SMU8_FIRMWARE_HEADER_LOCATION +
201 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
202
203 WREG32(mmMP0PUB_IND_INDEX, index);
204
205 for (i = 0; i < adev->usec_timeout; i++) {
206 if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
207 break;
208 udelay(1);
209 }
210
211 if (i >= adev->usec_timeout) {
212 dev_err(adev->dev,
213 "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
214 fw_mask, RREG32(mmMP0PUB_IND_DATA));
215 return -EINVAL;
216 }
217
218 return 0;
219}
220
221/*
222 * interfaces for different ip blocks to check firmware loading status
223 * 0 for success otherwise failed
224 */
225static int cz_smu_check_finished(struct amdgpu_device *adev,
226 enum AMDGPU_UCODE_ID id)
227{
228 switch (id) {
229 case AMDGPU_UCODE_ID_SDMA0:
230 if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
231 return 0;
232 break;
233 case AMDGPU_UCODE_ID_SDMA1:
234 if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
235 return 0;
236 break;
237 case AMDGPU_UCODE_ID_CP_CE:
238 if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
239 return 0;
240 break;
241 case AMDGPU_UCODE_ID_CP_PFP:
242 if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
243 return 0;
244 case AMDGPU_UCODE_ID_CP_ME:
245 if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
246 return 0;
247 break;
248 case AMDGPU_UCODE_ID_CP_MEC1:
249 if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
250 return 0;
251 break;
252 case AMDGPU_UCODE_ID_CP_MEC2:
253 if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
254 return 0;
255 break;
256 case AMDGPU_UCODE_ID_RLC_G:
257 if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
258 return 0;
259 break;
260 case AMDGPU_UCODE_ID_MAXIMUM:
261 default:
262 break;
263 }
264
265 return 1;
266}
267
/*
 * Manually load the MEC firmware into the CP compute instruction cache.
 *
 * Halts both MEC micro engines, programs the instruction-cache base
 * control register (VMID 0, ATC off, cache policy 0, MTYPE 1) and
 * points the cache base at the MEC1 firmware's GPU address.
 * Returns -EINVAL if the MEC1 firmware has not been fetched yet.
 */
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
				&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	/* configure the instruction-cache access attributes */
	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	/* point the instruction cache at the firmware's GPU address */
	reg_data = lower_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
301
/*
 * Start the SMU and have it load the GFX/SDMA firmwares.
 *
 * Kicks the SMU job lists, waits until UcodeLoadStatus shows every
 * expected firmware, loads the MEC image manually on Carrizo and then
 * records the loaded firmwares in adev->smu.fw_flags (consumed by
 * cz_smu_check_finished()).  Returns 0 on success or a negative errno.
 */
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	/* mask of firmwares the SMU must report as loaded */
	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
				UCODE_ID_SDMA0_MASK |
				UCODE_ID_SDMA1_MASK |
				UCODE_ID_CP_CE_MASK |
				UCODE_ID_CP_ME_MASK |
				UCODE_ID_CP_PFP_MASK |
				UCODE_ID_CP_MEC_JT1_MASK |
				UCODE_ID_CP_MEC_JT2_MASK;

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
				AMDGPU_SDMA1_UCODE_LOADED |
				AMDGPU_CPCE_UCODE_LOADED |
				AMDGPU_CPPFP_UCODE_LOADED |
				AMDGPU_CPME_UCODE_LOADED |
				AMDGPU_CPMEC1_UCODE_LOADED |
				AMDGPU_CPMEC2_UCODE_LOADED |
				AMDGPU_CPRLC_UCODE_LOADED;

	return ret;
}
341
342static uint32_t cz_convert_fw_type(uint32_t fw_type)
343{
344 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
345
346 switch (fw_type) {
347 case UCODE_ID_SDMA0:
348 result = AMDGPU_UCODE_ID_SDMA0;
349 break;
350 case UCODE_ID_SDMA1:
351 result = AMDGPU_UCODE_ID_SDMA1;
352 break;
353 case UCODE_ID_CP_CE:
354 result = AMDGPU_UCODE_ID_CP_CE;
355 break;
356 case UCODE_ID_CP_PFP:
357 result = AMDGPU_UCODE_ID_CP_PFP;
358 break;
359 case UCODE_ID_CP_ME:
360 result = AMDGPU_UCODE_ID_CP_ME;
361 break;
362 case UCODE_ID_CP_MEC_JT1:
363 case UCODE_ID_CP_MEC_JT2:
364 result = AMDGPU_UCODE_ID_CP_MEC1;
365 break;
366 case UCODE_ID_RLC_G:
367 result = AMDGPU_UCODE_ID_RLC_G;
368 break;
369 default:
370 DRM_ERROR("UCode type is out of range!");
371 }
372
373 return result;
374}
375
376static uint8_t cz_smu_translate_firmware_enum_to_arg(
377 enum cz_scratch_entry firmware_enum)
378{
379 uint8_t ret = 0;
380
381 switch (firmware_enum) {
382 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
383 ret = UCODE_ID_SDMA0;
384 break;
385 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
386 ret = UCODE_ID_SDMA1;
387 break;
388 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
389 ret = UCODE_ID_CP_CE;
390 break;
391 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
392 ret = UCODE_ID_CP_PFP;
393 break;
394 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
395 ret = UCODE_ID_CP_ME;
396 break;
397 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
398 ret = UCODE_ID_CP_MEC_JT1;
399 break;
400 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
401 ret = UCODE_ID_CP_MEC_JT2;
402 break;
403 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
404 ret = UCODE_ID_GMCON_RENG;
405 break;
406 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
407 ret = UCODE_ID_RLC_G;
408 break;
409 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
410 ret = UCODE_ID_RLC_SCRATCH;
411 break;
412 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
413 ret = UCODE_ID_RLC_SRM_ARAM;
414 break;
415 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
416 ret = UCODE_ID_RLC_SRM_DRAM;
417 break;
418 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
419 ret = UCODE_ID_DMCU_ERAM;
420 break;
421 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
422 ret = UCODE_ID_DMCU_IRAM;
423 break;
424 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
425 ret = TASK_ARG_INIT_MM_PWR_LOG;
426 break;
427 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
428 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
429 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
430 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
431 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
432 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
433 ret = TASK_ARG_REG_MMIO;
434 break;
435 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
436 ret = TASK_ARG_INIT_CLK_TABLE;
437 break;
438 }
439
440 return ret;
441}
442
443static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
444 enum cz_scratch_entry firmware_enum,
445 struct cz_buffer_entry *entry)
446{
447 uint64_t gpu_addr;
448 uint32_t data_size;
449 uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
450 enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
451 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
452 const struct gfx_firmware_header_v1_0 *header;
453
454 if (ucode->fw == NULL)
455 return -EINVAL;
456
457 gpu_addr = ucode->mc_addr;
458 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
459 data_size = le32_to_cpu(header->header.ucode_size_bytes);
460
461 if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
462 (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
463 gpu_addr += le32_to_cpu(header->jt_offset) << 2;
464 data_size = le32_to_cpu(header->jt_size) << 2;
465 }
466
467 entry->mc_addr_low = lower_32_bits(gpu_addr);
468 entry->mc_addr_high = upper_32_bits(gpu_addr);
469 entry->data_size = data_size;
470 entry->firmware_ID = firmware_enum;
471
472 return 0;
473}
474
475static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
476 enum cz_scratch_entry scratch_type,
477 uint32_t size_in_byte,
478 struct cz_buffer_entry *entry)
479{
480 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
481 uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
482 priv->smu_buffer.mc_addr_low;
483 mc_addr += size_in_byte;
484
485 priv->smu_buffer_used_bytes += size_in_byte;
486 entry->data_size = size_in_byte;
487 entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
488 entry->mc_addr_low = lower_32_bits(mc_addr);
489 entry->mc_addr_high = upper_32_bits(mc_addr);
490 entry->firmware_ID = scratch_type;
491
492 return 0;
493}
494
495static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
496 enum cz_scratch_entry firmware_enum,
497 bool is_last)
498{
499 uint8_t i;
500 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
501 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
502 struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
503
504 task->type = TASK_TYPE_UCODE_LOAD;
505 task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
506 task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
507
508 for (i = 0; i < priv->driver_buffer_length; i++)
509 if (priv->driver_buffer[i].firmware_ID == firmware_enum)
510 break;
511
512 if (i >= priv->driver_buffer_length) {
513 dev_err(adev->dev, "Invalid Firmware Type\n");
514 return -EINVAL;
515 }
516
517 task->addr.low = priv->driver_buffer[i].mc_addr_low;
518 task->addr.high = priv->driver_buffer[i].mc_addr_high;
519 task->size_bytes = priv->driver_buffer[i].data_size;
520
521 return 0;
522}
523
524static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
525 enum cz_scratch_entry firmware_enum,
526 uint8_t type, bool is_last)
527{
528 uint8_t i;
529 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
530 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
531 struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
532
533 task->type = type;
534 task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
535 task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
536
537 for (i = 0; i < priv->scratch_buffer_length; i++)
538 if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
539 break;
540
541 if (i >= priv->scratch_buffer_length) {
542 dev_err(adev->dev, "Invalid Firmware Type\n");
543 return -EINVAL;
544 }
545
546 task->addr.low = priv->scratch_buffer[i].mc_addr_low;
547 task->addr.high = priv->scratch_buffer[i].mc_addr_high;
548 task->size_bytes = priv->scratch_buffer[i].data_size;
549
550 if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
551 struct cz_ih_meta_data *pIHReg_restore =
552 (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
553 pIHReg_restore->command =
554 METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
555 }
556
557 return 0;
558}
559
/*
 * Append the single-task job that saves the RLC auto-restore RAM and
 * remember its TOC index (executed by cz_smu_request_load_fw()).
 */
static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* record where this job starts in the TOC */
	priv->toc_entry_aram = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
570
/*
 * Build the JOB_GFX_SAVE job list: save the RLC scratch area and the
 * RLC save-restore DRAM image before VDDGFX is powered off.
 */
static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	/* the job list stores the index of its first task */
	toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_SAVE, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
586
/*
 * Build the JOB_GFX_RESTORE job list: reload the CP/RLC firmwares
 * (only when the SMU is responsible for firmware loading) and restore
 * the RLC scratch and save-restore memories after VDDGFX comes back up.
 */
static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	/* the job list stores the index of its first task */
	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
623
/*
 * Append the multimedia power-profiling init job and remember its TOC
 * index (executed by cz_smu_request_load_fw()).
 */
static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* record where this job starts in the TOC */
	priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				TASK_TYPE_INITIALIZE, true);
	return 0;
}
635
/*
 * Build the bootup firmware-load job: every SDMA/CP/RLC firmware in the
 * order the SMU should load them.  The tasks are only emitted when the
 * SMU performs firmware loading (adev->firmware.smu_load); the job's
 * starting TOC index is recorded either way.
 */
static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* record where this job starts in the TOC */
	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}
663
/*
 * Append the clock-table init job and remember its TOC index (executed
 * by the pptable download/upload paths).
 */
static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* record where this job starts in the TOC */
	priv->toc_entry_clock_table = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				TASK_TYPE_INITIALIZE, true);

	return 0;
}
676
677static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
678{
679 int i;
680 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
681 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
682
683 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
684 toc->JobList[i] = (uint8_t)IGNORE_JOB;
685
686 return 0;
687}
688
/*
 * cz smu uninitialization: release the TOC and scratch buffer objects,
 * free the private state and tear down the SMU firmware BO.
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	/* the shared firmware BO only exists when the SMU loads ucode */
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
703
/*
 * Ask the SMU to copy its clock table into the fusion clock-table
 * scratch buffer and return a kernel pointer to it through @table.
 * Returns -EINVAL if no clock-table scratch entry was registered at
 * init time.
 */
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* locate the clock-table scratch entry */
	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}
737
/*
 * Push the (possibly modified) clock table from the fusion clock-table
 * scratch buffer back into the SMU.  Returns -EINVAL if no clock-table
 * scratch entry was registered at init time.
 */
int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* locate the clock-table scratch entry */
	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}
769
/*
 * cz smumgr functions initialization
 *
 * Only the load-status query is wired up; firmware loading is driven
 * directly through cz_smu_start() rather than via these callbacks.
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
778
779/*
780 * cz smu initialization
781 */
782int cz_smu_init(struct amdgpu_device *adev)
783{
784 int ret = -EINVAL;
785 uint64_t mc_addr = 0;
786 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
787 struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
788 void *toc_buf_ptr = NULL;
789 void *smu_buf_ptr = NULL;
790
791 struct cz_smu_private_data *priv =
792 kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
793 if (priv == NULL)
794 return -ENOMEM;
795
796 /* allocate firmware buffers */
797 if (adev->firmware.smu_load)
798 amdgpu_ucode_init_bo(adev);
799
800 adev->smu.priv = priv;
801 adev->smu.fw_flags = 0;
802 priv->toc_buffer.data_size = 4096;
803
804 priv->smu_buffer.data_size =
805 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
806 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
807 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
808 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
809 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
810
811 /* prepare toc buffer and smu buffer:
812 * 1. create amdgpu_bo for toc buffer and smu buffer
813 * 2. pin mc address
814 * 3. map kernel virtual address
815 */
816 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
817 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
818
819 if (ret) {
820 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
821 return ret;
822 }
823
824 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
825 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
826
827 if (ret) {
828 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
829 return ret;
830 }
831
832 /* toc buffer reserve/pin/map */
833 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
834 if (ret) {
835 amdgpu_bo_unref(&adev->smu.toc_buf);
836 dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
837 return ret;
838 }
839
840 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
841 if (ret) {
842 amdgpu_bo_unreserve(adev->smu.toc_buf);
843 amdgpu_bo_unref(&adev->smu.toc_buf);
844 dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
845 return ret;
846 }
847
848 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
849 if (ret)
850 goto smu_init_failed;
851
852 amdgpu_bo_unreserve(adev->smu.toc_buf);
853
854 priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
855 priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
856 priv->toc_buffer.kaddr = toc_buf_ptr;
857
858 /* smu buffer reserve/pin/map */
859 ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
860 if (ret) {
861 amdgpu_bo_unref(&adev->smu.smu_buf);
862 dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
863 return ret;
864 }
865
866 ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
867 if (ret) {
868 amdgpu_bo_unreserve(adev->smu.smu_buf);
869 amdgpu_bo_unref(&adev->smu.smu_buf);
870 dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
871 return ret;
872 }
873
874 ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
875 if (ret)
876 goto smu_init_failed;
877
878 amdgpu_bo_unreserve(adev->smu.smu_buf);
879
880 priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
881 priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
882 priv->smu_buffer.kaddr = smu_buf_ptr;
883
884 if (adev->firmware.smu_load) {
885 if (cz_smu_populate_single_firmware_entry(adev,
886 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
887 &priv->driver_buffer[priv->driver_buffer_length++]))
888 goto smu_init_failed;
889 if (cz_smu_populate_single_firmware_entry(adev,
890 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
891 &priv->driver_buffer[priv->driver_buffer_length++]))
892 goto smu_init_failed;
893 if (cz_smu_populate_single_firmware_entry(adev,
894 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
895 &priv->driver_buffer[priv->driver_buffer_length++]))
896 goto smu_init_failed;
897 if (cz_smu_populate_single_firmware_entry(adev,
898 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
899 &priv->driver_buffer[priv->driver_buffer_length++]))
900 goto smu_init_failed;
901 if (cz_smu_populate_single_firmware_entry(adev,
902 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
903 &priv->driver_buffer[priv->driver_buffer_length++]))
904 goto smu_init_failed;
905 if (cz_smu_populate_single_firmware_entry(adev,
906 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
907 &priv->driver_buffer[priv->driver_buffer_length++]))
908 goto smu_init_failed;
909 if (cz_smu_populate_single_firmware_entry(adev,
910 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
911 &priv->driver_buffer[priv->driver_buffer_length++]))
912 goto smu_init_failed;
913 if (cz_smu_populate_single_firmware_entry(adev,
914 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
915 &priv->driver_buffer[priv->driver_buffer_length++]))
916 goto smu_init_failed;
917 }
918
919 if (cz_smu_populate_single_scratch_entry(adev,
920 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
921 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
922 &priv->scratch_buffer[priv->scratch_buffer_length++]))
923 goto smu_init_failed;
924 if (cz_smu_populate_single_scratch_entry(adev,
925 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
926 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
927 &priv->scratch_buffer[priv->scratch_buffer_length++]))
928 goto smu_init_failed;
929 if (cz_smu_populate_single_scratch_entry(adev,
930 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
931 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
932 &priv->scratch_buffer[priv->scratch_buffer_length++]))
933 goto smu_init_failed;
934 if (cz_smu_populate_single_scratch_entry(adev,
935 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
936 sizeof(struct SMU8_MultimediaPowerLogData),
937 &priv->scratch_buffer[priv->scratch_buffer_length++]))
938 goto smu_init_failed;
939 if (cz_smu_populate_single_scratch_entry(adev,
940 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
941 sizeof(struct SMU8_Fusion_ClkTable),
942 &priv->scratch_buffer[priv->scratch_buffer_length++]))
943 goto smu_init_failed;
944
945 cz_smu_initialize_toc_empty_job_list(adev);
946 cz_smu_construct_toc_for_rlc_aram_save(adev);
947 cz_smu_construct_toc_for_vddgfx_enter(adev);
948 cz_smu_construct_toc_for_vddgfx_exit(adev);
949 cz_smu_construct_toc_for_power_profiling(adev);
950 cz_smu_construct_toc_for_bootup(adev);
951 cz_smu_construct_toc_for_clock_table(adev);
952 /* init the smumgr functions */
953 adev->smu.smumgr_funcs = &cz_smumgr_funcs;
954
955 return 0;
956
957smu_init_failed:
958 amdgpu_bo_unref(toc_buf);
959 amdgpu_bo_unref(smu_buf);
960
961 return ret;
962}