Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/iceland_smc.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c  675
1 file changed, 675 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
new file mode 100644
index 000000000000..c6f1e2f12b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -0,0 +1,675 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "ppsmc.h"
#include "iceland_smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#define ICELAND_SMC_SIZE 0x20000 /* 128 KiB of SMC RAM */
36 | |||
37 | static int iceland_set_smc_sram_address(struct amdgpu_device *adev, | ||
38 | uint32_t smc_address, uint32_t limit) | ||
39 | { | ||
40 | uint32_t val; | ||
41 | |||
42 | if (smc_address & 3) | ||
43 | return -EINVAL; | ||
44 | |||
45 | if ((smc_address + 3) > limit) | ||
46 | return -EINVAL; | ||
47 | |||
	WREG32(mmSMC_IND_INDEX_0, smc_address);

	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);

	return 0;
}

static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
				     uint32_t smc_start_address,
				     const uint8_t *src,
				     uint32_t byte_count, uint32_t limit)
{
	uint32_t addr;
	uint32_t data, orig_data;
	int result = 0;
	uint32_t extra_shift;
	unsigned long flags;

	if (smc_start_address & 3)
		return -EINVAL;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		result = iceland_set_smc_sram_address(adev, addr, limit);

		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Now write the remaining odd bytes; do a read-modify-write cycle */
		data = 0;

		result = iceland_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		orig_data = RREG32(mmSMC_IND_DATA_0);
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;
		data |= (orig_data & ~((~0UL) << extra_shift));

		result = iceland_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);
	}

out:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

void iceland_start_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}

void iceland_reset_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}

static int iceland_program_jump_on_start(struct amdgpu_device *adev)
{
	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
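	/* Plant a jump at SMC address 0 (presumably the reset vector) so
	 * the SMC branches into the freshly uploaded firmware once it is
	 * taken out of reset. */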
	iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

void iceland_stop_smc_clock(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}

void iceland_start_smc_clock(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}

static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);

	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
}

static int wait_smu_response(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(mmSMC_RESP_0);
		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -EINVAL;

	return 0;
}

static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
	if (!iceland_is_smc_ram_running(adev))
		return -EINVAL;

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send previous message\n");
		return -EINVAL;
	}

	WREG32(mmSMC_MESSAGE_0, msg);

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send message\n");
		return -EINVAL;
	}

	return 0;
}

static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
						   PPSMC_Msg msg)
{
	if (!iceland_is_smc_ram_running(adev))
		return -EINVAL;
215 | |||
216 | if (wait_smu_response(adev)) { | ||
217 | DRM_ERROR("Failed to send previous message\n"); | ||
218 | return -EINVAL; | ||
219 | } | ||
220 | |||
221 | WREG32(mmSMC_MESSAGE_0, msg); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, | ||
227 | PPSMC_Msg msg, | ||
228 | uint32_t parameter) | ||
229 | { | ||
230 | WREG32(mmSMC_MSG_ARG_0, parameter); | ||
231 | |||
232 | return iceland_send_msg_to_smc(adev, msg); | ||
233 | } | ||
234 | |||
235 | static int iceland_send_msg_to_smc_with_parameter_without_waiting( | ||
236 | struct amdgpu_device *adev, | ||
237 | PPSMC_Msg msg, uint32_t parameter) | ||
238 | { | ||
239 | WREG32(mmSMC_MSG_ARG_0, parameter); | ||
240 | |||
241 | return iceland_send_msg_to_smc_without_waiting(adev, msg); | ||
242 | } | ||
243 | |||
244 | #if 0 /* not used yet */ | ||
245 | static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev) | ||
246 | { | ||
247 | int i; | ||
248 | uint32_t val; | ||
249 | |||
250 | if (!iceland_is_smc_ram_running(adev)) | ||
251 | return -EINVAL; | ||
252 | |||
253 | for (i = 0; i < adev->usec_timeout; i++) { | ||
254 | val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
255 | if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) | ||
256 | break; | ||
257 | udelay(1); | ||
258 | } | ||
259 | |||
260 | if (i == adev->usec_timeout) | ||
261 | return -EINVAL; | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | #endif | ||
266 | |||
267 | static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) | ||
268 | { | ||
269 | const struct smc_firmware_header_v1_0 *hdr; | ||
270 | uint32_t ucode_size; | ||
271 | uint32_t ucode_start_address; | ||
272 | const uint8_t *src; | ||
273 | uint32_t val; | ||
274 | uint32_t byte_count; | ||
275 | uint32_t data; | ||
276 | unsigned long flags; | ||
277 | int i; | ||
278 | |||
279 | if (!adev->pm.fw) | ||
280 | return -EINVAL; | ||
281 | |||
282 | hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; | ||
283 | amdgpu_ucode_print_smc_hdr(&hdr->header); | ||
284 | |||
285 | adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
286 | ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); | ||
287 | ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); | ||
288 | src = (const uint8_t *) | ||
289 | (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
290 | |||
	if (ucode_size & 3) {
		DRM_ERROR("SMC ucode is not 4-byte aligned\n");
		return -EINVAL;
	}

	if (ucode_size > ICELAND_SMC_SIZE) {
		DRM_ERROR("SMC ucode is too large for the SMC RAM area\n");
		return -EINVAL;
	}
300 | |||
301 | for (i = 0; i < adev->usec_timeout; i++) { | ||
302 | val = RREG32_SMC(ixRCU_UC_EVENTS); | ||
303 | if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0) | ||
304 | break; | ||
305 | udelay(1); | ||
306 | } | ||
307 | val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL); | ||
308 | WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1); | ||
309 | |||
	iceland_stop_smc_clock(adev);
	iceland_reset_smc(adev);

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);

	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);

	byte_count = ucode_size;
	while (byte_count >= 4) {
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
		WREG32(mmSMC_IND_DATA_0, data);
		src += 4;
		byte_count -= 4;
	}
	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return 0;
}

#if 0 /* not used yet */
static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
				       uint32_t smc_address,
				       uint32_t *value,
				       uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = iceland_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		*value = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
					uint32_t smc_address,
					uint32_t value,
					uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = iceland_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		WREG32(mmSMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

static int iceland_smu_stop_smc(struct amdgpu_device *adev)
{
	iceland_reset_smc(adev);
	iceland_stop_smc_clock(adev);

	return 0;
}
#endif

static int iceland_smu_start_smc(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	iceland_program_jump_on_start(adev);
	iceland_start_smc_clock(adev);
	iceland_start_smc(adev);

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixFIRMWARE_FLAGS);
		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
			break;
		udelay(1);
	}
	return 0;
}

static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
{
	switch (fw_type) {
	case UCODE_ID_SDMA0:
		return AMDGPU_UCODE_ID_SDMA0;
	case UCODE_ID_SDMA1:
		return AMDGPU_UCODE_ID_SDMA1;
	case UCODE_ID_CP_CE:
		return AMDGPU_UCODE_ID_CP_CE;
	case UCODE_ID_CP_PFP:
		return AMDGPU_UCODE_ID_CP_PFP;
	case UCODE_ID_CP_ME:
		return AMDGPU_UCODE_ID_CP_ME;
	case UCODE_ID_CP_MEC:
	case UCODE_ID_CP_MEC_JT1:
		return AMDGPU_UCODE_ID_CP_MEC1;
	case UCODE_ID_CP_MEC_JT2:
		return AMDGPU_UCODE_ID_CP_MEC2;
	case UCODE_ID_RLC_G:
		return AMDGPU_UCODE_ID_RLC_G;
	default:
		DRM_ERROR("ucode type is out of range!\n");
		return AMDGPU_UCODE_ID_MAXIMUM;
	}
}

static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
{
	switch (fw_type) {
	case AMDGPU_UCODE_ID_SDMA0:
		return UCODE_ID_SDMA0_MASK;
	case AMDGPU_UCODE_ID_SDMA1:
		return UCODE_ID_SDMA1_MASK;
	case AMDGPU_UCODE_ID_CP_CE:
		return UCODE_ID_CP_CE_MASK;
	case AMDGPU_UCODE_ID_CP_PFP:
		return UCODE_ID_CP_PFP_MASK;
	case AMDGPU_UCODE_ID_CP_ME:
		return UCODE_ID_CP_ME_MASK;
	case AMDGPU_UCODE_ID_CP_MEC1:
		return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
	case AMDGPU_UCODE_ID_CP_MEC2:
		return UCODE_ID_CP_MEC_MASK;
	case AMDGPU_UCODE_ID_RLC_G:
		return UCODE_ID_RLC_G_MASK;
	default:
		DRM_ERROR("ucode type is out of range!\n");
		return 0;
	}
}

static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
						      uint32_t fw_type,
						      struct SMU_Entry *entry)
{
	enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header = NULL;
	uint64_t gpu_addr;
	uint32_t data_size;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
	entry->id = (uint16_t)fw_type;
	entry->image_addr_high = upper_32_bits(gpu_addr);
	entry->image_addr_low = lower_32_bits(gpu_addr);
	entry->meta_data_addr_high = 0;
	entry->meta_data_addr_low = 0;
	entry->data_size_byte = data_size;
	entry->num_register_entries = 0;
	entry->flags = 0;

	return 0;
}

static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
	struct SMU_DRAMData_TOC *toc;
	uint32_t fw_to_load;

	toc = (struct SMU_DRAMData_TOC *)private->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	if (!adev->firmware.smu_load)
		return 0;

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for RLC\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for CE\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for PFP\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for ME\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
		return -EINVAL;
	}

	iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
	iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);

	fw_to_load = UCODE_ID_RLC_G_MASK |
		     UCODE_ID_SDMA0_MASK |
		     UCODE_ID_SDMA1_MASK |
		     UCODE_ID_CP_CE_MASK |
		     UCODE_ID_CP_ME_MASK |
		     UCODE_ID_CP_PFP_MASK |
		     UCODE_ID_CP_MEC_MASK |
		     UCODE_ID_CP_MEC_JT1_MASK |
		     UCODE_ID_CP_MEC_JT2_MASK;

	if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
		DRM_ERROR("Failed to request SMU to load ucode\n");
		return -EINVAL;
	}
560 | |||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev, | ||
565 | uint32_t fw_type) | ||
566 | { | ||
567 | uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type); | ||
568 | int i; | ||
569 | |||
	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("timed out waiting for firmware to load\n");
		return -EINVAL;
	}

	return 0;
}

int iceland_smu_start(struct amdgpu_device *adev)
{
	int result;

	result = iceland_smu_upload_firmware_image(adev);
	if (result)
		return result;
	result = iceland_smu_start_smc(adev);
	if (result)
		return result;

	return iceland_smu_request_load_fw(adev);
}

static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
	.check_fw_load_finish = iceland_smu_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};

int iceland_smu_init(struct amdgpu_device *adev)
{
	struct iceland_smu_private_data *private;
	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; /* TOC rounded up to whole 4 KiB pages */
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	uint64_t mc_addr;
	void *toc_buf_ptr;
	int ret;

	private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
	if (NULL == private)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = private;
	adev->smu.fw_flags = 0;

	/* Allocate FW image data structure and header buffer */
	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
	if (ret) {
		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
		return -ENOMEM;
	}

	/* Retrieve GPU address for header buffer and internal buffer */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to reserve the TOC buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to pin the TOC buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to map the TOC buffer\n");
		return -EINVAL;
	}

	amdgpu_bo_unreserve(adev->smu.toc_buf);
	private->header_addr_low = lower_32_bits(mc_addr);
	private->header_addr_high = upper_32_bits(mc_addr);
	private->header = toc_buf_ptr;

	adev->smu.smumgr_funcs = &iceland_smumgr_funcs;

	return 0;
}

int iceland_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.fw_buf)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}