Diffstat (limited to 'include/gk20a/flcn_gk20a.c')
-rw-r--r--  include/gk20a/flcn_gk20a.c | 759
1 file changed, 759 insertions(+), 0 deletions(-)
diff --git a/include/gk20a/flcn_gk20a.c b/include/gk20a/flcn_gk20a.c
new file mode 100644
index 0000000..fdcaef9
--- /dev/null
+++ b/include/gk20a/flcn_gk20a.c
@@ -0,0 +1,759 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <nvgpu/falcon.h>
#include <nvgpu/pmu.h>
#include <nvgpu/io.h>

#include "gk20a/gk20a.h"
#include "gk20a/flcn_gk20a.h"

#include <nvgpu/hw/gm20b/hw_falcon_gm20b.h>

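/*
 * gk20a/gm20b falcon backend: reset, interrupt control, IMEM/DMEM copy,
 * bootstrap and debug-dump helpers, wired into nvgpu_falcon_ops by
 * gk20a_falcon_hal_sw_init().
 */
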
static int gk20a_flcn_reset(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	int status = 0;

	if (flcn->flcn_engine_dep_ops.reset_eng) {
		/* falcon & engine reset */
		status = flcn->flcn_engine_dep_ops.reset_eng(g);
	} else {
		/* do falcon CPU hard reset */
		unit_status = gk20a_readl(g, base_addr +
				falcon_falcon_cpuctl_r());
		gk20a_writel(g, base_addr + falcon_falcon_cpuctl_r(),
			(unit_status | falcon_falcon_cpuctl_hreset_f(1)));
	}

	return status;
}

static bool gk20a_flcn_clear_halt_interrupt_status(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 data = 0;
	bool status = false;

	/* set the halt bit in IRQSCLR to clear a pending halt interrupt */
	gk20a_writel(g, base_addr + falcon_falcon_irqsclr_r(),
		gk20a_readl(g, base_addr + falcon_falcon_irqsclr_r()) |
		(0x10));
	data = gk20a_readl(g, (base_addr + falcon_falcon_irqstat_r()));

	if ((data & falcon_falcon_irqstat_halt_true_f()) !=
		falcon_falcon_irqstat_halt_true_f()) {
		/* halt irq is clear */
		status = true;
	}

	return status;
}

static void gk20a_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	if (!flcn->is_interrupt_enabled) {
		nvgpu_warn(g, "Interrupt not supported on flcn 0x%x",
			flcn->flcn_id);
		/* Keep interrupt disabled */
		enable = false;
	}

	if (enable) {
		gk20a_writel(g, base_addr + falcon_falcon_irqmset_r(),
			flcn->intr_mask);
		gk20a_writel(g, base_addr + falcon_falcon_irqdest_r(),
			flcn->intr_dest);
	} else {
		gk20a_writel(g, base_addr + falcon_falcon_irqmclr_r(),
			0xffffffff);
	}
}

static bool gk20a_is_falcon_cpu_halted(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	return (gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()) &
		falcon_falcon_cpuctl_halt_intr_m() ?
		true : false);
}

static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	bool status = false;

	unit_status = gk20a_readl(g,
		base_addr + falcon_falcon_idlestate_r());

	if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0 &&
		falcon_falcon_idlestate_ext_busy_v(unit_status) == 0) {
		status = true;
	} else {
		status = false;
	}

	return status;
}

static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	bool status = false;

	unit_status = gk20a_readl(g,
		base_addr + falcon_falcon_dmactl_r());

	if (unit_status & (falcon_falcon_dmactl_dmem_scrubbing_m() |
		falcon_falcon_dmactl_imem_scrubbing_m())) {
		status = false;
	} else {
		status = true;
	}

	return status;
}

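/*
 * HWCFG reports IMEM and DMEM sizes as a count of 256B blocks, so the
 * value is shifted up by GK20A_PMU_DMEM_BLKSIZE2 (log2 of the block
 * size) to get a size in bytes.
 */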
static u32 gk20a_falcon_get_mem_size(struct nvgpu_falcon *flcn,
		enum flcn_mem_type mem_type)
{
	struct gk20a *g = flcn->g;
	u32 mem_size = 0;
	u32 hw_cfg_reg = gk20a_readl(g,
		flcn->flcn_base + falcon_falcon_hwcfg_r());

	if (mem_type == MEM_DMEM) {
		mem_size = falcon_falcon_hwcfg_dmem_size_v(hw_cfg_reg)
			<< GK20A_PMU_DMEM_BLKSIZE2;
	} else {
		mem_size = falcon_falcon_hwcfg_imem_size_v(hw_cfg_reg)
			<< GK20A_PMU_DMEM_BLKSIZE2;
	}

	return mem_size;
}

static int flcn_mem_overflow_check(struct nvgpu_falcon *flcn,
		u32 offset, u32 size, enum flcn_mem_type mem_type)
{
	struct gk20a *g = flcn->g;
	u32 mem_size = 0;

	if (size == 0) {
		nvgpu_err(g, "size is zero");
		return -EINVAL;
	}

	if (offset & 0x3) {
		nvgpu_err(g, "offset (0x%08x) not 4-byte aligned", offset);
		return -EINVAL;
	}

	mem_size = gk20a_falcon_get_mem_size(flcn, mem_type);
	if (!(offset <= mem_size && (offset + size) <= mem_size)) {
		nvgpu_err(g, "flcn-id 0x%x, copy overflow",
			flcn->flcn_id);
		nvgpu_err(g, "total size 0x%x, offset 0x%x, copy size 0x%x",
			mem_size, offset, size);
		return -EINVAL;
	}

	return 0;
}

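/*
 * DMEM copies go through the DMEMC/DMEMD port registers: DMEMC is
 * loaded with the starting address (OFFS within a 256B block plus the
 * BLK index) and an auto-increment flag, after which every access to
 * DMEMD transfers four bytes and advances the address. A buffer is
 * therefore moved with a single DMEMC setup followed by size/4 DMEMD
 * accesses, with any trailing bytes handled as a partial word.
 */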
static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
		u32 src, u8 *dst, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i, words, bytes;
	u32 data, addr_mask;
	u32 *dst_u32 = (u32 *)dst;

	nvgpu_log_fn(g, "src dmem offset - %x, size - %x", src, size);

	if (flcn_mem_overflow_check(flcn, src, size, MEM_DMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;

	addr_mask = falcon_falcon_dmemc_offs_m() |
		falcon_falcon_dmemc_blk_m();

	src &= addr_mask;

	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
		src | falcon_falcon_dmemc_aincr_f(1));

	for (i = 0; i < words; i++) {
		dst_u32[i] = gk20a_readl(g,
			base_addr + falcon_falcon_dmemd_r(port));
	}

	if (bytes > 0) {
		data = gk20a_readl(g, base_addr + falcon_falcon_dmemd_r(port));
		for (i = 0; i < bytes; i++) {
			dst[(words << 2) + i] = ((u8 *)&data)[i];
		}
	}

	nvgpu_mutex_release(&flcn->copy_lock);
	return 0;
}

static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
		u32 dst, u8 *src, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i, words, bytes;
	u32 data, addr_mask;
	u32 *src_u32 = (u32 *)src;

	nvgpu_log_fn(g, "dest dmem offset - %x, size - %x", dst, size);

	if (flcn_mem_overflow_check(flcn, dst, size, MEM_DMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;

	addr_mask = falcon_falcon_dmemc_offs_m() |
		falcon_falcon_dmemc_blk_m();

	dst &= addr_mask;

	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
		dst | falcon_falcon_dmemc_aincw_f(1));

	for (i = 0; i < words; i++) {
		gk20a_writel(g,
			base_addr + falcon_falcon_dmemd_r(port), src_u32[i]);
	}

	if (bytes > 0) {
		data = 0;
		for (i = 0; i < bytes; i++) {
			((u8 *)&data)[i] = src[(words << 2) + i];
		}
		gk20a_writel(g, base_addr + falcon_falcon_dmemd_r(port), data);
	}

	size = ALIGN(size, 4);
	data = gk20a_readl(g,
		base_addr + falcon_falcon_dmemc_r(port)) & addr_mask;
	if (data != ((dst + size) & addr_mask)) {
		nvgpu_warn(g, "copy failed. bytes written %d, expected %d",
			data - dst, size);
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
		u8 *dst, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 *dst_u32 = (u32 *)dst;
	u32 words = 0;
	u32 bytes = 0;
	u32 data = 0;
	u32 blk = 0;
	u32 i = 0;

	nvgpu_log_info(g, "download %d bytes from 0x%x", size, src);

	if (flcn_mem_overflow_check(flcn, src, size, MEM_IMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;
	blk = src >> 8;

	nvgpu_log_info(g, "download %d words from 0x%x block %d",
		words, src, blk);

	gk20a_writel(g, base_addr + falcon_falcon_imemc_r(port),
		falcon_falcon_imemc_offs_f(src >> 2) |
		falcon_falcon_imemc_blk_f(blk) |
		/* the dmemc accessor is reused here to set
		 * auto-increment on read
		 */
		falcon_falcon_dmemc_aincr_f(1));

	for (i = 0; i < words; i++) {
		dst_u32[i] = gk20a_readl(g,
			base_addr + falcon_falcon_imemd_r(port));
	}

	if (bytes > 0) {
		data = gk20a_readl(g, base_addr + falcon_falcon_imemd_r(port));
		for (i = 0; i < bytes; i++) {
			dst[(words << 2) + i] = ((u8 *)&data)[i];
		}
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

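/*
 * IMEM is written in 256B blocks, and each block carries a tag that the
 * falcon uses to map virtual code addresses onto physical blocks (see
 * gk20a_falcon_dump_imblk() below). The tag is programmed through IMEMT
 * before each block (64 words) is streamed through IMEMD, which is why
 * the copy below bumps the tag every 64 words and zero-fills the tail
 * of the last block.
 */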
static int gk20a_flcn_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
		u8 *src, u32 size, u8 port, bool sec, u32 tag)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 *src_u32 = (u32 *)src;
	u32 words = 0;
	u32 blk = 0;
	u32 i = 0;

	nvgpu_log_info(g, "upload %d bytes to 0x%x", size, dst);

	if (flcn_mem_overflow_check(flcn, dst, size, MEM_IMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	blk = dst >> 8;

	nvgpu_log_info(g, "upload %d words to 0x%x block %d, tag 0x%x",
		words, dst, blk, tag);

	gk20a_writel(g, base_addr + falcon_falcon_imemc_r(port),
		falcon_falcon_imemc_offs_f(dst >> 2) |
		falcon_falcon_imemc_blk_f(blk) |
		/* Set auto-increment on write */
		falcon_falcon_imemc_aincw_f(1) |
		falcon_falcon_imemc_secure_f(sec ? 1U : 0U));

	for (i = 0; i < words; i++) {
		if (i % 64 == 0) {
			/* tag is always 256B aligned */
			gk20a_writel(g, base_addr + falcon_falcon_imemt_r(0),
				tag);
			tag++;
		}

		gk20a_writel(g, base_addr + falcon_falcon_imemd_r(port),
			src_u32[i]);
	}

	/* WARNING: setting the remaining bytes in the block to 0x0 */
	while (i % 64) {
		gk20a_writel(g, base_addr + falcon_falcon_imemd_r(port), 0);
		i++;
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

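/*
 * Start ucode execution: drop the DMA context requirement, point the
 * boot vector at the given (virtual) address, then set STARTCPU in
 * CPUCTL.
 */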
static int gk20a_falcon_bootstrap(struct nvgpu_falcon *flcn,
		u32 boot_vector)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	nvgpu_log_info(g, "boot vec 0x%x", boot_vector);

	gk20a_writel(g, base_addr + falcon_falcon_dmactl_r(),
		falcon_falcon_dmactl_require_ctx_f(0));

	gk20a_writel(g, base_addr + falcon_falcon_bootvec_r(),
		falcon_falcon_bootvec_vec_f(boot_vector));

	gk20a_writel(g, base_addr + falcon_falcon_cpuctl_r(),
		falcon_falcon_cpuctl_startcpu_f(1));

	return 0;
}

static u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
		u32 mailbox_index)
{
	struct gk20a *g = flcn->g;
	u32 data = 0;

	if (mailbox_index < FALCON_MAILBOX_COUNT) {
		data = gk20a_readl(g, flcn->flcn_base + (mailbox_index ?
			falcon_falcon_mailbox1_r() :
			falcon_falcon_mailbox0_r()));
	} else {
		nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
	}

	return data;
}

static void gk20a_falcon_mailbox_write(struct nvgpu_falcon *flcn,
		u32 mailbox_index, u32 data)
{
	struct gk20a *g = flcn->g;

	if (mailbox_index < FALCON_MAILBOX_COUNT) {
		gk20a_writel(g, flcn->flcn_base + (mailbox_index ?
			falcon_falcon_mailbox1_r() :
			falcon_falcon_mailbox0_r()),
			data);
	} else {
		nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
	}
}

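/*
 * Bootloader bootstrap sequence: copy the bootloader descriptor to
 * DMEM offset 0, copy the bootloader itself to the top of IMEM with
 * its start tag, place a token (0xDEADA5A5) in MAILBOX0, then start
 * the CPU at the virtual address derived from the start tag.
 */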
static int gk20a_falcon_bl_bootstrap(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_bl_info *bl_info)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 virt_addr = 0;
	u32 dst = 0;
	int err = 0;

	/* copy bootloader interface structure to DMEM */
	err = gk20a_flcn_copy_to_dmem(flcn, 0, (u8 *)bl_info->bl_desc,
		bl_info->bl_desc_size, (u8)0);
	if (err != 0) {
		goto exit;
	}

	/* copy bootloader to TOP of IMEM */
	dst = (falcon_falcon_hwcfg_imem_size_v(gk20a_readl(g,
		base_addr + falcon_falcon_hwcfg_r())) << 8) - bl_info->bl_size;

	err = gk20a_flcn_copy_to_imem(flcn, dst, (u8 *)(bl_info->bl_src),
		bl_info->bl_size, (u8)0, false, bl_info->bl_start_tag);
	if (err != 0) {
		goto exit;
	}

	gk20a_falcon_mailbox_write(flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);

	virt_addr = bl_info->bl_start_tag << 8;

	err = gk20a_falcon_bootstrap(flcn, virt_addr);

exit:
	if (err != 0) {
		nvgpu_err(g, "falcon id-0x%x bootstrap failed", flcn->flcn_id);
	}

	return err;
}

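/*
 * Dump the IMEM block mapping by stepping IMCTL_DEBUG through every
 * physical block: command 0x2 latches the status of the selected block
 * into IMSTAT, which is then read back and printed eight blocks at a
 * time.
 */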
static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i = 0, j = 0;
	u32 data[8] = {0};
	u32 block_count = 0;

	block_count = falcon_falcon_hwcfg_imem_size_v(gk20a_readl(g,
		flcn->flcn_base + falcon_falcon_hwcfg_r()));

	/* round block_count down to a multiple of 8 */
	block_count &= ~0x7;
	nvgpu_err(g, "FALCON IMEM BLK MAPPING (PA->VA) (%d TOTAL):",
		block_count);

	for (i = 0; i < block_count; i += 8) {
		for (j = 0; j < 8; j++) {
			gk20a_writel(g, flcn->flcn_base +
				falcon_falcon_imctl_debug_r(),
				falcon_falcon_imctl_debug_cmd_f(0x2) |
				falcon_falcon_imctl_debug_addr_blk_f(i + j));

			data[j] = gk20a_readl(g, base_addr +
				falcon_falcon_imstat_r());
		}

		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
			i, data[0], data[1], data[2], data[3]);
		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
			i + 4, data[4], data[5], data[6], data[7]);
	}
}

static void gk20a_falcon_dump_pc_trace(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 trace_pc_count = 0;
	u32 pc = 0;
	u32 i = 0;

	if (gk20a_readl(g, base_addr + falcon_falcon_sctl_r()) & 0x02) {
		nvgpu_err(g, "falcon is in HS mode, PC TRACE dump not supported");
		return;
	}

	trace_pc_count = falcon_falcon_traceidx_maxidx_v(gk20a_readl(g,
		base_addr + falcon_falcon_traceidx_r()));
	nvgpu_err(g,
		"PC TRACE (TOTAL %d ENTRIES. entry 0 is the most recent branch):",
		trace_pc_count);

	for (i = 0; i < trace_pc_count; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_traceidx_r(),
			falcon_falcon_traceidx_idx_f(i));

		pc = falcon_falcon_tracepc_pc_v(gk20a_readl(g,
			base_addr + falcon_falcon_tracepc_r()));
		nvgpu_err(g, "FALCON_TRACEPC(%d) : %#010x", i, pc);
	}
}

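/*
 * Internal falcon state is read through the ICD (in-circuit debug)
 * interface: an RREG opcode plus a register index written to ICD_CMD
 * latches the selected register into ICD_RDATA. PC and SP are sampled
 * four times so the dump shows whether the CPU is still advancing.
 */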
void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	unsigned int i;

	nvgpu_err(g, "<<< FALCON id-%d DEBUG INFORMATION - START >>>",
		flcn->flcn_id);

	/* imblk dump */
	gk20a_falcon_dump_imblk(flcn);
	/* PC trace dump */
	gk20a_falcon_dump_pc_trace(flcn);

	nvgpu_err(g, "FALCON ICD REGISTERS DUMP");

	for (i = 0; i < 4; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(FALCON_REG_PC));
		nvgpu_err(g, "FALCON_REG_PC : 0x%x",
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));

		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(FALCON_REG_SP));
		nvgpu_err(g, "FALCON_REG_SP : 0x%x",
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));
	}

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_IMB));
	nvgpu_err(g, "FALCON_REG_IMB : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_DMB));
	nvgpu_err(g, "FALCON_REG_DMB : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CSW));
	nvgpu_err(g, "FALCON_REG_CSW : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CTX));
	nvgpu_err(g, "FALCON_REG_CTX : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_EXCI));
	nvgpu_err(g, "FALCON_REG_EXCI : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	for (i = 0; i < 6; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(
			falcon_falcon_icd_cmd_opc_rstat_f()));
		nvgpu_err(g, "FALCON_REG_RSTAT[%d] : 0x%x", i,
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));
	}

	nvgpu_err(g, "FALCON REGISTERS DUMP");
	nvgpu_err(g, "falcon_falcon_os_r : %d",
		gk20a_readl(g, base_addr + falcon_falcon_os_r()));
	nvgpu_err(g, "falcon_falcon_cpuctl_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()));
	nvgpu_err(g, "falcon_falcon_idlestate_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_idlestate_r()));
	nvgpu_err(g, "falcon_falcon_mailbox0_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_mailbox0_r()));
	nvgpu_err(g, "falcon_falcon_mailbox1_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_mailbox1_r()));
	nvgpu_err(g, "falcon_falcon_irqstat_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqstat_r()));
	nvgpu_err(g, "falcon_falcon_irqmode_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqmode_r()));
	nvgpu_err(g, "falcon_falcon_irqmask_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqmask_r()));
	nvgpu_err(g, "falcon_falcon_irqdest_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqdest_r()));
	nvgpu_err(g, "falcon_falcon_debug1_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_debug1_r()));
	nvgpu_err(g, "falcon_falcon_debuginfo_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_debuginfo_r()));
	nvgpu_err(g, "falcon_falcon_bootvec_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_bootvec_r()));
	nvgpu_err(g, "falcon_falcon_hwcfg_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_hwcfg_r()));
	nvgpu_err(g, "falcon_falcon_engctl_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_engctl_r()));
	nvgpu_err(g, "falcon_falcon_curctx_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_curctx_r()));
	nvgpu_err(g, "falcon_falcon_nxtctx_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_nxtctx_r()));
	nvgpu_err(g, "falcon_falcon_exterrstat_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_exterrstat_r()));
	nvgpu_err(g, "falcon_falcon_exterraddr_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_exterraddr_r()));
}

static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
		&flcn->flcn_engine_dep_ops;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
		break;
	default:
		/* The NULL assignment makes sure the CPU hard reset in
		 * gk20a_flcn_reset() gets executed if the falcon doesn't
		 * need an engine-specific reset implementation.
		 */
		flcn_eng_dep_ops->reset_eng = NULL;
		break;
	}
}

void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
{
	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;

	flcn_ops->reset = gk20a_flcn_reset;
	flcn_ops->set_irq = gk20a_flcn_set_irq;
	flcn_ops->clear_halt_interrupt_status =
		gk20a_flcn_clear_halt_interrupt_status;
	flcn_ops->is_falcon_cpu_halted = gk20a_is_falcon_cpu_halted;
	flcn_ops->is_falcon_idle = gk20a_is_falcon_idle;
	flcn_ops->is_falcon_scrubbing_done = gk20a_is_falcon_scrubbing_done;
	flcn_ops->copy_from_dmem = gk20a_flcn_copy_from_dmem;
	flcn_ops->copy_to_dmem = gk20a_flcn_copy_to_dmem;
	flcn_ops->copy_to_imem = gk20a_flcn_copy_to_imem;
	flcn_ops->copy_from_imem = gk20a_flcn_copy_from_imem;
	flcn_ops->bootstrap = gk20a_falcon_bootstrap;
	flcn_ops->dump_falcon_stats = gk20a_falcon_dump_stats;
	flcn_ops->mailbox_read = gk20a_falcon_mailbox_read;
	flcn_ops->mailbox_write = gk20a_falcon_mailbox_write;
	flcn_ops->bl_bootstrap = gk20a_falcon_bl_bootstrap;

	gk20a_falcon_engine_dependency_ops(flcn);
}

int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	int err = 0;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn->flcn_base = FALCON_PWR_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = true;
		break;
	case FALCON_ID_SEC2:
		flcn->flcn_base = FALCON_SEC_BASE;
		flcn->is_falcon_supported = false;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_FECS:
		flcn->flcn_base = FALCON_FECS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_GPCCS:
		flcn->flcn_base = FALCON_GPCCS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_NVDEC:
		flcn->flcn_base = FALCON_NVDEC_BASE;
		flcn->is_falcon_supported = false;
		flcn->is_interrupt_enabled = false;
		break;
	default:
		flcn->is_falcon_supported = false;
		break;
	}

	if (flcn->is_falcon_supported) {
		err = nvgpu_mutex_init(&flcn->copy_lock);
		if (err != 0) {
			nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
		} else {
			gk20a_falcon_ops(flcn);
		}
	} else {
		nvgpu_log_info(g, "falcon 0x%x not supported on %s",
			flcn->flcn_id, g->name);
	}

	return err;
}