path: root/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/flcn_gk20a.c')
-rw-r--r-- drivers/gpu/nvgpu/gk20a/flcn_gk20a.c | 693
1 files changed, 693 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
new file mode 100644
index 00000000..83850a19
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
@@ -0,0 +1,693 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <nvgpu/falcon.h>
#include <nvgpu/pmu.h>

#include "gk20a/gk20a.h"
#include "gk20a/flcn_gk20a.h"

#include <nvgpu/hw/gm20b/hw_falcon_gm20b.h>

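/*
 * Reset the falcon. If an engine-specific reset op is registered, use it to
 * reset both the engine and the falcon; otherwise fall back to a falcon CPU
 * hard reset through the CPUCTL register.
 */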
static int gk20a_flcn_reset(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	int status = 0;

	if (flcn->flcn_engine_dep_ops.reset_eng)
		/* falcon & engine reset */
		status = flcn->flcn_engine_dep_ops.reset_eng(g);
	else {
		/* do falcon CPU hard reset */
		unit_status = gk20a_readl(g, base_addr +
				falcon_falcon_cpuctl_r());
		gk20a_writel(g, base_addr + falcon_falcon_cpuctl_r(),
			(unit_status | falcon_falcon_cpuctl_hreset_f(1)));
	}

	return status;
}

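/*
 * Clear the halt interrupt by writing IRQSCLR, then read back IRQSTAT.
 * Returns true once the halt bit is no longer set.
 */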
static bool gk20a_flcn_clear_halt_interrupt_status(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 data = 0;
	bool status = false;

	gk20a_writel(g, base_addr + falcon_falcon_irqsclr_r(),
		gk20a_readl(g, base_addr + falcon_falcon_irqsclr_r()) |
		(0x10));
	data = gk20a_readl(g, (base_addr + falcon_falcon_irqstat_r()));

	if ((data & falcon_falcon_irqstat_halt_true_f()) !=
		falcon_falcon_irqstat_halt_true_f())
		/* halt irq is clear */
		status = true;

	return status;
}

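/*
 * Enable or disable falcon interrupts. If the falcon does not support
 * interrupts, force-disable them and warn. Enabling programs the mask and
 * destination registers from the per-falcon intr_mask/intr_dest values.
 */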
static void gk20a_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	if (!flcn->is_interrupt_enabled) {
		nvgpu_warn(g, "Interrupt not supported on flcn 0x%x ",
			flcn->flcn_id);
		/* Keep interrupt disabled */
		enable = false;
	}

	if (enable) {
		gk20a_writel(g, base_addr + falcon_falcon_irqmset_r(),
			flcn->intr_mask);
		gk20a_writel(g, base_addr + falcon_falcon_irqdest_r(),
			flcn->intr_dest);
	} else
		gk20a_writel(g, base_addr + falcon_falcon_irqmclr_r(),
			0xffffffff);
}

static bool gk20a_is_falcon_cpu_halted(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	return (gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()) &
			falcon_falcon_cpuctl_halt_intr_m() ?
			true : false);
}

static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	bool status = false;

	unit_status = gk20a_readl(g,
		base_addr + falcon_falcon_idlestate_r());

	if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0 &&
		falcon_falcon_idlestate_ext_busy_v(unit_status) == 0)
		status = true;
	else
		status = false;

	return status;
}

static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	bool status = false;

	unit_status = gk20a_readl(g,
		base_addr + falcon_falcon_dmactl_r());

	if (unit_status & (falcon_falcon_dmactl_dmem_scrubbing_m() |
		falcon_falcon_dmactl_imem_scrubbing_m()))
		status = false;
	else
		status = true;

	return status;
}

static u32 gk20a_falcon_get_mem_size(struct nvgpu_falcon *flcn, u32 mem_type)
{
	struct gk20a *g = flcn->g;
	u32 mem_size = 0;
	u32 hw_cfg_reg = gk20a_readl(g,
		flcn->flcn_base + falcon_falcon_hwcfg_r());

	if (mem_type == MEM_DMEM)
		mem_size = falcon_falcon_hwcfg_dmem_size_v(hw_cfg_reg)
			<< GK20A_PMU_DMEM_BLKSIZE2;
	else
		mem_size = falcon_falcon_hwcfg_imem_size_v(hw_cfg_reg)
			<< GK20A_PMU_DMEM_BLKSIZE2;

	return mem_size;
}

static int flcn_mem_overflow_check(struct nvgpu_falcon *flcn,
		u32 offset, u32 size, u32 mem_type)
{
	struct gk20a *g = flcn->g;
	u32 mem_size = 0;

	if (size == 0) {
		nvgpu_err(g, "size is zero");
		return -EINVAL;
	}

	if (offset & 0x3) {
		nvgpu_err(g, "offset (0x%08x) not 4-byte aligned", offset);
		return -EINVAL;
	}

	mem_size = gk20a_falcon_get_mem_size(flcn, mem_type);
	if (!(offset <= mem_size && (offset + size) <= mem_size)) {
		nvgpu_err(g, "flcn-id 0x%x, copy overflow ",
			flcn->flcn_id);
		nvgpu_err(g, "total size 0x%x, offset 0x%x, copy size 0x%x",
			mem_size, offset, size);
		return -EINVAL;
	}

	return 0;
}

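/*
 * Read 'size' bytes from falcon DMEM at offset 'src' into 'dst'. DMEMC is
 * programmed with auto-increment on read, so whole words are streamed out
 * of DMEMD; a trailing partial word is copied byte by byte. The copy_lock
 * mutex serializes access to the DMEM port.
 */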
static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
		u32 src, u8 *dst, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i, words, bytes;
	u32 data, addr_mask;
	u32 *dst_u32 = (u32 *)dst;

	nvgpu_log_fn(g, " src dmem offset - %x, size - %x", src, size);

	if (flcn_mem_overflow_check(flcn, src, size, MEM_DMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;

	addr_mask = falcon_falcon_dmemc_offs_m() |
		falcon_falcon_dmemc_blk_m();

	src &= addr_mask;

	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
		src | falcon_falcon_dmemc_aincr_f(1));

	for (i = 0; i < words; i++)
		dst_u32[i] = gk20a_readl(g,
			base_addr + falcon_falcon_dmemd_r(port));

	if (bytes > 0) {
		data = gk20a_readl(g, base_addr + falcon_falcon_dmemd_r(port));
		for (i = 0; i < bytes; i++)
			dst[(words << 2) + i] = ((u8 *)&data)[i];
	}

	nvgpu_mutex_release(&flcn->copy_lock);
	return 0;
}

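/*
 * Write 'size' bytes from 'src' to falcon DMEM at offset 'dst' using
 * auto-increment on write. After the copy, the DMEMC offset is read back
 * and compared against the expected end address to detect a short write.
 */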
static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
		u32 dst, u8 *src, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i, words, bytes;
	u32 data, addr_mask;
	u32 *src_u32 = (u32 *)src;

	nvgpu_log_fn(g, "dest dmem offset - %x, size - %x", dst, size);

	if (flcn_mem_overflow_check(flcn, dst, size, MEM_DMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;

	addr_mask = falcon_falcon_dmemc_offs_m() |
		falcon_falcon_dmemc_blk_m();

	dst &= addr_mask;

	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
		dst | falcon_falcon_dmemc_aincw_f(1));

	for (i = 0; i < words; i++)
		gk20a_writel(g,
			base_addr + falcon_falcon_dmemd_r(port), src_u32[i]);

	if (bytes > 0) {
		data = 0;
		for (i = 0; i < bytes; i++)
			((u8 *)&data)[i] = src[(words << 2) + i];
		gk20a_writel(g, base_addr + falcon_falcon_dmemd_r(port), data);
	}

	size = ALIGN(size, 4);
	data = gk20a_readl(g,
		base_addr + falcon_falcon_dmemc_r(port)) & addr_mask;
	if (data != ((dst + size) & addr_mask)) {
		nvgpu_warn(g, "copy failed. bytes written %d, expected %d",
			data - dst, size);
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
	u8 *dst, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 *dst_u32 = (u32 *)dst;
	u32 words = 0;
	u32 bytes = 0;
	u32 data = 0;
	u32 blk = 0;
	u32 i = 0;

	nvgpu_log_info(g, "download %d bytes from 0x%x", size, src);

	if (flcn_mem_overflow_check(flcn, src, size, MEM_IMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;
	blk = src >> 8;

	nvgpu_log_info(g, "download %d words from 0x%x block %d",
		words, src, blk);

	gk20a_writel(g, base_addr + falcon_falcon_imemc_r(port),
		falcon_falcon_imemc_offs_f(src >> 2) |
		falcon_falcon_imemc_blk_f(blk) |
		falcon_falcon_dmemc_aincr_f(1));

	for (i = 0; i < words; i++)
		dst_u32[i] = gk20a_readl(g,
			base_addr + falcon_falcon_imemd_r(port));

	if (bytes > 0) {
		data = gk20a_readl(g, base_addr + falcon_falcon_imemd_r(port));
		for (i = 0; i < bytes; i++)
			dst[(words << 2) + i] = ((u8 *)&data)[i];
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

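/*
 * Upload a ucode blob from 'src' to falcon IMEM at offset 'dst'. IMEM is
 * organized in 256-byte blocks: the block tag register is rewritten every
 * 64 words, and the remainder of the last block is zero-filled. 'sec'
 * marks the transfer as secure by setting bit 28 of IMEMC.
 */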
static int gk20a_flcn_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
		u8 *src, u32 size, u8 port, bool sec, u32 tag)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 *src_u32 = (u32 *)src;
	u32 words = 0;
	u32 blk = 0;
	u32 i = 0;

	nvgpu_log_info(g, "upload %d bytes to 0x%x", size, dst);

	if (flcn_mem_overflow_check(flcn, dst, size, MEM_IMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	blk = dst >> 8;

	nvgpu_log_info(g, "upload %d words to 0x%x block %d, tag 0x%x",
		words, dst, blk, tag);

	gk20a_writel(g, base_addr + falcon_falcon_imemc_r(port),
		falcon_falcon_imemc_offs_f(dst >> 2) |
		falcon_falcon_imemc_blk_f(blk) |
		/* Set Auto-Increment on write */
		falcon_falcon_imemc_aincw_f(1) |
		sec << 28);

	for (i = 0; i < words; i++) {
		if (i % 64 == 0) {
			/* tag is always 256B aligned */
			gk20a_writel(g, base_addr + falcon_falcon_imemt_r(0),
				tag);
			tag++;
		}

		gk20a_writel(g, base_addr + falcon_falcon_imemd_r(port),
			src_u32[i]);
	}

	/* WARNING : setting remaining bytes in block to 0x0 */
	while (i % 64) {
		gk20a_writel(g, base_addr + falcon_falcon_imemd_r(port), 0);
		i++;
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

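/*
 * Start falcon CPU execution: drop the DMA context requirement, program the
 * boot vector and set the STARTCPU bit in CPUCTL.
 */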
static int gk20a_falcon_bootstrap(struct nvgpu_falcon *flcn,
	u32 boot_vector)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	nvgpu_log_info(g, "boot vec 0x%x", boot_vector);

	gk20a_writel(g, base_addr + falcon_falcon_dmactl_r(),
		falcon_falcon_dmactl_require_ctx_f(0));

	gk20a_writel(g, base_addr + falcon_falcon_bootvec_r(),
		falcon_falcon_bootvec_vec_f(boot_vector));

	gk20a_writel(g, base_addr + falcon_falcon_cpuctl_r(),
		falcon_falcon_cpuctl_startcpu_f(1));

	return 0;
}

static u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
		u32 mailbox_index)
{
	struct gk20a *g = flcn->g;
	u32 data = 0;

	if (mailbox_index < FALCON_MAILBOX_COUNT)
		data = gk20a_readl(g, flcn->flcn_base + (mailbox_index ?
			falcon_falcon_mailbox1_r() :
			falcon_falcon_mailbox0_r()));
	else
		nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);

	return data;
}

static void gk20a_falcon_mailbox_write(struct nvgpu_falcon *flcn,
		u32 mailbox_index, u32 data)
{
	struct gk20a *g = flcn->g;

	if (mailbox_index < FALCON_MAILBOX_COUNT)
		gk20a_writel(g, flcn->flcn_base + (mailbox_index ?
			falcon_falcon_mailbox1_r() :
			falcon_falcon_mailbox0_r()),
			data);
	else
		nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
}

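/*
 * Dump the IMEM block (PA->VA) mapping for debug: for every physical block,
 * issue an IMCTL_DEBUG command and log the resulting IMSTAT value, eight
 * blocks per pass.
 */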
static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i = 0, j = 0;
	u32 data[8] = {0};
	u32 block_count = 0;

	block_count = falcon_falcon_hwcfg_imem_size_v(gk20a_readl(g,
		flcn->flcn_base + falcon_falcon_hwcfg_r()));

	/* block_count must be multiple of 8 */
	block_count &= ~0x7;
	nvgpu_err(g, "FALCON IMEM BLK MAPPING (PA->VA) (%d TOTAL):",
		block_count);

	for (i = 0; i < block_count; i += 8) {
		for (j = 0; j < 8; j++) {
			gk20a_writel(g, flcn->flcn_base +
				falcon_falcon_imctl_debug_r(),
				falcon_falcon_imctl_debug_cmd_f(0x2) |
				falcon_falcon_imctl_debug_addr_blk_f(i + j));

			data[j] = gk20a_readl(g, base_addr +
				falcon_falcon_imstat_r());
		}

		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
			i, data[0], data[1], data[2], data[3]);
		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
			i + 4, data[4], data[5], data[6], data[7]);
	}
}

static void gk20a_falcon_dump_pc_trace(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 trace_pc_count = 0;
	u32 pc = 0;
	u32 i = 0;

	if (gk20a_readl(g, base_addr + falcon_falcon_sctl_r()) & 0x02) {
		nvgpu_err(g, " falcon is in HS mode, PC TRACE dump not supported");
		return;
	}

	trace_pc_count = falcon_falcon_traceidx_maxidx_v(gk20a_readl(g,
		base_addr + falcon_falcon_traceidx_r()));
	nvgpu_err(g,
		"PC TRACE (TOTAL %d ENTRIES. entry 0 is the most recent branch):",
		trace_pc_count);

	for (i = 0; i < trace_pc_count; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_traceidx_r(),
			falcon_falcon_traceidx_idx_f(i));

		pc = falcon_falcon_tracepc_pc_v(gk20a_readl(g,
			base_addr + falcon_falcon_tracepc_r()));
		nvgpu_err(g, "FALCON_TRACEPC(%d) : %#010x", i, pc);
	}
}

void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	unsigned int i;

	nvgpu_err(g, "<<< FALCON id-%d DEBUG INFORMATION - START >>>",
		flcn->flcn_id);

	/* imblk dump */
	gk20a_falcon_dump_imblk(flcn);
	/* PC trace dump */
	gk20a_falcon_dump_pc_trace(flcn);

	nvgpu_err(g, "FALCON ICD REGISTERS DUMP");

	for (i = 0; i < 4; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(FALCON_REG_PC));
		nvgpu_err(g, "FALCON_REG_PC : 0x%x",
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));

		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(FALCON_REG_SP));
		nvgpu_err(g, "FALCON_REG_SP : 0x%x",
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));
	}

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_IMB));
	nvgpu_err(g, "FALCON_REG_IMB : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_DMB));
	nvgpu_err(g, "FALCON_REG_DMB : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CSW));
	nvgpu_err(g, "FALCON_REG_CSW : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CTX));
	nvgpu_err(g, "FALCON_REG_CTX : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_EXCI));
	nvgpu_err(g, "FALCON_REG_EXCI : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	for (i = 0; i < 6; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(
			falcon_falcon_icd_cmd_opc_rstat_f()));
		nvgpu_err(g, "FALCON_REG_RSTAT[%d] : 0x%x", i,
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));
	}

	nvgpu_err(g, " FALCON REGISTERS DUMP");
	nvgpu_err(g, "falcon_falcon_os_r : %d",
		gk20a_readl(g, base_addr + falcon_falcon_os_r()));
	nvgpu_err(g, "falcon_falcon_cpuctl_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()));
	nvgpu_err(g, "falcon_falcon_idlestate_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_idlestate_r()));
	nvgpu_err(g, "falcon_falcon_mailbox0_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_mailbox0_r()));
	nvgpu_err(g, "falcon_falcon_mailbox1_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_mailbox1_r()));
	nvgpu_err(g, "falcon_falcon_irqstat_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqstat_r()));
	nvgpu_err(g, "falcon_falcon_irqmode_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqmode_r()));
	nvgpu_err(g, "falcon_falcon_irqmask_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqmask_r()));
	nvgpu_err(g, "falcon_falcon_irqdest_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqdest_r()));
	nvgpu_err(g, "falcon_falcon_debug1_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_debug1_r()));
	nvgpu_err(g, "falcon_falcon_debuginfo_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_debuginfo_r()));
	nvgpu_err(g, "falcon_falcon_bootvec_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_bootvec_r()));
	nvgpu_err(g, "falcon_falcon_hwcfg_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_hwcfg_r()));
	nvgpu_err(g, "falcon_falcon_engctl_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_engctl_r()));
	nvgpu_err(g, "falcon_falcon_curctx_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_curctx_r()));
	nvgpu_err(g, "falcon_falcon_nxtctx_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_nxtctx_r()));
	nvgpu_err(g, "falcon_falcon_exterrstat_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_exterrstat_r()));
	nvgpu_err(g, "falcon_falcon_exterraddr_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_exterraddr_r()));
}

static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
{
	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
			&flcn->flcn_engine_dep_ops;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
		break;
	default:
		/* The NULL assignment makes sure the CPU hard reset in
		 * gk20a_flcn_reset() gets executed if the falcon does not
		 * need an engine-specific reset implementation.
		 */
		flcn_eng_dep_ops->reset_eng = NULL;
		break;
	}
}

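/*
 * Populate the common falcon ops table with the gk20a implementations and
 * hook up any engine-specific dependency ops.
 */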
void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
{
	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;

	flcn_ops->reset = gk20a_flcn_reset;
	flcn_ops->set_irq = gk20a_flcn_set_irq;
	flcn_ops->clear_halt_interrupt_status =
		gk20a_flcn_clear_halt_interrupt_status;
	flcn_ops->is_falcon_cpu_halted = gk20a_is_falcon_cpu_halted;
	flcn_ops->is_falcon_idle = gk20a_is_falcon_idle;
	flcn_ops->is_falcon_scrubbing_done = gk20a_is_falcon_scrubbing_done;
	flcn_ops->copy_from_dmem = gk20a_flcn_copy_from_dmem;
	flcn_ops->copy_to_dmem = gk20a_flcn_copy_to_dmem;
	flcn_ops->copy_to_imem = gk20a_flcn_copy_to_imem;
	flcn_ops->copy_from_imem = gk20a_flcn_copy_from_imem;
	flcn_ops->bootstrap = gk20a_falcon_bootstrap;
	flcn_ops->dump_falcon_stats = gk20a_falcon_dump_stats;
	flcn_ops->mailbox_read = gk20a_falcon_mailbox_read;
	flcn_ops->mailbox_write = gk20a_falcon_mailbox_write;

	gk20a_falcon_engine_dependency_ops(flcn);
}

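/*
 * Per-falcon SW init: select the register base and capability flags from
 * the falcon ID, then initialize the copy lock and ops table for supported
 * falcons.
 */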
void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn->flcn_base = FALCON_PWR_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = true;
		break;
	case FALCON_ID_SEC2:
		flcn->flcn_base = FALCON_SEC_BASE;
		flcn->is_falcon_supported = false;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_FECS:
		flcn->flcn_base = FALCON_FECS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_GPCCS:
		flcn->flcn_base = FALCON_GPCCS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_NVDEC:
		flcn->flcn_base = FALCON_NVDEC_BASE;
		flcn->is_falcon_supported = false;
		flcn->is_interrupt_enabled = false;
		break;
	default:
		flcn->is_falcon_supported = false;
		nvgpu_err(g, "Invalid flcn request");
		break;
	}

	if (flcn->is_falcon_supported) {
		nvgpu_mutex_init(&flcn->copy_lock);
		gk20a_falcon_ops(flcn);
	} else
		nvgpu_log_info(g, "falcon 0x%x not supported on %s",
			flcn->flcn_id, g->name);
}