summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/falcon/falcon.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/falcon/falcon.c')
-rw-r--r--drivers/gpu/nvgpu/common/falcon/falcon.c364
1 files changed, 364 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon.c b/drivers/gpu/nvgpu/common/falcon/falcon.c
new file mode 100644
index 00000000..d8420ece
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/falcon/falcon.c
@@ -0,0 +1,364 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include <nvgpu/lock.h>
23#include <nvgpu/timers.h>
24#include <nvgpu/falcon.h>
25
26#include "gk20a/gk20a.h"
27
28int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn)
29{
30 struct gk20a *g = flcn->g;
31 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
32 struct nvgpu_timeout timeout;
33 u32 idle_stat;
34
35 if (!flcn_ops->is_falcon_idle) {
36 nvgpu_warn(g, "Invalid op on falcon 0x%x ", flcn->flcn_id);
37 return -EINVAL;
38 }
39
40 nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
41
42 /* wait for falcon idle */
43 do {
44 idle_stat = flcn_ops->is_falcon_idle(flcn);
45
46 if (idle_stat)
47 break;
48
49 if (nvgpu_timeout_expired_msg(&timeout,
50 "waiting for falcon idle: 0x%08x", idle_stat))
51 return -EBUSY;
52
53 nvgpu_usleep_range(100, 200);
54 } while (1);
55
56 return 0;
57}
58
59int nvgpu_flcn_reset(struct nvgpu_falcon *flcn)
60{
61 int status = -EINVAL;
62
63 if (flcn->flcn_ops.reset)
64 status = flcn->flcn_ops.reset(flcn);
65 else
66 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
67 flcn->flcn_id);
68
69 return status;
70}
71
72void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable,
73 u32 intr_mask, u32 intr_dest)
74{
75 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
76
77 if (flcn_ops->set_irq) {
78 flcn->intr_mask = intr_mask;
79 flcn->intr_dest = intr_dest;
80 flcn_ops->set_irq(flcn, enable);
81 } else
82 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
83 flcn->flcn_id);
84}
85
86bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn)
87{
88 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
89 bool status = false;
90
91 if (flcn_ops->is_falcon_scrubbing_done)
92 status = flcn_ops->is_falcon_scrubbing_done(flcn);
93 else
94 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
95 flcn->flcn_id);
96
97 return status;
98}
99
100bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn)
101{
102 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
103 bool status = false;
104
105 if (flcn_ops->is_falcon_cpu_halted)
106 status = flcn_ops->is_falcon_cpu_halted(flcn);
107 else
108 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
109 flcn->flcn_id);
110
111 return status;
112}
113
114int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
115{
116 struct gk20a *g = flcn->g;
117 struct nvgpu_timeout to;
118 int status = 0;
119
120 nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
121 do {
122 if (nvgpu_flcn_get_cpu_halted_status(flcn))
123 break;
124
125 nvgpu_udelay(10);
126 } while (!nvgpu_timeout_expired(&to));
127
128 if (nvgpu_timeout_peek_expired(&to))
129 status = -EBUSY;
130
131 return status;
132}
133
134int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,
135 unsigned int timeout)
136{
137 struct gk20a *g = flcn->g;
138 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
139 struct nvgpu_timeout to;
140 int status = 0;
141
142 if (!flcn_ops->clear_halt_interrupt_status) {
143 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
144 flcn->flcn_id);
145 return -EINVAL;
146 }
147
148 nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
149 do {
150 if (flcn_ops->clear_halt_interrupt_status(flcn))
151 break;
152
153 nvgpu_udelay(1);
154 } while (!nvgpu_timeout_expired(&to));
155
156 if (nvgpu_timeout_peek_expired(&to))
157 status = -EBUSY;
158
159 return status;
160}
161
162bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn)
163{
164 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
165 bool status = false;
166
167 if (flcn_ops->is_falcon_idle)
168 status = flcn_ops->is_falcon_idle(flcn);
169 else
170 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
171 flcn->flcn_id);
172
173 return status;
174}
175
176int nvgpu_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
177 u32 src, u8 *dst, u32 size, u8 port)
178{
179 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
180
181 return flcn_ops->copy_from_dmem(flcn, src, dst, size, port);
182}
183
184int nvgpu_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
185 u32 dst, u8 *src, u32 size, u8 port)
186{
187 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
188
189 return flcn_ops->copy_to_dmem(flcn, dst, src, size, port);
190}
191
192int nvgpu_flcn_copy_from_imem(struct nvgpu_falcon *flcn,
193 u32 src, u8 *dst, u32 size, u8 port)
194{
195 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
196 int status = -EINVAL;
197
198 if (flcn_ops->copy_from_imem)
199 status = flcn_ops->copy_from_imem(flcn, src, dst, size, port);
200 else
201 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
202 flcn->flcn_id);
203
204 return status;
205}
206
207int nvgpu_flcn_copy_to_imem(struct nvgpu_falcon *flcn,
208 u32 dst, u8 *src, u32 size, u8 port, bool sec, u32 tag)
209{
210 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
211 int status = -EINVAL;
212
213 if (flcn_ops->copy_to_imem)
214 status = flcn_ops->copy_to_imem(flcn, dst, src, size, port,
215 sec, tag);
216 else
217 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
218 flcn->flcn_id);
219
220 return status;
221}
222
223static void nvgpu_flcn_print_mem(struct nvgpu_falcon *flcn, u32 src,
224 u32 size, u32 mem_type)
225{
226 u32 buff[64] = {0};
227 u32 total_block_read = 0;
228 u32 byte_read_count = 0;
229 u32 i = 0;
230 u32 status = 0;
231
232 nvgpu_info(flcn->g, " offset 0x%x size %d bytes", src, size);
233
234 total_block_read = size >> 8;
235 do {
236 byte_read_count = total_block_read ? sizeof(buff) : size;
237
238 if (!byte_read_count)
239 break;
240
241 if (mem_type == MEM_DMEM)
242 status = nvgpu_flcn_copy_from_dmem(flcn, src,
243 (u8 *)buff, byte_read_count, 0);
244 else
245 status = nvgpu_flcn_copy_from_imem(flcn, src,
246 (u8 *)buff, byte_read_count, 0);
247
248 if (status) {
249 nvgpu_err(flcn->g, "MEM print failed");
250 break;
251 }
252
253 for (i = 0; i < (byte_read_count >> 2); i += 4)
254 nvgpu_info(flcn->g, "%#06x: %#010x %#010x %#010x %#010x",
255 src + (i << 2), buff[i], buff[i+1],
256 buff[i+2], buff[i+3]);
257
258 src += byte_read_count;
259 size -= byte_read_count;
260 } while (total_block_read--);
261}
262
/* Dump @size bytes of falcon DMEM starting at @src to the kernel log. */
void nvgpu_flcn_print_dmem(struct nvgpu_falcon *flcn, u32 src, u32 size)
{
	nvgpu_info(flcn->g, " PRINT DMEM ");
	nvgpu_flcn_print_mem(flcn, src, size, MEM_DMEM);
}
268
/* Dump @size bytes of falcon IMEM starting at @src to the kernel log. */
void nvgpu_flcn_print_imem(struct nvgpu_falcon *flcn, u32 src, u32 size)
{
	nvgpu_info(flcn->g, " PRINT IMEM ");
	nvgpu_flcn_print_mem(flcn, src, size, MEM_IMEM);
}
274
275int nvgpu_flcn_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector)
276{
277 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
278 int status = -EINVAL;
279
280 if (flcn_ops->bootstrap)
281 status = flcn_ops->bootstrap(flcn, boot_vector);
282 else
283 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
284 flcn->flcn_id);
285
286 return status;
287}
288
289u32 nvgpu_flcn_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index)
290{
291 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
292 u32 data = 0;
293
294 if (flcn_ops->mailbox_read)
295 data = flcn_ops->mailbox_read(flcn, mailbox_index);
296 else
297 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
298 flcn->flcn_id);
299
300 return data;
301}
302
303void nvgpu_flcn_mailbox_write(struct nvgpu_falcon *flcn, u32 mailbox_index,
304 u32 data)
305{
306 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
307
308 if (flcn_ops->mailbox_write)
309 flcn_ops->mailbox_write(flcn, mailbox_index, data);
310 else
311 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
312 flcn->flcn_id);
313}
314
315void nvgpu_flcn_dump_stats(struct nvgpu_falcon *flcn)
316{
317 struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
318
319 if (flcn_ops->dump_falcon_stats)
320 flcn_ops->dump_falcon_stats(flcn);
321 else
322 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
323 flcn->flcn_id);
324}
325
326void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
327{
328 struct nvgpu_falcon *flcn = NULL;
329 struct gpu_ops *gops = &g->ops;
330
331 switch (flcn_id) {
332 case FALCON_ID_PMU:
333 flcn = &g->pmu_flcn;
334 flcn->flcn_id = flcn_id;
335 g->pmu.flcn = &g->pmu_flcn;
336 g->pmu.g = g;
337 break;
338 case FALCON_ID_SEC2:
339 flcn = &g->sec2_flcn;
340 flcn->flcn_id = flcn_id;
341 break;
342 case FALCON_ID_FECS:
343 flcn = &g->fecs_flcn;
344 flcn->flcn_id = flcn_id;
345 break;
346 case FALCON_ID_GPCCS:
347 flcn = &g->gpccs_flcn;
348 flcn->flcn_id = flcn_id;
349 break;
350 case FALCON_ID_NVDEC:
351 flcn = &g->nvdec_flcn;
352 flcn->flcn_id = flcn_id;
353 break;
354 default:
355 nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
356 break;
357 };
358
359 /* call to HAL method to assign flcn base & ops to selected falcon */
360 if (flcn) {
361 flcn->g = g;
362 gops->falcon.falcon_hal_sw_init(flcn);
363 }
364}