Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 4167
1 file changed, 4167 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
new file mode 100644
index 00000000..648a8c86
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -0,0 +1,4167 @@
/*
 * GK20A Graphics FIFO (gr host)
 *
 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <trace/events/gk20a.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/mm.h>
#include <nvgpu/dma.h>
#include <nvgpu/timers.h>
#include <nvgpu/semaphore.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/soc.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/log2.h>
#include <nvgpu/debug.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/barrier.h>
#include <nvgpu/ctxsw_trace.h>

#include "gk20a.h"
#include "mm_gk20a.h"

#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
#include <nvgpu/hw/gk20a/hw_top_gk20a.h>
#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>

#define FECS_METHOD_WFI_RESTORE 0x80000

static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
					    u32 chid, bool add,
					    bool wait_for_finish);
static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg);

static const char *const pbdma_intr_fault_type_desc[] = {
	"MEMREQ timeout", "MEMACK_TIMEOUT", "MEMACK_EXTRA acks",
	"MEMDAT_TIMEOUT", "MEMDAT_EXTRA acks", "MEMFLUSH noack",
	"MEMOP noack", "LBCONNECT noack", "NONE - was LBREQ",
	"LBACK_TIMEOUT", "LBACK_EXTRA acks", "LBDAT_TIMEOUT",
	"LBDAT_EXTRA acks", "GPFIFO won't fit", "GPPTR invalid",
	"GPENTRY invalid", "GPCRC mismatch", "PBPTR get>put",
	"PBENTRY invld", "PBCRC mismatch", "NONE - was XBARC",
	"METHOD invld", "METHODCRC mismat", "DEVICE sw method",
	"[ENGINE]", "SEMAPHORE invlid", "ACQUIRE timeout",
	"PRI forbidden", "ILLEGAL SYNCPT", "[NO_CTXSW_SEG]",
	"PBSEG badsplit", "SIGNATURE bad"
};

u32 gk20a_fifo_get_engine_ids(struct gk20a *g,
		u32 engine_id[], u32 engine_id_sz,
		u32 engine_enum)
{
	struct fifo_gk20a *f = NULL;
	u32 instance_cnt = 0;
	u32 engine_id_idx;
	u32 active_engine_id = 0;
	struct fifo_engine_info_gk20a *info = NULL;

	if (g && engine_id_sz && (engine_enum < ENGINE_INVAL_GK20A)) {
		f = &g->fifo;
		for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
			active_engine_id = f->active_engines_list[engine_id_idx];
			info = &f->engine_info[active_engine_id];

			if (info->engine_enum == engine_enum) {
				if (instance_cnt < engine_id_sz) {
					engine_id[instance_cnt] = active_engine_id;
					++instance_cnt;
				} else {
					gk20a_dbg_info("warning engine_id table sz is small %d",
							engine_id_sz);
				}
			}
		}
	}
	return instance_cnt;
}

struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 engine_id)
{
	struct fifo_gk20a *f = NULL;
	u32 engine_id_idx;
	struct fifo_engine_info_gk20a *info = NULL;

	if (!g)
		return info;

	f = &g->fifo;

	if (engine_id < f->max_engines) {
		for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
			if (engine_id == f->active_engines_list[engine_id_idx]) {
				info = &f->engine_info[engine_id];
				break;
			}
		}
	}

	if (!info)
		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);

	return info;
}

bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id)
{
	struct fifo_gk20a *f = NULL;
	u32 engine_id_idx;
	bool valid = false;

	if (!g)
		return valid;

	f = &g->fifo;

	if (engine_id < f->max_engines) {
		for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
			if (engine_id == f->active_engines_list[engine_id_idx]) {
				valid = true;
				break;
			}
		}
	}

	if (!valid)
		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);

	return valid;
}

u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
{
	u32 gr_engine_cnt = 0;
	u32 gr_engine_id = FIFO_INVAL_ENGINE_ID;

	/* Consider 1st available GR engine */
	gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id,
			1, ENGINE_GR_GK20A);

	if (!gr_engine_cnt) {
		nvgpu_err(g, "No GR engine available on this device!");
	}

	return gr_engine_id;
}

u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g)
{
	u32 reset_mask = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;
	struct fifo_gk20a *f = NULL;
	u32 engine_id_idx;
	struct fifo_engine_info_gk20a *engine_info;
	u32 active_engine_id = 0;

	if (!g)
		return reset_mask;

	f = &g->fifo;

	for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
		active_engine_id = f->active_engines_list[engine_id_idx];
		engine_info = &f->engine_info[active_engine_id];
		engine_enum = engine_info->engine_enum;

		if ((engine_enum == ENGINE_GRCE_GK20A) ||
		    (engine_enum == ENGINE_ASYNC_CE_GK20A))
			reset_mask |= engine_info->reset_mask;
	}

	return reset_mask;
}

u32 gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g)
{
	u32 ce_runlist_id = gk20a_fifo_get_gr_runlist_id(g);
	u32 engine_enum = ENGINE_INVAL_GK20A;
	struct fifo_gk20a *f = NULL;
	u32 engine_id_idx;
	struct fifo_engine_info_gk20a *engine_info;
	u32 active_engine_id = 0;

	if (!g)
		return ce_runlist_id;

	f = &g->fifo;

	for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
		active_engine_id = f->active_engines_list[engine_id_idx];
		engine_info = &f->engine_info[active_engine_id];
		engine_enum = engine_info->engine_enum;

		/* select the last available ASYNC_CE, if any */
		if (engine_enum == ENGINE_ASYNC_CE_GK20A)
			ce_runlist_id = engine_info->runlist_id;
	}

	return ce_runlist_id;
}

u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
{
	u32 gr_engine_cnt = 0;
	u32 gr_engine_id = FIFO_INVAL_ENGINE_ID;
	struct fifo_engine_info_gk20a *engine_info;
	u32 gr_runlist_id = ~0;

	/* Consider 1st available GR engine */
	gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id,
			1, ENGINE_GR_GK20A);

	if (!gr_engine_cnt) {
		nvgpu_err(g,
			"No GR engine available on this device!");
		goto end;
	}

	engine_info = gk20a_fifo_get_engine_info(g, gr_engine_id);

	if (engine_info) {
		gr_runlist_id = engine_info->runlist_id;
	} else {
		nvgpu_err(g,
			"gr_engine_id is not in active list/invalid %d", gr_engine_id);
	}

end:
	return gr_runlist_id;
}

bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
{
	struct fifo_gk20a *f = NULL;
	u32 engine_id_idx;
	u32 active_engine_id;
	struct fifo_engine_info_gk20a *engine_info;

	if (!g)
		return false;

	f = &g->fifo;

	for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
		active_engine_id = f->active_engines_list[engine_id_idx];
		engine_info = gk20a_fifo_get_engine_info(g, active_engine_id);
		if (engine_info && (engine_info->runlist_id == runlist_id)) {
			return true;
		}
	}

	return false;
}

/*
 * Link engine IDs to MMU IDs and vice versa.
 */

static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
{
	u32 fault_id = FIFO_INVAL_ENGINE_ID;
	struct fifo_engine_info_gk20a *engine_info;

	engine_info = gk20a_fifo_get_engine_info(g, engine_id);

	if (engine_info) {
		fault_id = engine_info->fault_id;
	} else {
		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
	}
	return fault_id;
}

static inline u32 gk20a_mmu_id_to_engine_id(struct gk20a *g, u32 fault_id)
{
	u32 engine_id;
	u32 active_engine_id;
	struct fifo_engine_info_gk20a *engine_info;
	struct fifo_gk20a *f = &g->fifo;

	for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
		active_engine_id = f->active_engines_list[engine_id];
		engine_info = &g->fifo.engine_info[active_engine_id];

		if (engine_info->fault_id == fault_id)
			break;
		active_engine_id = FIFO_INVAL_ENGINE_ID;
	}
	return active_engine_id;
}

int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
		u32 *inst_id)
{
	int ret = ENGINE_INVAL_GK20A;

	gk20a_dbg_info("engine type %d", engine_type);
	if (engine_type == top_device_info_type_enum_graphics_v())
		ret = ENGINE_GR_GK20A;
	else if ((engine_type >= top_device_info_type_enum_copy0_v()) &&
		 (engine_type <= top_device_info_type_enum_copy2_v())) {
		/* Assume every CE engine has a separate runlist at this point.
		 * A CE of type ENGINE_GRCE_GK20A is identified later by
		 * comparing its runlist_id with the GR runlist_id in
		 * init_engine_info(). */
		ret = ENGINE_ASYNC_CE_GK20A;
		/* inst_id starts from CE0 to CE2 */
		if (inst_id)
			*inst_id = (engine_type - top_device_info_type_enum_copy0_v());
	}

	return ret;
}

int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
{
	struct gk20a *g = f->g;
	u32 i;
	u32 max_info_entries = top_device_info__size_1_v();
	u32 engine_enum = ENGINE_INVAL_GK20A;
	u32 engine_id = FIFO_INVAL_ENGINE_ID;
	u32 runlist_id = ~0;
	u32 pbdma_id = ~0;
	u32 intr_id = ~0;
	u32 reset_id = ~0;
	u32 inst_id = 0;
	u32 pri_base = 0;
	u32 fault_id = 0;
	u32 gr_runlist_id = ~0;
	bool found_pbdma_for_runlist = false;

	gk20a_dbg_fn("");

	f->num_engines = 0;

	for (i = 0; i < max_info_entries; i++) {
		u32 table_entry = gk20a_readl(f->g, top_device_info_r(i));
		u32 entry = top_device_info_entry_v(table_entry);
		u32 runlist_bit;

		if (entry == top_device_info_entry_enum_v()) {
			if (top_device_info_engine_v(table_entry)) {
				engine_id =
					top_device_info_engine_enum_v(table_entry);
				gk20a_dbg_info("info: engine_id %d",
					top_device_info_engine_enum_v(table_entry));
			}

			if (top_device_info_runlist_v(table_entry)) {
				runlist_id =
					top_device_info_runlist_enum_v(table_entry);
				gk20a_dbg_info("gr info: runlist_id %d", runlist_id);

				runlist_bit = BIT(runlist_id);

				found_pbdma_for_runlist = false;
				for (pbdma_id = 0; pbdma_id < f->num_pbdma;
						pbdma_id++) {
					if (f->pbdma_map[pbdma_id] &
							runlist_bit) {
						gk20a_dbg_info(
							"gr info: pbdma_map[%d]=%d",
							pbdma_id,
							f->pbdma_map[pbdma_id]);
						found_pbdma_for_runlist = true;
						break;
					}
				}

				if (!found_pbdma_for_runlist) {
					nvgpu_err(g, "busted pbdma map");
					return -EINVAL;
				}
			}

			if (top_device_info_intr_v(table_entry)) {
				intr_id =
					top_device_info_intr_enum_v(table_entry);
				gk20a_dbg_info("gr info: intr_id %d", intr_id);
			}

			if (top_device_info_reset_v(table_entry)) {
				reset_id =
					top_device_info_reset_enum_v(table_entry);
				gk20a_dbg_info("gr info: reset_id %d",
						reset_id);
			}
		} else if (entry == top_device_info_entry_engine_type_v()) {
			u32 engine_type =
				top_device_info_type_enum_v(table_entry);
			engine_enum =
				g->ops.fifo.engine_enum_from_type(g,
						engine_type, &inst_id);
		} else if (entry == top_device_info_entry_data_v()) {
			/* gk20a doesn't support device_info_data packet parsing */
			if (g->ops.fifo.device_info_data_parse)
				g->ops.fifo.device_info_data_parse(g,
					table_entry, &inst_id, &pri_base,
					&fault_id);
		}

		if (!top_device_info_chain_v(table_entry)) {
			if (engine_enum < ENGINE_INVAL_GK20A) {
				struct fifo_engine_info_gk20a *info =
					&g->fifo.engine_info[engine_id];

				info->intr_mask |= BIT(intr_id);
				info->reset_mask |= BIT(reset_id);
				info->runlist_id = runlist_id;
				info->pbdma_id = pbdma_id;
				info->inst_id = inst_id;
				info->pri_base = pri_base;

				if (engine_enum == ENGINE_GR_GK20A)
					gr_runlist_id = runlist_id;

				/* GR and GR_COPY share the same runlist_id */
				if ((engine_enum == ENGINE_ASYNC_CE_GK20A) &&
				    (gr_runlist_id == runlist_id))
					engine_enum = ENGINE_GRCE_GK20A;

				info->engine_enum = engine_enum;

				if (!fault_id && (engine_enum == ENGINE_GRCE_GK20A))
					fault_id = 0x1b;
				info->fault_id = fault_id;

				/* engine_id ranges from 0 to NV_HOST_NUM_ENGINES */
				f->active_engines_list[f->num_engines] = engine_id;

				++f->num_engines;

				engine_enum = ENGINE_INVAL_GK20A;
			}
		}
	}

	return 0;
}

u32 gk20a_fifo_act_eng_interrupt_mask(struct gk20a *g, u32 act_eng_id)
{
	struct fifo_engine_info_gk20a *engine_info = NULL;

	engine_info = gk20a_fifo_get_engine_info(g, act_eng_id);
	if (engine_info)
		return engine_info->intr_mask;

	return 0;
}

u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g)
{
	u32 eng_intr_mask = 0;
	unsigned int i;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;

	for (i = 0; i < g->fifo.num_engines; i++) {
		u32 intr_mask;
		active_engine_id = g->fifo.active_engines_list[i];
		intr_mask = g->fifo.engine_info[active_engine_id].intr_mask;
		engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
		if (((engine_enum == ENGINE_GRCE_GK20A) ||
		     (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
		    (!g->ops.ce2.isr_stall || !g->ops.ce2.isr_nonstall))
			continue;

		eng_intr_mask |= intr_mask;
	}

	return eng_intr_mask;
}

void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
{
	u32 i;
	u32 runlist_id;
	struct fifo_runlist_info_gk20a *runlist;
	struct gk20a *g = NULL;

	if (!f || !f->runlist_info)
		return;

	g = f->g;

	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
		runlist = &f->runlist_info[runlist_id];
		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
			nvgpu_dma_free(g, &runlist->mem[i]);
		}

		nvgpu_kfree(g, runlist->active_channels);
		runlist->active_channels = NULL;

		nvgpu_kfree(g, runlist->active_tsgs);
		runlist->active_tsgs = NULL;

		nvgpu_mutex_destroy(&runlist->mutex);
	}
	memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
		f->max_runlists));

	nvgpu_kfree(g, f->runlist_info);
	f->runlist_info = NULL;
	f->max_runlists = 0;
}

static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
{
	struct gk20a *g = f->g;
	unsigned int i = 0;

	gk20a_dbg_fn("");

	nvgpu_channel_worker_deinit(g);
	/*
	 * Make sure all channels are closed before deleting them.
	 */
	for (; i < f->num_channels; i++) {
		struct channel_gk20a *c = f->channel + i;
		struct tsg_gk20a *tsg = f->tsg + i;

		/*
		 * Could race but worst that happens is we get an error message
		 * from gk20a_free_channel() complaining about multiple closes.
		 */
		if (c->referenceable)
			__gk20a_channel_kill(c);

		nvgpu_mutex_destroy(&tsg->event_id_list_lock);

		nvgpu_mutex_destroy(&c->ioctl_lock);
		nvgpu_mutex_destroy(&c->error_notifier_mutex);
		nvgpu_mutex_destroy(&c->joblist.cleanup_lock);
		nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock);
		nvgpu_mutex_destroy(&c->sync_lock);
#if defined(CONFIG_GK20A_CYCLE_STATS)
		nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
		nvgpu_mutex_destroy(&c->cs_client_mutex);
#endif
		nvgpu_mutex_destroy(&c->event_id_list_lock);
		nvgpu_mutex_destroy(&c->dbg_s_lock);
	}

	nvgpu_vfree(g, f->channel);
	nvgpu_vfree(g, f->tsg);
	if (g->ops.mm.is_bar1_supported(g))
		nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd);
	else
		nvgpu_dma_free(g, &f->userd);

	gk20a_fifo_delete_runlist(f);

	nvgpu_kfree(g, f->pbdma_map);
	f->pbdma_map = NULL;
	nvgpu_kfree(g, f->engine_info);
	f->engine_info = NULL;
	nvgpu_kfree(g, f->active_engines_list);
	f->active_engines_list = NULL;
}

/* reads info from hardware and fills in pbdma exception info record */
static inline void get_exception_pbdma_info(
	struct gk20a *g,
	struct fifo_engine_info_gk20a *eng_info)
{
	struct fifo_pbdma_exception_info_gk20a *e =
		&eng_info->pbdma_exception_info;

	u32 pbdma_status_r = e->status_r = gk20a_readl(g,
		fifo_pbdma_status_r(eng_info->pbdma_id));
	e->id = fifo_pbdma_status_id_v(pbdma_status_r); /* vs. id_hw_v()? */
	e->id_is_chid = fifo_pbdma_status_id_type_v(pbdma_status_r) ==
		fifo_pbdma_status_id_type_chid_v();
	e->chan_status_v = fifo_pbdma_status_chan_status_v(pbdma_status_r);
	e->next_id_is_chid =
		fifo_pbdma_status_next_id_type_v(pbdma_status_r) ==
		fifo_pbdma_status_next_id_type_chid_v();
	e->next_id = fifo_pbdma_status_next_id_v(pbdma_status_r);
	e->chsw_in_progress =
		fifo_pbdma_status_chsw_v(pbdma_status_r) ==
		fifo_pbdma_status_chsw_in_progress_v();
}

static void fifo_pbdma_exception_status(struct gk20a *g,
	struct fifo_engine_info_gk20a *eng_info)
{
	struct fifo_pbdma_exception_info_gk20a *e;
	get_exception_pbdma_info(g, eng_info);
	e = &eng_info->pbdma_exception_info;

	gk20a_dbg_fn("pbdma_id %d, "
		"id_type %s, id %d, chan_status %d, "
		"next_id_type %s, next_id %d, "
		"chsw_in_progress %d",
		eng_info->pbdma_id,
		e->id_is_chid ? "chid" : "tsgid", e->id, e->chan_status_v,
		e->next_id_is_chid ? "chid" : "tsgid", e->next_id,
		e->chsw_in_progress);
}

/* reads info from hardware and fills in the engine exception info record */
static inline void get_exception_engine_info(
	struct gk20a *g,
	struct fifo_engine_info_gk20a *eng_info)
{
	struct fifo_engine_exception_info_gk20a *e =
		&eng_info->engine_exception_info;
	u32 engine_status_r = e->status_r =
		gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
	e->id = fifo_engine_status_id_v(engine_status_r); /* vs. id_hw_v()? */
	e->id_is_chid = fifo_engine_status_id_type_v(engine_status_r) ==
		fifo_engine_status_id_type_chid_v();
	e->ctx_status_v = fifo_engine_status_ctx_status_v(engine_status_r);
	e->faulted =
		fifo_engine_status_faulted_v(engine_status_r) ==
		fifo_engine_status_faulted_true_v();
	e->idle =
		fifo_engine_status_engine_v(engine_status_r) ==
		fifo_engine_status_engine_idle_v();
	e->ctxsw_in_progress =
		fifo_engine_status_ctxsw_v(engine_status_r) ==
		fifo_engine_status_ctxsw_in_progress_v();
}

static void fifo_engine_exception_status(struct gk20a *g,
	struct fifo_engine_info_gk20a *eng_info)
{
	struct fifo_engine_exception_info_gk20a *e;
	get_exception_engine_info(g, eng_info);
	e = &eng_info->engine_exception_info;

	gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
		"faulted %d, idle %d, ctxsw_in_progress %d, ",
		eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
		e->id, e->ctx_status_v,
		e->faulted, e->idle, e->ctxsw_in_progress);
}

static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
{
	struct fifo_runlist_info_gk20a *runlist;
	unsigned int runlist_id;
	u32 i;
	size_t runlist_size;
	u32 active_engine_id, pbdma_id, engine_id;
	struct fifo_engine_info_gk20a *engine_info;

	nvgpu_log_fn(g, " ");

	f->max_runlists = g->ops.fifo.eng_runlist_base_size();
	f->runlist_info = nvgpu_kzalloc(g,
					sizeof(struct fifo_runlist_info_gk20a) *
					f->max_runlists);
	if (!f->runlist_info)
		goto clean_up_runlist;

	memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
		f->max_runlists));

	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
		runlist = &f->runlist_info[runlist_id];

		runlist->active_channels =
			nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
						      BITS_PER_BYTE));
		if (!runlist->active_channels)
			goto clean_up_runlist;

		runlist->active_tsgs =
			nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
						      BITS_PER_BYTE));
		if (!runlist->active_tsgs)
			goto clean_up_runlist;

		runlist_size = f->runlist_entry_size * f->num_runlist_entries;
		nvgpu_log(g, gpu_dbg_info,
				"runlist_entries %d runlist size %zu",
				f->num_runlist_entries, runlist_size);

		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
			int err = nvgpu_dma_alloc_sys(g, runlist_size,
					&runlist->mem[i]);
			if (err) {
				nvgpu_err(g, "memory allocation failed");
				goto clean_up_runlist;
			}
		}
		nvgpu_mutex_init(&runlist->mutex);

		/* None of the buffers is pinned if this value doesn't change.
		   Otherwise, one of them (cur_buffer) must have been pinned. */
		runlist->cur_buffer = MAX_RUNLIST_BUFFERS;

		for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
			if (f->pbdma_map[pbdma_id] & BIT(runlist_id))
				runlist->pbdma_bitmask |= BIT(pbdma_id);
		}
		nvgpu_log(g, gpu_dbg_info, "runlist %d : pbdma bitmask 0x%x",
				runlist_id, runlist->pbdma_bitmask);

		for (engine_id = 0; engine_id < f->num_engines; ++engine_id) {
			active_engine_id = f->active_engines_list[engine_id];
			engine_info = &f->engine_info[active_engine_id];

			if (engine_info && engine_info->runlist_id == runlist_id)
				runlist->eng_bitmask |= BIT(active_engine_id);
		}
		nvgpu_log(g, gpu_dbg_info, "runlist %d : act eng bitmask 0x%x",
				runlist_id, runlist->eng_bitmask);
	}

	nvgpu_log_fn(g, "done");
	return 0;

clean_up_runlist:
	gk20a_fifo_delete_runlist(f);
	gk20a_dbg_fn("fail");
	return -ENOMEM;
}

u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g)
{
	u32 intr_0_error_mask =
		fifo_intr_0_bind_error_pending_f() |
		fifo_intr_0_sched_error_pending_f() |
		fifo_intr_0_chsw_error_pending_f() |
		fifo_intr_0_fb_flush_timeout_pending_f() |
		fifo_intr_0_dropped_mmu_fault_pending_f() |
		fifo_intr_0_mmu_fault_pending_f() |
		fifo_intr_0_lb_error_pending_f() |
		fifo_intr_0_pio_error_pending_f();

	return intr_0_error_mask;
}

static u32 gk20a_fifo_intr_0_en_mask(struct gk20a *g)
{
	u32 intr_0_en_mask;

	intr_0_en_mask = g->ops.fifo.intr_0_error_mask(g);

	intr_0_en_mask |= fifo_intr_0_runlist_event_pending_f() |
			  fifo_intr_0_pbdma_intr_pending_f();

	return intr_0_en_mask;
}

int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
{
	u32 intr_stall;
	u32 mask;
	u32 timeout;
	unsigned int i;
	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);

	gk20a_dbg_fn("");

	/* enable pmc pfifo */
	g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());

	if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
		g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
		g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
				g->blcg_enabled);

	/* enable pbdma */
	mask = 0;
	for (i = 0; i < host_num_pbdma; ++i)
		mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i);
	gk20a_writel(g, mc_enable_pb_r(), mask);

	timeout = gk20a_readl(g, fifo_fb_timeout_r());
	timeout = set_field(timeout, fifo_fb_timeout_period_m(),
			fifo_fb_timeout_period_max_f());
	gk20a_dbg_info("fifo_fb_timeout reg val = 0x%08x", timeout);
	gk20a_writel(g, fifo_fb_timeout_r(), timeout);

	/* write pbdma timeout value */
	for (i = 0; i < host_num_pbdma; i++) {
		timeout = gk20a_readl(g, pbdma_timeout_r(i));
		timeout = set_field(timeout, pbdma_timeout_period_m(),
				    pbdma_timeout_period_max_f());
		gk20a_dbg_info("pbdma_timeout reg val = 0x%08x", timeout);
		gk20a_writel(g, pbdma_timeout_r(i), timeout);
	}
	if (g->ops.fifo.apply_pb_timeout)
		g->ops.fifo.apply_pb_timeout(g);

	timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US;
	timeout = scale_ptimer(timeout,
		ptimer_scalingfactor10x(g->ptimer_src_freq));
	timeout |= fifo_eng_timeout_detection_enabled_f();
	gk20a_writel(g, fifo_eng_timeout_r(), timeout);

	/* clear and enable pbdma interrupt */
	for (i = 0; i < host_num_pbdma; i++) {
		gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
		gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);

		intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
		intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
		gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
		gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
		gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);

		gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i,
			~pbdma_intr_en_0_lbreq_enabled_f());
		gk20a_writel(g, pbdma_intr_en_1_r(i),
			~pbdma_intr_en_0_lbreq_enabled_f());
	}

	/* reset runlist interrupts */
	gk20a_writel(g, fifo_intr_runlist_r(), ~0);

	/* clear and enable pfifo interrupt */
	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
	mask = gk20a_fifo_intr_0_en_mask(g);
	gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask);
	gk20a_writel(g, fifo_intr_en_0_r(), mask);
	gk20a_dbg_info("fifo_intr_en_1 = 0x80000000");
	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);

	gk20a_dbg_fn("done");

	return 0;
}

static int gk20a_init_fifo_setup_sw(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;
	unsigned int chid, i;
	int err = 0;

	gk20a_dbg_fn("");

	if (f->sw_ready) {
		gk20a_dbg_fn("skip init");
		return 0;
	}

	f->g = g;

	nvgpu_mutex_init(&f->intr.isr.mutex);
	nvgpu_mutex_init(&f->gr_reset_mutex);

	g->ops.fifo.init_pbdma_intr_descs(f); /* just filling in data/tables */

	f->num_channels = g->ops.fifo.get_num_fifos(g);
	f->runlist_entry_size = g->ops.fifo.runlist_entry_size();
	f->num_runlist_entries = fifo_eng_runlist_length_max_v();
	f->num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
	f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);

	f->userd_entry_size = 1 << ram_userd_base_shift_v();

	f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
	f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
	f->pbdma_map = nvgpu_kzalloc(g, f->num_pbdma * sizeof(*f->pbdma_map));
	f->engine_info = nvgpu_kzalloc(g, f->max_engines *
				sizeof(*f->engine_info));
	f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32));

	if (!(f->channel && f->tsg && f->pbdma_map && f->engine_info &&
	      f->active_engines_list)) {
		err = -ENOMEM;
		goto clean_up;
	}
	memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));

	/* pbdma map needs to be in place before calling engine info init */
	for (i = 0; i < f->num_pbdma; ++i)
		f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));

	g->ops.fifo.init_engine_info(f);

	init_runlist(g, f);

	nvgpu_init_list_node(&f->free_chs);
	nvgpu_mutex_init(&f->free_chs_mutex);

	if (g->ops.mm.is_bar1_supported(g))
		err = nvgpu_dma_alloc_map_sys(g->mm.bar1.vm,
				f->userd_entry_size * f->num_channels,
				&f->userd);
	else
		err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
				f->num_channels, &f->userd);
	if (err) {
		nvgpu_err(g, "userd memory allocation failed");
		goto clean_up;
	}
	gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);

	for (chid = 0; chid < f->num_channels; chid++) {
		f->channel[chid].userd_iova =
			nvgpu_mem_get_addr(g, &f->userd) +
			chid * f->userd_entry_size;
		f->channel[chid].userd_gpu_va =
			f->userd.gpu_va + chid * f->userd_entry_size;
		gk20a_init_channel_support(g, chid);
		gk20a_init_tsg_support(g, chid);
	}
	nvgpu_mutex_init(&f->tsg_inuse_mutex);

	err = nvgpu_channel_worker_init(g);
	if (err)
		goto clean_up;
	f->remove_support = gk20a_remove_fifo_support;

	f->deferred_reset_pending = false;
	nvgpu_mutex_init(&f->deferred_reset_mutex);

	f->sw_ready = true;

	gk20a_dbg_fn("done");
	return 0;

clean_up:
	gk20a_dbg_fn("fail");
	if (g->ops.mm.is_bar1_supported(g))
		nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd);
	else
		nvgpu_dma_free(g, &f->userd);

	nvgpu_vfree(g, f->channel);
	f->channel = NULL;
	nvgpu_vfree(g, f->tsg);
	f->tsg = NULL;
	nvgpu_kfree(g, f->pbdma_map);
	f->pbdma_map = NULL;
	nvgpu_kfree(g, f->engine_info);
	f->engine_info = NULL;
	nvgpu_kfree(g, f->active_engines_list);
	f->active_engines_list = NULL;

	return err;
}

void gk20a_fifo_handle_runlist_event(struct gk20a *g)
{
	u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());

	gk20a_dbg(gpu_dbg_intr, "runlist event %08x",
		  runlist_event);

	gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
}

int gk20a_init_fifo_setup_hw(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;

	gk20a_dbg_fn("");

	/* test write, then read through bar1 @ userd region before
	 * turning on snooping */
	{
		struct fifo_gk20a *f = &g->fifo;
		u32 v, v1 = 0x33, v2 = 0x55;

		u32 bar1_vaddr = f->userd.gpu_va;
		volatile u32 *cpu_vaddr = f->userd.cpu_va;

		gk20a_dbg_info("test bar1 @ vaddr 0x%x",
			   bar1_vaddr);

		v = gk20a_bar1_readl(g, bar1_vaddr);

		*cpu_vaddr = v1;
		nvgpu_mb();

		if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
			nvgpu_err(g, "bar1 broken @ gk20a: CPU wrote 0x%x, \
				GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr));
			return -EINVAL;
		}

		gk20a_bar1_writel(g, bar1_vaddr, v2);

		if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
			nvgpu_err(g, "bar1 broken @ gk20a: GPU wrote 0x%x, \
				CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr);
			return -EINVAL;
		}

		/* is it visible to the cpu? */
		if (*cpu_vaddr != v2) {
			nvgpu_err(g,
				"cpu didn't see bar1 write @ %p!",
				cpu_vaddr);
		}

		/* put it back */
		gk20a_bar1_writel(g, bar1_vaddr, v);
	}

	/*XXX all manner of flushes and caching worries, etc */

	/* set the base for the userd region now */
	gk20a_writel(g, fifo_bar1_base_r(),
			fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
			fifo_bar1_base_valid_true_f());

	gk20a_dbg_fn("done");

	return 0;
}

int gk20a_init_fifo_support(struct gk20a *g)
{
	u32 err;

	err = gk20a_init_fifo_setup_sw(g);
	if (err)
		return err;

	if (g->ops.fifo.init_fifo_setup_hw)
		err = g->ops.fifo.init_fifo_setup_hw(g);
	if (err)
		return err;

	return err;
}

/* return with a reference to the channel, caller must put it back */
struct channel_gk20a *
gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr)
{
	struct fifo_gk20a *f = &g->fifo;
	unsigned int ci;
	if (unlikely(!f->channel))
		return NULL;
	for (ci = 0; ci < f->num_channels; ci++) {
		struct channel_gk20a *ch;
		u64 ch_inst_ptr;

		ch = gk20a_channel_get(&f->channel[ci]);
		/* only alive channels are searched */
		if (!ch)
			continue;

		ch_inst_ptr = nvgpu_inst_block_addr(g, &ch->inst_block);
		if (inst_ptr == ch_inst_ptr)
			return ch;

		gk20a_channel_put(ch);
	}
	return NULL;
}

/* fault info/descriptions.
 * tbd: move to setup
 *  */
static const char * const fault_type_descs[] = {
	"pde", /*fifo_intr_mmu_fault_info_type_pde_v() == 0 */
	"pde size",
	"pte",
	"va limit viol",
	"unbound inst",
	"priv viol",
	"ro viol",
	"wo viol",
	"pitch mask",
	"work creation",
	"bad aperture",
	"compression failure",
	"bad kind",
	"region viol",
	"dual ptes",
	"poisoned",
};
/* engine descriptions */
static const char * const engine_subid_descs[] = {
	"gpc",
	"hub",
};

static const char * const hub_client_descs[] = {
	"vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
	"host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
	"niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
	"scc nb", "sec", "ssync", "gr copy", "ce2", "xv", "mmu nb",
	"msenc", "d falcon", "sked", "a falcon", "n/a",
};

static const char * const gpc_client_descs[] = {
	"l1 0", "t1 0", "pe 0",
	"l1 1", "t1 1", "pe 1",
	"l1 2", "t1 2", "pe 2",
	"l1 3", "t1 3", "pe 3",
	"rast", "gcc", "gpccs",
	"prop 0", "prop 1", "prop 2", "prop 3",
	"l1 4", "t1 4", "pe 4",
	"l1 5", "t1 5", "pe 5",
	"l1 6", "t1 6", "pe 6",
	"l1 7", "t1 7", "pe 7",
	"gpm",
	"ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
	"rgg utlb",
};

static const char * const does_not_exist[] = {
	"does not exist"
};

static void get_exception_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
	struct mmu_fault_info *mmfault)
{
	g->ops.fifo.get_mmu_fault_info(g, mmu_fault_id, mmfault);

	/* parse info */
	if (mmfault->fault_type >= ARRAY_SIZE(fault_type_descs)) {
		WARN_ON(mmfault->fault_type >= ARRAY_SIZE(fault_type_descs));
		mmfault->fault_type_desc = does_not_exist[0];
	} else {
		mmfault->fault_type_desc =
			fault_type_descs[mmfault->fault_type];
	}

	if (mmfault->client_type >= ARRAY_SIZE(engine_subid_descs)) {
		WARN_ON(mmfault->client_type >= ARRAY_SIZE(engine_subid_descs));
		mmfault->client_type_desc = does_not_exist[0];
	} else {
		mmfault->client_type_desc =
			engine_subid_descs[mmfault->client_type];
	}

	mmfault->client_id_desc = does_not_exist[0];
	if (mmfault->client_type ==
	    fifo_intr_mmu_fault_info_engine_subid_hub_v()) {

		if (mmfault->client_id >=
		    ARRAY_SIZE(hub_client_descs))
			WARN_ON(mmfault->client_id >=
				ARRAY_SIZE(hub_client_descs));
		else
			mmfault->client_id_desc =
				hub_client_descs[mmfault->client_id];
	} else if (mmfault->client_type ==
		   fifo_intr_mmu_fault_info_engine_subid_gpc_v()) {
		if (mmfault->client_id >= ARRAY_SIZE(gpc_client_descs))
			WARN_ON(mmfault->client_id >=
				ARRAY_SIZE(gpc_client_descs));
		else
			mmfault->client_id_desc =
				gpc_client_descs[mmfault->client_id];
	}
}

/* reads info from hardware and fills in mmu fault info record */
void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
	struct mmu_fault_info *mmfault)
{
	u32 fault_info;
	u32 addr_lo, addr_hi;

	gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id);

	memset(mmfault, 0, sizeof(*mmfault));

	fault_info = gk20a_readl(g,
		fifo_intr_mmu_fault_info_r(mmu_fault_id));
	mmfault->fault_type =
		fifo_intr_mmu_fault_info_type_v(fault_info);
	mmfault->access_type =
		fifo_intr_mmu_fault_info_write_v(fault_info);
	mmfault->client_type =
		fifo_intr_mmu_fault_info_engine_subid_v(fault_info);
	mmfault->client_id =
		fifo_intr_mmu_fault_info_client_v(fault_info);

	addr_lo = gk20a_readl(g, fifo_intr_mmu_fault_lo_r(mmu_fault_id));
	addr_hi = gk20a_readl(g, fifo_intr_mmu_fault_hi_r(mmu_fault_id));
	mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);
	/* note: ignoring aperture on gk20a... */
	mmfault->inst_ptr = fifo_intr_mmu_fault_inst_ptr_v(
		gk20a_readl(g, fifo_intr_mmu_fault_inst_r(mmu_fault_id)));
	/* note: inst_ptr is a 40b phys addr. */
	mmfault->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v();
}

void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
{
	struct fifo_gk20a *f = NULL;
	u32 engine_enum = ENGINE_INVAL_GK20A;
	u32 inst_id = 0;
	struct fifo_engine_info_gk20a *engine_info;

	gk20a_dbg_fn("");

	if (!g)
		return;

	f = &g->fifo;

	engine_info = gk20a_fifo_get_engine_info(g, engine_id);

	if (engine_info) {
		engine_enum = engine_info->engine_enum;
		inst_id = engine_info->inst_id;
	}

	if (engine_enum == ENGINE_INVAL_GK20A)
		nvgpu_err(g, "unsupported engine_id %d", engine_id);

	if (engine_enum == ENGINE_GR_GK20A) {
		if (g->support_pmu && g->can_elpg) {
			if (nvgpu_pmu_disable_elpg(g))
				nvgpu_err(g, "failed to set disable elpg");
		}

#ifdef CONFIG_GK20A_CTXSW_TRACE
		/*
		 * Resetting engine will alter read/write index. Need to flush
		 * circular buffer before re-enabling FECS.
		 */
		if (g->ops.fecs_trace.reset)
			g->ops.fecs_trace.reset(g);
#endif
		if (!nvgpu_platform_is_simulation(g)) {
			/* HALT_PIPELINE method, halt GR engine */
			if (gr_gk20a_halt_pipe(g))
				nvgpu_err(g, "failed to HALT gr pipe");
			/*
			 * resetting engine using mc_enable_r() is not
			 * enough, we do full init sequence
			 */
			nvgpu_log(g, gpu_dbg_info, "resetting gr engine");
			gk20a_gr_reset(g);
		} else {
			nvgpu_log(g, gpu_dbg_info,
				"HALT gr pipe not supported and "
				"gr cannot be reset without halting gr pipe");
		}
		if (g->support_pmu && g->can_elpg)
			nvgpu_pmu_enable_elpg(g);
	}
	if ((engine_enum == ENGINE_GRCE_GK20A) ||
	    (engine_enum == ENGINE_ASYNC_CE_GK20A)) {
		g->ops.mc.reset(g, engine_info->reset_mask);
	}
}

static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
{
	u32 intr;

	intr = gk20a_readl(g, fifo_intr_chsw_error_r());
	nvgpu_err(g, "chsw: %08x", intr);
	gk20a_fecs_dump_falcon_stats(g);
	gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
}

static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
{
	u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
	nvgpu_err(g, "dropped mmu fault (0x%08x)", fault_id);
}

bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
{
	return (engine_subid == fifo_intr_mmu_fault_info_engine_subid_gpc_v());
}

bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
		u32 engine_subid, bool fake_fault)
{
	u32 engine_enum = ENGINE_INVAL_GK20A;
	struct fifo_gk20a *fifo = NULL;
	struct fifo_engine_info_gk20a *engine_info;

	if (!g)
		return false;

	fifo = &g->fifo;

	engine_info = gk20a_fifo_get_engine_info(g, engine_id);

	if (engine_info) {
		engine_enum = engine_info->engine_enum;
	}

	if (engine_enum == ENGINE_INVAL_GK20A)
		return false;

	/* channel recovery is only deferred if an sm debugger
	   is attached and MMU debug mode is enabled */
	if (!g->ops.gr.sm_debugger_attached(g) ||
	    !g->ops.fb.is_debug_mode_enabled(g))
		return false;

	/* if this fault is fake (due to RC recovery), don't defer recovery */
	if (fake_fault)
		return false;

	if (engine_enum != ENGINE_GR_GK20A)
		return false;

	return g->ops.fifo.is_fault_engine_subid_gpc(g, engine_subid);
}

/* caller must hold a channel reference */
static bool gk20a_fifo_ch_timeout_debug_dump_state(struct gk20a *g,
		struct channel_gk20a *refch)
{
	bool verbose = true;
	if (!refch)
		return verbose;

	nvgpu_mutex_acquire(&refch->error_notifier_mutex);
	if (refch->error_notifier_ref) {
		u32 err = refch->error_notifier->info32;

		if (err == NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT)
			verbose = refch->timeout_debug_dump;
	}
	nvgpu_mutex_release(&refch->error_notifier_mutex);
	return verbose;
}

/* caller must hold a channel reference */
static void gk20a_fifo_set_has_timedout_and_wake_up_wqs(struct gk20a *g,
		struct channel_gk20a *refch)
{
	if (refch) {
		/* mark channel as faulted */
		refch->has_timedout = true;
		nvgpu_smp_wmb();
		/* unblock pending waits */
		nvgpu_cond_broadcast_interruptible(&refch->semaphore_wq);
		nvgpu_cond_broadcast_interruptible(&refch->notifier_wq);
	}
}

/* caller must hold a channel reference */
bool gk20a_fifo_error_ch(struct gk20a *g,
		struct channel_gk20a *refch)
{
	bool verbose;

	verbose = gk20a_fifo_ch_timeout_debug_dump_state(g, refch);
	gk20a_fifo_set_has_timedout_and_wake_up_wqs(g, refch);

	return verbose;
}

bool gk20a_fifo_error_tsg(struct gk20a *g,
		struct tsg_gk20a *tsg)
{
	struct channel_gk20a *ch = NULL;
	bool verbose = false;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		if (gk20a_channel_get(ch)) {
			verbose |= gk20a_fifo_error_ch(g, ch);
			gk20a_channel_put(ch);
		}
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);

	return verbose;
}

/* caller must hold a channel reference */
void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
		struct channel_gk20a *refch)
{
	nvgpu_err(g,
		"channel %d generated a mmu fault", refch->chid);
	gk20a_set_error_notifier(refch,
		NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
}

void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
		struct tsg_gk20a *tsg)
{
	struct channel_gk20a *ch = NULL;

	nvgpu_err(g,
		"TSG %d generated a mmu fault", tsg->tsgid);

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		if (gk20a_channel_get(ch)) {
			gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
			gk20a_channel_put(ch);
		}
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}

void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
{
	struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid];
	struct channel_gk20a *ch;

	gk20a_dbg_fn("");

	g->ops.fifo.disable_tsg(tsg);

	if (preempt)
		g->ops.fifo.preempt_tsg(g, tsgid);

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		if (gk20a_channel_get(ch)) {
			ch->has_timedout = true;
			gk20a_channel_abort_clean_up(ch);
			gk20a_channel_put(ch);
		}
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}

int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
{
	u32 engine_id, engines;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	gr_gk20a_disable_ctxsw(g);

	if (!g->fifo.deferred_reset_pending)
		goto clean_up;

	if (gk20a_is_channel_marked_as_tsg(ch))
		engines = gk20a_fifo_engines_on_id(g, ch->tsgid, true);
	else
		engines = gk20a_fifo_engines_on_id(g, ch->chid, false);
	if (!engines)
		goto clean_up;

	/*
	 * If deferred reset is set for an engine, and channel is running
	 * on that engine, reset it
	 */
	for_each_set_bit(engine_id, &g->fifo.deferred_fault_engines, 32) {
		if (BIT(engine_id) & engines)
			gk20a_fifo_reset_engine(g, engine_id);
	}

	g->fifo.deferred_fault_engines = 0;
	g->fifo.deferred_reset_pending = false;

clean_up:
	gr_gk20a_enable_ctxsw(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return 0;
}

1485 | static bool gk20a_fifo_handle_mmu_fault( | ||
1486 | struct gk20a *g, | ||
1487 | u32 mmu_fault_engines, /* queried from HW if 0 */ | ||
1488 | u32 hw_id, /* queried from HW if ~(u32)0 OR mmu_fault_engines == 0*/ | ||
1489 | bool id_is_tsg) | ||
1490 | { | ||
1491 | bool fake_fault; | ||
1492 | unsigned long fault_id; | ||
1493 | unsigned long engine_mmu_fault_id; | ||
1494 | bool verbose = true; | ||
1495 | u32 grfifo_ctl; | ||
1496 | |||
1497 | gk20a_dbg_fn(""); | ||
1498 | |||
1499 | g->fifo.deferred_reset_pending = false; | ||
1500 | |||
1501 | /* Disable power management */ | ||
1502 | if (g->support_pmu && g->can_elpg) { | ||
1503 | if (nvgpu_pmu_disable_elpg(g)) | ||
1504 | nvgpu_err(g, "failed to set disable elpg"); | ||
1505 | } | ||
1506 | if (g->ops.clock_gating.slcg_gr_load_gating_prod) | ||
1507 | g->ops.clock_gating.slcg_gr_load_gating_prod(g, | ||
1508 | false); | ||
1509 | if (g->ops.clock_gating.slcg_perf_load_gating_prod) | ||
1510 | g->ops.clock_gating.slcg_perf_load_gating_prod(g, | ||
1511 | false); | ||
1512 | if (g->ops.clock_gating.slcg_ltc_load_gating_prod) | ||
1513 | g->ops.clock_gating.slcg_ltc_load_gating_prod(g, | ||
1514 | false); | ||
1515 | |||
1516 | gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN); | ||
1517 | |||
1518 | /* Disable fifo access */ | ||
1519 | grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r()); | ||
1520 | grfifo_ctl &= ~gr_gpfifo_ctl_semaphore_access_f(1); | ||
1521 | grfifo_ctl &= ~gr_gpfifo_ctl_access_f(1); | ||
1522 | |||
1523 | gk20a_writel(g, gr_gpfifo_ctl_r(), | ||
1524 | grfifo_ctl | gr_gpfifo_ctl_access_f(0) | | ||
1525 | gr_gpfifo_ctl_semaphore_access_f(0)); | ||
1526 | |||
1527 | if (mmu_fault_engines) { | ||
1528 | fault_id = mmu_fault_engines; | ||
1529 | fake_fault = true; | ||
1530 | } else { | ||
1531 | fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r()); | ||
1532 | fake_fault = false; | ||
1533 | gk20a_debug_dump(g); | ||
1534 | } | ||
1535 | | ||
1537 | /* go through all faulted engines */ | ||
1538 | for_each_set_bit(engine_mmu_fault_id, &fault_id, 32) { | ||
1539 | /* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to | ||
1540 | * engines; convert engine_mmu_fault_id to engine_id */ | ||
1541 | u32 engine_id = gk20a_mmu_id_to_engine_id(g, | ||
1542 | engine_mmu_fault_id); | ||
1543 | struct mmu_fault_info mmfault_info; | ||
1544 | struct channel_gk20a *ch = NULL; | ||
1545 | struct tsg_gk20a *tsg = NULL; | ||
1546 | struct channel_gk20a *refch = NULL; | ||
1547 | /* read and parse engine status */ | ||
1548 | u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id)); | ||
1549 | u32 ctx_status = fifo_engine_status_ctx_status_v(status); | ||
1550 | bool ctxsw = (ctx_status == | ||
1551 | fifo_engine_status_ctx_status_ctxsw_switch_v() | ||
1552 | || ctx_status == | ||
1553 | fifo_engine_status_ctx_status_ctxsw_save_v() | ||
1554 | || ctx_status == | ||
1555 | fifo_engine_status_ctx_status_ctxsw_load_v()); | ||
1556 | |||
1557 | get_exception_mmu_fault_info(g, engine_mmu_fault_id, | ||
1558 | &mmfault_info); | ||
1559 | trace_gk20a_mmu_fault(mmfault_info.fault_addr, | ||
1560 | mmfault_info.fault_type, | ||
1561 | mmfault_info.access_type, | ||
1562 | mmfault_info.inst_ptr, | ||
1563 | engine_id, | ||
1564 | mmfault_info.client_type_desc, | ||
1565 | mmfault_info.client_id_desc, | ||
1566 | mmfault_info.fault_type_desc); | ||
1567 | nvgpu_err(g, "%s mmu fault on engine %d, " | ||
1568 | "engine subid %d (%s), client %d (%s), " | ||
1569 | "addr 0x%llx, type %d (%s), access_type 0x%08x," | ||
1570 | "inst_ptr 0x%llx", | ||
1571 | fake_fault ? "fake" : "", | ||
1572 | engine_id, | ||
1573 | mmfault_info.client_type, | ||
1574 | mmfault_info.client_type_desc, | ||
1575 | mmfault_info.client_id, mmfault_info.client_id_desc, | ||
1576 | mmfault_info.fault_addr, | ||
1577 | mmfault_info.fault_type, | ||
1578 | mmfault_info.fault_type_desc, | ||
1579 | mmfault_info.access_type, mmfault_info.inst_ptr); | ||
1580 | |||
1581 | if (ctxsw) { | ||
1582 | gk20a_fecs_dump_falcon_stats(g); | ||
1583 | nvgpu_err(g, "gr_status_r : 0x%x", | ||
1584 | gk20a_readl(g, gr_status_r())); | ||
1585 | } | ||
1586 | |||
1587 | /* get the channel/TSG */ | ||
1588 | if (fake_fault) { | ||
1589 | /* use next_id if context load is failing */ | ||
1590 | u32 id, type; | ||
1591 | |||
1592 | if (hw_id == ~(u32)0) { | ||
1593 | id = (ctx_status == | ||
1594 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1595 | fifo_engine_status_next_id_v(status) : | ||
1596 | fifo_engine_status_id_v(status); | ||
1597 | type = (ctx_status == | ||
1598 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1599 | fifo_engine_status_next_id_type_v(status) : | ||
1600 | fifo_engine_status_id_type_v(status); | ||
1601 | } else { | ||
1602 | id = hw_id; | ||
1603 | type = id_is_tsg ? | ||
1604 | fifo_engine_status_id_type_tsgid_v() : | ||
1605 | fifo_engine_status_id_type_chid_v(); | ||
1606 | } | ||
1607 | |||
1608 | if (type == fifo_engine_status_id_type_tsgid_v()) { | ||
1609 | tsg = &g->fifo.tsg[id]; | ||
1610 | } else if (type == fifo_engine_status_id_type_chid_v()) { | ||
1611 | ch = &g->fifo.channel[id]; | ||
1612 | refch = gk20a_channel_get(ch); | ||
1613 | } | ||
1614 | } else { | ||
1615 | /* read channel based on instruction pointer */ | ||
1616 | ch = gk20a_refch_from_inst_ptr(g, | ||
1617 | mmfault_info.inst_ptr); | ||
1618 | refch = ch; | ||
1619 | } | ||
1620 | |||
1621 | if (ch && gk20a_is_channel_marked_as_tsg(ch)) | ||
1622 | tsg = &g->fifo.tsg[ch->tsgid]; | ||
1623 | |||
1624 | /* check if engine reset should be deferred */ | ||
1625 | if (engine_id != FIFO_INVAL_ENGINE_ID) { | ||
1626 | bool defer = gk20a_fifo_should_defer_engine_reset(g, | ||
1627 | engine_id, mmfault_info.client_type, | ||
1628 | fake_fault); | ||
1629 | if ((ch || tsg) && defer) { | ||
1630 | g->fifo.deferred_fault_engines |= BIT(engine_id); | ||
1631 | |||
1632 | /* handled during channel free */ | ||
1633 | g->fifo.deferred_reset_pending = true; | ||
1634 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | ||
1635 | "sm debugger attached," | ||
1636 | " deferring channel recovery to channel free"); | ||
1637 | } else { | ||
1638 | /* if the lock is already taken, a reset is in progress, | ||
1639 | * so there is no need to repeat it */ | ||
1640 | if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) { | ||
1641 | gk20a_fifo_reset_engine(g, engine_id); | ||
1642 | nvgpu_mutex_release(&g->fifo.gr_reset_mutex); | ||
1643 | } | ||
1644 | } | ||
1645 | } | ||
1646 | |||
1647 | #ifdef CONFIG_GK20A_CTXSW_TRACE | ||
1648 | /* | ||
1649 | * For non fake mmu fault, both tsg and ch pointers | ||
1650 | * could be valid. Check tsg first. | ||
1651 | */ | ||
1652 | if (tsg) | ||
1653 | gk20a_ctxsw_trace_tsg_reset(g, tsg); | ||
1654 | else if (ch) | ||
1655 | gk20a_ctxsw_trace_channel_reset(g, ch); | ||
1656 | #endif | ||
1657 | |||
1658 | /* | ||
1659 | * Disable the channel/TSG from hw and increment syncpoints. | ||
1660 | */ | ||
1661 | if (tsg) { | ||
1662 | if (!g->fifo.deferred_reset_pending) { | ||
1663 | if (!fake_fault) | ||
1664 | gk20a_fifo_set_ctx_mmu_error_tsg(g, | ||
1665 | tsg); | ||
1666 | verbose = gk20a_fifo_error_tsg(g, tsg); | ||
1667 | } | ||
1668 | gk20a_fifo_abort_tsg(g, tsg->tsgid, false); | ||
1669 | |||
1670 | /* put back the ref taken early above */ | ||
1671 | if (refch) | ||
1672 | gk20a_channel_put(ch); | ||
1673 | } else if (ch) { | ||
1674 | if (refch) { | ||
1675 | if (!g->fifo.deferred_reset_pending) { | ||
1676 | if (!fake_fault) | ||
1677 | gk20a_fifo_set_ctx_mmu_error_ch( | ||
1678 | g, refch); | ||
1679 | |||
1680 | verbose = gk20a_fifo_error_ch(g, | ||
1681 | refch); | ||
1682 | } | ||
1683 | gk20a_channel_abort(ch, false); | ||
1684 | gk20a_channel_put(ch); | ||
1685 | } else { | ||
1686 | nvgpu_err(g, | ||
1687 | "mmu error in freed channel %d", | ||
1688 | ch->chid); | ||
1689 | } | ||
1690 | } else if (mmfault_info.inst_ptr == | ||
1691 | nvgpu_inst_block_addr(g, &g->mm.bar1.inst_block)) { | ||
1692 | nvgpu_err(g, "mmu fault from bar1"); | ||
1693 | } else if (mmfault_info.inst_ptr == | ||
1694 | nvgpu_inst_block_addr(g, &g->mm.pmu.inst_block)) { | ||
1695 | nvgpu_err(g, "mmu fault from pmu"); | ||
1696 | } else { | ||
1697 | nvgpu_err(g, "couldn't locate channel for mmu fault"); | ||
| } | ||
1698 | } | ||
1699 | |||
1700 | /* clear interrupt */ | ||
1701 | gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id); | ||
1702 | |||
1703 | /* resume scheduler */ | ||
1704 | gk20a_writel(g, fifo_error_sched_disable_r(), | ||
1705 | gk20a_readl(g, fifo_error_sched_disable_r())); | ||
1706 | |||
1707 | /* Re-enable fifo access */ | ||
1708 | gk20a_writel(g, gr_gpfifo_ctl_r(), | ||
1709 | gr_gpfifo_ctl_access_enabled_f() | | ||
1710 | gr_gpfifo_ctl_semaphore_access_enabled_f()); | ||
1711 | |||
1712 | /* It is safe to enable ELPG again. */ | ||
1713 | if (g->support_pmu && g->can_elpg) | ||
1714 | nvgpu_pmu_enable_elpg(g); | ||
1715 | |||
1716 | return verbose; | ||
1717 | } | ||
1718 | |||
1719 | static void gk20a_fifo_get_faulty_id_type(struct gk20a *g, int engine_id, | ||
1720 | u32 *id, u32 *type) | ||
1721 | { | ||
1722 | u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id)); | ||
1723 | u32 ctx_status = fifo_engine_status_ctx_status_v(status); | ||
1724 | |||
1725 | /* use next_id if context load is failing */ | ||
1726 | *id = (ctx_status == | ||
1727 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1728 | fifo_engine_status_next_id_v(status) : | ||
1729 | fifo_engine_status_id_v(status); | ||
1730 | |||
1731 | *type = (ctx_status == | ||
1732 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1733 | fifo_engine_status_next_id_type_v(status) : | ||
1734 | fifo_engine_status_id_type_v(status); | ||
1735 | } | ||
1736 | |||
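| /* | ||
| * Return a bitmask of active engines whose current context (or next | ||
| * context, when a ctxsw load is in progress) matches the given | ||
| * channel or TSG id. Only busy engines are considered. | ||
| */ | ||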
1737 | static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg) | ||
1738 | { | ||
1739 | unsigned int i; | ||
1740 | u32 engines = 0; | ||
1741 | |||
1742 | for (i = 0; i < g->fifo.num_engines; i++) { | ||
1743 | u32 active_engine_id = g->fifo.active_engines_list[i]; | ||
1744 | u32 status = gk20a_readl(g, fifo_engine_status_r(active_engine_id)); | ||
1745 | u32 ctx_status = | ||
1746 | fifo_engine_status_ctx_status_v(status); | ||
1747 | u32 ctx_id = (ctx_status == | ||
1748 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1749 | fifo_engine_status_next_id_v(status) : | ||
1750 | fifo_engine_status_id_v(status); | ||
1751 | u32 type = (ctx_status == | ||
1752 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1753 | fifo_engine_status_next_id_type_v(status) : | ||
1754 | fifo_engine_status_id_type_v(status); | ||
1755 | bool busy = fifo_engine_status_engine_v(status) == | ||
1756 | fifo_engine_status_engine_busy_v(); | ||
1757 | if (busy && ctx_id == id) { | ||
1758 | if ((is_tsg && type == | ||
1759 | fifo_engine_status_id_type_tsgid_v()) || | ||
1760 | (!is_tsg && type == | ||
1761 | fifo_engine_status_id_type_chid_v())) | ||
1762 | engines |= BIT(active_engine_id); | ||
1763 | } | ||
1764 | } | ||
1765 | |||
1766 | return engines; | ||
1767 | } | ||
1768 | |||
1769 | void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose) | ||
1770 | { | ||
1771 | u32 engines; | ||
1772 | |||
1773 | /* stop context switching to prevent engine assignments from | ||
1774 | * changing until the channel is recovered */ | ||
1775 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1776 | gr_gk20a_disable_ctxsw(g); | ||
1777 | |||
1778 | engines = gk20a_fifo_engines_on_id(g, chid, false); | ||
1779 | |||
1780 | if (engines) { | ||
1781 | gk20a_fifo_recover(g, engines, chid, false, true, verbose); | ||
1782 | } else { | ||
1783 | struct channel_gk20a *ch = &g->fifo.channel[chid]; | ||
1784 | |||
1785 | if (gk20a_channel_get(ch)) { | ||
1786 | gk20a_channel_abort(ch, false); | ||
1787 | |||
1788 | if (gk20a_fifo_error_ch(g, ch)) | ||
1789 | gk20a_debug_dump(g); | ||
1790 | |||
1791 | gk20a_channel_put(ch); | ||
1792 | } | ||
1793 | } | ||
1794 | |||
1795 | gr_gk20a_enable_ctxsw(g); | ||
1796 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1797 | } | ||
1798 | |||
1799 | void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose) | ||
1800 | { | ||
1801 | u32 engines; | ||
1802 | |||
1803 | /* stop context switching to prevent engine assignments from | ||
1804 | * changing until the TSG is recovered */ | ||
1805 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1806 | gr_gk20a_disable_ctxsw(g); | ||
1807 | |||
1808 | engines = gk20a_fifo_engines_on_id(g, tsgid, true); | ||
1809 | |||
1810 | if (engines) { | ||
1811 | gk20a_fifo_recover(g, engines, tsgid, true, true, verbose); | ||
1812 | } else { | ||
1813 | struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid]; | ||
1814 | |||
1815 | if (gk20a_fifo_error_tsg(g, tsg)) | ||
1816 | gk20a_debug_dump(g); | ||
1817 | |||
1818 | gk20a_fifo_abort_tsg(g, tsgid, false); | ||
1819 | } | ||
1820 | |||
1821 | gr_gk20a_enable_ctxsw(g); | ||
1822 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1823 | } | ||
1824 | |||
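| /* | ||
| * Tear down channels/TSGs on the given engines. When hw_id is known, | ||
| * the engines running it are looked up directly; otherwise the | ||
| * faulted id/type is read back from each engine's status and every | ||
| * engine running the same context is reset as well. Recovery proper | ||
| * is done by injecting a fake MMU fault and invoking the MMU fault | ||
| * handler with the sched error and MMU fault interrupts masked. | ||
| */ | ||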
1825 | void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids, | ||
1826 | u32 hw_id, unsigned int id_type, unsigned int rc_type, | ||
1827 | struct mmu_fault_info *mmfault) | ||
1828 | { | ||
1829 | unsigned long engine_id, i; | ||
1830 | unsigned long _engine_ids = __engine_ids; | ||
1831 | unsigned long engine_ids = 0; | ||
1832 | u32 val; | ||
1833 | u32 mmu_fault_engines = 0; | ||
1834 | u32 ref_type; | ||
1835 | u32 ref_id; | ||
1836 | bool ref_id_is_tsg = false; | ||
1837 | bool id_is_known = (id_type != ID_TYPE_UNKNOWN); | ||
1838 | bool id_is_tsg = (id_type == ID_TYPE_TSG); | ||
1839 | |||
1840 | if (id_is_known) { | ||
1841 | engine_ids = gk20a_fifo_engines_on_id(g, hw_id, id_is_tsg); | ||
1842 | ref_id = hw_id; | ||
1843 | ref_type = id_is_tsg ? | ||
1844 | fifo_engine_status_id_type_tsgid_v() : | ||
1845 | fifo_engine_status_id_type_chid_v(); | ||
1846 | ref_id_is_tsg = id_is_tsg; | ||
1847 | /* at least one engine will get passed during a sched error */ | ||
1848 | engine_ids |= __engine_ids; | ||
1849 | for_each_set_bit(engine_id, &engine_ids, 32) { | ||
1850 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); | ||
1851 | |||
1852 | if (mmu_id != FIFO_INVAL_ENGINE_ID) | ||
1853 | mmu_fault_engines |= BIT(mmu_id); | ||
1854 | } | ||
1855 | } else { | ||
1856 | /* store faulted engines in advance */ | ||
1857 | for_each_set_bit(engine_id, &_engine_ids, 32) { | ||
1858 | gk20a_fifo_get_faulty_id_type(g, engine_id, &ref_id, | ||
1859 | &ref_type); | ||
1860 | if (ref_type == fifo_engine_status_id_type_tsgid_v()) | ||
1861 | ref_id_is_tsg = true; | ||
1862 | else | ||
1863 | ref_id_is_tsg = false; | ||
1864 | /* Reset *all* engines that use the | ||
1865 | * same channel as the faulty engine */ | ||
1866 | for (i = 0; i < g->fifo.num_engines; i++) { | ||
1867 | u32 active_engine_id = g->fifo.active_engines_list[i]; | ||
1868 | u32 type; | ||
1869 | u32 id; | ||
1870 | |||
1871 | gk20a_fifo_get_faulty_id_type(g, active_engine_id, &id, &type); | ||
1872 | if (ref_type == type && ref_id == id) { | ||
1873 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id); | ||
1874 | |||
1875 | engine_ids |= BIT(active_engine_id); | ||
1876 | if (mmu_id != FIFO_INVAL_ENGINE_ID) | ||
1877 | mmu_fault_engines |= BIT(mmu_id); | ||
1878 | } | ||
1879 | } | ||
1880 | } | ||
1881 | } | ||
1882 | |||
1883 | if (mmu_fault_engines) { | ||
1884 | /* | ||
1885 | * A sched error prevents recovery, and a ctxsw error will retrigger | ||
1886 | * every 100 ms. Disable the sched error and MMU fault interrupts | ||
| * to allow recovery. | ||
1887 | */ | ||
1888 | val = gk20a_readl(g, fifo_intr_en_0_r()); | ||
1889 | val &= ~(fifo_intr_en_0_sched_error_m() | | ||
1890 | fifo_intr_en_0_mmu_fault_m()); | ||
1891 | gk20a_writel(g, fifo_intr_en_0_r(), val); | ||
1892 | gk20a_writel(g, fifo_intr_0_r(), | ||
1893 | fifo_intr_0_sched_error_reset_f()); | ||
1894 | |||
1895 | g->ops.fifo.trigger_mmu_fault(g, engine_ids); | ||
1896 | gk20a_fifo_handle_mmu_fault(g, mmu_fault_engines, ref_id, | ||
1897 | ref_id_is_tsg); | ||
1898 | |||
1899 | val = gk20a_readl(g, fifo_intr_en_0_r()); | ||
1900 | val |= fifo_intr_en_0_mmu_fault_f(1) | ||
1901 | | fifo_intr_en_0_sched_error_f(1); | ||
1902 | gk20a_writel(g, fifo_intr_en_0_r(), val); | ||
1903 | } | ||
1904 | } | ||
1905 | |||
1906 | void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | ||
1907 | u32 hw_id, bool id_is_tsg, | ||
1908 | bool id_is_known, bool verbose) | ||
1909 | { | ||
1910 | unsigned int id_type; | ||
1911 | |||
1912 | if (verbose) | ||
1913 | gk20a_debug_dump(g); | ||
1914 | |||
1915 | if (g->ops.ltc.flush) | ||
1916 | g->ops.ltc.flush(g); | ||
1917 | |||
1918 | if (id_is_known) | ||
1919 | id_type = id_is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL; | ||
1920 | else | ||
1921 | id_type = ID_TYPE_UNKNOWN; | ||
1922 | |||
1923 | g->ops.fifo.teardown_ch_tsg(g, __engine_ids, hw_id, id_type, | ||
1924 | RC_TYPE_NORMAL, NULL); | ||
1925 | } | ||
1926 | |||
1927 | /* force reset channel and tsg (if it's part of one) */ | ||
1928 | int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, | ||
1929 | u32 err_code, bool verbose) | ||
1930 | { | ||
1931 | struct tsg_gk20a *tsg = NULL; | ||
1932 | struct channel_gk20a *ch_tsg = NULL; | ||
1933 | struct gk20a *g = ch->g; | ||
1934 | |||
1935 | if (gk20a_is_channel_marked_as_tsg(ch)) { | ||
1936 | tsg = &g->fifo.tsg[ch->tsgid]; | ||
1937 | |||
1938 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | ||
1939 | |||
1940 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { | ||
1941 | if (gk20a_channel_get(ch_tsg)) { | ||
1942 | gk20a_set_error_notifier(ch_tsg, err_code); | ||
1943 | gk20a_channel_put(ch_tsg); | ||
1944 | } | ||
1945 | } | ||
1946 | |||
1947 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | ||
1948 | gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); | ||
1949 | } else { | ||
1950 | gk20a_set_error_notifier(ch, err_code); | ||
1951 | gk20a_fifo_recover_ch(g, ch->chid, verbose); | ||
1952 | } | ||
1953 | |||
1954 | return 0; | ||
1955 | } | ||
1956 | |||
1957 | int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch) | ||
1958 | { | ||
1959 | struct gk20a *g = ch->g; | ||
1960 | |||
1961 | if (gk20a_fifo_channel_status_is_next(g, ch->chid)) { | ||
1962 | nvgpu_err(g, "Channel %d to be removed from TSG %d has NEXT set!", | ||
1963 | ch->chid, ch->tsgid); | ||
1964 | return -EINVAL; | ||
1965 | } | ||
1966 | |||
1967 | if (g->ops.fifo.tsg_verify_status_ctx_reload) | ||
1968 | g->ops.fifo.tsg_verify_status_ctx_reload(ch); | ||
1969 | |||
1970 | if (g->ops.fifo.tsg_verify_status_faulted) | ||
1971 | g->ops.fifo.tsg_verify_status_faulted(ch); | ||
1972 | |||
1973 | return 0; | ||
1974 | } | ||
1975 | |||
1976 | int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch) | ||
1977 | { | ||
1978 | struct gk20a *g = ch->g; | ||
1979 | struct fifo_gk20a *f = &g->fifo; | ||
1980 | struct tsg_gk20a *tsg = &f->tsg[ch->tsgid]; | ||
1981 | int err; | ||
1982 | bool tsg_timedout = false; | ||
1983 | |||
1984 | /* If one channel in TSG times out, we disable all channels */ | ||
1985 | nvgpu_rwsem_down_write(&tsg->ch_list_lock); | ||
1986 | tsg_timedout = ch->has_timedout; | ||
1987 | nvgpu_rwsem_up_write(&tsg->ch_list_lock); | ||
1988 | |||
1989 | /* Disable TSG and examine status before unbinding channel */ | ||
1990 | g->ops.fifo.disable_tsg(tsg); | ||
1991 | |||
1992 | err = g->ops.fifo.preempt_tsg(g, tsg->tsgid); | ||
1993 | if (err) | ||
1994 | goto fail_enable_tsg; | ||
1995 | |||
1996 | if (g->ops.fifo.tsg_verify_channel_status && !tsg_timedout) { | ||
1997 | err = g->ops.fifo.tsg_verify_channel_status(ch); | ||
1998 | if (err) | ||
1999 | goto fail_enable_tsg; | ||
2000 | } | ||
2001 | |||
2002 | /* Channel should be seen as TSG channel while updating runlist */ | ||
2003 | err = channel_gk20a_update_runlist(ch, false); | ||
2004 | if (err) | ||
2005 | goto fail_enable_tsg; | ||
2006 | |||
2007 | /* Remove channel from TSG and re-enable rest of the channels */ | ||
2008 | nvgpu_rwsem_down_write(&tsg->ch_list_lock); | ||
2009 | nvgpu_list_del(&ch->ch_entry); | ||
2010 | nvgpu_rwsem_up_write(&tsg->ch_list_lock); | ||
2011 | |||
2012 | /* | ||
2013 | * Don't re-enable the channels if the TSG has already timed out | ||
2014 | * | ||
2015 | * Note that disabling and preempting the TSG could be skipped on a | ||
2016 | * timeout as well, but we keep them to ensure the TSG is kicked out | ||
2017 | */ | ||
2018 | if (!tsg_timedout) | ||
2019 | g->ops.fifo.enable_tsg(tsg); | ||
2020 | |||
2021 | gk20a_channel_abort_clean_up(ch); | ||
2022 | |||
2023 | return 0; | ||
2024 | |||
2025 | fail_enable_tsg: | ||
2026 | if (!tsg_timedout) | ||
2027 | g->ops.fifo.enable_tsg(tsg); | ||
2028 | return err; | ||
2029 | } | ||
2030 | |||
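| /* | ||
| * Find a busy engine that is stuck in a context switch and report | ||
| * the id/type of the context involved. During a ctxsw switch, the | ||
| * FECS ctxsw mailbox 2 WFI-restore bit selects whether the next | ||
| * (incoming) or the current (outgoing) context is blamed. | ||
| */ | ||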
2031 | u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g, | ||
2032 | int *__id, bool *__is_tsg) | ||
2033 | { | ||
2034 | u32 engine_id; | ||
2035 | int id = -1; | ||
2036 | bool is_tsg = false; | ||
2037 | u32 mailbox2; | ||
2038 | u32 active_engine_id = FIFO_INVAL_ENGINE_ID; | ||
2039 | |||
2040 | for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { | ||
2041 | u32 status; | ||
2042 | u32 ctx_status; | ||
2043 | bool failing_engine; | ||
2044 | |||
2045 | active_engine_id = g->fifo.active_engines_list[engine_id]; | ||
2046 | status = gk20a_readl(g, fifo_engine_status_r(active_engine_id)); | ||
2047 | ctx_status = fifo_engine_status_ctx_status_v(status); | ||
2048 | |||
2049 | /* we are interested in busy engines */ | ||
2050 | failing_engine = fifo_engine_status_engine_v(status) == | ||
2051 | fifo_engine_status_engine_busy_v(); | ||
2052 | |||
2053 | /* ...that are doing a context switch */ | ||
2054 | failing_engine = failing_engine && | ||
2055 | (ctx_status == | ||
2056 | fifo_engine_status_ctx_status_ctxsw_switch_v() | ||
2057 | || ctx_status == | ||
2058 | fifo_engine_status_ctx_status_ctxsw_save_v() | ||
2059 | || ctx_status == | ||
2060 | fifo_engine_status_ctx_status_ctxsw_load_v()); | ||
2061 | |||
2062 | if (!failing_engine) { | ||
2063 | active_engine_id = FIFO_INVAL_ENGINE_ID; | ||
2064 | continue; | ||
2065 | } | ||
2066 | |||
2067 | if (ctx_status == | ||
2068 | fifo_engine_status_ctx_status_ctxsw_load_v()) { | ||
2069 | id = fifo_engine_status_next_id_v(status); | ||
2070 | is_tsg = fifo_engine_status_next_id_type_v(status) != | ||
2071 | fifo_engine_status_next_id_type_chid_v(); | ||
2072 | } else if (ctx_status == | ||
2073 | fifo_engine_status_ctx_status_ctxsw_switch_v()) { | ||
2074 | mailbox2 = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(2)); | ||
2075 | if (mailbox2 & FECS_METHOD_WFI_RESTORE) { | ||
2076 | id = fifo_engine_status_next_id_v(status); | ||
2077 | is_tsg = fifo_engine_status_next_id_type_v(status) != | ||
2078 | fifo_engine_status_next_id_type_chid_v(); | ||
2079 | } else { | ||
2080 | id = fifo_engine_status_id_v(status); | ||
2081 | is_tsg = fifo_engine_status_id_type_v(status) != | ||
2082 | fifo_engine_status_id_type_chid_v(); | ||
2083 | } | ||
2084 | } else { | ||
2085 | id = fifo_engine_status_id_v(status); | ||
2086 | is_tsg = fifo_engine_status_id_type_v(status) != | ||
2087 | fifo_engine_status_id_type_chid_v(); | ||
2088 | } | ||
2089 | break; | ||
2090 | } | ||
2091 | |||
2092 | *__id = id; | ||
2093 | *__is_tsg = is_tsg; | ||
2094 | |||
2095 | return active_engine_id; | ||
2096 | } | ||
2097 | |||
2098 | static bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch, | ||
2099 | bool *verbose, u32 *ms) | ||
2100 | { | ||
2101 | bool recover = false; | ||
2102 | bool progress = false; | ||
2103 | |||
2104 | if (gk20a_channel_get(ch)) { | ||
2105 | recover = gk20a_channel_update_and_check_timeout(ch, | ||
2106 | GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000, | ||
2107 | &progress); | ||
2108 | *verbose = ch->timeout_debug_dump; | ||
2109 | *ms = ch->timeout_accumulated_ms; | ||
2110 | if (recover) | ||
2111 | gk20a_set_error_notifier(ch, | ||
2112 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | ||
2113 | |||
2114 | gk20a_channel_put(ch); | ||
2115 | } | ||
2116 | return recover; | ||
2117 | } | ||
2118 | |||
2119 | bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | ||
2120 | bool *verbose, u32 *ms) | ||
2121 | { | ||
2122 | struct channel_gk20a *ch; | ||
2123 | bool recover = false; | ||
2124 | bool progress = false; | ||
2125 | |||
2126 | *verbose = false; | ||
2127 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; | ||
2128 | |||
2129 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | ||
2130 | |||
2131 | /* check if there was some progress on any of the TSG channels. | ||
2132 | * fifo recovery is needed if at least one channel reached the | ||
2133 | * maximum timeout without progress (update in gpfifo pointers). | ||
2134 | */ | ||
2135 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | ||
2136 | if (gk20a_channel_get(ch)) { | ||
2137 | recover = gk20a_channel_update_and_check_timeout(ch, | ||
2138 | *ms, &progress); | ||
2139 | if (progress || recover) | ||
2140 | break; | ||
2141 | gk20a_channel_put(ch); | ||
2142 | } | ||
2143 | } | ||
2144 | |||
2145 | /* if at least one channel in the TSG made some progress, reset | ||
2146 | * accumulated timeout for all channels in the TSG. In particular, | ||
2147 | * this resets timeout for channels that already completed their work | ||
2148 | */ | ||
2149 | if (progress) { | ||
2150 | gk20a_dbg_info("progress on tsg=%d ch=%d", | ||
2151 | tsg->tsgid, ch->chid); | ||
2152 | gk20a_channel_put(ch); | ||
2153 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; | ||
2154 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | ||
2155 | if (gk20a_channel_get(ch)) { | ||
2156 | ch->timeout_accumulated_ms = *ms; | ||
2157 | gk20a_channel_put(ch); | ||
2158 | } | ||
2159 | } | ||
2160 | } | ||
2161 | |||
2162 | /* if one channel is presumed dead (no progress for too long), then | ||
2163 | * fifo recovery is needed. we can't really figure out which channel | ||
2164 | * caused the problem, so set timeout error notifier for all channels. | ||
2165 | */ | ||
2166 | if (recover) { | ||
2167 | gk20a_dbg_info("timeout on tsg=%d ch=%d", | ||
2168 | tsg->tsgid, ch->chid); | ||
2169 | *ms = ch->timeout_accumulated_ms; | ||
2170 | gk20a_channel_put(ch); | ||
2171 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | ||
2172 | if (gk20a_channel_get(ch)) { | ||
2173 | gk20a_set_error_notifier(ch, | ||
2174 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | ||
2175 | *verbose |= ch->timeout_debug_dump; | ||
2176 | gk20a_channel_put(ch); | ||
2177 | } | ||
2178 | } | ||
2179 | } | ||
2180 | |||
2181 | /* if we could not detect progress on any of the channels, but none | ||
2182 | * of them has reached the timeout, there is nothing more to do: | ||
2183 | * timeout_accumulated_ms has been updated for all of them. | ||
2184 | */ | ||
2185 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | ||
2186 | return recover; | ||
2187 | } | ||
2188 | |||
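| /* | ||
| * Returns true when a ctxsw-timeout recovery was triggered; the | ||
| * caller uses this to decide whether to log channel reset state. | ||
| */ | ||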
2189 | bool gk20a_fifo_handle_sched_error(struct gk20a *g) | ||
2190 | { | ||
2191 | u32 sched_error; | ||
2192 | u32 engine_id; | ||
2193 | int id = -1; | ||
2194 | bool is_tsg = false; | ||
2195 | bool ret = false; | ||
2196 | |||
2197 | /* read the scheduler error register */ | ||
2198 | sched_error = gk20a_readl(g, fifo_intr_sched_error_r()); | ||
2199 | |||
2200 | engine_id = gk20a_fifo_get_failing_engine_data(g, &id, &is_tsg); | ||
2201 | |||
2202 | /* could not find the engine - should never happen */ | ||
2203 | if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) { | ||
2204 | nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine", | ||
2205 | sched_error); | ||
2206 | ret = false; | ||
2207 | goto err; | ||
2208 | } | ||
2209 | |||
2210 | if (fifo_intr_sched_error_code_f(sched_error) == | ||
2211 | fifo_intr_sched_error_code_ctxsw_timeout_v()) { | ||
2212 | struct fifo_gk20a *f = &g->fifo; | ||
2213 | u32 ms = 0; | ||
2214 | bool verbose = false; | ||
2215 | |||
2216 | if (is_tsg) { | ||
2217 | ret = gk20a_fifo_check_tsg_ctxsw_timeout( | ||
2218 | &f->tsg[id], &verbose, &ms); | ||
2219 | } else { | ||
2220 | ret = gk20a_fifo_check_ch_ctxsw_timeout( | ||
2221 | &f->channel[id], &verbose, &ms); | ||
2222 | } | ||
2223 | |||
2224 | if (ret) { | ||
2225 | nvgpu_err(g, | ||
2226 | "fifo sched ctxsw timeout error: " | ||
2227 | "engine=%u, %s=%d, ms=%u", | ||
2228 | engine_id, is_tsg ? "tsg" : "ch", id, ms); | ||
2229 | /* | ||
2230 | * Cancel all channels' timeout since SCHED error might | ||
2231 | * trigger multiple watchdogs at a time | ||
2232 | */ | ||
2233 | gk20a_channel_timeout_restart_all_channels(g); | ||
2234 | gk20a_fifo_recover(g, BIT(engine_id), id, | ||
2235 | is_tsg, true, verbose); | ||
2236 | } else { | ||
2237 | gk20a_dbg_info( | ||
2238 | "fifo is waiting for ctx switch for %d ms, " | ||
2239 | "%s=%d", ms, is_tsg ? "tsg" : "ch", id); | ||
2240 | } | ||
2241 | } else { | ||
2242 | nvgpu_err(g, | ||
2243 | "fifo sched error : 0x%08x, engine=%u, %s=%d", | ||
2244 | sched_error, engine_id, is_tsg ? "tsg" : "ch", id); | ||
2245 | } | ||
2246 | |||
2247 | err: | ||
2248 | return ret; | ||
2249 | } | ||
2250 | |||
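| /* | ||
| * Handle the error-class fifo interrupts and return the mask of | ||
| * bits the caller should clear in fifo_intr_0. | ||
| */ | ||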
2251 | static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) | ||
2252 | { | ||
2253 | bool print_channel_reset_log = false; | ||
2254 | u32 handled = 0; | ||
2255 | |||
2256 | gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); | ||
2257 | |||
2258 | if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { | ||
2259 | /* pio mode is unused. this shouldn't happen, ever. */ | ||
2260 | /* should we clear it or just leave it pending? */ | ||
2261 | nvgpu_err(g, "fifo pio error!"); | ||
2262 | BUG(); | ||
2263 | } | ||
2264 | |||
2265 | if (fifo_intr & fifo_intr_0_bind_error_pending_f()) { | ||
2266 | u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r()); | ||
2267 | nvgpu_err(g, "fifo bind error: 0x%08x", bind_error); | ||
2268 | print_channel_reset_log = true; | ||
2269 | handled |= fifo_intr_0_bind_error_pending_f(); | ||
2270 | } | ||
2271 | |||
2272 | if (fifo_intr & fifo_intr_0_sched_error_pending_f()) { | ||
2273 | print_channel_reset_log = g->ops.fifo.handle_sched_error(g); | ||
2274 | handled |= fifo_intr_0_sched_error_pending_f(); | ||
2275 | } | ||
2276 | |||
2277 | if (fifo_intr & fifo_intr_0_chsw_error_pending_f()) { | ||
2278 | gk20a_fifo_handle_chsw_fault(g); | ||
2279 | handled |= fifo_intr_0_chsw_error_pending_f(); | ||
2280 | } | ||
2281 | |||
2282 | if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) { | ||
2283 | print_channel_reset_log |= | ||
2284 | gk20a_fifo_handle_mmu_fault(g, 0, | ||
2285 | ~(u32)0, false); | ||
2286 | handled |= fifo_intr_0_mmu_fault_pending_f(); | ||
2287 | } | ||
2288 | |||
2289 | if (fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) { | ||
2290 | gk20a_fifo_handle_dropped_mmu_fault(g); | ||
2291 | handled |= fifo_intr_0_dropped_mmu_fault_pending_f(); | ||
2292 | } | ||
2293 | |||
2294 | print_channel_reset_log = !g->fifo.deferred_reset_pending | ||
2295 | && print_channel_reset_log; | ||
2296 | |||
2297 | if (print_channel_reset_log) { | ||
2298 | unsigned int engine_id; | ||
2299 | nvgpu_err(g, | ||
2300 | "channel reset initiated from %s; intr=0x%08x", | ||
2301 | __func__, fifo_intr); | ||
2302 | for (engine_id = 0; | ||
2303 | engine_id < g->fifo.num_engines; | ||
2304 | engine_id++) { | ||
2305 | u32 active_engine_id = g->fifo.active_engines_list[engine_id]; | ||
2306 | u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; | ||
2307 | gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum, | ||
2308 | active_engine_id); | ||
2309 | fifo_pbdma_exception_status(g, | ||
2310 | &g->fifo.engine_info[active_engine_id]); | ||
2311 | fifo_engine_exception_status(g, | ||
2312 | &g->fifo.engine_info[active_engine_id]); | ||
2313 | } | ||
2314 | } | ||
2315 | |||
2316 | return handled; | ||
2317 | } | ||
2318 | |||
2319 | static inline void gk20a_fifo_reset_pbdma_header(struct gk20a *g, int pbdma_id) | ||
2320 | { | ||
2321 | gk20a_writel(g, pbdma_pb_header_r(pbdma_id), | ||
2322 | pbdma_pb_header_first_true_f() | | ||
2323 | pbdma_pb_header_type_non_inc_f()); | ||
2324 | } | ||
2325 | |||
2326 | void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id, | ||
2327 | int pbdma_method_index) | ||
2328 | { | ||
2329 | u32 pbdma_method_stride; | ||
2330 | u32 pbdma_method_reg; | ||
2331 | |||
2332 | pbdma_method_stride = pbdma_method1_r(pbdma_id) - | ||
2333 | pbdma_method0_r(pbdma_id); | ||
2334 | |||
2335 | pbdma_method_reg = pbdma_method0_r(pbdma_id) + | ||
2336 | (pbdma_method_index * pbdma_method_stride); | ||
2337 | |||
2338 | gk20a_writel(g, pbdma_method_reg, | ||
2339 | pbdma_method0_valid_true_f() | | ||
2340 | pbdma_method0_first_true_f() | | ||
2341 | pbdma_method0_addr_f( | ||
2342 | pbdma_udma_nop_r() >> 2)); | ||
2343 | } | ||
2344 | |||
2345 | static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id, | ||
2346 | int pbdma_method_index) | ||
2347 | { | ||
2348 | u32 pbdma_method_stride; | ||
2349 | u32 pbdma_method_reg, pbdma_method_subch; | ||
2350 | |||
2351 | pbdma_method_stride = pbdma_method1_r(pbdma_id) - | ||
2352 | pbdma_method0_r(pbdma_id); | ||
2353 | |||
2354 | pbdma_method_reg = pbdma_method0_r(pbdma_id) + | ||
2355 | (pbdma_method_index * pbdma_method_stride); | ||
2356 | |||
2357 | pbdma_method_subch = pbdma_method0_subch_v( | ||
2358 | gk20a_readl(g, pbdma_method_reg)); | ||
2359 | |||
2360 | if (pbdma_method_subch == 5 || pbdma_method_subch == 6 || | ||
2361 | pbdma_method_subch == 7) | ||
2362 | return true; | ||
2363 | |||
2364 | return false; | ||
2365 | } | ||
2366 | |||
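| /* | ||
| * Decode pbdma_intr_0. Fatal and restartable bits are logged and | ||
| * escalate to PBDMA-fault recovery. Before that, the handlers patch | ||
| * the hardware up in place: the acquire timeout is disabled, and bad | ||
| * PB headers and methods are replaced with NOPs. | ||
| */ | ||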
2367 | unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, | ||
2368 | u32 pbdma_intr_0, u32 *handled, u32 *error_notifier) | ||
2369 | { | ||
2370 | struct fifo_gk20a *f = &g->fifo; | ||
2371 | unsigned int rc_type = RC_TYPE_NO_RC; | ||
2372 | int i; | ||
2373 | unsigned long pbdma_intr_err; | ||
2374 | u32 bit; | ||
2375 | |||
2376 | if ((f->intr.pbdma.device_fatal_0 | | ||
2377 | f->intr.pbdma.channel_fatal_0 | | ||
2378 | f->intr.pbdma.restartable_0) & pbdma_intr_0) { | ||
2379 | |||
2380 | pbdma_intr_err = (unsigned long)pbdma_intr_0; | ||
2381 | for_each_set_bit(bit, &pbdma_intr_err, 32) | ||
2382 | nvgpu_err(g, "PBDMA intr %s Error", | ||
2383 | pbdma_intr_fault_type_desc[bit]); | ||
2384 | |||
2385 | nvgpu_err(g, | ||
2386 | "pbdma_intr_0(%d):0x%08x PBH: %08x " | ||
2387 | "SHADOW: %08x gp shadow0: %08x gp shadow1: %08x" | ||
2388 | "M0: %08x %08x %08x %08x ", | ||
2389 | pbdma_id, pbdma_intr_0, | ||
2390 | gk20a_readl(g, pbdma_pb_header_r(pbdma_id)), | ||
2391 | gk20a_readl(g, pbdma_hdr_shadow_r(pbdma_id)), | ||
2392 | gk20a_readl(g, pbdma_gp_shadow_0_r(pbdma_id)), | ||
2393 | gk20a_readl(g, pbdma_gp_shadow_1_r(pbdma_id)), | ||
2394 | gk20a_readl(g, pbdma_method0_r(pbdma_id)), | ||
2395 | gk20a_readl(g, pbdma_method1_r(pbdma_id)), | ||
2396 | gk20a_readl(g, pbdma_method2_r(pbdma_id)), | ||
2397 | gk20a_readl(g, pbdma_method3_r(pbdma_id)) | ||
2398 | ); | ||
2399 | |||
2400 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2401 | *handled |= ((f->intr.pbdma.device_fatal_0 | | ||
2402 | f->intr.pbdma.channel_fatal_0 | | ||
2403 | f->intr.pbdma.restartable_0) & | ||
2404 | pbdma_intr_0); | ||
2405 | } | ||
2406 | |||
2407 | if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) { | ||
2408 | u32 val = gk20a_readl(g, pbdma_acquire_r(pbdma_id)); | ||
2409 | |||
2410 | val &= ~pbdma_acquire_timeout_en_enable_f(); | ||
2411 | gk20a_writel(g, pbdma_acquire_r(pbdma_id), val); | ||
2412 | if (g->timeouts_enabled) { | ||
2413 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2414 | nvgpu_err(g, | ||
2415 | "semaphore acquire timeout!"); | ||
2416 | *error_notifier = NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT; | ||
2417 | } | ||
2418 | *handled |= pbdma_intr_0_acquire_pending_f(); | ||
2419 | } | ||
2420 | |||
2421 | if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) { | ||
2422 | gk20a_fifo_reset_pbdma_header(g, pbdma_id); | ||
2423 | gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); | ||
2424 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2425 | } | ||
2426 | |||
2427 | if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) { | ||
2428 | gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); | ||
2429 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2430 | } | ||
2431 | |||
2432 | if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { | ||
2433 | *error_notifier = | ||
2434 | NVGPU_CHANNEL_PBDMA_PUSHBUFFER_CRC_MISMATCH; | ||
2435 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2436 | } | ||
2437 | |||
2438 | if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) { | ||
2439 | gk20a_fifo_reset_pbdma_header(g, pbdma_id); | ||
2440 | |||
2441 | for (i = 0; i < 4; i++) { | ||
2442 | if (gk20a_fifo_is_sw_method_subch(g, | ||
2443 | pbdma_id, i)) | ||
2444 | gk20a_fifo_reset_pbdma_method(g, | ||
2445 | pbdma_id, i); | ||
2446 | } | ||
2447 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2448 | } | ||
2449 | |||
2450 | return rc_type; | ||
2451 | } | ||
2452 | |||
2453 | unsigned int gk20a_fifo_handle_pbdma_intr_1(struct gk20a *g, | ||
2454 | u32 pbdma_id, u32 pbdma_intr_1, | ||
2455 | u32 *handled, u32 *error_notifier) | ||
2456 | { | ||
2457 | unsigned int rc_type = RC_TYPE_PBDMA_FAULT; | ||
2458 | |||
2459 | /* | ||
2460 | * all of the interrupts in _intr_1 are "host copy engine" | ||
2461 | * related, which is not supported. For now just make them | ||
2462 | * channel fatal. | ||
2463 | */ | ||
2464 | nvgpu_err(g, "hce err: pbdma_intr_1(%d):0x%08x", | ||
2465 | pbdma_id, pbdma_intr_1); | ||
2466 | *handled |= pbdma_intr_1; | ||
2467 | |||
2468 | return rc_type; | ||
2469 | } | ||
2470 | |||
2471 | static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g, | ||
2472 | struct fifo_gk20a *f, u32 pbdma_id, | ||
2473 | u32 error_notifier) | ||
2474 | { | ||
2475 | u32 status; | ||
2476 | u32 id; | ||
2477 | |||
2478 | nvgpu_log(g, gpu_dbg_info, "pbdma id %d error notifier %d", | ||
2479 | pbdma_id, error_notifier); | ||
2480 | status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id)); | ||
2481 | /* Recover the channel or TSG found on this PBDMA */ | ||
2482 | id = fifo_pbdma_status_id_v(status); | ||
2483 | if (fifo_pbdma_status_id_type_v(status) | ||
2484 | == fifo_pbdma_status_id_type_chid_v()) { | ||
2485 | struct channel_gk20a *ch = &f->channel[id]; | ||
2486 | |||
2487 | if (gk20a_channel_get(ch)) { | ||
2488 | gk20a_set_error_notifier(ch, error_notifier); | ||
2489 | gk20a_fifo_recover_ch(g, id, true); | ||
2490 | gk20a_channel_put(ch); | ||
2491 | } | ||
2492 | } else if (fifo_pbdma_status_id_type_v(status) | ||
2493 | == fifo_pbdma_status_id_type_tsgid_v()) { | ||
2494 | struct tsg_gk20a *tsg = &f->tsg[id]; | ||
2495 | struct channel_gk20a *ch = NULL; | ||
2496 | |||
2497 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | ||
2498 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | ||
2499 | if (gk20a_channel_get(ch)) { | ||
2500 | gk20a_set_error_notifier(ch, | ||
2501 | error_notifier); | ||
2502 | gk20a_channel_put(ch); | ||
2503 | } | ||
2504 | } | ||
2505 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | ||
2506 | gk20a_fifo_recover_tsg(g, id, true); | ||
2507 | } | ||
2508 | } | ||
2509 | |||
2510 | u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f, | ||
2511 | u32 pbdma_id, unsigned int rc) | ||
2512 | { | ||
2513 | u32 pbdma_intr_0 = gk20a_readl(g, pbdma_intr_0_r(pbdma_id)); | ||
2514 | u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id)); | ||
2515 | |||
2516 | u32 handled = 0; | ||
2517 | u32 error_notifier = NVGPU_CHANNEL_PBDMA_ERROR; | ||
2518 | unsigned int rc_type = RC_TYPE_NO_RC; | ||
2519 | |||
2520 | if (pbdma_intr_0) { | ||
2521 | nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr, | ||
2522 | "pbdma id %d intr_0 0x%08x pending", | ||
2523 | pbdma_id, pbdma_intr_0); | ||
2524 | rc_type = g->ops.fifo.handle_pbdma_intr_0(g, pbdma_id, | ||
2525 | pbdma_intr_0, &handled, &error_notifier); | ||
2526 | gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0); | ||
2527 | } | ||
2528 | |||
2529 | if (pbdma_intr_1) { | ||
2530 | nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr, | ||
2531 | "pbdma id %d intr_1 0x%08x pending", | ||
2532 | pbdma_id, pbdma_intr_1); | ||
2533 | rc_type = g->ops.fifo.handle_pbdma_intr_1(g, pbdma_id, | ||
2534 | pbdma_intr_1, &handled, &error_notifier); | ||
2535 | gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1); | ||
2536 | } | ||
2537 | |||
2538 | if (rc == RC_YES && rc_type == RC_TYPE_PBDMA_FAULT) | ||
2539 | gk20a_fifo_pbdma_fault_rc(g, f, pbdma_id, error_notifier); | ||
2540 | |||
2541 | return handled; | ||
2542 | } | ||
2543 | |||
2544 | static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr) | ||
2545 | { | ||
2546 | struct fifo_gk20a *f = &g->fifo; | ||
2547 | u32 clear_intr = 0, i; | ||
2548 | u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); | ||
2549 | u32 pbdma_pending = gk20a_readl(g, fifo_intr_pbdma_id_r()); | ||
2550 | |||
2551 | for (i = 0; i < host_num_pbdma; i++) { | ||
2552 | if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { | ||
2553 | gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); | ||
2554 | clear_intr |= | ||
2555 | gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES); | ||
2556 | } | ||
2557 | } | ||
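| /* | ||
| * The per-PBDMA intr registers are cleared inside | ||
| * gk20a_fifo_handle_pbdma_intr(), so independent of the bits | ||
| * accumulated in clear_intr, only the top-level PBDMA pending | ||
| * flag is left for the caller to clear in fifo_intr_0. | ||
| */ | ||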
2558 | return fifo_intr_0_pbdma_intr_pending_f(); | ||
2559 | } | ||
2560 | |||
2561 | void gk20a_fifo_isr(struct gk20a *g) | ||
2562 | { | ||
2563 | u32 error_intr_mask; | ||
2564 | u32 clear_intr = 0; | ||
2565 | u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); | ||
2566 | |||
2567 | error_intr_mask = g->ops.fifo.intr_0_error_mask(g); | ||
2568 | |||
2569 | if (g->fifo.sw_ready) { | ||
2570 | /* note we're not actually in an "isr", but rather | ||
2571 | * in a threaded interrupt context... */ | ||
2572 | nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); | ||
2573 | |||
2574 | gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); | ||
2575 | |||
2576 | /* handle runlist update */ | ||
2577 | if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { | ||
2578 | gk20a_fifo_handle_runlist_event(g); | ||
2579 | clear_intr |= fifo_intr_0_runlist_event_pending_f(); | ||
2580 | } | ||
2581 | if (fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) | ||
2582 | clear_intr |= fifo_pbdma_isr(g, fifo_intr); | ||
2583 | |||
2584 | if (g->ops.fifo.handle_ctxsw_timeout) | ||
2585 | g->ops.fifo.handle_ctxsw_timeout(g, fifo_intr); | ||
2586 | |||
2587 | if (unlikely(fifo_intr & error_intr_mask)) | ||
2588 | clear_intr = fifo_error_isr(g, fifo_intr); | ||
2589 | |||
2590 | nvgpu_mutex_release(&g->fifo.intr.isr.mutex); | ||
2591 | } | ||
2592 | gk20a_writel(g, fifo_intr_0_r(), clear_intr); | ||
2595 | } | ||
2596 | |||
2597 | int gk20a_fifo_nonstall_isr(struct gk20a *g) | ||
2598 | { | ||
2599 | u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); | ||
2600 | u32 clear_intr = 0; | ||
2601 | |||
2602 | gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); | ||
2603 | |||
2604 | if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) | ||
2605 | clear_intr = fifo_intr_0_channel_intr_pending_f(); | ||
2606 | |||
2607 | gk20a_writel(g, fifo_intr_0_r(), clear_intr); | ||
2608 | |||
2609 | return gk20a_nonstall_ops_wakeup_semaphore; | ||
2610 | } | ||
2611 | |||
2612 | void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg) | ||
2613 | { | ||
2614 | if (is_tsg) | ||
2615 | gk20a_writel(g, fifo_preempt_r(), | ||
2616 | fifo_preempt_id_f(id) | | ||
2617 | fifo_preempt_type_tsg_f()); | ||
2618 | else | ||
2619 | gk20a_writel(g, fifo_preempt_r(), | ||
2620 | fifo_preempt_chid_f(id) | | ||
2621 | fifo_preempt_type_channel_f()); | ||
2622 | } | ||
2623 | |||
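| /* | ||
| * Poll fifo_preempt_r() until the pending bit clears, backing off | ||
| * exponentially from GR_IDLE_CHECK_DEFAULT up to GR_IDLE_CHECK_MAX, | ||
| * bounded by the gr idle timeout. Returns -EBUSY on timeout. | ||
| */ | ||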
2624 | int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, | ||
2625 | unsigned int id_type, unsigned int timeout_rc_type) | ||
2626 | { | ||
2627 | struct nvgpu_timeout timeout; | ||
2628 | u32 delay = GR_IDLE_CHECK_DEFAULT; | ||
2629 | int ret = -EBUSY; | ||
2630 | |||
2631 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), | ||
2632 | NVGPU_TIMER_CPU_TIMER); | ||
2633 | do { | ||
2634 | if (!(gk20a_readl(g, fifo_preempt_r()) & | ||
2635 | fifo_preempt_pending_true_f())) { | ||
2636 | ret = 0; | ||
2637 | break; | ||
2638 | } | ||
2639 | |||
2640 | nvgpu_usleep_range(delay, delay * 2); | ||
2641 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | ||
2642 | } while (!nvgpu_timeout_expired_msg(&timeout, "preempt timeout")); | ||
2643 | |||
2644 | return ret; | ||
2645 | } | ||
2646 | |||
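| /* | ||
| * Preempt timed out: post the idle-timeout error notifier to every | ||
| * channel of the id (all channels of the TSG, or the single channel) | ||
| * and trigger full recovery. | ||
| */ | ||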
2647 | void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | ||
2648 | unsigned int id_type) | ||
2649 | { | ||
2650 | if (id_type == ID_TYPE_TSG) { | ||
2651 | struct tsg_gk20a *tsg = &g->fifo.tsg[id]; | ||
2652 | struct channel_gk20a *ch = NULL; | ||
2653 | |||
2654 | nvgpu_err(g, | ||
2655 | "preempt TSG %d timeout", id); | ||
2656 | |||
2657 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | ||
2658 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | ||
2659 | if (!gk20a_channel_get(ch)) | ||
2660 | continue; | ||
2661 | gk20a_set_error_notifier(ch, | ||
2662 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | ||
2663 | gk20a_channel_put(ch); | ||
2664 | } | ||
2665 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | ||
2666 | gk20a_fifo_recover_tsg(g, id, true); | ||
2667 | } else { | ||
2668 | struct channel_gk20a *ch = &g->fifo.channel[id]; | ||
2669 | |||
2670 | nvgpu_err(g, | ||
2671 | "preempt channel %d timeout", id); | ||
2672 | |||
2673 | if (gk20a_channel_get(ch)) { | ||
2674 | gk20a_set_error_notifier(ch, | ||
2675 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | ||
2676 | gk20a_fifo_recover_ch(g, id, true); | ||
2677 | gk20a_channel_put(ch); | ||
2678 | } | ||
2679 | } | ||
2680 | } | ||
2681 | |||
2682 | int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) | ||
2683 | { | ||
2684 | int ret; | ||
2685 | unsigned int id_type; | ||
2686 | |||
2687 | gk20a_dbg_fn("%d", id); | ||
2688 | |||
2689 | /* issue preempt */ | ||
2690 | gk20a_fifo_issue_preempt(g, id, is_tsg); | ||
2691 | |||
2692 | id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL; | ||
2693 | |||
2694 | /* wait for preempt */ | ||
2695 | ret = g->ops.fifo.is_preempt_pending(g, id, id_type, | ||
2696 | PREEMPT_TIMEOUT_RC); | ||
2697 | |||
2698 | if (ret) | ||
2699 | __locked_fifo_preempt_timeout_rc(g, id, id_type); | ||
2700 | |||
2701 | return ret; | ||
2702 | } | ||
2703 | |||
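| /* | ||
| * The PMU FIFO mutex serializes the preempt against the PMU; if it | ||
| * cannot be acquired (mutex_ret != 0), we proceed without it and | ||
| * skip the release. | ||
| */ | ||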
2704 | int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) | ||
2705 | { | ||
2706 | struct fifo_gk20a *f = &g->fifo; | ||
2707 | int ret = 0; | ||
2708 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; | ||
2709 | u32 mutex_ret = 0; | ||
2710 | u32 i; | ||
2711 | |||
2712 | gk20a_dbg_fn("%d", chid); | ||
2713 | |||
2714 | /* we have no idea which runlist we are using. lock all */ | ||
2715 | for (i = 0; i < g->fifo.max_runlists; i++) | ||
2716 | nvgpu_mutex_acquire(&f->runlist_info[i].mutex); | ||
2717 | |||
2718 | mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2719 | |||
2720 | ret = __locked_fifo_preempt(g, chid, false); | ||
2721 | |||
2722 | if (!mutex_ret) | ||
2723 | nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2724 | |||
2725 | for (i = 0; i < g->fifo.max_runlists; i++) | ||
2726 | nvgpu_mutex_release(&f->runlist_info[i].mutex); | ||
2727 | |||
2728 | return ret; | ||
2729 | } | ||
2730 | |||
2731 | int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) | ||
2732 | { | ||
2733 | struct fifo_gk20a *f = &g->fifo; | ||
2734 | int ret = 0; | ||
2735 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; | ||
2736 | u32 mutex_ret = 0; | ||
2737 | u32 i; | ||
2738 | |||
2739 | gk20a_dbg_fn("%d", tsgid); | ||
2740 | |||
2741 | /* we have no idea which runlist we are using. lock all */ | ||
2742 | for (i = 0; i < g->fifo.max_runlists; i++) | ||
2743 | nvgpu_mutex_acquire(&f->runlist_info[i].mutex); | ||
2744 | |||
2745 | mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2746 | |||
2747 | ret = __locked_fifo_preempt(g, tsgid, true); | ||
2748 | |||
2749 | if (!mutex_ret) | ||
2750 | nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2751 | |||
2752 | for (i = 0; i < g->fifo.max_runlists; i++) | ||
2753 | nvgpu_mutex_release(&f->runlist_info[i].mutex); | ||
2754 | |||
2755 | return ret; | ||
2756 | } | ||
2757 | |||
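| /* Preempt a channel, or its whole TSG if it is bound to one. */ | ||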
2758 | int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch) | ||
2759 | { | ||
2760 | int err; | ||
2761 | |||
2762 | if (gk20a_is_channel_marked_as_tsg(ch)) | ||
2763 | err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid); | ||
2764 | else | ||
2765 | err = g->ops.fifo.preempt_channel(ch->g, ch->chid); | ||
2766 | |||
2767 | return err; | ||
2768 | } | ||
2769 | |||
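| /* | ||
| * Set or clear the per-runlist bits of fifo_sched_disable_r() to | ||
| * disable or re-enable scheduling on the runlists in runlists_mask. | ||
| */ | ||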
2770 | static void gk20a_fifo_sched_disable_rw(struct gk20a *g, u32 runlists_mask, | ||
2771 | u32 runlist_state) | ||
2772 | { | ||
2773 | u32 reg_val; | ||
2774 | |||
2775 | reg_val = gk20a_readl(g, fifo_sched_disable_r()); | ||
2776 | |||
2777 | if (runlist_state == RUNLIST_DISABLED) | ||
2778 | reg_val |= runlists_mask; | ||
2779 | else | ||
2780 | reg_val &= (~runlists_mask); | ||
2781 | |||
2782 | gk20a_writel(g, fifo_sched_disable_r(), reg_val); | ||
2783 | |||
2785 | |||
2786 | void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask, | ||
2787 | u32 runlist_state, | ||
2788 | int is_runlist_info_mutex_locked) | ||
2789 | { | ||
2790 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; | ||
2791 | u32 mutex_ret; | ||
2792 | u32 runlist_id; | ||
2793 | |||
2794 | gk20a_dbg_fn(""); | ||
2795 | |||
2796 | if (!is_runlist_info_mutex_locked) { | ||
2797 | gk20a_dbg_info("acquire runlist_info mutex"); | ||
2798 | for (runlist_id = 0; runlist_id < g->fifo.max_runlists; | ||
2799 | runlist_id++) { | ||
2800 | if (runlists_mask & | ||
2801 | fifo_sched_disable_runlist_m(runlist_id)) | ||
2802 | nvgpu_mutex_acquire(&g->fifo. | ||
2803 | runlist_info[runlist_id].mutex); | ||
2804 | } | ||
2805 | } | ||
2806 | |||
2807 | mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2808 | |||
2809 | gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state); | ||
2810 | |||
2811 | if (!mutex_ret) | ||
2812 | nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2813 | |||
2814 | if (!is_runlist_info_mutex_locked) { | ||
2815 | gk20a_dbg_info("release runlist_info mutex"); | ||
2816 | for (runlist_id = 0; runlist_id < g->fifo.max_runlists; | ||
2817 | runlist_id++) { | ||
2818 | if (runlists_mask & | ||
2819 | fifo_sched_disable_runlist_m(runlist_id)) | ||
2820 | |||
2821 | nvgpu_mutex_release(&g->fifo. | ||
2822 | runlist_info[runlist_id].mutex); | ||
2823 | } | ||
2824 | } | ||
2825 | |||
2826 | gk20a_dbg_fn("done"); | ||
2827 | } | ||
2828 | |||
2829 | void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg) | ||
2830 | { | ||
2831 | gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( | ||
2832 | tsg->runlist_id), RUNLIST_ENABLED, | ||
2833 | !RUNLIST_INFO_MUTEX_LOCKED); | ||
2835 | } | ||
2836 | |||
2837 | void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg) | ||
2838 | { | ||
2839 | gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( | ||
2840 | tsg->runlist_id), RUNLIST_DISABLED, | ||
2841 | !RUNLIST_INFO_MUTEX_LOCKED); | ||
2842 | } | ||
2843 | |||
2844 | int gk20a_fifo_enable_engine_activity(struct gk20a *g, | ||
2845 | struct fifo_engine_info_gk20a *eng_info) | ||
2846 | { | ||
2847 | gk20a_dbg_fn(""); | ||
2848 | |||
2849 | gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( | ||
2850 | eng_info->runlist_id), RUNLIST_ENABLED, | ||
2851 | !RUNLIST_INFO_MUTEX_LOCKED); | ||
2852 | |||
2853 | gk20a_dbg_fn("done"); | ||
2854 | |||
2855 | return 0; | ||
2856 | } | ||
2857 | |||
2858 | int gk20a_fifo_enable_all_engine_activity(struct gk20a *g) | ||
2859 | { | ||
2860 | unsigned int i; | ||
2861 | int err = 0, ret = 0; | ||
2862 | |||
2863 | for (i = 0; i < g->fifo.num_engines; i++) { | ||
2864 | u32 active_engine_id = g->fifo.active_engines_list[i]; | ||
2865 | err = gk20a_fifo_enable_engine_activity(g, | ||
2866 | &g->fifo.engine_info[active_engine_id]); | ||
2867 | if (err) { | ||
2868 | nvgpu_err(g, | ||
2869 | "failed to enable engine %d activity", active_engine_id); | ||
2870 | ret = err; | ||
2871 | } | ||
2872 | } | ||
2873 | |||
2874 | return ret; | ||
2875 | } | ||
2876 | |||
2877 | int gk20a_fifo_disable_engine_activity(struct gk20a *g, | ||
2878 | struct fifo_engine_info_gk20a *eng_info, | ||
2879 | bool wait_for_idle) | ||
2880 | { | ||
2881 | u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat; | ||
2882 | u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID; | ||
2883 | u32 engine_chid = FIFO_INVAL_CHANNEL_ID; | ||
2884 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; | ||
2885 | u32 mutex_ret; | ||
2886 | int err = 0; | ||
2887 | |||
2888 | gk20a_dbg_fn(""); | ||
2889 | |||
2890 | gr_stat = | ||
2891 | gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); | ||
2892 | if (fifo_engine_status_engine_v(gr_stat) == | ||
2893 | fifo_engine_status_engine_busy_v() && !wait_for_idle) | ||
2894 | return -EBUSY; | ||
2895 | |||
2896 | mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2897 | |||
2898 | gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( | ||
2899 | eng_info->runlist_id), RUNLIST_DISABLED, | ||
2900 | !RUNLIST_INFO_MUTEX_LOCKED); | ||
2901 | |||
2902 | /* chid from pbdma status */ | ||
2903 | pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id)); | ||
2904 | chan_stat = fifo_pbdma_status_chan_status_v(pbdma_stat); | ||
2905 | if (chan_stat == fifo_pbdma_status_chan_status_valid_v() || | ||
2906 | chan_stat == fifo_pbdma_status_chan_status_chsw_save_v()) | ||
2907 | pbdma_chid = fifo_pbdma_status_id_v(pbdma_stat); | ||
2908 | else if (chan_stat == fifo_pbdma_status_chan_status_chsw_load_v() || | ||
2909 | chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v()) | ||
2910 | pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat); | ||
2911 | |||
2912 | if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) { | ||
2913 | err = g->ops.fifo.preempt_channel(g, pbdma_chid); | ||
2914 | if (err) | ||
2915 | goto clean_up; | ||
2916 | } | ||
2917 | |||
2918 | /* chid from engine status */ | ||
2919 | eng_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); | ||
2920 | ctx_stat = fifo_engine_status_ctx_status_v(eng_stat); | ||
2921 | if (ctx_stat == fifo_engine_status_ctx_status_valid_v() || | ||
2922 | ctx_stat == fifo_engine_status_ctx_status_ctxsw_save_v()) | ||
2923 | engine_chid = fifo_engine_status_id_v(eng_stat); | ||
2924 | else if (ctx_stat == fifo_engine_status_ctx_status_ctxsw_load_v() || | ||
2925 | ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v()) | ||
2926 | engine_chid = fifo_engine_status_next_id_v(eng_stat); | ||
2927 | |||
2928 | if (engine_chid != FIFO_INVAL_CHANNEL_ID && engine_chid != pbdma_chid) { | ||
2929 | err = g->ops.fifo.preempt_channel(g, engine_chid); | ||
2930 | if (err) | ||
2931 | goto clean_up; | ||
2932 | } | ||
2933 | |||
2934 | clean_up: | ||
2935 | if (!mutex_ret) | ||
2936 | nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
2937 | |||
2938 | if (err) { | ||
2939 | gk20a_dbg_fn("failed"); | ||
2940 | if (gk20a_fifo_enable_engine_activity(g, eng_info)) | ||
2941 | nvgpu_err(g, | ||
2942 | "failed to enable gr engine activity"); | ||
2943 | } else { | ||
2944 | gk20a_dbg_fn("done"); | ||
2945 | } | ||
2946 | return err; | ||
2947 | } | ||
2948 | |||
2949 | int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, | ||
2950 | bool wait_for_idle) | ||
2951 | { | ||
2952 | unsigned int i; | ||
2953 | int err = 0, ret = 0; | ||
2954 | u32 active_engine_id; | ||
2955 | |||
2956 | for (i = 0; i < g->fifo.num_engines; i++) { | ||
2957 | active_engine_id = g->fifo.active_engines_list[i]; | ||
2958 | err = gk20a_fifo_disable_engine_activity(g, | ||
2959 | &g->fifo.engine_info[active_engine_id], | ||
2960 | wait_for_idle); | ||
2961 | if (err) { | ||
2962 | nvgpu_err(g, "failed to disable engine %d activity", | ||
2963 | active_engine_id); | ||
2964 | ret = err; | ||
2965 | break; | ||
2966 | } | ||
2967 | } | ||
2968 | |||
2969 | if (err) { | ||
2970 | while (i-- != 0) { | ||
2971 | active_engine_id = g->fifo.active_engines_list[i]; | ||
2972 | err = gk20a_fifo_enable_engine_activity(g, | ||
2973 | &g->fifo.engine_info[active_engine_id]); | ||
2974 | if (err) | ||
2975 | nvgpu_err(g, | ||
2976 | "failed to re-enable engine %d activity", | ||
2977 | active_engine_id); | ||
2978 | } | ||
2979 | } | ||
2980 | |||
2981 | return ret; | ||
2982 | } | ||
2983 | |||
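| /* | ||
| * Trigger recovery for every busy engine currently servicing this | ||
| * runlist; the faulting channel/TSG id is left for the recovery | ||
| * path to discover (hw_id = ~0). | ||
| */ | ||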
2984 | static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id) | ||
2985 | { | ||
2986 | struct fifo_gk20a *f = &g->fifo; | ||
2987 | u32 engines = 0; | ||
2988 | unsigned int i; | ||
2989 | |||
2990 | for (i = 0; i < f->num_engines; i++) { | ||
2991 | u32 active_engine_id = g->fifo.active_engines_list[i]; | ||
2992 | u32 status = gk20a_readl(g, fifo_engine_status_r(active_engine_id)); | ||
2993 | bool engine_busy = fifo_engine_status_engine_v(status) == | ||
2994 | fifo_engine_status_engine_busy_v(); | ||
2995 | |||
2996 | if (engine_busy && | ||
2997 | (f->engine_info[active_engine_id].runlist_id == runlist_id)) | ||
2998 | engines |= BIT(active_engine_id); | ||
2999 | } | ||
3000 | |||
3001 | if (engines) | ||
3002 | gk20a_fifo_recover(g, engines, ~(u32)0, false, false, true); | ||
3003 | } | ||
3004 | |||
3005 | static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id) | ||
3006 | { | ||
3007 | struct fifo_runlist_info_gk20a *runlist; | ||
3008 | struct nvgpu_timeout timeout; | ||
3009 | u32 delay = GR_IDLE_CHECK_DEFAULT; | ||
3010 | int ret = -ETIMEDOUT; | ||
3011 | |||
3012 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), | ||
3013 | NVGPU_TIMER_CPU_TIMER); | ||
3014 | |||
3015 | runlist = &g->fifo.runlist_info[runlist_id]; | ||
3016 | do { | ||
3017 | if ((gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) & | ||
3018 | fifo_eng_runlist_pending_true_f()) == 0) { | ||
3019 | ret = 0; | ||
3020 | break; | ||
3021 | } | ||
3022 | |||
3023 | nvgpu_usleep_range(delay, delay * 2); | ||
3024 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | ||
3025 | } while (!nvgpu_timeout_expired(&timeout)); | ||
3026 | |||
3027 | return ret; | ||
3028 | } | ||
3029 | |||
3030 | void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) | ||
3031 | { | ||
3033 | u32 runlist_entry_0 = ram_rl_entry_id_f(tsg->tsgid) | | ||
3034 | ram_rl_entry_type_tsg_f() | | ||
3035 | ram_rl_entry_tsg_length_f(tsg->num_active_channels); | ||
3036 | |||
3037 | if (tsg->timeslice_timeout) | ||
3038 | runlist_entry_0 |= | ||
3039 | ram_rl_entry_timeslice_scale_f(tsg->timeslice_scale) | | ||
3040 | ram_rl_entry_timeslice_timeout_f(tsg->timeslice_timeout); | ||
3041 | else | ||
3042 | runlist_entry_0 |= | ||
3043 | ram_rl_entry_timeslice_scale_f( | ||
3044 | NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE) | | ||
3045 | ram_rl_entry_timeslice_timeout_f( | ||
3046 | NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT); | ||
3047 | |||
3048 | runlist[0] = runlist_entry_0; | ||
3049 | runlist[1] = 0; | ||
3051 | } | ||
3052 | |||
3053 | u32 gk20a_fifo_default_timeslice_us(struct gk20a *g) | ||
3054 | { | ||
3055 | return (((u64)(NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT << | ||
3056 | NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE) * | ||
3057 | (u64)g->ptimer_src_freq) / | ||
3058 | (u64)PTIMER_REF_FREQ_HZ); | ||
3059 | } | ||
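/*
 * Worked example (assuming the usual gk20a defaults of
 * NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT = 128 and
 * NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE = 3): the raw count is
 * 128 << 3 = 1024, so at the reference PTIMER rate
 * (g->ptimer_src_freq == PTIMER_REF_FREQ_HZ) the default timeslice is
 * 1024 us; a ptimer running at half the reference rate halves that.
 */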
3060 | |||
3061 | void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist) | ||
3062 | { | ||
3063 | runlist[0] = ram_rl_entry_chid_f(ch->chid); | ||
3064 | runlist[1] = 0; | ||
3065 | } | ||
3066 | |||
3067 | /* recursively construct a runlist with interleaved bare channels and TSGs */ | ||
3068 | static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, | ||
3069 | struct fifo_runlist_info_gk20a *runlist, | ||
3070 | u32 cur_level, | ||
3071 | u32 *runlist_entry, | ||
3072 | bool interleave_enabled, | ||
3073 | bool prev_empty, | ||
3074 | u32 *entries_left) | ||
3075 | { | ||
3076 | bool last_level = cur_level == NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH; | ||
3077 | struct channel_gk20a *ch; | ||
3078 | bool skip_next = false; | ||
3079 | u32 chid, tsgid, count = 0; | ||
3080 | u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); | ||
3081 | |||
3082 | gk20a_dbg_fn(""); | ||
3083 | |||
3084 | /* for each bare channel, CH, on this level, insert all higher-level | ||
3085 | channels and TSGs before inserting CH. */ | ||
3086 | for_each_set_bit(chid, runlist->active_channels, f->num_channels) { | ||
3087 | ch = &f->channel[chid]; | ||
3088 | |||
3089 | if (ch->interleave_level != cur_level) | ||
3090 | continue; | ||
3091 | |||
3092 | if (gk20a_is_channel_marked_as_tsg(ch)) | ||
3093 | continue; | ||
3094 | |||
3095 | if (!last_level && !skip_next) { | ||
3096 | runlist_entry = gk20a_runlist_construct_locked(f, | ||
3097 | runlist, | ||
3098 | cur_level + 1, | ||
3099 | runlist_entry, | ||
3100 | interleave_enabled, | ||
3101 | false, | ||
3102 | entries_left); | ||
3103 | /* if interleaving is disabled, higher-level channels | ||
3104 | and TSGs only need to be inserted once */ | ||
3105 | if (!interleave_enabled) | ||
3106 | skip_next = true; | ||
3107 | } | ||
3108 | |||
3109 | if (!(*entries_left)) | ||
3110 | return NULL; | ||
3111 | |||
3112 | gk20a_dbg_info("add channel %d to runlist", chid); | ||
3113 | f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); | ||
3114 | gk20a_dbg_info("run list count %d runlist [0] %x [1] %x\n", | ||
3115 | count, runlist_entry[0], runlist_entry[1]); | ||
3116 | runlist_entry += runlist_entry_words; | ||
3117 | count++; | ||
3118 | (*entries_left)--; | ||
3119 | } | ||
3120 | |||
3121 | /* for each TSG, T, on this level, insert all higher-level channels | ||
3122 | and TSGs before inserting T. */ | ||
3123 | for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) { | ||
3124 | struct tsg_gk20a *tsg = &f->tsg[tsgid]; | ||
3125 | |||
3126 | if (tsg->interleave_level != cur_level) | ||
3127 | continue; | ||
3128 | |||
3129 | if (!last_level && !skip_next) { | ||
3130 | runlist_entry = gk20a_runlist_construct_locked(f, | ||
3131 | runlist, | ||
3132 | cur_level + 1, | ||
3133 | runlist_entry, | ||
3134 | interleave_enabled, | ||
3135 | false, | ||
3136 | entries_left); | ||
3137 | if (!interleave_enabled) | ||
3138 | skip_next = true; | ||
3139 | } | ||
3140 | |||
3141 | if (!(*entries_left)) | ||
3142 | return NULL; | ||
3143 | |||
3144 | /* add TSG entry */ | ||
3145 | gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid); | ||
3146 | f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry); | ||
3147 | gk20a_dbg_info("tsg runlist count %d runlist [0] %x [1] %x\n", | ||
3148 | count, runlist_entry[0], runlist_entry[1]); | ||
3149 | runlist_entry += runlist_entry_words; | ||
3150 | count++; | ||
3151 | (*entries_left)--; | ||
3152 | |||
3153 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | ||
3154 | /* add runnable channels bound to this TSG */ | ||
3155 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | ||
3156 | if (!test_bit(ch->chid, | ||
3157 | runlist->active_channels)) | ||
3158 | continue; | ||
3159 | |||
3160 | if (!(*entries_left)) { | ||
3161 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | ||
3162 | return NULL; | ||
3163 | } | ||
3164 | |||
3165 | gk20a_dbg_info("add channel %d to runlist", | ||
3166 | ch->chid); | ||
3167 | f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); | ||
3168 | gk20a_dbg_info( | ||
3169 | "run list count %d runlist [0] %x [1] %x\n", | ||
3170 | count, runlist_entry[0], runlist_entry[1]); | ||
3171 | count++; | ||
3172 | runlist_entry += runlist_entry_words; | ||
3173 | (*entries_left)--; | ||
3174 | } | ||
3175 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | ||
3176 | } | ||
3177 | |||
3178 | /* append entries from higher level if this level is empty */ | ||
3179 | if (!count && !last_level) | ||
3180 | runlist_entry = gk20a_runlist_construct_locked(f, | ||
3181 | runlist, | ||
3182 | cur_level + 1, | ||
3183 | runlist_entry, | ||
3184 | interleave_enabled, | ||
3185 | true, | ||
3186 | entries_left); | ||
3187 | |||
3188 | /* | ||
3189 | * if previous and this level have entries, append | ||
3190 | * entries from higher level. | ||
3191 | * | ||
3192 | * ex. dropping from MEDIUM to LOW, need to insert HIGH | ||
3193 | */ | ||
3194 | if (interleave_enabled && count && !prev_empty && !last_level) | ||
3195 | runlist_entry = gk20a_runlist_construct_locked(f, | ||
3196 | runlist, | ||
3197 | cur_level + 1, | ||
3198 | runlist_entry, | ||
3199 | interleave_enabled, | ||
3200 | false, | ||
3201 | entries_left); | ||
3202 | return runlist_entry; | ||
3203 | } | ||
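/*
 * Illustration (not part of the original source): with interleaving
 * enabled and runnable bare channels H1, H2 (HIGH), M1 (MEDIUM) and
 * L1 (LOW), the recursion starting at level LOW emits:
 *
 *	H1 H2 M1 H1 H2 L1
 *
 * i.e. every lower-level entry is preceded by a pass over all
 * higher-level entries, so HIGH work appears most often. With
 * interleaving disabled, higher levels are inserted exactly once:
 *
 *	H1 H2 M1 L1
 */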
3204 | |||
3205 | int gk20a_fifo_set_runlist_interleave(struct gk20a *g, | ||
3206 | u32 id, | ||
3207 | bool is_tsg, | ||
3208 | u32 runlist_id, | ||
3209 | u32 new_level) | ||
3210 | { | ||
3211 | gk20a_dbg_fn(""); | ||
3212 | |||
3213 | if (is_tsg) | ||
3214 | g->fifo.tsg[id].interleave_level = new_level; | ||
3215 | else | ||
3216 | g->fifo.channel[id].interleave_level = new_level; | ||
3217 | |||
3218 | return 0; | ||
3219 | } | ||
3220 | |||
3221 | int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) | ||
3222 | { | ||
3223 | struct gk20a *g = tsg->g; | ||
3224 | |||
3225 | if (timeslice < g->min_timeslice_us || | ||
3226 | timeslice > g->max_timeslice_us) | ||
3227 | return -EINVAL; | ||
3228 | |||
3229 | gk20a_channel_get_timescale_from_timeslice(g, timeslice, | ||
3230 | &tsg->timeslice_timeout, &tsg->timeslice_scale); | ||
3231 | |||
3232 | tsg->timeslice_us = timeslice; | ||
3233 | |||
3234 | return g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true); | ||
3235 | } | ||
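/*
 * Usage sketch (illustrative): request a 2 ms timeslice for a TSG,
 * assuming 2000 lies within [g->min_timeslice_us, g->max_timeslice_us]:
 *
 *	err = gk20a_fifo_tsg_set_timeslice(tsg, 2000);
 *
 * The new timeout/scale pair only reaches the hardware once the runlist
 * is rewritten, which is why the function ends with update_runlist().
 */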
3236 | |||
3237 | static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | ||
3238 | u32 chid, bool add, | ||
3239 | bool wait_for_finish) | ||
3240 | { | ||
3241 | int ret = 0; | ||
3242 | struct fifo_gk20a *f = &g->fifo; | ||
3243 | struct fifo_runlist_info_gk20a *runlist = NULL; | ||
3244 | u32 *runlist_entry_base = NULL; | ||
3245 | u64 runlist_iova; | ||
3246 | u32 old_buf, new_buf; | ||
3247 | struct channel_gk20a *ch = NULL; | ||
3248 | struct tsg_gk20a *tsg = NULL; | ||
3249 | u32 count = 0; | ||
3250 | u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); | ||
3251 | |||
3252 | runlist = &f->runlist_info[runlist_id]; | ||
3253 | |||
3254 | /* valid channel, add/remove it from active list. | ||
3255 | Otherwise, keep active list untouched for suspend/resume. */ | ||
3256 | if (chid != FIFO_INVAL_CHANNEL_ID) { | ||
3257 | ch = &f->channel[chid]; | ||
3258 | if (gk20a_is_channel_marked_as_tsg(ch)) | ||
3259 | tsg = &f->tsg[ch->tsgid]; | ||
3260 | |||
3261 | if (add) { | ||
3262 | if (test_and_set_bit(chid, | ||
3263 | runlist->active_channels) == 1) | ||
3264 | return 0; | ||
3265 | if (tsg && ++tsg->num_active_channels) | ||
3266 | set_bit(f->channel[chid].tsgid, | ||
3267 | runlist->active_tsgs); | ||
3268 | } else { | ||
3269 | if (test_and_clear_bit(chid, | ||
3270 | runlist->active_channels) == 0) | ||
3271 | return 0; | ||
3272 | if (tsg && --tsg->num_active_channels == 0) | ||
3273 | clear_bit(f->channel[chid].tsgid, | ||
3274 | runlist->active_tsgs); | ||
3275 | } | ||
3276 | } | ||
3277 | |||
3278 | old_buf = runlist->cur_buffer; | ||
3279 | new_buf = !runlist->cur_buffer; | ||
3280 | |||
3281 | runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); | ||
3282 | |||
3283 | 	gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%016llx", | ||
3284 | runlist_id, (u64)runlist_iova); | ||
3285 | |||
3286 | if (!runlist_iova) { | ||
3287 | ret = -EINVAL; | ||
3288 | goto clean_up; | ||
3289 | } | ||
3290 | |||
3291 | runlist_entry_base = runlist->mem[new_buf].cpu_va; | ||
3292 | if (!runlist_entry_base) { | ||
3293 | ret = -ENOMEM; | ||
3294 | goto clean_up; | ||
3295 | } | ||
3296 | |||
3297 | if (chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */ | ||
3298 | add /* resume to add all channels back */) { | ||
3299 | u32 max_entries = f->num_runlist_entries; | ||
3300 | u32 *runlist_end; | ||
3301 | |||
3302 | runlist_end = gk20a_runlist_construct_locked(f, | ||
3303 | runlist, | ||
3304 | 0, | ||
3305 | runlist_entry_base, | ||
3306 | g->runlist_interleave, | ||
3307 | true, | ||
3308 | &max_entries); | ||
3309 | if (!runlist_end) { | ||
3310 | ret = -E2BIG; | ||
3311 | goto clean_up; | ||
3312 | } | ||
3313 | count = (runlist_end - runlist_entry_base) / runlist_entry_words; | ||
3314 | WARN_ON(count > f->num_runlist_entries); | ||
3315 | } else /* suspend to remove all channels */ | ||
3316 | count = 0; | ||
3317 | |||
3318 | if (count != 0) { | ||
3319 | gk20a_writel(g, fifo_runlist_base_r(), | ||
3320 | fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >> 12)) | | ||
3321 | nvgpu_aperture_mask(g, &runlist->mem[new_buf], | ||
3322 | fifo_runlist_base_target_sys_mem_ncoh_f(), | ||
3323 | fifo_runlist_base_target_vid_mem_f())); | ||
3324 | } | ||
3325 | |||
3326 | gk20a_writel(g, fifo_runlist_r(), | ||
3327 | fifo_runlist_engine_f(runlist_id) | | ||
3328 | fifo_eng_runlist_length_f(count)); | ||
3329 | |||
3330 | if (wait_for_finish) { | ||
3331 | ret = gk20a_fifo_runlist_wait_pending(g, runlist_id); | ||
3332 | |||
3333 | if (ret == -ETIMEDOUT) { | ||
3334 | nvgpu_err(g, | ||
3335 | "runlist update timeout"); | ||
3336 | |||
3337 | gk20a_fifo_runlist_reset_engines(g, runlist_id); | ||
3338 | |||
3339 | /* engine reset needs the lock. drop it */ | ||
3340 | /* wait until the runlist is active again */ | ||
3341 | ret = gk20a_fifo_runlist_wait_pending(g, runlist_id); | ||
3342 | 			/* get the lock back. at this point everything | ||
3343 | 			 * should be fine */ | ||
3344 | |||
3345 | if (ret) | ||
3346 | nvgpu_err(g, | ||
3347 | "runlist update failed: %d", ret); | ||
3348 | } else if (ret == -EINTR) | ||
3349 | nvgpu_err(g, | ||
3350 | "runlist update interrupted"); | ||
3351 | } | ||
3352 | |||
3353 | runlist->cur_buffer = new_buf; | ||
3354 | |||
3355 | clean_up: | ||
3356 | return ret; | ||
3357 | } | ||
3358 | |||
3359 | int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid, | ||
3360 | bool add, bool wait_for_finish) | ||
3361 | { | ||
3362 | 	int ret = -EINVAL; | ||
3363 | 	u32 runlist_id = 0; | ||
3364 | 	int errcode; | ||
3365 | unsigned long ulong_runlist_ids = (unsigned long)runlist_ids; | ||
3366 | |||
3367 | if (!g) | ||
3368 | goto end; | ||
3369 | |||
3370 | ret = 0; | ||
3371 | for_each_set_bit(runlist_id, &ulong_runlist_ids, 32) { | ||
3372 | /* Capture the last failure error code */ | ||
3373 | errcode = g->ops.fifo.update_runlist(g, runlist_id, chid, add, wait_for_finish); | ||
3374 | if (errcode) { | ||
3375 | nvgpu_err(g, | ||
3376 | "failed to update_runlist %d %d", runlist_id, errcode); | ||
3377 | ret = errcode; | ||
3378 | } | ||
3379 | } | ||
3380 | end: | ||
3381 | return ret; | ||
3382 | } | ||
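/*
 * Usage sketch (illustrative): restore all active channels on runlists
 * 0 and 1, waiting for the hardware to take each new runlist:
 *
 *	err = gk20a_fifo_update_runlist_ids(g, BIT(0) | BIT(1),
 *			FIFO_INVAL_CHANNEL_ID, true, true);
 *
 * The mask is scanned across all 32 possible runlist IDs, and the error
 * code of the last failing update, if any, is returned.
 */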
3383 | |||
3384 | /* trigger host to expire current timeslice and reschedule runlist from front */ | ||
3385 | int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id) | ||
3386 | { | ||
3387 | struct fifo_runlist_info_gk20a *runlist; | ||
3388 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; | ||
3389 | u32 mutex_ret; | ||
3390 | int ret = 0; | ||
3391 | |||
3392 | runlist = &g->fifo.runlist_info[runlist_id]; | ||
3393 | if (nvgpu_mutex_tryacquire(&runlist->mutex)) { | ||
3394 | mutex_ret = nvgpu_pmu_mutex_acquire( | ||
3395 | &g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
3396 | |||
3397 | gk20a_writel(g, fifo_runlist_r(), | ||
3398 | gk20a_readl(g, fifo_runlist_r())); | ||
3399 | gk20a_fifo_runlist_wait_pending(g, runlist_id); | ||
3400 | |||
3401 | if (!mutex_ret) | ||
3402 | nvgpu_pmu_mutex_release( | ||
3403 | &g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
3404 | nvgpu_mutex_release(&runlist->mutex); | ||
3405 | } else { | ||
3406 | 		/* someone else is already rewriting fifo_runlist_r; no need to repeat it here */ | ||
3407 | ret = -EBUSY; | ||
3408 | } | ||
3409 | return ret; | ||
3410 | } | ||
3411 | |||
3412 | /* add/remove a channel from runlist | ||
3413 | special cases below: runlist->active_channels will NOT be changed. | ||
3414 | (chid == ~0 && !add) means remove all active channels from runlist. | ||
3415 | (chid == ~0 && add) means restore all active channels on runlist. */ | ||
3416 | int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid, | ||
3417 | bool add, bool wait_for_finish) | ||
3418 | { | ||
3419 | struct fifo_runlist_info_gk20a *runlist = NULL; | ||
3420 | struct fifo_gk20a *f = &g->fifo; | ||
3421 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; | ||
3422 | u32 mutex_ret; | ||
3423 | 	int ret = 0; | ||
3424 | |||
3425 | gk20a_dbg_fn(""); | ||
3426 | |||
3427 | runlist = &f->runlist_info[runlist_id]; | ||
3428 | |||
3429 | nvgpu_mutex_acquire(&runlist->mutex); | ||
3430 | |||
3431 | mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
3432 | |||
3433 | ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add, | ||
3434 | wait_for_finish); | ||
3435 | |||
3436 | if (!mutex_ret) | ||
3437 | nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | ||
3438 | |||
3439 | nvgpu_mutex_release(&runlist->mutex); | ||
3440 | return ret; | ||
3441 | } | ||
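/*
 * Examples (illustrative) of the special cases documented above, for
 * runlist 0:
 *
 *	-- suspend: drop every active channel from the runlist
 *	gk20a_fifo_update_runlist(g, 0, FIFO_INVAL_CHANNEL_ID, false, true);
 *
 *	-- resume: add them all back
 *	gk20a_fifo_update_runlist(g, 0, FIFO_INVAL_CHANNEL_ID, true, true);
 *
 * runlist->active_channels itself is left untouched in both cases.
 */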
3442 | |||
3443 | int gk20a_fifo_suspend(struct gk20a *g) | ||
3444 | { | ||
3445 | gk20a_dbg_fn(""); | ||
3446 | |||
3447 | /* stop bar1 snooping */ | ||
3448 | if (g->ops.mm.is_bar1_supported(g)) | ||
3449 | gk20a_writel(g, fifo_bar1_base_r(), | ||
3450 | fifo_bar1_base_valid_false_f()); | ||
3451 | |||
3452 | /* disable fifo intr */ | ||
3453 | gk20a_writel(g, fifo_intr_en_0_r(), 0); | ||
3454 | gk20a_writel(g, fifo_intr_en_1_r(), 0); | ||
3455 | |||
3456 | gk20a_dbg_fn("done"); | ||
3457 | return 0; | ||
3458 | } | ||
3459 | |||
3460 | bool gk20a_fifo_mmu_fault_pending(struct gk20a *g) | ||
3461 | { | ||
3462 | if (gk20a_readl(g, fifo_intr_0_r()) & | ||
3463 | fifo_intr_0_mmu_fault_pending_f()) | ||
3464 | return true; | ||
3465 | else | ||
3466 | return false; | ||
3467 | } | ||
3468 | |||
3469 | bool gk20a_fifo_is_engine_busy(struct gk20a *g) | ||
3470 | { | ||
3471 | u32 i, host_num_engines; | ||
3472 | |||
3473 | host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); | ||
3474 | |||
3475 | for (i = 0; i < host_num_engines; i++) { | ||
3476 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); | ||
3477 | if (fifo_engine_status_engine_v(status) == | ||
3478 | fifo_engine_status_engine_busy_v()) | ||
3479 | return true; | ||
3480 | } | ||
3481 | return false; | ||
3482 | } | ||
3483 | |||
3484 | int gk20a_fifo_wait_engine_idle(struct gk20a *g) | ||
3485 | { | ||
3486 | struct nvgpu_timeout timeout; | ||
3487 | unsigned long delay = GR_IDLE_CHECK_DEFAULT; | ||
3488 | int ret = -ETIMEDOUT; | ||
3489 | u32 i, host_num_engines; | ||
3490 | |||
3491 | gk20a_dbg_fn(""); | ||
3492 | |||
3493 | host_num_engines = | ||
3494 | nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); | ||
3495 | |||
3496 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), | ||
3497 | NVGPU_TIMER_CPU_TIMER); | ||
3498 | |||
3499 | for (i = 0; i < host_num_engines; i++) { | ||
3500 | 		ret = -ETIMEDOUT; /* reset per engine, so a busy engine | ||
3500 | 				   * after an idle one is still caught */ | ||
3500 | 		do { | ||
3501 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); | ||
3502 | if (!fifo_engine_status_engine_v(status)) { | ||
3503 | ret = 0; | ||
3504 | break; | ||
3505 | } | ||
3506 | |||
3507 | nvgpu_usleep_range(delay, delay * 2); | ||
3508 | delay = min_t(unsigned long, | ||
3509 | delay << 1, GR_IDLE_CHECK_MAX); | ||
3510 | } while (!nvgpu_timeout_expired(&timeout)); | ||
3511 | |||
3512 | if (ret) { | ||
3513 | gk20a_dbg_info("cannot idle engine %u", i); | ||
3514 | break; | ||
3515 | } | ||
3516 | } | ||
3517 | |||
3518 | gk20a_dbg_fn("done"); | ||
3519 | |||
3520 | return ret; | ||
3521 | } | ||
3522 | |||
3523 | u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g) | ||
3524 | { | ||
3525 | return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f(); | ||
3526 | } | ||
3527 | |||
3528 | struct channel_gk20a *gk20a_fifo_channel_from_chid(struct gk20a *g, | ||
3529 | u32 chid) | ||
3530 | { | ||
3531 | if (chid != FIFO_INVAL_CHANNEL_ID) | ||
3532 | return g->fifo.channel + chid; | ||
3533 | else | ||
3534 | return NULL; | ||
3535 | } | ||
3536 | |||
3537 | static const char * const ccsr_chan_status_str[] = { | ||
3538 | "idle", | ||
3539 | "pending", | ||
3540 | "pending_ctx_reload", | ||
3541 | "pending_acquire", | ||
3542 | "pending_acq_ctx_reload", | ||
3543 | "on_pbdma", | ||
3544 | "on_pbdma_and_eng", | ||
3545 | "on_eng", | ||
3546 | "on_eng_pending_acquire", | ||
3547 | "on_eng_pending", | ||
3548 | "on_pbdma_ctx_reload", | ||
3549 | "on_pbdma_and_eng_ctx_reload", | ||
3550 | "on_eng_ctx_reload", | ||
3551 | "on_eng_pending_ctx_reload", | ||
3552 | "on_eng_pending_acq_ctx_reload", | ||
3553 | }; | ||
3554 | |||
3555 | static const char * const pbdma_chan_eng_ctx_status_str[] = { | ||
3556 | "invalid", | ||
3557 | "valid", | ||
3558 | "NA", | ||
3559 | "NA", | ||
3560 | "NA", | ||
3561 | "load", | ||
3562 | "save", | ||
3563 | "switch", | ||
3564 | }; | ||
3565 | |||
3566 | static const char * const not_found_str[] = { | ||
3567 | "NOT FOUND" | ||
3568 | }; | ||
3569 | |||
3570 | const char *gk20a_decode_ccsr_chan_status(u32 index) | ||
3571 | { | ||
3572 | if (index >= ARRAY_SIZE(ccsr_chan_status_str)) | ||
3573 | return not_found_str[0]; | ||
3574 | else | ||
3575 | return ccsr_chan_status_str[index]; | ||
3576 | } | ||
3577 | |||
3578 | const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index) | ||
3579 | { | ||
3580 | if (index >= ARRAY_SIZE(pbdma_chan_eng_ctx_status_str)) | ||
3581 | return not_found_str[0]; | ||
3582 | else | ||
3583 | return pbdma_chan_eng_ctx_status_str[index]; | ||
3584 | } | ||
3585 | |||
3586 | bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid) | ||
3587 | { | ||
3588 | u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); | ||
3589 | |||
3590 | return ccsr_channel_next_v(channel) == ccsr_channel_next_true_v(); | ||
3591 | } | ||
3592 | |||
3593 | bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid) | ||
3594 | { | ||
3595 | u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); | ||
3596 | u32 status = ccsr_channel_status_v(channel); | ||
3597 | |||
3598 | return (status == ccsr_channel_status_pending_ctx_reload_v() || | ||
3599 | status == ccsr_channel_status_pending_acq_ctx_reload_v() || | ||
3600 | status == ccsr_channel_status_on_pbdma_ctx_reload_v() || | ||
3601 | status == ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v() || | ||
3602 | status == ccsr_channel_status_on_eng_ctx_reload_v() || | ||
3603 | status == ccsr_channel_status_on_eng_pending_ctx_reload_v() || | ||
3604 | status == ccsr_channel_status_on_eng_pending_acq_ctx_reload_v()); | ||
3605 | } | ||
3606 | |||
3607 | void gk20a_dump_channel_status_ramfc(struct gk20a *g, | ||
3608 | struct gk20a_debug_output *o, | ||
3609 | u32 chid, | ||
3610 | struct ch_state *ch_state) | ||
3611 | { | ||
3612 | u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); | ||
3613 | u32 status = ccsr_channel_status_v(channel); | ||
3614 | u32 syncpointa, syncpointb; | ||
3615 | u32 *inst_mem; | ||
3616 | struct channel_gk20a *c = g->fifo.channel + chid; | ||
3617 | struct nvgpu_semaphore_int *hw_sema = NULL; | ||
3618 | |||
3619 | if (c->hw_sema) | ||
3620 | hw_sema = c->hw_sema; | ||
3621 | |||
3622 | if (!ch_state) | ||
3623 | return; | ||
3624 | |||
3625 | inst_mem = &ch_state->inst_block[0]; | ||
3626 | |||
3627 | syncpointa = inst_mem[ram_fc_syncpointa_w()]; | ||
3628 | syncpointb = inst_mem[ram_fc_syncpointb_w()]; | ||
3629 | |||
3630 | gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", chid, | ||
3631 | g->name, | ||
3632 | ch_state->pid, | ||
3633 | ch_state->refs, | ||
3634 | ch_state->deterministic ? ", deterministic" : ""); | ||
3635 | gk20a_debug_output(o, "channel status: %s in use %s %s\n", | ||
3636 | ccsr_channel_enable_v(channel) ? "" : "not", | ||
3637 | gk20a_decode_ccsr_chan_status(status), | ||
3638 | ccsr_channel_busy_v(channel) ? "busy" : "not busy"); | ||
3639 | gk20a_debug_output(o, "RAMFC : TOP: %016llx PUT: %016llx GET: %016llx " | ||
3640 | "FETCH: %016llx\nHEADER: %08x COUNT: %08x\n" | ||
3641 | "SYNCPOINT %08x %08x SEMAPHORE %08x %08x %08x %08x\n", | ||
3642 | (u64)inst_mem[ram_fc_pb_top_level_get_w()] + | ||
3643 | ((u64)inst_mem[ram_fc_pb_top_level_get_hi_w()] << 32ULL), | ||
3644 | (u64)inst_mem[ram_fc_pb_put_w()] + | ||
3645 | ((u64)inst_mem[ram_fc_pb_put_hi_w()] << 32ULL), | ||
3646 | (u64)inst_mem[ram_fc_pb_get_w()] + | ||
3647 | ((u64)inst_mem[ram_fc_pb_get_hi_w()] << 32ULL), | ||
3648 | (u64)inst_mem[ram_fc_pb_fetch_w()] + | ||
3649 | ((u64)inst_mem[ram_fc_pb_fetch_hi_w()] << 32ULL), | ||
3650 | inst_mem[ram_fc_pb_header_w()], | ||
3651 | inst_mem[ram_fc_pb_count_w()], | ||
3652 | syncpointa, | ||
3653 | syncpointb, | ||
3654 | inst_mem[ram_fc_semaphorea_w()], | ||
3655 | inst_mem[ram_fc_semaphoreb_w()], | ||
3656 | inst_mem[ram_fc_semaphorec_w()], | ||
3657 | inst_mem[ram_fc_semaphored_w()]); | ||
3658 | if (hw_sema) | ||
3659 | gk20a_debug_output(o, "SEMA STATE: value: 0x%08x " | ||
3660 | "next_val: 0x%08x addr: 0x%010llx\n", | ||
3661 | __nvgpu_semaphore_read(hw_sema), | ||
3662 | nvgpu_atomic_read(&hw_sema->next_value), | ||
3663 | nvgpu_hw_sema_addr(hw_sema)); | ||
3664 | |||
3665 | #ifdef CONFIG_TEGRA_GK20A_NVHOST | ||
3666 | if ((pbdma_syncpointb_op_v(syncpointb) == pbdma_syncpointb_op_wait_v()) | ||
3667 | && (pbdma_syncpointb_wait_switch_v(syncpointb) == | ||
3668 | pbdma_syncpointb_wait_switch_en_v())) | ||
3669 | gk20a_debug_output(o, "%s on syncpt %u (%s) val %u\n", | ||
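			/* 3: pending_acquire, 8: on_eng_pending_acquire */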
3670 | (status == 3 || status == 8) ? "Waiting" : "Waited", | ||
3671 | pbdma_syncpointb_syncpt_index_v(syncpointb), | ||
3672 | nvgpu_nvhost_syncpt_get_name(g->nvhost_dev, | ||
3673 | pbdma_syncpointb_syncpt_index_v(syncpointb)), | ||
3674 | pbdma_syncpointa_payload_v(syncpointa)); | ||
3675 | #endif | ||
3676 | |||
3677 | gk20a_debug_output(o, "\n"); | ||
3678 | } | ||
3679 | |||
3680 | void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g, | ||
3681 | struct gk20a_debug_output *o) | ||
3682 | { | ||
3683 | struct fifo_gk20a *f = &g->fifo; | ||
3684 | u32 chid; | ||
3685 | struct ch_state **ch_state; | ||
3686 | |||
3687 | ch_state = nvgpu_kzalloc(g, sizeof(*ch_state) * f->num_channels); | ||
3688 | if (!ch_state) { | ||
3689 | gk20a_debug_output(o, "cannot alloc memory for channels\n"); | ||
3690 | return; | ||
3691 | } | ||
3692 | |||
3693 | for (chid = 0; chid < f->num_channels; chid++) { | ||
3694 | struct channel_gk20a *ch = &f->channel[chid]; | ||
3695 | if (gk20a_channel_get(ch)) { | ||
3696 | ch_state[chid] = | ||
3697 | nvgpu_kmalloc(g, sizeof(struct ch_state) + | ||
3698 | ram_in_alloc_size_v()); | ||
3699 | 			/* the ref taken here is held until the loops | ||
3700 | 			 * below, for channels whose alloc succeeded */ | ||
3701 | if (!ch_state[chid]) | ||
3702 | gk20a_channel_put(ch); | ||
3703 | } | ||
3704 | } | ||
3705 | |||
3706 | for (chid = 0; chid < f->num_channels; chid++) { | ||
3707 | struct channel_gk20a *ch = &f->channel[chid]; | ||
3708 | if (!ch_state[chid]) | ||
3709 | continue; | ||
3710 | |||
3711 | ch_state[chid]->pid = ch->pid; | ||
3712 | ch_state[chid]->refs = nvgpu_atomic_read(&ch->ref_count); | ||
3713 | ch_state[chid]->deterministic = ch->deterministic; | ||
3714 | nvgpu_mem_rd_n(g, &ch->inst_block, 0, | ||
3715 | &ch_state[chid]->inst_block[0], | ||
3716 | ram_in_alloc_size_v()); | ||
3717 | gk20a_channel_put(ch); | ||
3718 | } | ||
3719 | for (chid = 0; chid < f->num_channels; chid++) { | ||
3720 | if (ch_state[chid]) { | ||
3721 | g->ops.fifo.dump_channel_status_ramfc(g, o, chid, | ||
3722 | ch_state[chid]); | ||
3723 | nvgpu_kfree(g, ch_state[chid]); | ||
3724 | } | ||
3725 | } | ||
3726 | nvgpu_kfree(g, ch_state); | ||
3727 | } | ||
3728 | |||
3729 | void gk20a_dump_pbdma_status(struct gk20a *g, | ||
3730 | struct gk20a_debug_output *o) | ||
3731 | { | ||
3732 | u32 i, host_num_pbdma; | ||
3733 | |||
3734 | host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); | ||
3735 | |||
3736 | for (i = 0; i < host_num_pbdma; i++) { | ||
3737 | u32 status = gk20a_readl(g, fifo_pbdma_status_r(i)); | ||
3738 | u32 chan_status = fifo_pbdma_status_chan_status_v(status); | ||
3739 | |||
3740 | gk20a_debug_output(o, "%s pbdma %d: ", g->name, i); | ||
3741 | gk20a_debug_output(o, | ||
3742 | "id: %d (%s), next_id: %d (%s) chan status: %s\n", | ||
3743 | fifo_pbdma_status_id_v(status), | ||
3744 | fifo_pbdma_status_id_type_v(status) ? | ||
3745 | "tsg" : "channel", | ||
3746 | fifo_pbdma_status_next_id_v(status), | ||
3747 | fifo_pbdma_status_next_id_type_v(status) ? | ||
3748 | "tsg" : "channel", | ||
3749 | gk20a_decode_pbdma_chan_eng_ctx_status(chan_status)); | ||
3750 | gk20a_debug_output(o, "PBDMA_PUT: %016llx PBDMA_GET: %016llx " | ||
3751 | "GP_PUT: %08x GP_GET: %08x " | ||
3752 | "FETCH: %08x HEADER: %08x\n" | ||
3753 | "HDR: %08x SHADOW0: %08x SHADOW1: %08x", | ||
3754 | (u64)gk20a_readl(g, pbdma_put_r(i)) + | ||
3755 | ((u64)gk20a_readl(g, pbdma_put_hi_r(i)) << 32ULL), | ||
3756 | (u64)gk20a_readl(g, pbdma_get_r(i)) + | ||
3757 | ((u64)gk20a_readl(g, pbdma_get_hi_r(i)) << 32ULL), | ||
3758 | gk20a_readl(g, pbdma_gp_put_r(i)), | ||
3759 | gk20a_readl(g, pbdma_gp_get_r(i)), | ||
3760 | gk20a_readl(g, pbdma_gp_fetch_r(i)), | ||
3761 | gk20a_readl(g, pbdma_pb_header_r(i)), | ||
3762 | gk20a_readl(g, pbdma_hdr_shadow_r(i)), | ||
3763 | gk20a_readl(g, pbdma_gp_shadow_0_r(i)), | ||
3764 | gk20a_readl(g, pbdma_gp_shadow_1_r(i))); | ||
3765 | } | ||
3766 | gk20a_debug_output(o, "\n"); | ||
3767 | } | ||
3768 | |||
3769 | void gk20a_dump_eng_status(struct gk20a *g, | ||
3770 | struct gk20a_debug_output *o) | ||
3771 | { | ||
3772 | u32 i, host_num_engines; | ||
3773 | |||
3774 | host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); | ||
3775 | |||
3776 | for (i = 0; i < host_num_engines; i++) { | ||
3777 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); | ||
3778 | u32 ctx_status = fifo_engine_status_ctx_status_v(status); | ||
3779 | |||
3780 | gk20a_debug_output(o, "%s eng %d: ", g->name, i); | ||
3781 | gk20a_debug_output(o, | ||
3782 | "id: %d (%s), next_id: %d (%s), ctx status: %s ", | ||
3783 | fifo_engine_status_id_v(status), | ||
3784 | fifo_engine_status_id_type_v(status) ? | ||
3785 | "tsg" : "channel", | ||
3786 | fifo_engine_status_next_id_v(status), | ||
3787 | fifo_engine_status_next_id_type_v(status) ? | ||
3788 | "tsg" : "channel", | ||
3789 | gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status)); | ||
3790 | |||
3791 | if (fifo_engine_status_faulted_v(status)) | ||
3792 | gk20a_debug_output(o, "faulted "); | ||
3793 | if (fifo_engine_status_engine_v(status)) | ||
3794 | gk20a_debug_output(o, "busy "); | ||
3795 | gk20a_debug_output(o, "\n"); | ||
3796 | } | ||
3797 | gk20a_debug_output(o, "\n"); | ||
3798 | } | ||
3799 | |||
3800 | void gk20a_fifo_enable_channel(struct channel_gk20a *ch) | ||
3801 | { | ||
3802 | gk20a_writel(ch->g, ccsr_channel_r(ch->chid), | ||
3803 | gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) | | ||
3804 | ccsr_channel_enable_set_true_f()); | ||
3805 | } | ||
3806 | |||
3807 | void gk20a_fifo_disable_channel(struct channel_gk20a *ch) | ||
3808 | { | ||
3809 | gk20a_writel(ch->g, ccsr_channel_r(ch->chid), | ||
3810 | gk20a_readl(ch->g, | ||
3811 | ccsr_channel_r(ch->chid)) | | ||
3812 | ccsr_channel_enable_clr_true_f()); | ||
3813 | } | ||
3814 | |||
3815 | void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a) | ||
3816 | { | ||
3817 | struct gk20a *g = ch_gk20a->g; | ||
3818 | |||
3819 | gk20a_dbg_fn(""); | ||
3820 | |||
3821 | if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { | ||
3822 | gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), | ||
3823 | ccsr_channel_inst_ptr_f(0) | | ||
3824 | ccsr_channel_inst_bind_false_f()); | ||
3825 | } | ||
3826 | } | ||
3827 | |||
3828 | static int gk20a_fifo_commit_userd(struct channel_gk20a *c) | ||
3829 | { | ||
3830 | u32 addr_lo; | ||
3831 | u32 addr_hi; | ||
3832 | struct gk20a *g = c->g; | ||
3833 | |||
3834 | gk20a_dbg_fn(""); | ||
3835 | |||
3836 | addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); | ||
3837 | addr_hi = u64_hi32(c->userd_iova); | ||
3838 | |||
3839 | 	gk20a_dbg_info("channel %d : set ramfc userd 0x%016llx", | ||
3840 | c->chid, (u64)c->userd_iova); | ||
3841 | |||
3842 | nvgpu_mem_wr32(g, &c->inst_block, | ||
3843 | ram_in_ramfc_w() + ram_fc_userd_w(), | ||
3844 | nvgpu_aperture_mask(g, &g->fifo.userd, | ||
3845 | pbdma_userd_target_sys_mem_ncoh_f(), | ||
3846 | pbdma_userd_target_vid_mem_f()) | | ||
3847 | pbdma_userd_addr_f(addr_lo)); | ||
3848 | |||
3849 | nvgpu_mem_wr32(g, &c->inst_block, | ||
3850 | ram_in_ramfc_w() + ram_fc_userd_hi_w(), | ||
3851 | pbdma_userd_hi_addr_f(addr_hi)); | ||
3852 | |||
3853 | return 0; | ||
3854 | } | ||
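/*
 * Worked example (assuming ram_userd_base_shift_v() == 9, i.e. 512-byte
 * USERD alignment): for c->userd_iova = 0x123456200 the words written
 * above carry
 *
 *	addr_lo = 0x123456200 >> 9 = 0x91a2b1  (base in 512-byte units)
 *	addr_hi = u64_hi32(0x123456200) = 0x1  (raw upper 32 bits)
 */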
3855 | |||
3856 | int gk20a_fifo_setup_ramfc(struct channel_gk20a *c, | ||
3857 | u64 gpfifo_base, u32 gpfifo_entries, | ||
3858 | unsigned long timeout, | ||
3859 | u32 flags) | ||
3860 | { | ||
3861 | struct gk20a *g = c->g; | ||
3862 | struct nvgpu_mem *mem = &c->inst_block; | ||
3863 | |||
3864 | gk20a_dbg_fn(""); | ||
3865 | |||
3866 | nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); | ||
3867 | |||
3868 | nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(), | ||
3869 | pbdma_gp_base_offset_f( | ||
3870 | u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s()))); | ||
3871 | |||
3872 | nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(), | ||
3873 | pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) | | ||
3874 | pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries))); | ||
3875 | |||
3876 | nvgpu_mem_wr32(g, mem, ram_fc_signature_w(), | ||
3877 | c->g->ops.fifo.get_pbdma_signature(c->g)); | ||
3878 | |||
3879 | nvgpu_mem_wr32(g, mem, ram_fc_formats_w(), | ||
3880 | pbdma_formats_gp_fermi0_f() | | ||
3881 | pbdma_formats_pb_fermi1_f() | | ||
3882 | pbdma_formats_mp_fermi0_f()); | ||
3883 | |||
3884 | nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(), | ||
3885 | pbdma_pb_header_priv_user_f() | | ||
3886 | pbdma_pb_header_method_zero_f() | | ||
3887 | pbdma_pb_header_subchannel_zero_f() | | ||
3888 | pbdma_pb_header_level_main_f() | | ||
3889 | pbdma_pb_header_first_true_f() | | ||
3890 | pbdma_pb_header_type_inc_f()); | ||
3891 | |||
3892 | nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(), | ||
3893 | pbdma_subdevice_id_f(1) | | ||
3894 | pbdma_subdevice_status_active_f() | | ||
3895 | pbdma_subdevice_channel_dma_enable_f()); | ||
3896 | |||
3897 | nvgpu_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f()); | ||
3898 | |||
3899 | nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(), | ||
3900 | g->ops.fifo.pbdma_acquire_val(timeout)); | ||
3901 | |||
3902 | nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(), | ||
3903 | fifo_runlist_timeslice_timeout_128_f() | | ||
3904 | fifo_runlist_timeslice_timescale_3_f() | | ||
3905 | fifo_runlist_timeslice_enable_true_f()); | ||
3906 | |||
3907 | nvgpu_mem_wr32(g, mem, ram_fc_pb_timeslice_w(), | ||
3908 | fifo_pb_timeslice_timeout_16_f() | | ||
3909 | fifo_pb_timeslice_timescale_0_f() | | ||
3910 | fifo_pb_timeslice_enable_true_f()); | ||
3911 | |||
3912 | nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid)); | ||
3913 | |||
3914 | if (c->is_privileged_channel) | ||
3915 | gk20a_fifo_setup_ramfc_for_privileged_channel(c); | ||
3916 | |||
3917 | return gk20a_fifo_commit_userd(c); | ||
3918 | } | ||
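/*
 * Note (illustrative): pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries))
 * encodes the GPFIFO length as a power of two, so gpfifo_entries is
 * expected to be a power of two; e.g. 1024 entries gives limit2 = 10
 * and the PBDMA wraps GP_GET/GP_PUT modulo 2^10.
 */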
3919 | |||
3920 | static int channel_gk20a_set_schedule_params(struct channel_gk20a *c) | ||
3921 | { | ||
3922 | int shift = 0, value = 0; | ||
3923 | |||
3924 | gk20a_channel_get_timescale_from_timeslice(c->g, | ||
3925 | c->timeslice_us, &value, &shift); | ||
3926 | |||
3927 | /* disable channel */ | ||
3928 | c->g->ops.fifo.disable_channel(c); | ||
3929 | |||
3930 | /* preempt the channel */ | ||
3931 | WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->chid)); | ||
3932 | |||
3933 | /* set new timeslice */ | ||
3934 | nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(), | ||
3935 | value | (shift << 12) | | ||
3936 | fifo_runlist_timeslice_enable_true_f()); | ||
3937 | |||
3938 | /* enable channel */ | ||
3939 | c->g->ops.fifo.enable_channel(c); | ||
3940 | |||
3941 | return 0; | ||
3942 | } | ||
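/*
 * Encoding sketch (assuming the gk20a RAMFC runlist-timeslice layout):
 * the timeout mantissa sits in the low bits and the timescale at bit 12,
 * so value = 128, shift = 3 writes
 *
 *	128 | (3 << 12) | fifo_runlist_timeslice_enable_true_f()
 *
 * which matches the timeout_128/timescale_3 default programmed in
 * gk20a_fifo_setup_ramfc() above.
 */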
3943 | |||
3944 | int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice) | ||
3945 | { | ||
3946 | struct gk20a *g = ch->g; | ||
3947 | |||
3948 | if (gk20a_is_channel_marked_as_tsg(ch)) { | ||
3949 | nvgpu_err(g, "invalid operation for TSG!"); | ||
3950 | return -EINVAL; | ||
3951 | } | ||
3952 | |||
3953 | if (timeslice < g->min_timeslice_us || | ||
3954 | timeslice > g->max_timeslice_us) | ||
3955 | return -EINVAL; | ||
3956 | |||
3957 | ch->timeslice_us = timeslice; | ||
3958 | |||
3959 | gk20a_dbg(gpu_dbg_sched, "chid=%u timeslice=%u us", | ||
3960 | ch->chid, timeslice); | ||
3961 | |||
3962 | return channel_gk20a_set_schedule_params(ch); | ||
3963 | } | ||
3964 | |||
3965 | void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c) | ||
3966 | { | ||
3967 | struct gk20a *g = c->g; | ||
3968 | struct nvgpu_mem *mem = &c->inst_block; | ||
3969 | |||
3970 | gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid); | ||
3971 | |||
3972 | /* Enable HCE priv mode for phys mode transfer */ | ||
3973 | nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), | ||
3974 | pbdma_hce_ctrl_hce_priv_mode_yes_f()); | ||
3975 | } | ||
3976 | |||
3977 | int gk20a_fifo_setup_userd(struct channel_gk20a *c) | ||
3978 | { | ||
3979 | struct gk20a *g = c->g; | ||
3980 | struct nvgpu_mem *mem = &g->fifo.userd; | ||
3981 | u32 offset = c->chid * g->fifo.userd_entry_size / sizeof(u32); | ||
3982 | |||
3983 | gk20a_dbg_fn(""); | ||
3984 | |||
3985 | nvgpu_mem_wr32(g, mem, offset + ram_userd_put_w(), 0); | ||
3986 | nvgpu_mem_wr32(g, mem, offset + ram_userd_get_w(), 0); | ||
3987 | nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_w(), 0); | ||
3988 | nvgpu_mem_wr32(g, mem, offset + ram_userd_put_hi_w(), 0); | ||
3989 | nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_threshold_w(), 0); | ||
3990 | nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_w(), 0); | ||
3991 | nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_hi_w(), 0); | ||
3992 | nvgpu_mem_wr32(g, mem, offset + ram_userd_get_hi_w(), 0); | ||
3993 | nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_get_w(), 0); | ||
3994 | nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0); | ||
3995 | |||
3996 | return 0; | ||
3997 | } | ||
3998 | |||
3999 | int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) | ||
4000 | { | ||
4001 | int err; | ||
4002 | |||
4003 | gk20a_dbg_fn(""); | ||
4004 | |||
4005 | err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); | ||
4006 | if (err) | ||
4007 | return err; | ||
4008 | |||
4009 | 	gk20a_dbg_info("channel %d inst block physical addr: 0x%016llx", | ||
4010 | ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); | ||
4011 | |||
4012 | gk20a_dbg_fn("done"); | ||
4013 | return 0; | ||
4014 | } | ||
4015 | |||
4016 | void gk20a_fifo_free_inst(struct gk20a *g, struct channel_gk20a *ch) | ||
4017 | { | ||
4018 | nvgpu_free_inst_block(g, &ch->inst_block); | ||
4019 | } | ||
4020 | |||
4021 | u32 gk20a_fifo_userd_gp_get(struct gk20a *g, struct channel_gk20a *c) | ||
4022 | { | ||
4023 | return gk20a_bar1_readl(g, | ||
4024 | c->userd_gpu_va + sizeof(u32) * ram_userd_gp_get_w()); | ||
4025 | } | ||
4026 | |||
4027 | u64 gk20a_fifo_userd_pb_get(struct gk20a *g, struct channel_gk20a *c) | ||
4028 | { | ||
4029 | u32 lo = gk20a_bar1_readl(g, | ||
4030 | c->userd_gpu_va + sizeof(u32) * ram_userd_get_w()); | ||
4031 | u32 hi = gk20a_bar1_readl(g, | ||
4032 | c->userd_gpu_va + sizeof(u32) * ram_userd_get_hi_w()); | ||
4033 | |||
4034 | return ((u64)hi << 32) | lo; | ||
4035 | } | ||
4036 | |||
4037 | void gk20a_fifo_userd_gp_put(struct gk20a *g, struct channel_gk20a *c) | ||
4038 | { | ||
4039 | gk20a_bar1_writel(g, | ||
4040 | c->userd_gpu_va + sizeof(u32) * ram_userd_gp_put_w(), | ||
4041 | c->gpfifo.put); | ||
4042 | } | ||
4043 | |||
4044 | u32 gk20a_fifo_pbdma_acquire_val(u64 timeout) | ||
4045 | { | ||
4046 | u32 val, exp, man; | ||
4047 | unsigned int val_len; | ||
4048 | |||
4049 | val = pbdma_acquire_retry_man_2_f() | | ||
4050 | pbdma_acquire_retry_exp_2_f(); | ||
4051 | |||
4052 | if (!timeout) | ||
4053 | return val; | ||
4054 | |||
4055 | timeout *= 80UL; | ||
4056 | do_div(timeout, 100); /* set acquire timeout to 80% of channel wdt */ | ||
4057 | timeout *= 1000000UL; /* ms -> ns */ | ||
4058 | do_div(timeout, 1024); /* in unit of 1024ns */ | ||
4059 | val_len = fls(timeout >> 32) + 32; | ||
4060 | if (val_len == 32) | ||
4061 | val_len = fls(timeout); | ||
4062 | if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */ | ||
4063 | exp = pbdma_acquire_timeout_exp_max_v(); | ||
4064 | man = pbdma_acquire_timeout_man_max_v(); | ||
4065 | } else if (val_len > 16) { | ||
4066 | exp = val_len - 16; | ||
4067 | man = timeout >> exp; | ||
4068 | } else { | ||
4069 | exp = 0; | ||
4070 | man = timeout; | ||
4071 | } | ||
4072 | |||
4073 | val |= pbdma_acquire_timeout_exp_f(exp) | | ||
4074 | pbdma_acquire_timeout_man_f(man) | | ||
4075 | pbdma_acquire_timeout_en_enable_f(); | ||
4076 | |||
4077 | return val; | ||
4078 | } | ||
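/*
 * Worked example (illustrative): for a 3000 ms channel watchdog,
 * timeout becomes 3000 * 80% = 2400 ms = 2.4e9 ns, i.e. 2343750 units
 * of 1024 ns. fls() then gives val_len = 22, so exp = 22 - 16 = 6 and
 * man = 2343750 >> 6 = 36621; the PBDMA waits roughly
 * 36621 * 2^6 * 1024 ns ~= 2.4 s before timing out the acquire.
 */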
4079 | |||
4080 | const char *gk20a_fifo_interleave_level_name(u32 interleave_level) | ||
4081 | { | ||
4082 | switch (interleave_level) { | ||
4083 | case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: | ||
4084 | return "LOW"; | ||
4085 | |||
4086 | case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM: | ||
4087 | return "MEDIUM"; | ||
4088 | |||
4089 | case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH: | ||
4090 | return "HIGH"; | ||
4091 | |||
4092 | default: | ||
4093 | return "?"; | ||
4094 | } | ||
4095 | } | ||
4096 | |||
4097 | #ifdef CONFIG_TEGRA_GK20A_NVHOST | ||
4098 | void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g, | ||
4099 | struct priv_cmd_entry *cmd, u32 off, | ||
4100 | u32 id, u32 thresh, u64 gpu_va) | ||
4101 | { | ||
4102 | gk20a_dbg_fn(""); | ||
4103 | |||
4104 | off = cmd->off + off; | ||
4105 | /* syncpoint_a */ | ||
4106 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C); | ||
4107 | /* payload */ | ||
4108 | nvgpu_mem_wr32(g, cmd->mem, off++, thresh); | ||
4109 | /* syncpoint_b */ | ||
4110 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D); | ||
4111 | /* syncpt_id, switch_en, wait */ | ||
4112 | nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10); | ||
4113 | } | ||
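/*
 * Example (illustrative): for id = 5, thresh = 10 the four words pushed
 * are
 *
 *	0x2001001C  -- syncpoint_a method header
 *	0x0000000A  -- payload: wait threshold
 *	0x2001001D  -- syncpoint_b method header
 *	0x00000510  -- (5 << 8) | 0x10: syncpt index 5, switch_en, wait
 */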
4114 | |||
4115 | u32 gk20a_fifo_get_syncpt_wait_cmd_size(void) | ||
4116 | { | ||
4117 | return 4; | ||
4118 | } | ||
4119 | |||
4120 | void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g, | ||
4121 | bool wfi_cmd, struct priv_cmd_entry *cmd, | ||
4122 | u32 id, u64 gpu_va) | ||
4123 | { | ||
4124 | u32 off = cmd->off; | ||
4125 | |||
4126 | gk20a_dbg_fn(""); | ||
4127 | if (wfi_cmd) { | ||
4128 | /* wfi */ | ||
4129 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); | ||
4130 | /* handle, ignored */ | ||
4131 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x00000000); | ||
4132 | } | ||
4133 | /* syncpoint_a */ | ||
4134 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C); | ||
4135 | /* payload, ignored */ | ||
4136 | nvgpu_mem_wr32(g, cmd->mem, off++, 0); | ||
4137 | /* syncpoint_b */ | ||
4138 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D); | ||
4139 | /* syncpt_id, incr */ | ||
4140 | nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1); | ||
4141 | /* syncpoint_b */ | ||
4142 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D); | ||
4143 | /* syncpt_id, incr */ | ||
4144 | nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1); | ||
4146 | } | ||
4147 | |||
4148 | u32 gk20a_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd) | ||
4149 | { | ||
4150 | if (wfi_cmd) | ||
4151 | return 8; | ||
4152 | else | ||
4153 | return 6; | ||
4154 | } | ||
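/*
 * Note: these sizes mirror gk20a_fifo_add_syncpt_incr_cmd() above: two
 * words for the payload-less syncpoint_a, plus two for each of the two
 * syncpoint_b increments, plus two more when a WFI precedes them.
 */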
4155 | |||
4156 | void gk20a_fifo_free_syncpt_buf(struct channel_gk20a *c, | ||
4157 | struct nvgpu_mem *syncpt_buf) | ||
4158 | { | ||
4159 | |||
4160 | } | ||
4161 | |||
4162 | int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c, | ||
4163 | u32 syncpt_id, struct nvgpu_mem *syncpt_buf) | ||
4164 | { | ||
4165 | return 0; | ||
4166 | } | ||
4167 | #endif | ||