Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/fifo_gv11b.c')
-rw-r--r-- drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 1907
1 file changed, 1907 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
new file mode 100644
index 00000000..f87c6dea
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -0,0 +1,1907 @@
/*
 * GV11B fifo
 *
 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/types.h>

#include <nvgpu/semaphore.h>
#include <nvgpu/timers.h>
#include <nvgpu/log.h>
#include <nvgpu/dma.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/soc.h>
#include <nvgpu/debug.h>
#include <nvgpu/nvhost_t19x.h>
#include <nvgpu/barrier.h>
#include <nvgpu/mm.h>
#include <nvgpu/ctxsw_trace.h>

#include "gk20a/gk20a.h"
#include "gk20a/fifo_gk20a.h"
#include "gk20a/channel_gk20a.h"

#include "gp10b/fifo_gp10b.h"

#include <nvgpu/hw/gv11b/hw_pbdma_gv11b.h>
#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
#include <nvgpu/hw/gv11b/hw_ccsr_gv11b.h>
#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
#include <nvgpu/hw/gv11b/hw_gr_gv11b.h>

#include "fifo_gv11b.h"
#include "subctx_gv11b.h"
#include "gr_gv11b.h"
#include "mc_gv11b.h"

#define PBDMA_SUBDEVICE_ID 1

static void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g,
		struct channel_gk20a *ch, struct nvgpu_mem *mem);

void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
{
	u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();

	if (tsg->timeslice_timeout)
		runlist_entry_0 |=
		ram_rl_entry_tsg_timeslice_scale_f(tsg->timeslice_scale) |
		ram_rl_entry_tsg_timeslice_timeout_f(tsg->timeslice_timeout);
	else
		runlist_entry_0 |=
			ram_rl_entry_tsg_timeslice_scale_f(
				ram_rl_entry_tsg_timeslice_scale_3_v()) |
			ram_rl_entry_tsg_timeslice_timeout_f(
				ram_rl_entry_tsg_timeslice_timeout_128_v());

	runlist[0] = runlist_entry_0;
	runlist[1] = ram_rl_entry_tsg_length_f(tsg->num_active_channels);
	runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid);
	runlist[3] = 0;

	gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
		runlist[0], runlist[1], runlist[2], runlist[3]);
}
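
/*
 * For reference, the four 32-bit words assembled above form one TSG
 * runlist entry (the exact bit positions live in the generated
 * hw_ram_gv11b.h accessors used above):
 *
 *   runlist[0]: entry type = TSG, plus the timeslice scale/timeout
 *               (defaults: scale 3, timeout 128 when the TSG has no
 *               explicit timeslice configured)
 *   runlist[1]: TSG length, i.e. the number of active channels
 *   runlist[2]: tsgid
 *   runlist[3]: unused, written as 0
 */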

void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
{
	struct gk20a *g = c->g;
	u32 addr_lo, addr_hi;
	u32 runlist_entry;

	/* For the time being, use PBDMA sequencer 0 */
	runlist_entry = ram_rl_entry_type_channel_v() |
			ram_rl_entry_chan_runqueue_selector_f(
				c->t19x.runqueue_sel) |
			ram_rl_entry_chan_userd_target_f(
				nvgpu_aperture_mask(g, &g->fifo.userd,
				ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(),
				ram_rl_entry_chan_userd_target_vid_mem_v())) |
			ram_rl_entry_chan_inst_target_f(
				nvgpu_aperture_mask(g, &c->inst_block,
				ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(),
				ram_rl_entry_chan_inst_target_vid_mem_v()));

	addr_lo = u64_lo32(c->userd_iova) >>
			ram_rl_entry_chan_userd_ptr_align_shift_v();
	addr_hi = u64_hi32(c->userd_iova);
	runlist[0] = runlist_entry | ram_rl_entry_chan_userd_ptr_lo_f(addr_lo);
	runlist[1] = ram_rl_entry_chan_userd_ptr_hi_f(addr_hi);

	addr_lo = u64_lo32(nvgpu_inst_block_addr(g, &c->inst_block)) >>
			ram_rl_entry_chan_inst_ptr_align_shift_v();
	addr_hi = u64_hi32(nvgpu_inst_block_addr(g, &c->inst_block));

	runlist[2] = ram_rl_entry_chan_inst_ptr_lo_f(addr_lo) |
			ram_rl_entry_chid_f(c->chid);
	runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);

	gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
		runlist[0], runlist[1], runlist[2], runlist[3]);
}
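
/*
 * Worked example of the USERD pointer split above. The align shift
 * comes from the generated hw header; the value 8 (256-byte
 * alignment) below is an assumption for illustration only:
 *
 *   userd_iova = 0x1_2345_6700
 *   addr_lo    = u64_lo32(userd_iova) >> 8 = 0x234567 -> runlist[0]
 *   addr_hi    = u64_hi32(userd_iova)      = 0x1      -> runlist[1]
 *
 * The instance block pointer in runlist[2]/runlist[3] is split the
 * same way, with the channel id packed alongside the low bits.
 */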

static void gv11b_userd_writeback_config(struct gk20a *g)
{
	gk20a_writel(g, fifo_userd_writeback_r(), fifo_userd_writeback_timer_f(
				fifo_userd_writeback_timer_100us_v()));
}

int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
		u64 gpfifo_base, u32 gpfifo_entries,
		unsigned long acquire_timeout, u32 flags)
{
	struct gk20a *g = c->g;
	struct nvgpu_mem *mem = &c->inst_block;
	u32 data;

	gk20a_dbg_fn("");

	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());

	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
		pbdma_gp_base_offset_f(
		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));

	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));

	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
		c->g->ops.fifo.get_pbdma_signature(c->g));

	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
		pbdma_pb_header_priv_user_f() |
		pbdma_pb_header_method_zero_f() |
		pbdma_pb_header_subchannel_zero_f() |
		pbdma_pb_header_level_main_f() |
		pbdma_pb_header_first_true_f() |
		pbdma_pb_header_type_inc_f());

	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
		pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) |
		pbdma_subdevice_status_active_f() |
		pbdma_subdevice_channel_dma_enable_f());

	nvgpu_mem_wr32(g, mem, ram_fc_target_w(),
		pbdma_target_eng_ctx_valid_true_f() |
		pbdma_target_ce_ctx_valid_true_f() |
		pbdma_target_engine_sw_f());

	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
		g->ops.fifo.pbdma_acquire_val(acquire_timeout));

	nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
		pbdma_runlist_timeslice_timeout_128_f() |
		pbdma_runlist_timeslice_timescale_3_f() |
		pbdma_runlist_timeslice_enable_true_f());

	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));

	if (c->t19x.subctx_id == CHANNEL_INFO_VEID0)
		nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
			pbdma_set_channel_info_scg_type_graphics_compute0_f() |
			pbdma_set_channel_info_veid_f(c->t19x.subctx_id));
	else
		nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
			pbdma_set_channel_info_scg_type_compute1_f() |
			pbdma_set_channel_info_veid_f(c->t19x.subctx_id));

	gv11b_fifo_init_ramfc_eng_method_buffer(g, c, mem);

	if (c->is_privileged_channel) {
		/* Set privilege level for channel */
		nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
			pbdma_config_auth_level_privileged_f());

		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
	}

	/* Enable userd writeback */
	data = nvgpu_mem_rd32(g, mem, ram_fc_config_w());
	data = data | pbdma_config_userd_writeback_enable_f();
	nvgpu_mem_wr32(g, mem, ram_fc_config_w(), data);

	gv11b_userd_writeback_config(g);

	return channel_gp10b_commit_userd(c);
}
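
/*
 * Note on the GP base programming in channel_gv11b_setup_ramfc():
 * pbdma_gp_base_hi_limit2_f() takes log2 of the GPFIFO entry count,
 * so the ring must be a power-of-two size. For example, a 1024-entry
 * GPFIFO yields ilog2(1024) = 10 in the LIMIT2 field.
 */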

static void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
{
	struct fifo_gk20a *f = &c->g->fifo;
	u32 hw_chid = f->channel_base + c->chid;

	gk20a_dbg_info("channel ring doorbell %d\n", c->chid);

	gv11b_usermode_writel(c->g, usermode_notify_channel_pending_r(),
		usermode_notify_channel_pending_id_f(hw_chid));
}

u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
{
	struct nvgpu_mem *userd_mem = &g->fifo.userd;
	u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));

	return nvgpu_mem_rd32(g, userd_mem,
			offset + ram_userd_gp_get_w());
}

u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c)
{
	struct nvgpu_mem *userd_mem = &g->fifo.userd;
	u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));
	u32 lo = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_w());
	u32 hi = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_hi_w());

	return ((u64)hi << 32) | lo;
}
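
/*
 * The USERD offset math above, worked through. The 512-byte entry
 * size is an assumption for illustration; the real value comes from
 * g->fifo.userd_entry_size:
 *
 *   offset = chid * (userd_entry_size / sizeof(u32))
 *          = 3 * (512 / 4) = 384 words for chid 3
 *
 * Individual USERD fields are then read at offset + ram_userd_*_w().
 */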

void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c)
{
	struct nvgpu_mem *userd_mem = &g->fifo.userd;
	u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));

	nvgpu_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(),
		c->gpfifo.put);
	/* Make the GP_PUT update visible before ringing the doorbell */
	nvgpu_smp_mb();

	gv11b_ring_channel_doorbell(c);
}
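
/*
 * Minimal sketch of how a submit path would use gv11b_userd_gp_put()
 * (not an actual driver path; write_gpfifo_entries() is a
 * hypothetical helper shown only to illustrate the ordering):
 *
 *   write_gpfifo_entries(c);        // fill new GPFIFO entries
 *   c->gpfifo.put = new_put;        // advance the SW put pointer
 *   g->ops.fifo.userd_gp_put(g, c); // publish GP_PUT, barrier, then
 *                                   // ring the usermode doorbell
 */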

void channel_gv11b_unbind(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;

	gk20a_dbg_fn("");

	if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
		gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
			ccsr_channel_inst_ptr_f(0) |
			ccsr_channel_inst_bind_false_f());

		gk20a_writel(g, ccsr_channel_r(ch->chid),
			ccsr_channel_enable_clr_true_f() |
			ccsr_channel_pbdma_faulted_reset_f() |
			ccsr_channel_eng_faulted_reset_f());
	}
}

u32 gv11b_fifo_get_num_fifos(struct gk20a *g)
{
	return ccsr_channel__size_1_v();
}

bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
{
	return (engine_subid == gmmu_fault_client_type_gpc_v());
}

void gv11b_dump_channel_status_ramfc(struct gk20a *g,
				     struct gk20a_debug_output *o,
				     u32 chid,
				     struct ch_state *ch_state)
{
	u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
	u32 status = ccsr_channel_status_v(channel);
	u32 *inst_mem;
	struct channel_gk20a *c = g->fifo.channel + chid;
	struct nvgpu_semaphore_int *hw_sema = NULL;

	if (c->hw_sema)
		hw_sema = c->hw_sema;

	if (!ch_state)
		return;

	inst_mem = &ch_state->inst_block[0];

	gk20a_debug_output(o, "%d-%s, pid %d, refs: %d: ", chid,
			g->name,
			ch_state->pid,
			ch_state->refs);
	gk20a_debug_output(o, "channel status: %s in use %s %s\n",
			ccsr_channel_enable_v(channel) ? "" : "not",
			gk20a_decode_ccsr_chan_status(status),
			ccsr_channel_busy_v(channel) ? "busy" : "not busy");
	gk20a_debug_output(o, "RAMFC : TOP: %016llx PUT: %016llx GET: %016llx "
			"FETCH: %016llx\nHEADER: %08x COUNT: %08x\n"
			"SEMAPHORE: addr hi: %08x addr lo: %08x\n"
			"payload %08x execute %08x\n",
		(u64)inst_mem[ram_fc_pb_top_level_get_w()] +
		((u64)inst_mem[ram_fc_pb_top_level_get_hi_w()] << 32ULL),
		(u64)inst_mem[ram_fc_pb_put_w()] +
		((u64)inst_mem[ram_fc_pb_put_hi_w()] << 32ULL),
		(u64)inst_mem[ram_fc_pb_get_w()] +
		((u64)inst_mem[ram_fc_pb_get_hi_w()] << 32ULL),
		(u64)inst_mem[ram_fc_pb_fetch_w()] +
		((u64)inst_mem[ram_fc_pb_fetch_hi_w()] << 32ULL),
		inst_mem[ram_fc_pb_header_w()],
		inst_mem[ram_fc_pb_count_w()],
		inst_mem[ram_fc_sem_addr_hi_w()],
		inst_mem[ram_fc_sem_addr_lo_w()],
		inst_mem[ram_fc_sem_payload_lo_w()],
		inst_mem[ram_fc_sem_execute_w()]);
	if (hw_sema)
		gk20a_debug_output(o, "SEMA STATE: value: 0x%08x next_val: 0x%08x addr: 0x%010llx\n",
			__nvgpu_semaphore_read(hw_sema),
			nvgpu_atomic_read(&hw_sema->next_value),
			nvgpu_hw_sema_addr(hw_sema));
	gk20a_debug_output(o, "\n");
}

void gv11b_dump_eng_status(struct gk20a *g,
			   struct gk20a_debug_output *o)
{
	u32 i, host_num_engines;

	host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);

	for (i = 0; i < host_num_engines; i++) {
		u32 status = gk20a_readl(g, fifo_engine_status_r(i));
		u32 ctx_status = fifo_engine_status_ctx_status_v(status);

		gk20a_debug_output(o, "%s eng %d: ", g->name, i);
		gk20a_debug_output(o,
			"id: %d (%s), next_id: %d (%s), ctx status: %s ",
			fifo_engine_status_id_v(status),
			fifo_engine_status_id_type_v(status) ?
				"tsg" : "channel",
			fifo_engine_status_next_id_v(status),
			fifo_engine_status_next_id_type_v(status) ?
				"tsg" : "channel",
			gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status));

		if (fifo_engine_status_eng_reload_v(status))
			gk20a_debug_output(o, "ctx_reload ");
		if (fifo_engine_status_faulted_v(status))
			gk20a_debug_output(o, "faulted ");
		if (fifo_engine_status_engine_v(status))
			gk20a_debug_output(o, "busy ");
		gk20a_debug_output(o, "\n");
	}
	gk20a_debug_output(o, "\n");
}

u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g)
{
	u32 intr_0_error_mask =
		fifo_intr_0_bind_error_pending_f() |
		fifo_intr_0_sched_error_pending_f() |
		fifo_intr_0_chsw_error_pending_f() |
		fifo_intr_0_fb_flush_timeout_pending_f() |
		fifo_intr_0_lb_error_pending_f();

	return intr_0_error_mask;
}

u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g)
{
	return gk20a_get_gr_idle_timeout(g);
}

static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
		u32 pbdma_id, unsigned int timeout_rc_type)
{
	struct nvgpu_timeout timeout;
	unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in microseconds */
	u32 pbdma_stat;
	u32 chan_stat;
	int ret = -EBUSY;

	/* timeout in milliseconds */
	nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g),
			NVGPU_TIMER_CPU_TIMER);

	nvgpu_log(g, gpu_dbg_info, "wait preempt pbdma %d", pbdma_id);
	/* Verify that the ch/tsg is no longer on the pbdma */
	do {
		/*
		 * If the PBDMA has a stalling interrupt and receives a NACK,
		 * the PBDMA won't save out until the STALLING interrupt is
		 * cleared. The stalling interrupt need not be directly
		 * addressed: simply clearing the interrupt bit is sufficient
		 * to allow the PBDMA to save out. If the stalling interrupt
		 * was due to a SW method or another deterministic failure,
		 * the PBDMA will assert it again when the channel is reloaded
		 * or resumed. Note that the fault is still reported to SW.
		 */

		gk20a_fifo_handle_pbdma_intr(g, &g->fifo, pbdma_id, RC_NO);

		pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
		chan_stat = fifo_pbdma_status_chan_status_v(pbdma_stat);

		if (chan_stat ==
			fifo_pbdma_status_chan_status_valid_v() ||
			chan_stat ==
			fifo_pbdma_status_chan_status_chsw_save_v()) {

			if (id != fifo_pbdma_status_id_v(pbdma_stat)) {
				ret = 0;
				break;
			}

		} else if (chan_stat ==
			fifo_pbdma_status_chan_status_chsw_load_v()) {

			if (id != fifo_pbdma_status_next_id_v(pbdma_stat)) {
				ret = 0;
				break;
			}

		} else if (chan_stat ==
			fifo_pbdma_status_chan_status_chsw_switch_v()) {

			if ((id != fifo_pbdma_status_next_id_v(pbdma_stat)) &&
				(id != fifo_pbdma_status_id_v(pbdma_stat))) {
				ret = 0;
				break;
			}
		} else {
			/* pbdma status is invalid, i.e. nothing is loaded */
			ret = 0;
			break;
		}

		usleep_range(delay, delay * 2);
		delay = min_t(unsigned long,
				delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"preempt timeout pbdma"));
	return ret;
}
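
/*
 * Summary of the poll above, keyed on the PBDMA channel status:
 *
 *   VALID / CHSW_SAVE -> done once ID no longer matches
 *   CHSW_LOAD         -> done once NEXT_ID no longer matches
 *   CHSW_SWITCH       -> done once neither ID nor NEXT_ID matches
 *   anything else     -> nothing is loaded, preempt is complete
 */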

static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
		u32 act_eng_id, u32 *reset_eng_bitmask,
		unsigned int timeout_rc_type)
{
	struct nvgpu_timeout timeout;
	unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in microseconds */
	u32 eng_stat;
	u32 ctx_stat;
	int ret = -EBUSY;
	bool stall_intr = false;

	/* timeout in milliseconds */
	nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g),
			NVGPU_TIMER_CPU_TIMER);

	nvgpu_log(g, gpu_dbg_info, "wait preempt act engine id: %u",
			act_eng_id);
	/* Check if the ch/tsg has saved off the engine or if ctxsw is hung */
	do {
		eng_stat = gk20a_readl(g, fifo_engine_status_r(act_eng_id));
		ctx_stat = fifo_engine_status_ctx_status_v(eng_stat);

		if (gv11b_mc_is_stall_and_eng_intr_pending(g, act_eng_id)) {
			stall_intr = true;
			nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
					"stall intr set, "
					"preemption will not finish");
		}
		if (ctx_stat ==
			fifo_engine_status_ctx_status_ctxsw_switch_v()) {
			/* Eng save hasn't started yet. Continue polling */

		} else if (ctx_stat ==
			fifo_engine_status_ctx_status_valid_v() ||
				ctx_stat ==
			fifo_engine_status_ctx_status_ctxsw_save_v()) {

			if (id == fifo_engine_status_id_v(eng_stat)) {
				if (stall_intr ||
					timeout_rc_type == PREEMPT_TIMEOUT_NORC) {
					/* preemption will not finish */
					*reset_eng_bitmask |= BIT(act_eng_id);
					ret = 0;
					break;
				}
			} else {
				/* context is not running on the engine */
				ret = 0;
				break;
			}

		} else if (ctx_stat ==
			fifo_engine_status_ctx_status_ctxsw_load_v()) {

			if (id == fifo_engine_status_next_id_v(eng_stat)) {

				if (stall_intr ||
					timeout_rc_type == PREEMPT_TIMEOUT_NORC) {
					/* preemption will not finish */
					*reset_eng_bitmask |= BIT(act_eng_id);
					ret = 0;
					break;
				}
			} else {
				/* context is not running on the engine */
				ret = 0;
				break;
			}

		} else {
			/* Preempt should be finished */
			ret = 0;
			break;
		}
		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(unsigned long,
				delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"preempt timeout eng"));
	return ret;
}
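
/*
 * Summary of the poll above, keyed on the engine ctx status:
 *
 *   CTXSW_SWITCH       -> engine save not started yet, keep polling
 *   VALID / CTXSW_SAVE -> done if ID differs; if it matches and a
 *                         stall intr is pending (or NORC was
 *                         requested), mark the engine for reset
 *   CTXSW_LOAD         -> same checks, but against NEXT_ID
 *   anything else      -> preemption has finished
 */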

static void gv11b_reset_eng_faulted_ch(struct gk20a *g, u32 chid)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, ccsr_channel_r(chid));
	reg_val |= ccsr_channel_eng_faulted_reset_f();
	gk20a_writel(g, ccsr_channel_r(chid), reg_val);
}

static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		gv11b_reset_eng_faulted_ch(g, ch->chid);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}

static void gv11b_reset_pbdma_faulted_ch(struct gk20a *g, u32 chid)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, ccsr_channel_r(chid));
	reg_val |= ccsr_channel_pbdma_faulted_reset_f();
	gk20a_writel(g, ccsr_channel_r(chid), reg_val);
}

static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		gv11b_reset_pbdma_faulted_ch(g, ch->chid);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}

void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
			struct channel_gk20a *refch,
			u32 faulted_pbdma, u32 faulted_engine)
{
	struct tsg_gk20a *tsg;

	nvgpu_log(g, gpu_dbg_intr, "reset faulted pbdma:0x%x eng:0x%x",
			faulted_pbdma, faulted_engine);

	if (gk20a_is_channel_marked_as_tsg(refch)) {
		tsg = &g->fifo.tsg[refch->tsgid];
		if (faulted_pbdma != FIFO_INVAL_PBDMA_ID)
			gv11b_reset_pbdma_faulted_tsg(tsg);
		if (faulted_engine != FIFO_INVAL_ENGINE_ID)
			gv11b_reset_eng_faulted_tsg(tsg);
	} else {
		if (faulted_pbdma != FIFO_INVAL_PBDMA_ID)
			gv11b_reset_pbdma_faulted_ch(g, refch->chid);
		if (faulted_engine != FIFO_INVAL_ENGINE_ID)
			gv11b_reset_eng_faulted_ch(g, refch->chid);
	}
}

static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
			u32 id, unsigned int id_type, unsigned int rc_type,
			struct mmu_fault_info *mmfault)
{
	u32 runlists_mask = 0;
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist;
	u32 pbdma_bitmask = 0;

	if (id_type != ID_TYPE_UNKNOWN) {
		if (id_type == ID_TYPE_TSG)
			runlists_mask |= fifo_sched_disable_runlist_m(
						f->tsg[id].runlist_id);
		else
			runlists_mask |= fifo_sched_disable_runlist_m(
						f->channel[id].runlist_id);
	}

	if (rc_type == RC_TYPE_MMU_FAULT && mmfault) {
		if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
			pbdma_bitmask = BIT(mmfault->faulted_pbdma);

		for (id = 0; id < f->max_runlists; id++) {

			runlist = &f->runlist_info[id];

			if (runlist->eng_bitmask & act_eng_bitmask)
				runlists_mask |=
					fifo_sched_disable_runlist_m(id);

			if (runlist->pbdma_bitmask & pbdma_bitmask)
				runlists_mask |=
					fifo_sched_disable_runlist_m(id);
		}
	}

	if (id_type == ID_TYPE_UNKNOWN) {
		for (id = 0; id < f->max_runlists; id++) {
			if (act_eng_bitmask) {
				/* eng ids are known */
				runlist = &f->runlist_info[id];
				if (runlist->eng_bitmask & act_eng_bitmask)
					runlists_mask |=
					fifo_sched_disable_runlist_m(id);
			} else {
				runlists_mask |=
					fifo_sched_disable_runlist_m(id);
			}
		}
	}
	gk20a_dbg_info("runlists_mask = %08x", runlists_mask);
	return runlists_mask;
}

static void gv11b_fifo_runlist_event_intr_disable(struct gk20a *g)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fifo_intr_en_0_r());
	reg_val &= ~fifo_intr_0_runlist_event_pending_f();
	gk20a_writel(g, fifo_intr_en_0_r(), reg_val);
}

static void gv11b_fifo_runlist_event_intr_enable(struct gk20a *g)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fifo_intr_en_0_r());
	reg_val |= fifo_intr_0_runlist_event_pending_f();
	gk20a_writel(g, fifo_intr_en_0_r(), reg_val);
}

static void gv11b_fifo_issue_runlist_preempt(struct gk20a *g,
					u32 runlists_mask)
{
	u32 reg_val;

	/* issue runlist preempt */
	reg_val = gk20a_readl(g, fifo_runlist_preempt_r());
	reg_val |= runlists_mask;
	gk20a_writel(g, fifo_runlist_preempt_r(), reg_val);
}

static int gv11b_fifo_poll_runlist_preempt_pending(struct gk20a *g,
					u32 runlists_mask)
{
	struct nvgpu_timeout timeout;
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	int ret = -EBUSY;

	nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g),
			NVGPU_TIMER_CPU_TIMER);
	do {
		if (!((gk20a_readl(g, fifo_runlist_preempt_r())) &
				runlists_mask)) {
			ret = 0;
			break;
		}

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(unsigned long,
				delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"runlist preempt timeout"));
	return ret;
}

int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
		unsigned int id_type, unsigned int timeout_rc_type)
{
	struct fifo_gk20a *f = &g->fifo;
	unsigned long runlist_served_pbdmas;
	unsigned long runlist_served_engines;
	u32 pbdma_id;
	u32 act_eng_id;
	u32 runlist_id;
	int func_ret;
	int ret = 0;
	u32 tsgid;

	if (id_type == ID_TYPE_TSG) {
		runlist_id = f->tsg[id].runlist_id;
		tsgid = id;
	} else {
		runlist_id = f->channel[id].runlist_id;
		tsgid = f->channel[id].tsgid;
	}

	nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid);

	runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask;
	runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;

	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {

		func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id,
				timeout_rc_type);
		if (func_ret != 0) {
			gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id);
			ret |= func_ret;
		}
	}

	f->runlist_info[runlist_id].reset_eng_bitmask = 0;

	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {

		func_ret = gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
				&f->runlist_info[runlist_id].reset_eng_bitmask,
				timeout_rc_type);

		if (func_ret != 0) {
			gk20a_dbg_info("preempt timeout engine %d", act_eng_id);
			ret |= func_ret;
		}
	}

	return ret;
}

int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
{
	struct fifo_gk20a *f = &g->fifo;
	u32 tsgid;

	tsgid = f->channel[chid].tsgid;
	nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);

	/* Preempt tsg. Channel preempt is NOOP */
	return g->ops.fifo.preempt_tsg(g, tsgid);
}

static int __locked_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
{
	int ret;

	/*
	 * Disable runlist event interrupt as it will get
	 * triggered after runlist preempt finishes
	 */
	gv11b_fifo_runlist_event_intr_disable(g);

	/* issue runlist preempt */
	gv11b_fifo_issue_runlist_preempt(g, runlists_mask);

	/* poll for runlist preempt done */
	ret = gv11b_fifo_poll_runlist_preempt_pending(g, runlists_mask);

	/* Clear outstanding runlist event */
	gk20a_fifo_handle_runlist_event(g);

	/* Enable runlist event interrupt */
	gv11b_fifo_runlist_event_intr_enable(g);

	return ret;
}

/* TSG enable sequence applicable for Volta and onwards */
int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		g->ops.fifo.enable_channel(ch);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);

	return 0;
}

int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
{
	struct fifo_gk20a *f = &g->fifo;
	int ret = 0;
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	u32 mutex_ret = 0;
	u32 runlist_id;

	gk20a_dbg_fn("%d", tsgid);

	runlist_id = f->tsg[tsgid].runlist_id;
	gk20a_dbg_fn("runlist_id %d", runlist_id);

	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);

	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	ret = __locked_fifo_preempt(g, tsgid, true);

	if (!mutex_ret)
		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);

	return ret;
}

static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
{
	int ret = 0;
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	u32 mutex_ret = 0;
	u32 runlist_id;

	gk20a_dbg_fn("");

	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
			nvgpu_mutex_acquire(&g->fifo.
				runlist_info[runlist_id].mutex);
	}

	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	ret = __locked_fifo_preempt_runlists(g, runlists_mask);

	if (!mutex_ret)
		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
			nvgpu_mutex_release(&g->fifo.
				runlist_info[runlist_id].mutex);
	}

	return ret;
}

static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
			unsigned int id_type, unsigned int timeout_rc_type)
{
	int ret;
	struct fifo_gk20a *f = &g->fifo;

	nvgpu_log_fn(g, "id:%d id_type:%d", id, id_type);

	/* Issue tsg preempt. Channel preempt is noop */
	if (id_type == ID_TYPE_CHANNEL)
		gk20a_fifo_issue_preempt(g, f->channel[id].tsgid, true);
	else
		gk20a_fifo_issue_preempt(g, id, true);

	/* wait for preempt */
	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
			timeout_rc_type);

	if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC))
		__locked_fifo_preempt_timeout_rc(g, id, id_type);

	return ret;
}

int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
			unsigned int id_type, unsigned int timeout_rc_type)
{
	struct fifo_gk20a *f = &g->fifo;
	int ret = 0;
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	u32 mutex_ret = 0;
	u32 runlist_id;

	if (id_type == ID_TYPE_TSG)
		runlist_id = f->tsg[id].runlist_id;
	else if (id_type == ID_TYPE_CHANNEL)
		runlist_id = f->channel[id].runlist_id;
	else
		return -EINVAL;

	if (runlist_id >= g->fifo.max_runlists) {
		gk20a_dbg_info("runlist_id = %d", runlist_id);
		return -EINVAL;
	}

	gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);

	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);

	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	ret = __locked_fifo_preempt_ch_tsg(g, id, id_type, timeout_rc_type);

	if (!mutex_ret)
		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);

	return ret;
}

void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
			u32 id, unsigned int id_type, unsigned int rc_type,
			struct mmu_fault_info *mmfault)
{
	bool verbose = false;
	struct tsg_gk20a *tsg = NULL;
	struct channel_gk20a *refch = NULL;
	u32 runlists_mask, runlist_id;
	struct fifo_runlist_info_gk20a *runlist = NULL;
	u32 engine_id, client_type = ~0;

	gk20a_dbg_info("active engine ids bitmask =0x%x", act_eng_bitmask);
	gk20a_dbg_info("hw id =%d", id);
	gk20a_dbg_info("id_type =%d", id_type);
	gk20a_dbg_info("rc_type =%d", rc_type);
	gk20a_dbg_info("mmu_fault =0x%p", mmfault);

	runlists_mask = gv11b_fifo_get_runlists_mask(g, act_eng_bitmask, id,
			id_type, rc_type, mmfault);

	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED,
			!RUNLIST_INFO_MUTEX_LOCKED);

	g->fifo.deferred_reset_pending = false;

	/* Disable power management */
	if (g->support_pmu && g->elpg_enabled) {
		if (nvgpu_pmu_disable_elpg(g))
			nvgpu_err(g, "failed to set disable elpg");
	}
	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
				false);
	if (g->ops.clock_gating.slcg_perf_load_gating_prod)
		g->ops.clock_gating.slcg_perf_load_gating_prod(g,
				false);
	if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
		g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
				false);

	gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);

	if (rc_type == RC_TYPE_MMU_FAULT)
		gk20a_debug_dump(g);

	/* get the channel/TSG */
	if (rc_type == RC_TYPE_MMU_FAULT && mmfault && mmfault->refch) {
		refch = mmfault->refch;
		client_type = mmfault->client_type;
		if (gk20a_is_channel_marked_as_tsg(refch))
			tsg = &g->fifo.tsg[refch->tsgid];
		gv11b_fifo_reset_pbdma_and_eng_faulted(g, refch,
				mmfault->faulted_pbdma,
				mmfault->faulted_engine);
	} else {
		if (id_type == ID_TYPE_TSG)
			tsg = &g->fifo.tsg[id];
		else if (id_type == ID_TYPE_CHANNEL)
			refch = gk20a_channel_get(&g->fifo.channel[id]);
	}

	if (id_type == ID_TYPE_TSG || id_type == ID_TYPE_CHANNEL) {
		g->ops.fifo.preempt_ch_tsg(g, id, id_type,
				PREEMPT_TIMEOUT_NORC);
	} else {
		gv11b_fifo_preempt_runlists(g, runlists_mask);
	}

	if (tsg) {
		if (!g->fifo.deferred_reset_pending) {
			if (rc_type == RC_TYPE_MMU_FAULT) {
				gk20a_fifo_set_ctx_mmu_error_tsg(g, tsg);
				verbose = gk20a_fifo_error_tsg(g, tsg);
			}
		}
		gk20a_fifo_abort_tsg(g, tsg->tsgid, false);
		if (refch)
			gk20a_channel_put(refch);
	} else if (refch) {
		if (!g->fifo.deferred_reset_pending) {
			if (rc_type == RC_TYPE_MMU_FAULT) {
				gk20a_fifo_set_ctx_mmu_error_ch(g, refch);
				verbose = gk20a_fifo_error_ch(g, refch);
			}
		}
		gk20a_channel_abort(refch, false);
		gk20a_channel_put(refch);
	} else {
		nvgpu_err(g, "id unknown, abort runlist");
		for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
				runlist_id++) {
			if (runlists_mask & BIT(runlist_id))
				g->ops.fifo.update_runlist(g, runlist_id,
					FIFO_INVAL_CHANNEL_ID, false, true);
		}
	}

	/* check if engine reset should be deferred */
	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {

		runlist = &g->fifo.runlist_info[runlist_id];
		if ((runlists_mask & BIT(runlist_id)) &&
				runlist->reset_eng_bitmask) {

			unsigned long __reset_eng_bitmask =
				runlist->reset_eng_bitmask;

			for_each_set_bit(engine_id, &__reset_eng_bitmask, 32) {
				if ((refch || tsg) &&
					gk20a_fifo_should_defer_engine_reset(g,
					engine_id, client_type, false)) {

					g->fifo.deferred_fault_engines |=
						BIT(engine_id);

					/* handled during channel free */
					g->fifo.deferred_reset_pending = true;
					gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
						"sm debugger attached,"
						" deferring channel recovery to channel free");
				} else {
					/*
					 * if lock is already taken, a reset is
					 * taking place so no need to repeat
					 */
					if (nvgpu_mutex_tryacquire(
						&g->fifo.gr_reset_mutex)) {

						gk20a_fifo_reset_engine(g,
								engine_id);

						nvgpu_mutex_release(
							&g->fifo.gr_reset_mutex);
					}
				}
			}
		}
	}

#ifdef CONFIG_GK20A_CTXSW_TRACE
	if (refch)
		gk20a_ctxsw_trace_channel_reset(g, refch);
	else if (tsg)
		gk20a_ctxsw_trace_tsg_reset(g, tsg);
#endif

	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED,
			!RUNLIST_INFO_MUTEX_LOCKED);

	/* It is safe to enable ELPG again. */
	if (g->support_pmu && g->elpg_enabled)
		nvgpu_pmu_enable_elpg(g);
}

void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
{
	/*
	 * These are all errors which indicate something really wrong
	 * going on in the device
	 */
	f->intr.pbdma.device_fatal_0 =
		pbdma_intr_0_memreq_pending_f() |
		pbdma_intr_0_memack_timeout_pending_f() |
		pbdma_intr_0_memack_extra_pending_f() |
		pbdma_intr_0_memdat_timeout_pending_f() |
		pbdma_intr_0_memdat_extra_pending_f() |
		pbdma_intr_0_memflush_pending_f() |
		pbdma_intr_0_memop_pending_f() |
		pbdma_intr_0_lbconnect_pending_f() |
		pbdma_intr_0_lback_timeout_pending_f() |
		pbdma_intr_0_lback_extra_pending_f() |
		pbdma_intr_0_lbdat_timeout_pending_f() |
		pbdma_intr_0_lbdat_extra_pending_f() |
		pbdma_intr_0_pri_pending_f();

	/*
	 * These are data parsing, framing errors or others which can be
	 * recovered from with intervention... or just resetting the
	 * channel
	 */
	f->intr.pbdma.channel_fatal_0 =
		pbdma_intr_0_gpfifo_pending_f() |
		pbdma_intr_0_gpptr_pending_f() |
		pbdma_intr_0_gpentry_pending_f() |
		pbdma_intr_0_gpcrc_pending_f() |
		pbdma_intr_0_pbptr_pending_f() |
		pbdma_intr_0_pbentry_pending_f() |
		pbdma_intr_0_pbcrc_pending_f() |
		pbdma_intr_0_method_pending_f() |
		pbdma_intr_0_methodcrc_pending_f() |
		pbdma_intr_0_pbseg_pending_f() |
		pbdma_intr_0_clear_faulted_error_pending_f() |
		pbdma_intr_0_eng_reset_pending_f() |
		pbdma_intr_0_semaphore_pending_f() |
		pbdma_intr_0_signature_pending_f();

	/* Can be used for sw-methods, or represents a recoverable timeout. */
	f->intr.pbdma.restartable_0 =
		pbdma_intr_0_device_pending_f();
}

static u32 gv11b_fifo_intr_0_en_mask(struct gk20a *g)
{
	u32 intr_0_en_mask;

	intr_0_en_mask = g->ops.fifo.intr_0_error_mask(g);

	intr_0_en_mask |= fifo_intr_0_runlist_event_pending_f() |
				fifo_intr_0_pbdma_intr_pending_f() |
				fifo_intr_0_ctxsw_timeout_pending_f();

	return intr_0_en_mask;
}

int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
{
	u32 intr_stall;
	u32 mask;
	u32 timeout;
	unsigned int i;
	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);

	gk20a_dbg_fn("");

	/* enable pmc pfifo */
	g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());

	if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
		g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
		g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
		g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
				g->blcg_enabled);

	/* enable pbdma */
	mask = 0;
	for (i = 0; i < host_num_pbdma; ++i)
		mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i);
	gk20a_writel(g, mc_enable_pb_r(), mask);

	timeout = gk20a_readl(g, fifo_fb_timeout_r());
	nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
	if (!nvgpu_platform_is_silicon(g)) {
		timeout = set_field(timeout, fifo_fb_timeout_period_m(),
				fifo_fb_timeout_period_max_f());
		timeout = set_field(timeout, fifo_fb_timeout_detection_m(),
				fifo_fb_timeout_detection_disabled_f());
		nvgpu_log_info(g, "new fifo_fb_timeout reg val = 0x%08x",
				timeout);
		gk20a_writel(g, fifo_fb_timeout_r(), timeout);
	}

	for (i = 0; i < host_num_pbdma; i++) {
		timeout = gk20a_readl(g, pbdma_timeout_r(i));
		nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x",
				timeout);
		if (!nvgpu_platform_is_silicon(g)) {
			timeout = set_field(timeout, pbdma_timeout_period_m(),
					pbdma_timeout_period_max_f());
			nvgpu_log_info(g, "new pbdma_timeout reg val = 0x%08x",
					timeout);
			gk20a_writel(g, pbdma_timeout_r(i), timeout);
		}
	}

	/* clear and enable pbdma interrupt */
	for (i = 0; i < host_num_pbdma; i++) {
		gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
		gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);

		intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
		gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
		gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);

		intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
		gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
		gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
	}

	/* clear ctxsw timeout interrupts */
	gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ~0);

	if (nvgpu_platform_is_silicon(g)) {
		/* enable ctxsw timeout */
		timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US;
		timeout = scale_ptimer(timeout,
			ptimer_scalingfactor10x(g->ptimer_src_freq));
		timeout |= fifo_eng_ctxsw_timeout_detection_enabled_f();
		gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
	} else {
		timeout = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
		nvgpu_log_info(g, "fifo_eng_ctxsw_timeout reg val = 0x%08x",
				timeout);
		timeout = set_field(timeout, fifo_eng_ctxsw_timeout_period_m(),
				fifo_eng_ctxsw_timeout_period_max_f());
		timeout = set_field(timeout,
				fifo_eng_ctxsw_timeout_detection_m(),
				fifo_eng_ctxsw_timeout_detection_disabled_f());
		nvgpu_log_info(g, "new fifo_eng_ctxsw_timeout reg val = 0x%08x",
				timeout);
		gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
	}

	/* clear runlist interrupts */
	gk20a_writel(g, fifo_intr_runlist_r(), ~0);

	/* clear and enable pfifo interrupt */
	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
	mask = gv11b_fifo_intr_0_en_mask(g);
	gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask);
	gk20a_writel(g, fifo_intr_en_0_r(), mask);
	gk20a_dbg_info("fifo_intr_en_1 = 0x80000000");
	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);

	gk20a_dbg_fn("done");

	return 0;
}

static const char *const gv11b_sched_error_str[] = {
	"xxx-0",
	"xxx-1",
	"xxx-2",
	"xxx-3",
	"xxx-4",
	"engine_reset",
	"rl_ack_timeout",
	"rl_ack_extra",
	"rl_rdat_timeout",
	"rl_rdat_extra",
	"xxx-a",
	"xxx-b",
	"rl_req_timeout",
	"new_runlist",
	"code_config_while_busy",
	"xxx-f",
	"xxx-0x10",
	"xxx-0x11",
	"xxx-0x12",
	"xxx-0x13",
	"xxx-0x14",
	"xxx-0x15",
	"xxx-0x16",
	"xxx-0x17",
	"xxx-0x18",
	"xxx-0x19",
	"xxx-0x1a",
	"xxx-0x1b",
	"xxx-0x1c",
	"xxx-0x1d",
	"xxx-0x1e",
	"xxx-0x1f",
	"bad_tsg",
};

bool gv11b_fifo_handle_sched_error(struct gk20a *g)
{
	u32 sched_error;

	sched_error = gk20a_readl(g, fifo_intr_sched_error_r());

	if (sched_error < ARRAY_SIZE(gv11b_sched_error_str))
		nvgpu_err(g, "fifo sched error: %s",
			gv11b_sched_error_str[sched_error]);
	else
		nvgpu_err(g, "fifo sched error code 0x%08x not supported",
			sched_error);

	if (sched_error == SCHED_ERROR_CODE_BAD_TSG) {
		/* id is unknown, preempt all runlists and do recovery */
		gk20a_fifo_recover(g, 0, 0, false, false, false);
	}

	return false;
}

static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id)
{
	u32 tsgid = FIFO_INVAL_TSG_ID;
	u32 timeout_info;
	u32 ctx_status, info_status;

	timeout_info = gk20a_readl(g,
			fifo_intr_ctxsw_timeout_info_r(active_eng_id));

	/*
	 * ctxsw_state and tsgid are snapped at the point of the timeout and
	 * will not change while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
	 * is PENDING.
	 */
	ctx_status = fifo_intr_ctxsw_timeout_info_ctxsw_state_v(timeout_info);
	if (ctx_status ==
		fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v()) {

		tsgid = fifo_intr_ctxsw_timeout_info_next_tsgid_v(timeout_info);

	} else if (ctx_status ==
		fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v() ||
			ctx_status ==
		fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v()) {

		tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
	}
	gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid);

	/*
	 * STATUS indicates whether the context request ack was eventually
	 * received and whether a subsequent request timed out. This field is
	 * updated live while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
	 * is PENDING. STATUS starts in AWAITING_ACK, and progresses to
	 * ACK_RECEIVED and finally ends with DROPPED_TIMEOUT.
	 *
	 * AWAITING_ACK - context request ack still not returned from engine.
	 * ENG_WAS_RESET - The engine was reset via a PRI write to NV_PMC_ENABLE
	 * or NV_PMC_ELPG_ENABLE prior to receiving the ack. Host will not
	 * expect the ctx ack to return, but if it is already in flight, STATUS
	 * will transition shortly to ACK_RECEIVED unless the interrupt is
	 * cleared first. Once the engine is reset, additional context switches
	 * can occur; if one times out, STATUS will transition to
	 * DROPPED_TIMEOUT if the interrupt isn't cleared first.
	 * ACK_RECEIVED - The ack for the timed-out context request was
	 * received between the point of the timeout and this register being
	 * read. Note this STATUS can be reported during the load stage of the
	 * same context switch that timed out if the timeout occurred during the
	 * save half of a context switch. Additional context requests may have
	 * completed or may be outstanding, but no further context timeout has
	 * occurred. This simplifies checking for spurious context switch
	 * timeouts.
	 * DROPPED_TIMEOUT - The originally timed-out context request acked,
	 * but a subsequent context request then timed out.
	 * Information about the subsequent timeout is not stored; in fact, that
	 * context request may also have already been acked by the time SW
	 * reads this register. If not, there is a chance SW can get the
	 * dropped information by clearing the corresponding
	 * INTR_CTXSW_TIMEOUT_ENGINE bit and waiting for the timeout to occur
	 * again. Note, however, that if the engine does time out again,
	 * it may not be from the original request that caused the
	 * DROPPED_TIMEOUT state, as that request may
	 * be acked in the interim.
	 */
	info_status = fifo_intr_ctxsw_timeout_info_status_v(timeout_info);
	if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_awaiting_ack_v()) {

		gk20a_dbg_info("ctxsw timeout info: awaiting ack");

	} else if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_eng_was_reset_v()) {

		gk20a_dbg_info("ctxsw timeout info: eng was reset");

	} else if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {

		gk20a_dbg_info("ctxsw timeout info: ack received");
		/* no need to recover */
		tsgid = FIFO_INVAL_TSG_ID;

	} else if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {

		gk20a_dbg_info("ctxsw timeout info: dropped timeout");
		/* no need to recover */
		tsgid = FIFO_INVAL_TSG_ID;

	} else {
		gk20a_dbg_info("ctxsw timeout info status = %u", info_status);
	}

	return tsgid;
}
1421 | |||
1422 | bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr) | ||
1423 | { | ||
1424 | bool ret = false; | ||
1425 | u32 tsgid = FIFO_INVAL_TSG_ID; | ||
1426 | u32 engine_id, active_eng_id; | ||
1427 | u32 timeout_val, ctxsw_timeout_engines; | ||
1428 | |||
1429 | |||
1430 | if (!(fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f())) | ||
1431 | return ret; | ||
1432 | |||
1433 | /* get ctxsw timedout engines */ | ||
1434 | ctxsw_timeout_engines = gk20a_readl(g, fifo_intr_ctxsw_timeout_r()); | ||
1435 | if (ctxsw_timeout_engines == 0) { | ||
1436 | nvgpu_err(g, "no eng ctxsw timeout pending"); | ||
1437 | return ret; | ||
1438 | } | ||
1439 | |||
1440 | timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r()); | ||
1441 | timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val); | ||
1442 | |||
1443 | gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val); | ||
1444 | |||
1445 | for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { | ||
1446 | active_eng_id = g->fifo.active_engines_list[engine_id]; | ||
1447 | |||
1448 | if (ctxsw_timeout_engines & | ||
1449 | fifo_intr_ctxsw_timeout_engine_pending_f( | ||
1450 | active_eng_id)) { | ||
1451 | |||
1452 | struct fifo_gk20a *f = &g->fifo; | ||
1453 | u32 ms = 0; | ||
1454 | bool verbose = false; | ||
1455 | |||
1456 | tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id); | ||
1457 | |||
1458 | if (tsgid == FIFO_INVAL_TSG_ID) | ||
1459 | continue; | ||
1460 | |||
1461 | if (gk20a_fifo_check_tsg_ctxsw_timeout( | ||
1462 | &f->tsg[tsgid], &verbose, &ms)) { | ||
1463 | ret = true; | ||
1464 | nvgpu_err(g, | ||
1465 | "ctxsw timeout error: " | ||
1466 | "active engine id = %u, tsg = %d, ms = %u", | ||
1467 | active_eng_id, tsgid, ms); | ||
1468 | |||
1469 | /* Restart (i.e. cancel and re-arm) all channels' watchdog timeouts */ | ||
1470 | gk20a_channel_timeout_restart_all_channels(g); | ||
1471 | gk20a_fifo_recover(g, BIT(active_eng_id), tsgid, | ||
1472 | true, true, verbose); | ||
1473 | } else { | ||
1474 | gk20a_dbg_info( | ||
1475 | "fifo is waiting for ctx switch: " | ||
1476 | "for %d ms, tsg = %d", ms, tsgid); | ||
1477 | } | ||
1478 | } | ||
1479 | } | ||
1480 | /* clear interrupt */ | ||
1481 | gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ctxsw_timeout_engines); | ||
1482 | return ret; | ||
1483 | } | ||
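| ||| 
| /* | ||
|  * Minimal usage sketch (hypothetical helper, not this file's real ISR | ||
|  * path): the fifo interrupt service routine reads FIFO_INTR_0 and hands | ||
|  * the raw value to the handler above; a true return means recovery was | ||
|  * triggered for the timed-out TSG. | ||
|  */ | ||
| static inline bool gv11b_fifo_isr_ctxsw_timeout_example(struct gk20a *g) | ||
| { | ||
| u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); | ||
| ||| 
| return gv11b_fifo_handle_ctxsw_timeout(g, fifo_intr); | ||
| } | ||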
1484 | |||
1485 | unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, | ||
1486 | u32 pbdma_id, u32 pbdma_intr_0, | ||
1487 | u32 *handled, u32 *error_notifier) | ||
1488 | { | ||
1489 | unsigned int rc_type = RC_TYPE_NO_RC; | ||
1490 | |||
1491 | rc_type = gk20a_fifo_handle_pbdma_intr_0(g, pbdma_id, | ||
1492 | pbdma_intr_0, handled, error_notifier); | ||
1493 | |||
1494 | if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { | ||
1495 | gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d", | ||
1496 | pbdma_id); | ||
1497 | gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); | ||
1498 | *handled |= pbdma_intr_0_clear_faulted_error_pending_f(); | ||
1499 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
1500 | } | ||
1501 | |||
1502 | if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { | ||
1503 | gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d", | ||
1504 | pbdma_id); | ||
1505 | *handled |= pbdma_intr_0_eng_reset_pending_f(); | ||
1506 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
1507 | } | ||
1508 | |||
1509 | return rc_type; | ||
1510 | } | ||
1511 | |||
1512 | /* | ||
1513 | * A PBDMA that encounters the ctxnotvalid interrupt will stall and | ||
1514 | * prevent the channel that was loaded at the time the interrupt fired | ||
1515 | * from being swapped out until the interrupt is cleared. | ||
1516 | * The CTXNOTVALID pbdma interrupt indicates error conditions related | ||
1517 | * to the *_CTX_VALID fields for a channel. The following | ||
1518 | * conditions trigger the interrupt: | ||
1519 | * * The CTX_VALID bit for the targeted engine is FALSE | ||
1520 | * * At channel start/resume, all preemptible engines have CTX_VALID FALSE but: | ||
1521 | * - CTX_RELOAD is set in CCSR_CHANNEL_STATUS, | ||
1522 | * - PBDMA_TARGET_SHOULD_SEND_HOST_TSG_EVENT is TRUE, or | ||
1523 | * - PBDMA_TARGET_NEEDS_HOST_TSG_EVENT is TRUE | ||
1524 | * The field is left NOT_PENDING and the interrupt is not raised if the PBDMA is | ||
1525 | * currently halted. This allows SW to unblock the PBDMA and recover. | ||
1526 | * SW may read METHOD0, CHANNEL_STATUS and TARGET to determine whether the | ||
1527 | * interrupt was due to an engine method, CTX_RELOAD, SHOULD_SEND_HOST_TSG_EVENT | ||
1528 | * or NEEDS_HOST_TSG_EVENT. If METHOD0 VALID is TRUE, lazy context creation | ||
1529 | * can be used or the TSG may be destroyed. | ||
1530 | * If METHOD0 VALID is FALSE, the error is likely a bug in SW, and the TSG | ||
1531 | * will have to be destroyed. | ||
1532 | */ | ||
1533 | |||
1534 | unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g, | ||
1535 | u32 pbdma_id, u32 pbdma_intr_1, | ||
1536 | u32 *handled, u32 *error_notifier) | ||
1537 | { | ||
1538 | unsigned int rc_type = RC_TYPE_PBDMA_FAULT; | ||
1539 | u32 pbdma_intr_1_current = gk20a_readl(g, pbdma_intr_1_r(pbdma_id)); | ||
1540 | |||
1541 | /* minimize race with the gpu clearing the pending interrupt */ | ||
1542 | if (!(pbdma_intr_1_current & | ||
1543 | pbdma_intr_1_ctxnotvalid_pending_f())) | ||
1544 | pbdma_intr_1 &= ~pbdma_intr_1_ctxnotvalid_pending_f(); | ||
1545 | |||
1546 | if (pbdma_intr_1 == 0) | ||
1547 | return RC_TYPE_NO_RC; | ||
1548 | |||
1549 | if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) { | ||
1550 | gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", | ||
1551 | pbdma_id); | ||
1552 | nvgpu_err(g, "pbdma_intr_1(%d) = 0x%08x", | ||
1553 | pbdma_id, pbdma_intr_1); | ||
1554 | *handled |= pbdma_intr_1_ctxnotvalid_pending_f(); | ||
1555 | } else { | ||
1556 | /* | ||
1557 | * rest of the interrupts in _intr_1 are "host copy engine" | ||
1558 | * related, which is not supported. For now just make them | ||
1559 | * channel fatal. | ||
1560 | */ | ||
1561 | nvgpu_err(g, "hce err: pbdma_intr_1(%d):0x%08x", | ||
1562 | pbdma_id, pbdma_intr_1); | ||
1563 | *handled |= pbdma_intr_1; | ||
1564 | } | ||
1565 | |||
1566 | return rc_type; | ||
1567 | } | ||
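| ||| 
| /* | ||
|  * Illustrative sketch (not in the original code): per the CTXNOTVALID | ||
|  * comment above, SW can read METHOD0 and CHANNEL_STATUS to decide between | ||
|  * lazy context creation and destroying the TSG. This helper is | ||
|  * hypothetical, only dumps the raw registers, and assumes the | ||
|  * pbdma_method0_r() accessor from the hw_pbdma header. | ||
|  */ | ||
| static inline void gv11b_fifo_dump_ctxnotvalid_state(struct gk20a *g, | ||
| u32 pbdma_id, u32 chid) | ||
| { | ||
| u32 method0 = gk20a_readl(g, pbdma_method0_r(pbdma_id)); | ||
| u32 status = gk20a_readl(g, ccsr_channel_r(chid)); | ||
| ||| 
| /* decoding the VALID/TARGET fields is left to the hw accessors */ | ||
| nvgpu_err(g, "ctxnotvalid: method0=0x%08x ccsr=0x%08x", | ||
| method0, status); | ||
| } | ||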
1568 | |||
1569 | static void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g, | ||
1570 | struct channel_gk20a *ch, struct nvgpu_mem *mem) | ||
1571 | { | ||
1572 | struct tsg_gk20a *tsg; | ||
1573 | struct nvgpu_mem *method_buffer_per_runque; | ||
1574 | |||
1575 | tsg = tsg_gk20a_from_ch(ch); | ||
1576 | if (tsg == NULL) { | ||
1577 | nvgpu_err(g, "channel is not part of tsg"); | ||
1578 | return; | ||
1579 | } | ||
1580 | if (tsg->eng_method_buffers == NULL) { | ||
1581 | nvgpu_log_info(g, "eng method buffer NULL"); | ||
1582 | return; | ||
1583 | } | ||
1584 | if (tsg->runlist_id == gk20a_fifo_get_fast_ce_runlist_id(g)) | ||
1585 | method_buffer_per_runque = | ||
1586 | &tsg->eng_method_buffers[ASYNC_CE_RUNQUE]; | ||
1587 | else | ||
1588 | method_buffer_per_runque = | ||
1589 | &tsg->eng_method_buffers[GR_RUNQUE]; | ||
1590 | |||
1591 | nvgpu_mem_wr32(g, mem, ram_in_eng_method_buffer_addr_lo_w(), | ||
1592 | u64_lo32(method_buffer_per_runque->gpu_va)); | ||
1593 | nvgpu_mem_wr32(g, mem, ram_in_eng_method_buffer_addr_hi_w(), | ||
1594 | u64_hi32(method_buffer_per_runque->gpu_va)); | ||
1595 | |||
1596 | nvgpu_log_info(g, "init ramfc with method buffer"); | ||
1597 | } | ||
1598 | |||
1599 | unsigned int gv11b_fifo_get_eng_method_buffer_size(struct gk20a *g) | ||
1600 | { | ||
1601 | unsigned int buffer_size; | ||
1602 | |||
| /* | ||
|  * Worst-case CE method buffer sizing. The constants come from the HW | ||
|  * CE method buffer format (per-PCE method count plus header, scaled to | ||
|  * bytes); treat them as opaque and keep them in sync with the HW docs. | ||
|  */ | ||
1603 | buffer_size = ((9 + 1 + 3) * g->ops.ce2.get_num_pce(g)) + 2; | ||
1604 | buffer_size = (27 * 5 * buffer_size); | ||
1605 | buffer_size = roundup(buffer_size, PAGE_SIZE); | ||
1606 | nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size); | ||
1607 | |||
1608 | return buffer_size; | ||
1609 | } | ||
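| ||| 
| /* | ||
|  * Worked example for the sizing above (hypothetical configuration with | ||
|  * get_num_pce() == 2 and 4K pages): | ||
|  * ((9 + 1 + 3) * 2) + 2 = 28 | ||
|  * 27 * 5 * 28 = 3780 bytes | ||
|  * roundup(3780, 4096) = 4096, i.e. one page per runqueue buffer. | ||
|  */ | ||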
1610 | |||
1611 | void gv11b_fifo_init_eng_method_buffers(struct gk20a *g, | ||
1612 | struct tsg_gk20a *tsg) | ||
1613 | { | ||
1614 | struct vm_gk20a *vm = g->mm.bar2.vm; | ||
1615 | int err = 0; | ||
1616 | int i; | ||
1617 | unsigned int runque, method_buffer_size; | ||
1618 | unsigned int num_pbdma = g->fifo.num_pbdma; | ||
1619 | |||
1620 | if (tsg->eng_method_buffers != NULL) | ||
1621 | return; | ||
1622 | |||
1623 | method_buffer_size = gv11b_fifo_get_eng_method_buffer_size(g); | ||
1624 | if (method_buffer_size == 0) { | ||
1625 | nvgpu_info(g, "ce will hit MTHD_BUFFER_FAULT"); | ||
1626 | return; | ||
1627 | } | ||
1628 | |||
1629 | tsg->eng_method_buffers = nvgpu_kzalloc(g, | ||
1630 | num_pbdma * sizeof(struct nvgpu_mem)); | ||
| if (tsg->eng_method_buffers == NULL) { | ||
| nvgpu_err(g, "could not alloc eng method buffer array"); | ||
| return; | ||
| } | ||
1631 | |||
1632 | for (runque = 0; runque < num_pbdma; runque++) { | ||
1633 | err = nvgpu_dma_alloc_map_sys(vm, method_buffer_size, | ||
1634 | &tsg->eng_method_buffers[runque]); | ||
1635 | if (err) | ||
1636 | break; | ||
1637 | } | ||
1638 | if (err) { | ||
1639 | for (i = (runque - 1); i >= 0; i--) | ||
1640 | nvgpu_dma_unmap_free(vm, | ||
1641 | &tsg->eng_method_buffers[i]); | ||
1642 | |||
1643 | nvgpu_kfree(g, tsg->eng_method_buffers); | ||
1644 | tsg->eng_method_buffers = NULL; | ||
1645 | nvgpu_err(g, "could not alloc eng method buffers"); | ||
1646 | return; | ||
1647 | } | ||
1648 | nvgpu_log_info(g, "eng method buffers allocated"); | ||
1650 | } | ||
1651 | |||
1652 | void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g, | ||
1653 | struct tsg_gk20a *tsg) | ||
1654 | { | ||
1655 | struct vm_gk20a *vm = g->mm.bar2.vm; | ||
1656 | unsigned int runque; | ||
1657 | |||
1658 | if (tsg->eng_method_buffers == NULL) | ||
1659 | return; | ||
1660 | |||
1661 | for (runque = 0; runque < g->fifo.num_pbdma; runque++) | ||
1662 | nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]); | ||
1663 | |||
1664 | nvgpu_kfree(g, tsg->eng_method_buffers); | ||
1665 | tsg->eng_method_buffers = NULL; | ||
1666 | |||
1667 | nvgpu_log_info(g, "eng method buffers de-allocated"); | ||
1668 | } | ||
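| ||| 
| /* | ||
|  * Usage sketch (illustrative): the init/deinit helpers above are | ||
|  * expected to be paired over a TSG's lifetime, e.g. from the TSG open | ||
|  * and release paths: | ||
|  * | ||
|  * gv11b_fifo_init_eng_method_buffers(g, tsg); | ||
|  * ... | ||
|  * gv11b_fifo_deinit_eng_method_buffers(g, tsg); | ||
|  * | ||
|  * Both are safe to call twice: init returns early when the buffers | ||
|  * already exist, deinit returns early when they are already freed. | ||
|  */ | ||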
1669 | |||
1670 | #ifdef CONFIG_TEGRA_GK20A_NVHOST | ||
1671 | int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c, | ||
1672 | u32 syncpt_id, struct nvgpu_mem *syncpt_buf) | ||
1673 | { | ||
1674 | u32 nr_pages; | ||
1675 | int err = 0; | ||
1676 | struct gk20a *g = c->g; | ||
1677 | struct vm_gk20a *vm = c->vm; | ||
1678 | |||
1679 | /* | ||
1680 | * Add a RO map for the complete sync point shim range in the vm. | ||
1681 | * All channels sharing the same vm share the same RO mapping. | ||
1682 | * Create a RW map for the current channel's sync point. | ||
1683 | */ | ||
1684 | if (!vm->syncpt_ro_map_gpu_va) { | ||
1685 | vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(c->vm, | ||
1686 | &g->syncpt_mem, g->syncpt_unit_size, | ||
1687 | 0, gk20a_mem_flag_read_only, | ||
1688 | false, APERTURE_SYSMEM); | ||
1689 | |||
1690 | if (!vm->syncpt_ro_map_gpu_va) { | ||
1691 | nvgpu_err(g, "failed to ro map syncpt buffer"); | ||
1692 | nvgpu_dma_free(g, &g->syncpt_mem); | ||
| /* without the RO map the shim is unusable; do not go on | ||
|  * to create the RW mapping below */ | ||
1693 | return -ENOMEM; | ||
1694 | } | ||
1695 | } | ||
1696 | |||
1697 | nr_pages = DIV_ROUND_UP(g->syncpt_size, PAGE_SIZE); | ||
1698 | __nvgpu_mem_create_from_phys(g, syncpt_buf, | ||
1699 | (g->syncpt_unit_base + | ||
1700 | nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id)), | ||
1701 | nr_pages); | ||
1702 | syncpt_buf->gpu_va = nvgpu_gmmu_map(c->vm, syncpt_buf, | ||
1703 | g->syncpt_size, 0, gk20a_mem_flag_none, | ||
1704 | false, APERTURE_SYSMEM); | ||
1705 | |||
1706 | if (!syncpt_buf->gpu_va) { | ||
1707 | nvgpu_err(g, "failed to map syncpt buffer"); | ||
1708 | nvgpu_dma_free(g, syncpt_buf); | ||
1709 | err = -ENOMEM; | ||
1710 | } | ||
1711 | return err; | ||
1712 | } | ||
1713 | |||
1714 | void gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c, | ||
1715 | struct nvgpu_mem *syncpt_buf) | ||
1716 | { | ||
1717 | nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va); | ||
1718 | nvgpu_dma_free(c->g, syncpt_buf); | ||
1719 | } | ||
1720 | |||
1721 | void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g, | ||
1722 | struct priv_cmd_entry *cmd, u32 off, | ||
1723 | u32 id, u32 thresh, u64 gpu_va_base) | ||
1724 | { | ||
1725 | u64 gpu_va = gpu_va_base + | ||
1726 | nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id); | ||
1727 | |||
1728 | gk20a_dbg_fn(""); | ||
1729 | |||
1730 | off = cmd->off + off; | ||
1731 | |||
1732 | /* semaphore_a */ | ||
1733 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); | ||
1734 | nvgpu_mem_wr32(g, cmd->mem, off++, | ||
1735 | (gpu_va >> 32) & 0xff); | ||
1736 | /* semaphore_b */ | ||
1737 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005); | ||
1738 | /* offset */ | ||
1739 | nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff); | ||
1740 | |||
1741 | /* semaphore_c */ | ||
1742 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006); | ||
1743 | /* payload */ | ||
1744 | nvgpu_mem_wr32(g, cmd->mem, off++, thresh); | ||
1745 | /* semaphore_d */ | ||
1746 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007); | ||
1747 | /* operation: acq_geq, switch_en */ | ||
1748 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12)); | ||
1749 | } | ||
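| ||| 
| /* | ||
|  * Resulting command stream from the writes above, 8 dwords starting at | ||
|  * cmd->off + off: | ||
|  * [0] 0x20010004 semaphore_a [1] gpu_va[39:32] | ||
|  * [2] 0x20010005 semaphore_b [3] gpu_va[31:0] | ||
|  * [4] 0x20010006 semaphore_c [5] thresh (payload) | ||
|  * [6] 0x20010007 semaphore_d [7] acq_geq | switch_en | ||
|  * This is why gv11b_fifo_get_syncpt_wait_cmd_size() below returns 8. | ||
|  */ | ||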
1750 | |||
1751 | u32 gv11b_fifo_get_syncpt_wait_cmd_size(void) | ||
1752 | { | ||
1753 | return 8; | ||
1754 | } | ||
1755 | |||
1756 | void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g, | ||
1757 | bool wfi_cmd, struct priv_cmd_entry *cmd, | ||
1758 | u32 id, u64 gpu_va) | ||
1759 | { | ||
1760 | u32 off = cmd->off; | ||
1761 | |||
1762 | gk20a_dbg_fn(""); | ||
1763 | |||
1764 | /* semaphore_a */ | ||
1765 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); | ||
1766 | nvgpu_mem_wr32(g, cmd->mem, off++, | ||
1767 | (gpu_va >> 32) & 0xff); | ||
1768 | /* semaphore_b */ | ||
1769 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005); | ||
1770 | /* offset */ | ||
1771 | nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff); | ||
1772 | |||
1773 | /* semaphore_c */ | ||
1774 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006); | ||
1775 | /* payload */ | ||
1776 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x0); | ||
1777 | /* semaphore_d */ | ||
1778 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007); | ||
1779 | |||
1780 | /* operation: release; bit 20 skips the wfi when wfi_cmd is false */ | ||
1781 | nvgpu_mem_wr32(g, cmd->mem, off++, | ||
1782 | 0x2 | ((wfi_cmd ? 0x0 : 0x1) << 20)); | ||
1783 | /* ignored */ | ||
1784 | nvgpu_mem_wr32(g, cmd->mem, off++, 0); | ||
1785 | } | ||
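| ||| 
| /* | ||
|  * The release command above is always 9 dwords: the same semaphore_a..d | ||
|  * sequence as the wait command plus one trailing ignored dword, with the | ||
|  * wfi choice encoded in bit 20 of the operation dword rather than by | ||
|  * adding or removing methods. That is why | ||
|  * gv11b_fifo_get_syncpt_incr_cmd_size() ignores its wfi_cmd argument. | ||
|  */ | ||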
1786 | |||
1787 | u32 gv11b_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd) | ||
1788 | { | ||
1789 | return 9; | ||
1790 | } | ||
1791 | #endif /* CONFIG_TEGRA_GK20A_NVHOST */ | ||
1792 | |||
1793 | int gv11b_init_fifo_setup_hw(struct gk20a *g) | ||
1794 | { | ||
1795 | struct fifo_gk20a *f = &g->fifo; | ||
1796 | |||
1797 | f->t19x.max_subctx_count = | ||
1798 | gr_pri_fe_chip_def_info_max_veid_count_init_v(); | ||
1799 | return 0; | ||
1800 | } | ||
1801 | |||
1802 | static u32 gv11b_mmu_fault_id_to_gr_veid(struct gk20a *g, u32 gr_eng_fault_id, | ||
1803 | u32 mmu_fault_id) | ||
1804 | { | ||
1805 | struct fifo_gk20a *f = &g->fifo; | ||
1806 | u32 num_subctx; | ||
1807 | u32 veid = FIFO_INVAL_VEID; | ||
1808 | |||
1809 | num_subctx = f->t19x.max_subctx_count; | ||
1810 | |||
1811 | if (mmu_fault_id >= gr_eng_fault_id && | ||
1812 | mmu_fault_id < (gr_eng_fault_id + num_subctx)) | ||
1813 | veid = mmu_fault_id - gr_eng_fault_id; | ||
1814 | |||
1815 | return veid; | ||
1816 | } | ||
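| ||| 
| /* | ||
|  * Example with hypothetical values: if gr_eng_fault_id == 0x100 and | ||
|  * max_subctx_count == 64, an mmu_fault_id of 0x105 falls inside | ||
|  * [0x100, 0x140) and decodes to veid 5; any id outside that window | ||
|  * returns FIFO_INVAL_VEID. | ||
|  */ | ||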
1817 | |||
1818 | static u32 gv11b_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g, | ||
1819 | u32 mmu_fault_id, u32 *veid) | ||
1820 | { | ||
1821 | u32 engine_id; | ||
1822 | u32 active_engine_id; | ||
1823 | struct fifo_engine_info_gk20a *engine_info; | ||
1824 | struct fifo_gk20a *f = &g->fifo; | ||
1825 | |||
1827 | for (engine_id = 0; engine_id < f->num_engines; engine_id++) { | ||
1828 | active_engine_id = f->active_engines_list[engine_id]; | ||
1829 | engine_info = &g->fifo.engine_info[active_engine_id]; | ||
1830 | |||
1831 | if (active_engine_id == ENGINE_GR_GK20A) { | ||
1832 | /* get faulted subctx id */ | ||
1833 | *veid = gv11b_mmu_fault_id_to_gr_veid(g, | ||
1834 | engine_info->fault_id, mmu_fault_id); | ||
1835 | if (*veid != FIFO_INVAL_VEID) | ||
1836 | break; | ||
1837 | } else { | ||
1838 | if (engine_info->fault_id == mmu_fault_id) | ||
1839 | break; | ||
1840 | } | ||
1841 | |||
1842 | active_engine_id = FIFO_INVAL_ENGINE_ID; | ||
1843 | } | ||
1844 | return active_engine_id; | ||
1845 | } | ||
1846 | |||
1847 | static u32 gv11b_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id) | ||
1848 | { | ||
1849 | u32 num_pbdma, reg_val, fault_id_pbdma0; | ||
1850 | |||
1851 | reg_val = gk20a_readl(g, fifo_cfg0_r()); | ||
1852 | num_pbdma = fifo_cfg0_num_pbdma_v(reg_val); | ||
1853 | fault_id_pbdma0 = fifo_cfg0_pbdma_fault_id_v(reg_val); | ||
1854 | |||
1855 | if (mmu_fault_id >= fault_id_pbdma0 && | ||
1856 | mmu_fault_id <= fault_id_pbdma0 + num_pbdma - 1) | ||
1857 | return mmu_fault_id - fault_id_pbdma0; | ||
1858 | |||
1859 | return FIFO_INVAL_PBDMA_ID; | ||
1860 | } | ||
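| ||| 
| /* | ||
|  * Example with hypothetical values: if FIFO_CFG0 reports | ||
|  * pbdma_fault_id == 0x80 and num_pbdma == 3, then mmu_fault_id 0x81 | ||
|  * maps to pbdma_id 1, while ids outside [0x80, 0x83) yield | ||
|  * FIFO_INVAL_PBDMA_ID. | ||
|  */ | ||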
1861 | |||
1862 | void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g, | ||
1863 | u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id) | ||
1864 | { | ||
1865 | *active_engine_id = gv11b_mmu_fault_id_to_eng_id_and_veid(g, | ||
1866 | mmu_fault_id, veid); | ||
1867 | |||
1868 | if (*active_engine_id == FIFO_INVAL_ENGINE_ID) | ||
1869 | *pbdma_id = gv11b_mmu_fault_id_to_pbdma_id(g, mmu_fault_id); | ||
1870 | else | ||
1871 | *pbdma_id = FIFO_INVAL_PBDMA_ID; | ||
1872 | } | ||
1873 | |||
1874 | static bool gk20a_fifo_channel_status_is_eng_faulted(struct gk20a *g, u32 chid) | ||
1875 | { | ||
1876 | u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); | ||
1877 | |||
1878 | return ccsr_channel_eng_faulted_v(channel) == | ||
1879 | ccsr_channel_eng_faulted_true_v(); | ||
1880 | } | ||
1881 | |||
1882 | void gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch) | ||
1883 | { | ||
1884 | struct gk20a *g = ch->g; | ||
1885 | struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid]; | ||
1886 | |||
1887 | /* | ||
1888 | * If the channel has ENG_FAULTED set, clear the CE method buffer | ||
1889 | * when the saved-out channel is the same as the faulted channel. | ||
1890 | */ | ||
1891 | if (!gk20a_fifo_channel_status_is_eng_faulted(g, ch->chid)) | ||
1892 | return; | ||
1893 | |||
1894 | if (tsg->eng_method_buffers == NULL) | ||
1895 | return; | ||
1896 | |||
1897 | /* | ||
1898 | * CE method buffer format: | ||
1899 | * DWord0 = method count | ||
1900 | * DWord1 = channel id | ||
1901 | * | ||
1902 | * It is sufficient to write 0 to the method count to invalidate it. | ||
1903 | */ | ||
1904 | if ((u32)ch->chid == | ||
1905 | nvgpu_mem_rd32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 1)) | ||
1906 | nvgpu_mem_wr32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 0, 0); | ||
1907 | } | ||