aboutsummaryrefslogtreecommitdiffstats
path: root/include/gk20a/fifo_gk20a.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/gk20a/fifo_gk20a.h')
-rw-r--r--include/gk20a/fifo_gk20a.h471
1 files changed, 471 insertions, 0 deletions
diff --git a/include/gk20a/fifo_gk20a.h b/include/gk20a/fifo_gk20a.h
new file mode 100644
index 0000000..26365ca
--- /dev/null
+++ b/include/gk20a/fifo_gk20a.h
@@ -0,0 +1,471 @@
1/*
2 * GK20A graphics fifo (gr host)
3 *
4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef FIFO_GK20A_H
25#define FIFO_GK20A_H
26
27#include <nvgpu/kref.h>
28
29struct gk20a_debug_output;
30struct mmu_fault_info;
31struct nvgpu_semaphore;
32struct channel_gk20a;
33struct tsg_gk20a;
34
/*
 * Runlist interleave levels, ordered low to high.  The trailing
 * _NUM_LEVELS entry is the count of valid levels, not a level itself
 * (see gk20a_fifo_set_runlist_interleave() / interleave_level_name()).
 */
enum {
	NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW = 0,
	NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM,
	NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH,
	NVGPU_FIFO_RUNLIST_INTERLEAVE_NUM_LEVELS,
};
41
/*
 * Buffers per runlist; fifo_runlist_info_gk20a keeps mem[] of this
 * size with cur_buffer selecting the one currently submitted.
 */
#define MAX_RUNLIST_BUFFERS 2

/* Invalid-ID sentinels: all bits set. */
#define FIFO_INVAL_ENGINE_ID ((u32)~0)
#define FIFO_INVAL_CHANNEL_ID ((u32)~0)
#define FIFO_INVAL_TSG_ID ((u32)~0)
#define FIFO_INVAL_RUNLIST_ID ((u32)~0)

/*
 * Values for "id_type" parameters (e.g. gk20a_fifo_is_preempt_pending(),
 * gk20a_fifo_teardown_ch_tsg()): whether an id names a bare channel
 * or a TSG.
 */
#define ID_TYPE_CHANNEL 0
#define ID_TYPE_TSG 1
#define ID_TYPE_UNKNOWN ((u32)~0)

/* Whether recovery should be run for a detected fault. */
#define RC_YES 1
#define RC_NO 0

/* gr host timeout check period, in microseconds. */
#define GRFIFO_TIMEOUT_CHECK_PERIOD_US 100000

/* Recovery reason codes ("rc_type" parameters in the recover/teardown API). */
#define RC_TYPE_NO_RC 0
#define RC_TYPE_MMU_FAULT 1
#define RC_TYPE_PBDMA_FAULT 2
#define RC_TYPE_GR_FAULT 3
#define RC_TYPE_PREEMPT_TIMEOUT 4
#define RC_TYPE_CTXSW_TIMEOUT 5
#define RC_TYPE_RUNLIST_UPDATE_TIMEOUT 6
#define RC_TYPE_FORCE_RESET 7
#define RC_TYPE_SCHED_ERR 8

/* Default runlist timeslice parameters (timeout value and scale factor). */
#define NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT 128UL
#define NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE 3UL

/*
 * Number of entries in the kickoff latency buffer, used to calculate
 * the profiling and histogram. This number is chosen to be statistically
 * significant on a histogram with a 5% step
 */
#ifdef CONFIG_DEBUG_FS
#define FIFO_PROFILING_ENTRIES 16384
#endif

/* Runlist scheduling states (see gk20a_fifo_set_runlist_state()). */
#define RUNLIST_DISABLED 0
#define RUNLIST_ENABLED 1
82
83/* generally corresponds to the "pbdma" engine */
84
/*
 * Per-runlist bookkeeping: which channels/TSGs are active on it, the
 * runlist buffers handed to hardware, and the engines/PBDMAs it serves.
 * active_channels/active_tsgs appear to be kernel-style unsigned-long
 * bitmaps (one bit per chid/tsgid) — confirm against the allocator.
 */
struct fifo_runlist_info_gk20a {
	unsigned long *active_channels;
	unsigned long *active_tsgs;
	/* Each engine has its own SW and HW runlist buffer.*/
	struct nvgpu_mem mem[MAX_RUNLIST_BUFFERS];
	u32 cur_buffer;		/* index into mem[] currently submitted */
	u32 total_entries;
	u32 pbdma_bitmask;      /* pbdmas supported for this runlist */
	u32 eng_bitmask;        /* engines using this runlist */
	u32 reset_eng_bitmask;  /* engines to be reset during recovery */
	u32 count;              /* cached runlist_hw_submit parameter */
	bool stopped;
	bool support_tsg;
	/* protect ch/tsg/runlist preempt & runlist update */
	struct nvgpu_mutex runlist_lock;
};
101
/*
 * Engine classes, stored in fifo_engine_info_gk20a::engine_enum and
 * returned by gk20a_fifo_engine_enum_from_type().
 */
enum {
	ENGINE_GR_GK20A = 0U,		/* graphics */
	ENGINE_GRCE_GK20A = 1U,		/* GR copy engine */
	ENGINE_ASYNC_CE_GK20A = 2U,	/* asynchronous copy engine */
	ENGINE_INVAL_GK20A = 3U,	/* invalid/unknown */
};
108
/*
 * Decoded snapshot of a PBDMA's channel status register, presumably
 * filled in by the PBDMA exception path (verify against the .c users).
 */
struct fifo_pbdma_exception_info_gk20a {
	u32 status_r; /* raw register value from hardware */
	u32 id, next_id;		/* current and next scheduled ids */
	u32 chan_status_v; /* raw value from hardware */
	bool id_is_chid, next_id_is_chid; /* true: id is a chid; false: a tsgid */
	bool chsw_in_progress;		/* channel switch in progress */
};
116
/*
 * Decoded snapshot of an engine's context status register, presumably
 * filled in by the engine exception path (verify against the .c users).
 */
struct fifo_engine_exception_info_gk20a {
	u32 status_r; /* raw register value from hardware */
	u32 id, next_id;		/* current and next scheduled ids */
	u32 ctx_status_v; /* raw value from hardware */
	bool id_is_chid, next_id_is_chid; /* true: id is a chid; false: a tsgid */
	bool faulted, idle, ctxsw_in_progress;
};
124
/*
 * Static description of one host engine plus the most recently
 * captured exception state for it (one entry per active engine in
 * fifo_gk20a::engine_info).
 */
struct fifo_engine_info_gk20a {
	u32 engine_id;
	u32 runlist_id;		/* runlist feeding this engine */
	u32 intr_mask;		/* interrupt bit(s) for this engine */
	u32 reset_mask;		/* reset bit(s) for this engine */
	u32 pbdma_id;		/* pbdma serving this engine */
	u32 inst_id;		/* instance within its engine class */
	u32 pri_base;		/* PRI register base — TODO confirm units */
	u32 fault_id;		/* MMU fault id associated with the engine */
	u32 engine_enum;	/* one of ENGINE_*_GK20A above */
	struct fifo_pbdma_exception_info_gk20a pbdma_exception_info;
	struct fifo_engine_exception_info_gk20a engine_exception_info;
};
138
/*
 * Indices into fifo_profile_gk20a::timestamp, one per stage of a
 * submit for kickoff latency profiling.  PROFILE_MAX is the count.
 */
enum {
	PROFILE_IOCTL_ENTRY = 0U,
	PROFILE_ENTRY,
	PROFILE_JOB_TRACKING,
	PROFILE_APPEND,
	PROFILE_END,
	PROFILE_IOCTL_EXIT,
	PROFILE_MAX
};
148
/* One profiling sample: a timestamp for each PROFILE_* stage. */
struct fifo_profile_gk20a {
	u64 timestamp[PROFILE_MAX];
};
152
/*
 * Top-level FIFO (gr host) unit state, one instance per GPU.
 * Owns the channel and TSG tables, per-runlist state, the USERD
 * area, and bookkeeping shared between ISRs and non-ISR code.
 */
struct fifo_gk20a {
	struct gk20a *g;		/* back-pointer to the GPU device */
	unsigned int num_channels;	/* size of the channel[] table */
	unsigned int runlist_entry_size;
	unsigned int num_runlist_entries;

	unsigned int num_pbdma;
	u32 *pbdma_map;			/* per-pbdma mapping info */

	struct fifo_engine_info_gk20a *engine_info;
	u32 max_engines;
	u32 num_engines;
	u32 *active_engines_list;	/* ids of the num_engines active engines */

	struct fifo_runlist_info_gk20a *runlist_info;
	u32 max_runlists;
#ifdef CONFIG_DEBUG_FS
	/* kickoff latency profiling state (see PROFILE_* above) */
	struct {
		struct fifo_profile_gk20a *data;	/* sample ring buffer */
		nvgpu_atomic_t get;			/* next sample slot */
		bool enabled;
		u64 *sorted;				/* for percentile output */
		struct nvgpu_ref ref;
		struct nvgpu_mutex lock;
	} profile;
#endif
	struct nvgpu_mem userd;		/* USERD backing memory for all channels */
	u32 userd_entry_size;

	unsigned int used_channels;
	struct channel_gk20a *channel;	/* channel table, num_channels entries */
	/* zero-kref'd channels here */
	struct nvgpu_list_node free_chs;
	struct nvgpu_mutex free_chs_mutex;
	struct nvgpu_mutex engines_reset_mutex;

	struct tsg_gk20a *tsg;		/* TSG table */
	struct nvgpu_mutex tsg_inuse_mutex;

	void (*remove_support)(struct fifo_gk20a *);	/* teardown hook */
	bool sw_ready;			/* SW setup completed */
	struct {
		/* share info between isrs and non-isr code */
		struct {
			struct nvgpu_mutex mutex;
		} isr;
		struct {
			/* cached PBDMA interrupt masks by severity */
			u32 device_fatal_0;
			u32 channel_fatal_0;
			u32 restartable_0;
		} pbdma;
		struct {
			/* NOTE(review): intentionally empty placeholder
			 * (GNU zero-size struct extension) */
		} engine;


	} intr;

	/* engines whose reset was deferred during fault handling */
	unsigned long deferred_fault_engines;
	bool deferred_reset_pending;
	struct nvgpu_mutex deferred_reset_mutex;

	u32 max_subctx_count;
	u32 channel_base;		/* first chid managed by this instance */
};
218
219struct ch_state {
220 int pid;
221 int refs;
222 bool deterministic;
223 u32 inst_block[0];
224};
225
/* One-time FIFO unit initialization (SW state, then HW programming). */
int gk20a_init_fifo_support(struct gk20a *g);

int gk20a_init_fifo_setup_hw(struct gk20a *g);

/* Stalling and non-stalling FIFO interrupt service routines. */
void gk20a_fifo_isr(struct gk20a *g);
u32 gk20a_fifo_nonstall_isr(struct gk20a *g);

/* Preempt a channel or TSG off its runlist. */
int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);

/* Enable/disable scheduling of work to individual or all engines. */
int gk20a_fifo_enable_engine_activity(struct gk20a *g,
			struct fifo_engine_info_gk20a *eng_info);
int gk20a_fifo_enable_all_engine_activity(struct gk20a *g);
int gk20a_fifo_disable_engine_activity(struct gk20a *g,
			struct fifo_engine_info_gk20a *eng_info,
			bool wait_for_idle);
int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
				bool wait_for_idle);
/* Enable/disable scheduling of one TSG. */
void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);
void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);
247
/* Mask of engines a given channel is loaded on. */
u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);

/* Resubmit a channel's runlist, optionally preempting the next entry. */
int gk20a_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next);
int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
		bool wait_preempt);

/* Add/remove a channel to/from a runlist and resubmit it to HW. */
int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 chid,
			      bool add, bool wait_for_finish);

/* As above, with the runlist_lock already held by the caller. */
int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
					    u32 chid, bool add,
					    bool wait_for_finish);
int gk20a_fifo_suspend(struct gk20a *g);

bool gk20a_fifo_mmu_fault_pending(struct gk20a *g);

/*
 * Recovery entry points: tear down faulted channels/TSGs and reset the
 * affected engines.  rc_type is one of RC_TYPE_*.
 */
void gk20a_fifo_recover(struct gk20a *g,
			u32 engine_ids, /* if zero, will be queried from HW */
			u32 hw_id, /* if ~0, will be queried from HW */
			bool id_is_tsg, /* ignored if hw_id == ~0 */
			bool id_is_known, bool verbose, int rc_type);
void gk20a_fifo_recover_ch(struct gk20a *g, struct channel_gk20a *ch,
			   bool verbose, u32 rc_type);
void gk20a_fifo_recover_tsg(struct gk20a *g, struct tsg_gk20a *tsg,
			    bool verbose, u32 rc_type);
int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
			      u32 err_code, bool verbose);
void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id);
int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch);

/* MMU fault handling helpers. */
void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
		unsigned long fault_id);
int gk20a_fifo_wait_engine_idle(struct gk20a *g);
bool gk20a_fifo_is_engine_busy(struct gk20a *g);
u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g);
u32 gk20a_fifo_act_eng_interrupt_mask(struct gk20a *g, u32 act_eng_id);
u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g);
u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
		int *__id, bool *__is_tsg);
/* Mark MMU-fault error state / abort on a TSG or channel. */
void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
		struct tsg_gk20a *tsg);
void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt);
void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
		struct channel_gk20a *refch);
bool gk20a_fifo_error_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
bool gk20a_fifo_error_ch(struct gk20a *g, struct channel_gk20a *refch);
295
/* Write the preempt request register for a channel (is_tsg=false) or TSG. */
void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg);
/* new_level is one of NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_*. */
int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
				u32 id,
				u32 runlist_id,
				u32 new_level);
int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);

const char *gk20a_fifo_interleave_level_name(u32 interleave_level);

/* Map a HW engine type to an ENGINE_*_GK20A enum (and instance id). */
int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
		u32 *inst_id);

/* Collect ids of active engines matching engine_enum; returns the count. */
u32 gk20a_fifo_get_engine_ids(struct gk20a *g, u32 engine_id[],
		u32 engine_id_sz, u32 engine_enum);

void gk20a_fifo_delete_runlist(struct fifo_gk20a *f);

/* NULL if engine_id is not a valid active engine. */
struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g,
			u32 engine_id);

bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id);

u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g);

/* Perform engine resets that were deferred during fault handling. */
int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch);

u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g);

u32 gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g);

u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g);

bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);

/* Like gk20a_fifo_update_runlist() over a mask of runlist ids. */
int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
		bool add, bool wait_for_finish);

int gk20a_fifo_init_engine_info(struct fifo_gk20a *f);

/* Encode a TSG/channel into raw runlist entry words at *runlist. */
void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist);
void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist);
/* runlist_state is RUNLIST_ENABLED or RUNLIST_DISABLED. */
void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
		u32 runlist_state);

/* USERD accessors for a channel's GP_GET/GP_PUT/PB_GET pointers. */
u32 gk20a_fifo_userd_gp_get(struct gk20a *g, struct channel_gk20a *c);
void gk20a_fifo_userd_gp_put(struct gk20a *g, struct channel_gk20a *c);
u64 gk20a_fifo_userd_pb_get(struct gk20a *g, struct channel_gk20a *c);

bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid);
#ifdef CONFIG_DEBUG_FS
/* Kickoff-latency profiling hooks (debugfs builds only). */
struct fifo_profile_gk20a *gk20a_fifo_profile_acquire(struct gk20a *g);
void gk20a_fifo_profile_release(struct gk20a *g,
	struct fifo_profile_gk20a *profile);
void gk20a_fifo_profile_snapshot(struct fifo_profile_gk20a *profile, int idx);
#else
/* No-op stubs so callers need no #ifdefs of their own. */
static inline struct fifo_profile_gk20a *
gk20a_fifo_profile_acquire(struct gk20a *g)
{
	return NULL;
}
static inline void gk20a_fifo_profile_release(struct gk20a *g,
	struct fifo_profile_gk20a *profile)
{
}
static inline void gk20a_fifo_profile_snapshot(
	struct fifo_profile_gk20a *profile, int idx)
{
}
#endif
365
/* Debug dump helpers: print channel/PBDMA/engine status to an output. */
void gk20a_dump_channel_status_ramfc(struct gk20a *g,
				     struct gk20a_debug_output *o,
				     u32 chid,
				     struct ch_state *ch_state);
void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
		 struct gk20a_debug_output *o);
void gk20a_dump_pbdma_status(struct gk20a *g,
		 struct gk20a_debug_output *o);
void gk20a_dump_eng_status(struct gk20a *g,
		 struct gk20a_debug_output *o);
/* Human-readable names for raw status field values. */
const char *gk20a_decode_ccsr_chan_status(u32 index);
const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index);
void gk20a_fifo_enable_channel(struct channel_gk20a *ch);
void gk20a_fifo_disable_channel(struct channel_gk20a *ch);

/* Query raw channel status bits. */
bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid);
bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid);
int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch);

/* Look up (and reference) the channel owning an instance block pointer. */
struct channel_gk20a *gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr);
void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a);

u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g);

/* Preempt completion polling / timeout recovery; id_type is ID_TYPE_*. */
int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
		unsigned int id_type);
int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch);
/* Program a channel's RAMFC (FIFO context) for the given GPFIFO. */
int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
			u64 gpfifo_base, u32 gpfifo_entries,
			unsigned long timeout, u32 flags);
void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c);
/* Allocate/free a channel's instance block; set up its USERD slot. */
int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch);
void gk20a_fifo_free_inst(struct gk20a *g, struct channel_gk20a *ch);
int gk20a_fifo_setup_userd(struct channel_gk20a *c);
u32 gk20a_fifo_pbdma_acquire_val(u64 timeout);
403
404
/*
 * Build runlist entries (recursively per interleave level) into
 * *runlist_entry; returns the next free entry position, decrementing
 * *entries_left as entries are consumed.
 */
u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
				struct fifo_runlist_info_gk20a *runlist,
				u32 cur_level,
				u32 *runlist_entry,
				bool interleave_enabled,
				bool prev_empty,
				u32 *entries_left);
/* Submit a constructed runlist buffer to HW and wait for it. */
void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
	u32 count, u32 buffer_index);
int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id);
int gk20a_init_fifo_setup_sw_common(struct gk20a *g);
int gk20a_init_fifo_setup_sw(struct gk20a *g);
void gk20a_fifo_handle_runlist_event(struct gk20a *g);
bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
			u32 engine_subid, bool fake_fault);

/* Core teardown: id_type is ID_TYPE_*, rc_type is RC_TYPE_*. */
void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
	u32 hw_id, unsigned int id_type, unsigned int rc_type,
	struct mmu_fault_info *mmfault);

/* Context-switch timeout checks; *verbose/*ms report how to log it. */
bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch,
		bool *verbose, u32 *ms);
bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
		bool *verbose, u32 *ms);
void gk20a_fifo_teardown_mask_intr(struct gk20a *g);
void gk20a_fifo_teardown_unmask_intr(struct gk20a *g);
bool gk20a_fifo_handle_sched_error(struct gk20a *g);

/* PBDMA interrupt handling; returns RC_TYPE_*-style handling verdicts. */
void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id,
			int pbdma_method_index);
unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
			u32 pbdma_intr_0, u32 *handled, u32 *error_notifier);
unsigned int gk20a_fifo_handle_pbdma_intr_1(struct gk20a *g, u32 pbdma_id,
			u32 pbdma_intr_1, u32 *handled, u32 *error_notifier);
u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f,
			u32 pbdma_id, unsigned int rc);

u32 gk20a_fifo_default_timeslice_us(struct gk20a *g);
443
#ifdef CONFIG_TEGRA_GK20A_NVHOST
/*
 * Syncpoint-based synchronization (Tegra nvhost builds only):
 * emit wait/increment methods into a priv cmd buffer.
 */
void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
		struct priv_cmd_entry *cmd, u32 off,
		u32 id, u32 thresh, u64 gpu_va);
u32 gk20a_fifo_get_syncpt_wait_cmd_size(void);
u32 gk20a_fifo_get_syncpt_incr_per_release(void);
void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
		bool wfi_cmd, struct priv_cmd_entry *cmd,
		u32 id, u64 gpu_va);
u32 gk20a_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd);
/* Alloc/free the GPU-mapped buffer backing a syncpoint. */
void gk20a_fifo_free_syncpt_buf(struct channel_gk20a *c,
		struct nvgpu_mem *syncpt_buf);
int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
		u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
#endif
459
/* Decode MMU fault registers into an mmu_fault_info and name its fields. */
void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
	struct mmu_fault_info *mmfault);
void gk20a_fifo_get_mmu_fault_desc(struct mmu_fault_info *mmfault);
void gk20a_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault);
void gk20a_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault);
/* Semaphore-based sync: sizes and emission of acquire/release methods. */
u32 gk20a_fifo_get_sema_wait_cmd_size(void);
u32 gk20a_fifo_get_sema_incr_cmd_size(void);
void gk20a_fifo_add_sema_cmd(struct gk20a *g,
	struct nvgpu_semaphore *s, u64 sema_va,
	struct priv_cmd_entry *cmd,
	u32 off, bool acquire, bool wfi);
471#endif /* FIFO_GK20A_H */