summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.h')
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.h434
1 files changed, 434 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
new file mode 100644
index 00000000..3587ffa8
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -0,0 +1,434 @@
1/*
2 * drivers/video/tegra/host/gk20a/fifo_gk20a.h
3 *
4 * GK20A graphics fifo (gr host)
5 *
6 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26#ifndef __FIFO_GK20A_H__
27#define __FIFO_GK20A_H__
28
29#include "channel_gk20a.h"
30#include "tsg_gk20a.h"
31
32#ifdef CONFIG_TEGRA_19x_GPU
33#include "fifo_t19x.h"
34#endif
35
36#include <nvgpu/kref.h>
37
38struct gk20a_debug_output;
39struct mmu_fault_info;
40
/*
 * Runlist interleave levels, passed as new_level to
 * gk20a_fifo_set_runlist_interleave(). NUM_LEVELS is the count of
 * valid levels, not a level itself.
 */
enum {
	NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW = 0,
	NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM,
	NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH,
	NVGPU_FIFO_RUNLIST_INTERLEAVE_NUM_LEVELS,
};
47
/* Double-buffered runlist: one SW buffer, one HW buffer per runlist. */
#define MAX_RUNLIST_BUFFERS	2

/* Sentinel "invalid id" values (all bits set). */
#define FIFO_INVAL_ENGINE_ID	((u32)~0)
#define FIFO_INVAL_CHANNEL_ID	((u32)~0)
#define FIFO_INVAL_TSG_ID	((u32)~0)

/* Discriminator for hw ids that may name either a channel or a TSG. */
#define ID_TYPE_CHANNEL		0
#define ID_TYPE_TSG		1
#define ID_TYPE_UNKNOWN		((u32)~0)

/*
 * timeout_rc_type values for gk20a_fifo_is_preempt_pending():
 * whether a preempt timeout should trigger recovery (RC) or not.
 */
#define PREEMPT_TIMEOUT_RC	1
#define PREEMPT_TIMEOUT_NORC	0

#define RC_YES			1
#define RC_NO			0

#define GRFIFO_TIMEOUT_CHECK_PERIOD_US	100000

/* rc_type values passed to teardown/recovery paths. */
#define RC_TYPE_NORMAL		0
#define RC_TYPE_MMU_FAULT	1
#define RC_TYPE_PBDMA_FAULT	2
#define RC_TYPE_NO_RC		0xff

#define NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT	128UL
#define NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE	3UL

/*
 * Number of entries in the kickoff latency buffer, used to calculate
 * the profiling and histogram. This number is calculated to be
 * statistically significant on a histogram with a 5% step.
 */
#ifdef CONFIG_DEBUG_FS
#define FIFO_PROFILING_ENTRIES	16384
#endif

/* runlist_state values for gk20a_fifo_set_runlist_state(). */
#define RUNLIST_DISABLED	0
#define RUNLIST_ENABLED		1

/* runlist_mutex_state value for gk20a_fifo_set_runlist_state(). */
#define RUNLIST_INFO_MUTEX_LOCKED	1
87
88/* generally corresponds to the "pbdma" engine */
89
/*
 * Per-runlist bookkeeping: which channels/TSGs are active on it, the
 * runlist buffers submitted to HW, and which pbdmas/engines it serves.
 */
struct fifo_runlist_info_gk20a {
	/* Bitmaps of active ids (presumably indexed by chid/tsgid —
	 * confirm against the runlist construction code). */
	unsigned long *active_channels;
	unsigned long *active_tsgs;
	/* Each engine has its own SW and HW runlist buffer.*/
	struct nvgpu_mem mem[MAX_RUNLIST_BUFFERS];
	u32 cur_buffer;		/* index into mem[] currently in use */
	u32 total_entries;
	u32 pbdma_bitmask;	/* pbdmas supported for this runlist*/
	u32 eng_bitmask;	/* engines using this runlist */
	u32 reset_eng_bitmask;	/* engines to be reset during recovery */
	bool stopped;
	bool support_tsg;
	struct nvgpu_mutex mutex; /* protect channel preempt and runlist update */
};
104
/*
 * Engine classes stored in fifo_engine_info_gk20a::engine_enum
 * (see gk20a_fifo_engine_enum_from_type()).
 */
enum {
	ENGINE_GR_GK20A = 0,		/* graphics */
	ENGINE_GRCE_GK20A = 1,		/* GR copy engine */
	ENGINE_ASYNC_CE_GK20A = 2,	/* asynchronous copy engine */
	ENGINE_INVAL_GK20A
};
111
/* Decoded snapshot of a PBDMA channel-status register at exception time. */
struct fifo_pbdma_exception_info_gk20a {
	u32 status_r; /* raw register value from hardware */
	u32 id, next_id;
	u32 chan_status_v; /* raw value from hardware */
	/* whether id/next_id name a channel (chid) rather than a TSG */
	bool id_is_chid, next_id_is_chid;
	bool chsw_in_progress; /* channel switch in progress */
};
119
/* Decoded snapshot of an engine context-status register at exception time. */
struct fifo_engine_exception_info_gk20a {
	u32 status_r; /* raw register value from hardware */
	u32 id, next_id;
	u32 ctx_status_v; /* raw value from hardware */
	/* whether id/next_id name a channel (chid) rather than a TSG */
	bool id_is_chid, next_id_is_chid;
	bool faulted, idle, ctxsw_in_progress;
};
127
/*
 * Static description of one host engine plus the most recently captured
 * exception state for it and its serving PBDMA.
 */
struct fifo_engine_info_gk20a {
	u32 engine_id;
	u32 runlist_id;		/* runlist serving this engine */
	u32 intr_mask;		/* interrupt bits owned by this engine */
	u32 reset_mask;		/* reset-control bits for this engine */
	u32 pbdma_id;		/* PBDMA serving this engine */
	u32 inst_id;		/* instance within its engine class */
	u32 pri_base;		/* PRI register-space base */
	u32 fault_id;		/* MMU fault id for this engine */
	u32 engine_enum;	/* ENGINE_*_GK20A class */
	struct fifo_pbdma_exception_info_gk20a pbdma_exception_info;
	struct fifo_engine_exception_info_gk20a engine_exception_info;
};
141
/*
 * Stages timestamped during submit profiling; each value indexes
 * fifo_profile_gk20a::timestamp[]. PROFILE_MAX is the array size.
 */
enum {
	PROFILE_IOCTL_ENTRY = 0,
	PROFILE_ENTRY,
	PROFILE_JOB_TRACKING,
	PROFILE_APPEND,
	PROFILE_END,
	PROFILE_IOCTL_EXIT,
	PROFILE_MAX
};
151
/* One profiling sample: a timestamp per PROFILE_* stage. */
struct fifo_profile_gk20a {
	u64 timestamp[PROFILE_MAX];
};
155
/*
 * Top-level FIFO (host) state for one GPU: channel and TSG tables,
 * per-engine and per-runlist info, USERD backing memory, and interrupt
 * bookkeeping.
 */
struct fifo_gk20a {
	struct gk20a *g;		/* back-pointer to owning device */
	unsigned int num_channels;
	unsigned int runlist_entry_size;	/* bytes per runlist entry */
	unsigned int num_runlist_entries;

	unsigned int num_pbdma;
	u32 *pbdma_map;			/* per-pbdma map, num_pbdma entries */

	struct fifo_engine_info_gk20a *engine_info;
	u32 max_engines;
	u32 num_engines;
	u32 *active_engines_list;	/* num_engines valid entries */

	struct fifo_runlist_info_gk20a *runlist_info;
	u32 max_runlists;
#ifdef CONFIG_DEBUG_FS
	/* Kickoff-latency profiling, exposed via debugfs. */
	struct {
		struct fifo_profile_gk20a *data;	/* FIFO_PROFILING_ENTRIES samples */
		nvgpu_atomic_t get;	/* next slot to hand out */
		bool enabled;
		u64 *sorted;		/* scratch for percentile computation — TODO confirm */
		struct nvgpu_ref ref;
		struct nvgpu_mutex lock;
	} profile;
#endif
	struct nvgpu_mem userd;		/* backing store for all channels' USERD */
	u32 userd_entry_size;		/* bytes of USERD per channel */

	unsigned int used_channels;
	struct channel_gk20a *channel;	/* channel table, num_channels entries */
	/* zero-kref'd channels here */
	struct nvgpu_list_node free_chs;
	struct nvgpu_mutex free_chs_mutex;
	struct nvgpu_mutex gr_reset_mutex;

	struct tsg_gk20a *tsg;		/* TSG table */
	struct nvgpu_mutex tsg_inuse_mutex;

	void (*remove_support)(struct fifo_gk20a *);	/* teardown hook */
	bool sw_ready;			/* SW init completed */
	struct {
		/* share info between isrs and non-isr code */
		struct {
			struct nvgpu_mutex mutex;
		} isr;
		/* PBDMA interrupt classification masks. */
		struct {
			u32 device_fatal_0;
			u32 channel_fatal_0;
			u32 restartable_0;
		} pbdma;
		struct {

		} engine;


	} intr;

	/* Engine resets deferred until it is safe to perform them. */
	unsigned long deferred_fault_engines;
	bool deferred_reset_pending;
	struct nvgpu_mutex deferred_reset_mutex;

#ifdef CONFIG_TEGRA_19x_GPU
	struct fifo_t19x t19x;		/* Turing-era chip extensions */
#endif
	u32 channel_base;
};
223
224struct ch_state {
225 int pid;
226 int refs;
227 bool deterministic;
228 u32 inst_block[0];
229};
230
/* --- One-time SW/HW FIFO initialization --- */
int gk20a_init_fifo_support(struct gk20a *g);

int gk20a_init_fifo_setup_hw(struct gk20a *g);

/* --- Interrupt service routines (stalling / non-stalling) --- */
void gk20a_fifo_isr(struct gk20a *g);
int gk20a_fifo_nonstall_isr(struct gk20a *g);

/* --- Preemption: single channel, TSG, or channel-or-its-TSG --- */
int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);

/* --- Engine scheduling activity enable/disable --- */
int gk20a_fifo_enable_engine_activity(struct gk20a *g,
		struct fifo_engine_info_gk20a *eng_info);
int gk20a_fifo_enable_all_engine_activity(struct gk20a *g);
int gk20a_fifo_disable_engine_activity(struct gk20a *g,
		struct fifo_engine_info_gk20a *eng_info,
		bool wait_for_idle);
int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
		bool wait_for_idle);
void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);
void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);

/* Engines associated with chid (by name a bitmask — confirm in impl). */
u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);

int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id);

/* Add (add=true) or remove a channel from an engine's runlist. */
int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 chid,
		bool add, bool wait_for_finish);

int gk20a_fifo_suspend(struct gk20a *g);

bool gk20a_fifo_mmu_fault_pending(struct gk20a *g);

/* --- Recovery / teardown after faults or timeouts --- */
void gk20a_fifo_recover(struct gk20a *g,
		u32 engine_ids, /* if zero, will be queried from HW */
		u32 hw_id, /* if ~0, will be queried from HW */
		bool hw_id_is_tsg, /* ignored if hw_id == ~0 */
		bool id_is_known, bool verbose);
void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose);
void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose);
int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
		u32 err_code, bool verbose);
void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id);
int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch);

void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
		unsigned long fault_id);
int gk20a_fifo_wait_engine_idle(struct gk20a *g);
bool gk20a_fifo_is_engine_busy(struct gk20a *g);
u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g);
u32 gk20a_fifo_act_eng_interrupt_mask(struct gk20a *g, u32 act_eng_id);
u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g);
u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
		int *__id, bool *__is_tsg);
/* --- MMU-fault error propagation to channels / TSGs --- */
void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
		struct tsg_gk20a *tsg);
void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt);
void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
		struct channel_gk20a *refch);
bool gk20a_fifo_error_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
bool gk20a_fifo_error_ch(struct gk20a *g, struct channel_gk20a *refch);
293
/* --- Channel / engine / runlist lookup helpers --- */
struct channel_gk20a *gk20a_fifo_channel_from_chid(struct gk20a *g,
		u32 chid);

void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg);
/* new_level is one of NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_*. */
int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
		u32 id,
		bool is_tsg,
		u32 runlist_id,
		u32 new_level);
int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);


const char *gk20a_fifo_interleave_level_name(u32 interleave_level);

/* Map a HW engine type to an ENGINE_*_GK20A enum; inst_id is an out-param. */
int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
		u32 *inst_id);

u32 gk20a_fifo_get_engine_ids(struct gk20a *g, u32 engine_id[],
		u32 engine_id_sz, u32 engine_enum);

void gk20a_fifo_delete_runlist(struct fifo_gk20a *f);

struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g,
		u32 engine_id);

bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id);

u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g);

int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch);

u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g);

u32 gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g);

u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g);

bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);

/* Like gk20a_fifo_update_runlist() for several runlists; runlist_ids
 * is presumably a bitmask of runlist indices — confirm at call sites. */
int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
		bool add, bool wait_for_finish);

int gk20a_fifo_init_engine_info(struct fifo_gk20a *f);

/* --- Runlist entry construction and runlist state control --- */
void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist);
void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist);
/* runlist_state: RUNLIST_ENABLED/DISABLED;
 * runlist_mutex_state: RUNLIST_INFO_MUTEX_LOCKED if already held. */
void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
		u32 runlist_state, int runlist_mutex_state);
342
/* --- USERD accessors (per-channel GP_GET / GP_PUT / PB_GET) --- */
u32 gk20a_fifo_userd_gp_get(struct gk20a *g, struct channel_gk20a *c);
void gk20a_fifo_userd_gp_put(struct gk20a *g, struct channel_gk20a *c);
u64 gk20a_fifo_userd_pb_get(struct gk20a *g, struct channel_gk20a *c);

bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid);
#ifdef CONFIG_DEBUG_FS
/* Kickoff-latency profiling; see fifo_gk20a::profile. */
struct fifo_profile_gk20a *gk20a_fifo_profile_acquire(struct gk20a *g);
void gk20a_fifo_profile_release(struct gk20a *g,
		struct fifo_profile_gk20a *profile);
#endif

/* --- Debug dump helpers --- */
void gk20a_dump_channel_status_ramfc(struct gk20a *g,
		struct gk20a_debug_output *o,
		u32 chid,
		struct ch_state *ch_state);
void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
		struct gk20a_debug_output *o);
void gk20a_dump_pbdma_status(struct gk20a *g,
		struct gk20a_debug_output *o);
void gk20a_dump_eng_status(struct gk20a *g,
		struct gk20a_debug_output *o);
const char *gk20a_decode_ccsr_chan_status(u32 index);
const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index);
void gk20a_fifo_enable_channel(struct channel_gk20a *ch);
void gk20a_fifo_disable_channel(struct channel_gk20a *ch);

bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid);
bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid);
int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch);

struct channel_gk20a *gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr);
void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a);

u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g);

/* --- Preempt internals ---
 * id_type is ID_TYPE_*; timeout_rc_type is PREEMPT_TIMEOUT_*.
 * __locked_* presumably require the runlist mutex held by the caller
 * (naming convention — confirm in the implementation). */
int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, unsigned int id_type,
		unsigned int timeout_rc_type);
int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
		unsigned int id_type);

/* --- RAMFC / instance block / USERD setup for a channel --- */
int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
		u64 gpfifo_base, u32 gpfifo_entries,
		unsigned long timeout, u32 flags);
int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, unsigned int timeslice);
void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c);
int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch);
void gk20a_fifo_free_inst(struct gk20a *g, struct channel_gk20a *ch);
int gk20a_fifo_setup_userd(struct channel_gk20a *c);
u32 gk20a_fifo_pbdma_acquire_val(u64 timeout);


void gk20a_fifo_handle_runlist_event(struct gk20a *g);
bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
		u32 engine_subid, bool fake_fault);

/* id_type is ID_TYPE_*; rc_type is RC_TYPE_*. */
void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
		u32 hw_id, unsigned int id_type, unsigned int rc_type,
		struct mmu_fault_info *mmfault);

bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
		bool *verbose, u32 *ms);
bool gk20a_fifo_handle_sched_error(struct gk20a *g);

/* --- PBDMA interrupt handling --- */
void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id,
		int pbdma_method_index);
unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
		u32 pbdma_intr_0, u32 *handled, u32 *error_notifier);
unsigned int gk20a_fifo_handle_pbdma_intr_1(struct gk20a *g, u32 pbdma_id,
		u32 pbdma_intr_1, u32 *handled, u32 *error_notifier);
u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f,
		u32 pbdma_id, unsigned int rc);

u32 gk20a_fifo_default_timeslice_us(struct gk20a *g);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
/* --- Syncpoint command-buffer helpers (host1x syncpoints) --- */
void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
		struct priv_cmd_entry *cmd, u32 off,
		u32 id, u32 thresh, u64 gpu_va);
u32 gk20a_fifo_get_syncpt_wait_cmd_size(void);
void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
		bool wfi_cmd, struct priv_cmd_entry *cmd,
		u32 id, u64 gpu_va);
u32 gk20a_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd);
void gk20a_fifo_free_syncpt_buf(struct channel_gk20a *c,
		struct nvgpu_mem *syncpt_buf);
int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
		u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
#endif

void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
		struct mmu_fault_info *mmfault);
#endif /*__FIFO_GK20A_H__*/