Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c  764
1 file changed, 0 insertions(+), 764 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c
deleted file mode 100644
index fde113e0..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c
+++ /dev/null
@@ -1,764 +0,0 @@
/*
 * Virtualized GPU Fifo
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <trace/events/gk20a.h>

#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/barrier.h>
#include <nvgpu/error_notifier.h>
#include <nvgpu/vgpu/vgpu_ivc.h>

#include "gk20a/gk20a.h"
#include "vgpu.h"
#include "fifo_vgpu.h"

#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>

void vgpu_channel_bind(struct channel_gk20a *ch)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_config_params *p =
			&msg.params.channel_config;
	int err;

	gk20a_dbg_info("bind channel %d", ch->chid);

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
	msg.handle = vgpu_get_handle(ch->g);
	p->handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);

	nvgpu_smp_wmb();
	nvgpu_atomic_set(&ch->bound, true);
}

void vgpu_channel_unbind(struct channel_gk20a *ch)
{

	gk20a_dbg_fn("");

	if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
		struct tegra_vgpu_cmd_msg msg;
		struct tegra_vgpu_channel_config_params *p =
				&msg.params.channel_config;
		int err;

		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNBIND;
		msg.handle = vgpu_get_handle(ch->g);
		p->handle = ch->virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		WARN_ON(err || msg.ret);
	}

}

int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
	msg.handle = vgpu_get_handle(g);
	p->id = ch->chid;
	p->pid = (u64)current->tgid;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (err || msg.ret) {
		nvgpu_err(g, "fail");
		return -ENOMEM;
	}

	ch->virt_ctx = p->handle;
	gk20a_dbg_fn("done");
	return 0;
}

void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
	msg.handle = vgpu_get_handle(g);
	p->handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}

void vgpu_channel_enable(struct channel_gk20a *ch)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_config_params *p =
			&msg.params.channel_config;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
	msg.handle = vgpu_get_handle(ch->g);
	p->handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}

void vgpu_channel_disable(struct channel_gk20a *ch)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_config_params *p =
			&msg.params.channel_config;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
	msg.handle = vgpu_get_handle(ch->g);
	p->handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
}

int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
				u32 gpfifo_entries,
				unsigned long acquire_timeout, u32 flags)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
	msg.handle = vgpu_get_handle(ch->g);
	p->handle = ch->virt_ctx;
	p->gpfifo_va = gpfifo_base;
	p->num_entries = gpfifo_entries;
	p->userd_addr = ch->userd_iova;
	p->iova = 0;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -ENOMEM : 0;
}

int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
	struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
	u32 i;

	gk20a_dbg_fn("");

	if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
		nvgpu_err(f->g, "num_engines %d larger than max %d",
			engines->num_engines, TEGRA_VGPU_MAX_ENGINES);
		return -EINVAL;
	}

	f->num_engines = engines->num_engines;
	for (i = 0; i < f->num_engines; i++) {
		struct fifo_engine_info_gk20a *info =
			&f->engine_info[engines->info[i].engine_id];

		if (engines->info[i].engine_id >= f->max_engines) {
			nvgpu_err(f->g, "engine id %d larger than max %d",
				engines->info[i].engine_id,
				f->max_engines);
			return -EINVAL;
		}

		info->intr_mask = engines->info[i].intr_mask;
		info->reset_mask = engines->info[i].reset_mask;
		info->runlist_id = engines->info[i].runlist_id;
		info->pbdma_id = engines->info[i].pbdma_id;
		info->inst_id = engines->info[i].inst_id;
		info->pri_base = engines->info[i].pri_base;
		info->engine_enum = engines->info[i].engine_enum;
		info->fault_id = engines->info[i].fault_id;
		f->active_engines_list[i] = engines->info[i].engine_id;
	}

	gk20a_dbg_fn("done");

	return 0;
}

static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
{
	struct fifo_runlist_info_gk20a *runlist;
	unsigned int runlist_id = -1;
	u32 i;
	u64 runlist_size;

	gk20a_dbg_fn("");

	f->max_runlists = g->ops.fifo.eng_runlist_base_size();
	f->runlist_info = nvgpu_kzalloc(g,
				sizeof(struct fifo_runlist_info_gk20a) *
				f->max_runlists);
	if (!f->runlist_info)
		goto clean_up_runlist;

	memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
		f->max_runlists));

	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
		runlist = &f->runlist_info[runlist_id];

		runlist->active_channels =
			nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
						      BITS_PER_BYTE));
		if (!runlist->active_channels)
			goto clean_up_runlist;

		runlist_size = sizeof(u16) * f->num_channels;
		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
			int err = nvgpu_dma_alloc_sys(g, runlist_size,
						      &runlist->mem[i]);
			if (err) {
				nvgpu_err(g, "memory allocation failed");
				goto clean_up_runlist;
			}
		}
		nvgpu_mutex_init(&runlist->mutex);

		/* None of buffers is pinned if this value doesn't change.
		   Otherwise, one of them (cur_buffer) must have been pinned. */
		runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
	}

	gk20a_dbg_fn("done");
	return 0;

clean_up_runlist:
	gk20a_fifo_delete_runlist(f);
	gk20a_dbg_fn("fail");
	return -ENOMEM;
}

static int vgpu_init_fifo_setup_sw(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	unsigned int chid;
	int err = 0;

	gk20a_dbg_fn("");

	if (f->sw_ready) {
		gk20a_dbg_fn("skip init");
		return 0;
	}

	f->g = g;
	f->num_channels = priv->constants.num_channels;
	f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);

	f->userd_entry_size = 1 << ram_userd_base_shift_v();

	err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels,
			&f->userd);
	if (err) {
		nvgpu_err(g, "memory allocation failed");
		goto clean_up;
	}

	/* bar1 va */
	if (g->ops.mm.is_bar1_supported(g)) {
		f->userd.gpu_va = vgpu_bar1_map(g, &f->userd);
		if (!f->userd.gpu_va) {
			nvgpu_err(g, "gmmu mapping failed");
			goto clean_up;
		}
		/* if reduced BAR1 range is specified, use offset of 0
		 * (server returns offset assuming full BAR1 range)
		 */
		if (vgpu_is_reduced_bar1(g))
			f->userd.gpu_va = 0;
	}

	gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);

	f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
	f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
	f->engine_info = nvgpu_kzalloc(g, f->max_engines *
				sizeof(*f->engine_info));
	f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32));

	if (!(f->channel && f->tsg && f->engine_info && f->active_engines_list)) {
		err = -ENOMEM;
		goto clean_up;
	}
	memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));

	g->ops.fifo.init_engine_info(f);

	init_runlist(g, f);

	nvgpu_init_list_node(&f->free_chs);
	nvgpu_mutex_init(&f->free_chs_mutex);

	for (chid = 0; chid < f->num_channels; chid++) {
		f->channel[chid].userd_iova =
			nvgpu_mem_get_addr(g, &f->userd) +
			chid * f->userd_entry_size;
		f->channel[chid].userd_gpu_va =
			f->userd.gpu_va + chid * f->userd_entry_size;

		gk20a_init_channel_support(g, chid);
		gk20a_init_tsg_support(g, chid);
	}
	nvgpu_mutex_init(&f->tsg_inuse_mutex);

	err = nvgpu_channel_worker_init(g);
	if (err)
		goto clean_up;

	f->deferred_reset_pending = false;
	nvgpu_mutex_init(&f->deferred_reset_mutex);

	f->channel_base = priv->constants.channel_base;

	f->sw_ready = true;

	gk20a_dbg_fn("done");
	return 0;

clean_up:
	gk20a_dbg_fn("fail");
	/* FIXME: unmap from bar1 */
	nvgpu_dma_free(g, &f->userd);

	memset(&f->userd, 0, sizeof(f->userd));

	nvgpu_vfree(g, f->channel);
	f->channel = NULL;
	nvgpu_vfree(g, f->tsg);
	f->tsg = NULL;
	nvgpu_kfree(g, f->engine_info);
	f->engine_info = NULL;
	nvgpu_kfree(g, f->active_engines_list);
	f->active_engines_list = NULL;

	return err;
}

int vgpu_init_fifo_setup_hw(struct gk20a *g)
{
	gk20a_dbg_fn("");

	/* test write, read through bar1 @ userd region before
	 * turning on the snooping */
	{
		struct fifo_gk20a *f = &g->fifo;
		u32 v, v1 = 0x33, v2 = 0x55;

		u32 bar1_vaddr = f->userd.gpu_va;
		volatile u32 *cpu_vaddr = f->userd.cpu_va;

		gk20a_dbg_info("test bar1 @ vaddr 0x%x",
			   bar1_vaddr);

		v = gk20a_bar1_readl(g, bar1_vaddr);

		*cpu_vaddr = v1;
		nvgpu_mb();

		if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
			nvgpu_err(g, "bar1 broken @ gk20a!");
			return -EINVAL;
		}

		gk20a_bar1_writel(g, bar1_vaddr, v2);

		if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
			nvgpu_err(g, "bar1 broken @ gk20a!");
			return -EINVAL;
		}

		/* is it visible to the cpu? */
		if (*cpu_vaddr != v2) {
			nvgpu_err(g, "cpu didn't see bar1 write @ %p!",
				cpu_vaddr);
		}

		/* put it back */
		gk20a_bar1_writel(g, bar1_vaddr, v);
	}

	gk20a_dbg_fn("done");

	return 0;
}

int vgpu_init_fifo_support(struct gk20a *g)
{
	u32 err;

	gk20a_dbg_fn("");

	err = vgpu_init_fifo_setup_sw(g);
	if (err)
		return err;

	if (g->ops.fifo.init_fifo_setup_hw)
		err = g->ops.fifo.init_fifo_setup_hw(g);
	return err;
}

int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
{
	struct fifo_gk20a *f = &g->fifo;
	struct channel_gk20a *ch = &f->channel[chid];
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_channel_config_params *p =
			&msg.params.channel_config;
	int err;

	gk20a_dbg_fn("");

	if (!nvgpu_atomic_read(&ch->bound))
		return 0;

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT;
	msg.handle = vgpu_get_handle(g);
	p->handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	if (err || msg.ret) {
		nvgpu_err(g,
			"preempt channel %d failed", chid);
		err = -ENOMEM;
	}

	return err;
}

int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_tsg_preempt_params *p =
			&msg.params.tsg_preempt;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
	msg.handle = vgpu_get_handle(g);
	p->tsg_id = tsgid;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;

	if (err) {
		nvgpu_err(g,
			"preempt tsg %u failed", tsgid);
	}

	return err;
}

static int vgpu_submit_runlist(struct gk20a *g, u64 handle, u8 runlist_id,
			       u16 *runlist, u32 num_entries)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_runlist_params *p;
	int err;
	void *oob_handle;
	void *oob;
	size_t size, oob_size;

	oob_handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
					  TEGRA_VGPU_QUEUE_CMD,
					  &oob, &oob_size);
	if (!oob_handle)
		return -EINVAL;

	size = sizeof(*runlist) * num_entries;
	if (oob_size < size) {
		err = -ENOMEM;
		goto done;
	}

	msg.cmd = TEGRA_VGPU_CMD_SUBMIT_RUNLIST;
	msg.handle = handle;
	p = &msg.params.runlist;
	p->runlist_id = runlist_id;
	p->num_entries = num_entries;

	memcpy(oob, runlist, size);
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	err = (err || msg.ret) ? -1 : 0;

done:
	vgpu_ivc_oob_put_ptr(oob_handle);
	return err;
}

static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
					   u32 chid, bool add,
					   bool wait_for_finish)
{
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist;
	u16 *runlist_entry = NULL;
	u32 count = 0;

	gk20a_dbg_fn("");

	runlist = &f->runlist_info[runlist_id];

	/* valid channel, add/remove it from active list.
	   Otherwise, keep active list untouched for suspend/resume. */
	if (chid != (u32)~0) {
		if (add) {
			if (test_and_set_bit(chid,
				runlist->active_channels) == 1)
				return 0;
		} else {
			if (test_and_clear_bit(chid,
				runlist->active_channels) == 0)
				return 0;
		}
	}

	if (chid != (u32)~0 || /* add/remove a valid channel */
	    add /* resume to add all channels back */) {
		u32 cid;

		runlist_entry = runlist->mem[0].cpu_va;
		for_each_set_bit(cid,
				runlist->active_channels, f->num_channels) {
			gk20a_dbg_info("add channel %d to runlist", cid);
			runlist_entry[0] = cid;
			runlist_entry++;
			count++;
		}
	} else	/* suspend to remove all channels */
		count = 0;

	return vgpu_submit_runlist(g, vgpu_get_handle(g), runlist_id,
				   runlist->mem[0].cpu_va, count);
}

/* add/remove a channel from runlist
   special cases below: runlist->active_channels will NOT be changed.
   (chid == ~0 && !add) means remove all active channels from runlist.
   (chid == ~0 && add) means restore all active channels on runlist. */
int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
			     u32 chid, bool add, bool wait_for_finish)
{
	struct fifo_runlist_info_gk20a *runlist = NULL;
	struct fifo_gk20a *f = &g->fifo;
	u32 ret = 0;

	gk20a_dbg_fn("");

	runlist = &f->runlist_info[runlist_id];

	nvgpu_mutex_acquire(&runlist->mutex);

	ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
					wait_for_finish);

	nvgpu_mutex_release(&runlist->mutex);
	return ret;
}

int vgpu_fifo_wait_engine_idle(struct gk20a *g)
{
	gk20a_dbg_fn("");

	return 0;
}

int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
					u32 id,
					u32 runlist_id,
					u32 new_level)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_tsg_runlist_interleave_params *p =
			&msg.params.tsg_interleave;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
	msg.handle = vgpu_get_handle(g);
	p->tsg_id = id;
	p->level = new_level;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
	return err ? err : msg.ret;
}

int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
					u32 err_code, bool verbose)
{
	struct tsg_gk20a *tsg = NULL;
	struct channel_gk20a *ch_tsg = NULL;
	struct gk20a *g = ch->g;
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_channel_config_params *p =
			&msg.params.channel_config;
	int err;

	gk20a_dbg_fn("");

	if (gk20a_is_channel_marked_as_tsg(ch)) {
		tsg = &g->fifo.tsg[ch->tsgid];

		nvgpu_rwsem_down_read(&tsg->ch_list_lock);

		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
				channel_gk20a, ch_entry) {
			if (gk20a_channel_get(ch_tsg)) {
				nvgpu_set_error_notifier(ch_tsg, err_code);
				ch_tsg->has_timedout = true;
				gk20a_channel_put(ch_tsg);
			}
		}

		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	} else {
		nvgpu_set_error_notifier(ch, err_code);
		ch->has_timedout = true;
	}

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET;
	msg.handle = vgpu_get_handle(ch->g);
	p->handle = ch->virt_ctx;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);
	if (!err)
		gk20a_channel_abort(ch, false);
	return err ? err : msg.ret;
}

static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
		struct channel_gk20a *ch)
{
	/*
	 * If error code is already set, this mmu fault
	 * was triggered as part of recovery from other
	 * error condition.
	 * Don't overwrite error flag.
	 */
	nvgpu_set_error_notifier_if_empty(ch,
			NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT);

	/* mark channel as faulted */
	ch->has_timedout = true;
	nvgpu_smp_wmb();
	/* unblock pending waits */
	nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
	nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
}

static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
		struct channel_gk20a *ch)
{
	struct tsg_gk20a *tsg = NULL;
	struct channel_gk20a *ch_tsg = NULL;

	if (gk20a_is_channel_marked_as_tsg(ch)) {
		tsg = &g->fifo.tsg[ch->tsgid];

		nvgpu_rwsem_down_read(&tsg->ch_list_lock);

		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
				channel_gk20a, ch_entry) {
			if (gk20a_channel_get(ch_tsg)) {
				vgpu_fifo_set_ctx_mmu_error_ch(g, ch_tsg);
				gk20a_channel_put(ch_tsg);
			}
		}

		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	} else {
		vgpu_fifo_set_ctx_mmu_error_ch(g, ch);
	}
}

int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
{
	struct fifo_gk20a *f = &g->fifo;
	struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);

	gk20a_dbg_fn("");
	if (!ch)
		return 0;

	nvgpu_err(g, "fifo intr (%d) on ch %u",
		info->type, info->chid);

	trace_gk20a_channel_reset(ch->chid, ch->tsgid);

	switch (info->type) {
	case TEGRA_VGPU_FIFO_INTR_PBDMA:
		nvgpu_set_error_notifier(ch, NVGPU_ERR_NOTIFIER_PBDMA_ERROR);
		break;
	case TEGRA_VGPU_FIFO_INTR_CTXSW_TIMEOUT:
		nvgpu_set_error_notifier(ch,
			NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
		break;
	case TEGRA_VGPU_FIFO_INTR_MMU_FAULT:
		vgpu_fifo_set_ctx_mmu_error_ch_tsg(g, ch);
		gk20a_channel_abort(ch, false);
		break;
	default:
		WARN_ON(1);
		break;
	}

	gk20a_channel_put(ch);
	return 0;
}

int vgpu_fifo_nonstall_isr(struct gk20a *g,
			struct tegra_vgpu_fifo_nonstall_intr_info *info)
{
	gk20a_dbg_fn("");

	switch (info->type) {
	case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL:
		gk20a_channel_semaphore_wakeup(g, false);
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

u32 vgpu_fifo_default_timeslice_us(struct gk20a *g)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	return priv->constants.default_timeslice_us;
}
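
For reference, nearly every entry point in the deleted file follows the same RPC shape: fill in a tegra_vgpu_cmd_msg, send it to the vgpu server with vgpu_comm_sendrecv(), then check both the transport error and the server's return code in msg.ret. A minimal sketch of that pattern is shown below; the helper name vgpu_channel_send_config_cmd() is hypothetical and not part of fifo_vgpu.c, and the -EIO failure code is an assumption (the original callers variously return -ENOMEM, -1, or msg.ret).

/* Hypothetical illustration only -- not part of the deleted fifo_vgpu.c. */
static int vgpu_channel_send_config_cmd(struct channel_gk20a *ch, u32 cmd)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_channel_config_params *p =
			&msg.params.channel_config;
	int err;

	msg.cmd = cmd;				/* e.g. TEGRA_VGPU_CMD_CHANNEL_ENABLE */
	msg.handle = vgpu_get_handle(ch->g);	/* handle to the vgpu server context */
	p->handle = ch->virt_ctx;		/* server-side channel handle */

	/* Round-trip to the server; msg.ret carries the server's status. */
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? -EIO : 0;
}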