author    Richard Zhao <rizhao@nvidia.com>    2018-01-30 02:24:37 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-02-27 17:30:52 -0500
commit    6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree      557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
parent    7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most files have been moved out of the linux folder. More code can be made common as HAL-ification goes on.

Jira EVLR-2364

Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649947
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/fifo_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c  770
1 file changed, 770 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
new file mode 100644
index 00000000..580bfb60
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -0,0 +1,770 @@
/*
 * Virtualized GPU Fifo
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <trace/events/gk20a.h>

#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/barrier.h>
#include <nvgpu/error_notifier.h>
#include <nvgpu/vgpu/vgpu_ivc.h>
#include <nvgpu/vgpu/vgpu.h>

#include "gk20a/gk20a.h"
#include "fifo_vgpu.h"

#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>

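/*
 * Bind the channel on the vgpu server via TEGRA_VGPU_CMD_CHANNEL_BIND, then
 * mark it bound locally; the write barrier orders the preceding updates
 * before the bound flag becomes visible.
 */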
void vgpu_channel_bind(struct channel_gk20a *ch)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_channel_config_params *p =
                        &msg.params.channel_config;
        int err;

        gk20a_dbg_info("bind channel %d", ch->chid);

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
        msg.handle = vgpu_get_handle(ch->g);
        p->handle = ch->virt_ctx;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        WARN_ON(err || msg.ret);

        nvgpu_smp_wmb();
        nvgpu_atomic_set(&ch->bound, true);
}

void vgpu_channel_unbind(struct channel_gk20a *ch)
{

        gk20a_dbg_fn("");

        if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
                struct tegra_vgpu_cmd_msg msg;
                struct tegra_vgpu_channel_config_params *p =
                                &msg.params.channel_config;
                int err;

                msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNBIND;
                msg.handle = vgpu_get_handle(ch->g);
                p->handle = ch->virt_ctx;
                err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
                WARN_ON(err || msg.ret);
        }

}

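/*
 * Allocate a hardware context for the channel on the vgpu server and store
 * the returned handle in ch->virt_ctx.
 */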
int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
        msg.handle = vgpu_get_handle(g);
        p->id = ch->chid;
        p->pid = (u64)current->tgid;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        if (err || msg.ret) {
                nvgpu_err(g, "fail");
                return -ENOMEM;
        }

        ch->virt_ctx = p->handle;
        gk20a_dbg_fn("done");
        return 0;
}

void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
        msg.handle = vgpu_get_handle(g);
        p->handle = ch->virt_ctx;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        WARN_ON(err || msg.ret);
}

void vgpu_channel_enable(struct channel_gk20a *ch)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_channel_config_params *p =
                        &msg.params.channel_config;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
        msg.handle = vgpu_get_handle(ch->g);
        p->handle = ch->virt_ctx;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        WARN_ON(err || msg.ret);
}

void vgpu_channel_disable(struct channel_gk20a *ch)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_channel_config_params *p =
                        &msg.params.channel_config;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
        msg.handle = vgpu_get_handle(ch->g);
        p->handle = ch->virt_ctx;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        WARN_ON(err || msg.ret);
}

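/*
 * RAMFC setup is delegated to the vgpu server: forward the GPFIFO base and
 * entry count together with the channel's USERD address.
 */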
int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
                                u32 gpfifo_entries,
                                unsigned long acquire_timeout, u32 flags)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
        msg.handle = vgpu_get_handle(ch->g);
        p->handle = ch->virt_ctx;
        p->gpfifo_va = gpfifo_base;
        p->num_entries = gpfifo_entries;
        p->userd_addr = ch->userd_iova;
        p->iova = 0;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

        return (err || msg.ret) ? -ENOMEM : 0;
}

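/*
 * Populate the FIFO engine table from the engine info the vgpu server
 * provides in the constants block, validating the engine count and ids
 * against the local maximums.
 */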
int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
{
        struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
        struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
        u32 i;

        gk20a_dbg_fn("");

        if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
                nvgpu_err(f->g, "num_engines %d larger than max %d",
                        engines->num_engines, TEGRA_VGPU_MAX_ENGINES);
                return -EINVAL;
        }

        f->num_engines = engines->num_engines;
        for (i = 0; i < f->num_engines; i++) {
                struct fifo_engine_info_gk20a *info =
                        &f->engine_info[engines->info[i].engine_id];

                if (engines->info[i].engine_id >= f->max_engines) {
                        nvgpu_err(f->g, "engine id %d larger than max %d",
                                engines->info[i].engine_id,
                                f->max_engines);
                        return -EINVAL;
                }

                info->intr_mask = engines->info[i].intr_mask;
                info->reset_mask = engines->info[i].reset_mask;
                info->runlist_id = engines->info[i].runlist_id;
                info->pbdma_id = engines->info[i].pbdma_id;
                info->inst_id = engines->info[i].inst_id;
                info->pri_base = engines->info[i].pri_base;
                info->engine_enum = engines->info[i].engine_enum;
                info->fault_id = engines->info[i].fault_id;
                f->active_engines_list[i] = engines->info[i].engine_id;
        }

        gk20a_dbg_fn("done");

        return 0;
}

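/*
 * Allocate per-runlist state: the active-channel bitmap and the runlist
 * buffers that are later handed to the server on runlist submission.
 */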
static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
{
        struct fifo_runlist_info_gk20a *runlist;
        unsigned int runlist_id = -1;
        u32 i;
        u64 runlist_size;

        gk20a_dbg_fn("");

        f->max_runlists = g->ops.fifo.eng_runlist_base_size();
        f->runlist_info = nvgpu_kzalloc(g,
                                        sizeof(struct fifo_runlist_info_gk20a) *
                                        f->max_runlists);
        if (!f->runlist_info)
                goto clean_up_runlist;

        memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
                f->max_runlists));

        for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
                runlist = &f->runlist_info[runlist_id];

                runlist->active_channels =
                        nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
                                                      BITS_PER_BYTE));
                if (!runlist->active_channels)
                        goto clean_up_runlist;

                runlist_size = sizeof(u16) * f->num_channels;
                for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
                        int err = nvgpu_dma_alloc_sys(g, runlist_size,
                                                      &runlist->mem[i]);
                        if (err) {
                                nvgpu_err(g, "memory allocation failed");
                                goto clean_up_runlist;
                        }
                }
                nvgpu_mutex_init(&runlist->mutex);

                /* None of buffers is pinned if this value doesn't change.
                   Otherwise, one of them (cur_buffer) must have been pinned. */
                runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
        }

        gk20a_dbg_fn("done");
        return 0;

clean_up_runlist:
        gk20a_fifo_delete_runlist(f);
        gk20a_dbg_fn("fail");
        return -ENOMEM;
}

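/*
 * Software-side FIFO init for vgpu: sizes come from the server-provided
 * constants, USERD is allocated locally and mapped through BAR1 when
 * supported, and the channel/TSG bookkeeping mirrors the native gk20a setup.
 */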
static int vgpu_init_fifo_setup_sw(struct gk20a *g)
{
        struct fifo_gk20a *f = &g->fifo;
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
        unsigned int chid;
        int err = 0;

        gk20a_dbg_fn("");

        if (f->sw_ready) {
                gk20a_dbg_fn("skip init");
                return 0;
        }

        f->g = g;
        f->num_channels = priv->constants.num_channels;
        f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);

        f->userd_entry_size = 1 << ram_userd_base_shift_v();

        err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels,
                        &f->userd);
        if (err) {
                nvgpu_err(g, "memory allocation failed");
                goto clean_up;
        }

        /* bar1 va */
        if (g->ops.mm.is_bar1_supported(g)) {
                f->userd.gpu_va = vgpu_bar1_map(g, &f->userd);
                if (!f->userd.gpu_va) {
                        nvgpu_err(g, "gmmu mapping failed");
                        goto clean_up;
                }
                /* if reduced BAR1 range is specified, use offset of 0
                 * (server returns offset assuming full BAR1 range)
                 */
                if (vgpu_is_reduced_bar1(g))
                        f->userd.gpu_va = 0;
        }

        gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);

        f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
        f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
        f->engine_info = nvgpu_kzalloc(g, f->max_engines *
                                sizeof(*f->engine_info));
        f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32));

        if (!(f->channel && f->tsg && f->engine_info && f->active_engines_list)) {
                err = -ENOMEM;
                goto clean_up;
        }
        memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));

        g->ops.fifo.init_engine_info(f);

        init_runlist(g, f);

        nvgpu_init_list_node(&f->free_chs);
        nvgpu_mutex_init(&f->free_chs_mutex);

        for (chid = 0; chid < f->num_channels; chid++) {
                f->channel[chid].userd_iova =
                        nvgpu_mem_get_addr(g, &f->userd) +
                        chid * f->userd_entry_size;
                f->channel[chid].userd_gpu_va =
                        f->userd.gpu_va + chid * f->userd_entry_size;

                gk20a_init_channel_support(g, chid);
                gk20a_init_tsg_support(g, chid);
        }
        nvgpu_mutex_init(&f->tsg_inuse_mutex);

        err = nvgpu_channel_worker_init(g);
        if (err)
                goto clean_up;

        f->deferred_reset_pending = false;
        nvgpu_mutex_init(&f->deferred_reset_mutex);

        f->channel_base = priv->constants.channel_base;

        f->sw_ready = true;

        gk20a_dbg_fn("done");
        return 0;

clean_up:
        gk20a_dbg_fn("fail");
        /* FIXME: unmap from bar1 */
        nvgpu_dma_free(g, &f->userd);

        memset(&f->userd, 0, sizeof(f->userd));

        nvgpu_vfree(g, f->channel);
        f->channel = NULL;
        nvgpu_vfree(g, f->tsg);
        f->tsg = NULL;
        nvgpu_kfree(g, f->engine_info);
        f->engine_info = NULL;
        nvgpu_kfree(g, f->active_engines_list);
        f->active_engines_list = NULL;

        return err;
}

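/*
 * Hardware-side init is limited to a sanity check that the USERD region is
 * coherently accessible through both BAR1 and the CPU mapping.
 */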
int vgpu_init_fifo_setup_hw(struct gk20a *g)
{
        gk20a_dbg_fn("");

        /* test write, read through bar1 @ userd region before
         * turning on the snooping */
        {
                struct fifo_gk20a *f = &g->fifo;
                u32 v, v1 = 0x33, v2 = 0x55;

                u32 bar1_vaddr = f->userd.gpu_va;
                volatile u32 *cpu_vaddr = f->userd.cpu_va;

                gk20a_dbg_info("test bar1 @ vaddr 0x%x",
                           bar1_vaddr);

                v = gk20a_bar1_readl(g, bar1_vaddr);

                *cpu_vaddr = v1;
                nvgpu_mb();

                if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
                        nvgpu_err(g, "bar1 broken @ gk20a!");
                        return -EINVAL;
                }

                gk20a_bar1_writel(g, bar1_vaddr, v2);

                if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
                        nvgpu_err(g, "bar1 broken @ gk20a!");
                        return -EINVAL;
                }

                /* is it visible to the cpu? */
                if (*cpu_vaddr != v2) {
                        nvgpu_err(g, "cpu didn't see bar1 write @ %p!",
                                  cpu_vaddr);
                }

                /* put it back */
                gk20a_bar1_writel(g, bar1_vaddr, v);
        }

        gk20a_dbg_fn("done");

        return 0;
}

int vgpu_init_fifo_support(struct gk20a *g)
{
        u32 err;

        gk20a_dbg_fn("");

        err = vgpu_init_fifo_setup_sw(g);
        if (err)
                return err;

        if (g->ops.fifo.init_fifo_setup_hw)
                err = g->ops.fifo.init_fifo_setup_hw(g);
        return err;
}

int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
{
        struct fifo_gk20a *f = &g->fifo;
        struct channel_gk20a *ch = &f->channel[chid];
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_channel_config_params *p =
                        &msg.params.channel_config;
        int err;

        gk20a_dbg_fn("");

        if (!nvgpu_atomic_read(&ch->bound))
                return 0;

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT;
        msg.handle = vgpu_get_handle(g);
        p->handle = ch->virt_ctx;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

        if (err || msg.ret) {
                nvgpu_err(g,
                        "preempt channel %d failed", chid);
                err = -ENOMEM;
        }

        return err;
}

int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_tsg_preempt_params *p =
                        &msg.params.tsg_preempt;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
        msg.handle = vgpu_get_handle(g);
        p->tsg_id = tsgid;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        err = err ? err : msg.ret;

        if (err) {
                nvgpu_err(g,
                        "preempt tsg %u failed", tsgid);
        }

        return err;
}

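/*
 * Submit a runlist to the vgpu server: the entries are copied into the IVC
 * out-of-band buffer rather than the command message itself, and the
 * SUBMIT_RUNLIST command carries only the runlist id and entry count.
 */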
static int vgpu_submit_runlist(struct gk20a *g, u64 handle, u8 runlist_id,
                               u16 *runlist, u32 num_entries)
{
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_runlist_params *p;
        int err;
        void *oob_handle;
        void *oob;
        size_t size, oob_size;

        oob_handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
                                          TEGRA_VGPU_QUEUE_CMD,
                                          &oob, &oob_size);
        if (!oob_handle)
                return -EINVAL;

        size = sizeof(*runlist) * num_entries;
        if (oob_size < size) {
                err = -ENOMEM;
                goto done;
        }

        msg.cmd = TEGRA_VGPU_CMD_SUBMIT_RUNLIST;
        msg.handle = handle;
        p = &msg.params.runlist;
        p->runlist_id = runlist_id;
        p->num_entries = num_entries;

        memcpy(oob, runlist, size);
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

        err = (err || msg.ret) ? -1 : 0;

done:
        vgpu_ivc_oob_put_ptr(oob_handle);
        return err;
}

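/*
 * Rebuild the runlist buffer from the active_channels bitmap and hand it to
 * the server; a count of 0 is submitted when all channels are removed
 * (suspend). The caller holds runlist->mutex.
 */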
static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                                           u32 chid, bool add,
                                           bool wait_for_finish)
{
        struct fifo_gk20a *f = &g->fifo;
        struct fifo_runlist_info_gk20a *runlist;
        u16 *runlist_entry = NULL;
        u32 count = 0;

        gk20a_dbg_fn("");

        runlist = &f->runlist_info[runlist_id];

        /* valid channel, add/remove it from active list.
           Otherwise, keep active list untouched for suspend/resume. */
        if (chid != (u32)~0) {
                if (add) {
                        if (test_and_set_bit(chid,
                                runlist->active_channels) == 1)
                                return 0;
                } else {
                        if (test_and_clear_bit(chid,
                                runlist->active_channels) == 0)
                                return 0;
                }
        }

        if (chid != (u32)~0 || /* add/remove a valid channel */
            add /* resume to add all channels back */) {
                u32 cid;

                runlist_entry = runlist->mem[0].cpu_va;
                for_each_set_bit(cid,
                        runlist->active_channels, f->num_channels) {
                        gk20a_dbg_info("add channel %d to runlist", cid);
                        runlist_entry[0] = cid;
                        runlist_entry++;
                        count++;
                }
        } else /* suspend to remove all channels */
                count = 0;

        return vgpu_submit_runlist(g, vgpu_get_handle(g), runlist_id,
                                   runlist->mem[0].cpu_va, count);
}

/* add/remove a channel from runlist
   special cases below: runlist->active_channels will NOT be changed.
   (chid == ~0 && !add) means remove all active channels from runlist.
   (chid == ~0 && add) means restore all active channels on runlist. */
int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
                             u32 chid, bool add, bool wait_for_finish)
{
        struct fifo_runlist_info_gk20a *runlist = NULL;
        struct fifo_gk20a *f = &g->fifo;
        u32 ret = 0;

        gk20a_dbg_fn("");

        runlist = &f->runlist_info[runlist_id];

        nvgpu_mutex_acquire(&runlist->mutex);

        ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
                                              wait_for_finish);

        nvgpu_mutex_release(&runlist->mutex);
        return ret;
}

int vgpu_fifo_wait_engine_idle(struct gk20a *g)
{
        gk20a_dbg_fn("");

        return 0;
}

int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
                                     u32 id,
                                     u32 runlist_id,
                                     u32 new_level)
{
        struct tegra_vgpu_cmd_msg msg = {0};
        struct tegra_vgpu_tsg_runlist_interleave_params *p =
                        &msg.params.tsg_interleave;
        int err;

        gk20a_dbg_fn("");

        msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
        msg.handle = vgpu_get_handle(g);
        p->tsg_id = id;
        p->level = new_level;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        WARN_ON(err || msg.ret);
        return err ? err : msg.ret;
}

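/*
 * Mark the channel (or every channel in its TSG) as timed out and set the
 * given error notifier, then ask the server to force-reset the channel and
 * abort it locally.
 */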
int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
                             u32 err_code, bool verbose)
{
        struct tsg_gk20a *tsg = NULL;
        struct channel_gk20a *ch_tsg = NULL;
        struct gk20a *g = ch->g;
        struct tegra_vgpu_cmd_msg msg = {0};
        struct tegra_vgpu_channel_config_params *p =
                        &msg.params.channel_config;
        int err;

        gk20a_dbg_fn("");

        if (gk20a_is_channel_marked_as_tsg(ch)) {
                tsg = &g->fifo.tsg[ch->tsgid];

                nvgpu_rwsem_down_read(&tsg->ch_list_lock);

                nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
                                channel_gk20a, ch_entry) {
                        if (gk20a_channel_get(ch_tsg)) {
                                nvgpu_set_error_notifier(ch_tsg, err_code);
                                ch_tsg->has_timedout = true;
                                gk20a_channel_put(ch_tsg);
                        }
                }

                nvgpu_rwsem_up_read(&tsg->ch_list_lock);
        } else {
                nvgpu_set_error_notifier(ch, err_code);
                ch->has_timedout = true;
        }

        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET;
        msg.handle = vgpu_get_handle(ch->g);
        p->handle = ch->virt_ctx;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        WARN_ON(err || msg.ret);
        if (!err)
                gk20a_channel_abort(ch, false);
        return err ? err : msg.ret;
}

static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
                struct channel_gk20a *ch)
{
        /*
         * If error code is already set, this mmu fault
         * was triggered as part of recovery from other
         * error condition.
         * Don't overwrite error flag.
         */
        nvgpu_set_error_notifier_if_empty(ch,
                        NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT);

        /* mark channel as faulted */
        ch->has_timedout = true;
        nvgpu_smp_wmb();
        /* unblock pending waits */
        nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
        nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
}

static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
                struct channel_gk20a *ch)
{
        struct tsg_gk20a *tsg = NULL;
        struct channel_gk20a *ch_tsg = NULL;

        if (gk20a_is_channel_marked_as_tsg(ch)) {
                tsg = &g->fifo.tsg[ch->tsgid];

                nvgpu_rwsem_down_read(&tsg->ch_list_lock);

                nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
                                channel_gk20a, ch_entry) {
                        if (gk20a_channel_get(ch_tsg)) {
                                vgpu_fifo_set_ctx_mmu_error_ch(g, ch_tsg);
                                gk20a_channel_put(ch_tsg);
                        }
                }

                nvgpu_rwsem_up_read(&tsg->ch_list_lock);
        } else {
                vgpu_fifo_set_ctx_mmu_error_ch(g, ch);
        }
}

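/*
 * Handle a FIFO interrupt notification forwarded by the vgpu server: set the
 * matching error notifier on the channel and, for MMU faults, abort it.
 */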
int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
{
        struct fifo_gk20a *f = &g->fifo;
        struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);

        gk20a_dbg_fn("");
        if (!ch)
                return 0;

        nvgpu_err(g, "fifo intr (%d) on ch %u",
                info->type, info->chid);

        trace_gk20a_channel_reset(ch->chid, ch->tsgid);

        switch (info->type) {
        case TEGRA_VGPU_FIFO_INTR_PBDMA:
                nvgpu_set_error_notifier(ch, NVGPU_ERR_NOTIFIER_PBDMA_ERROR);
                break;
        case TEGRA_VGPU_FIFO_INTR_CTXSW_TIMEOUT:
                nvgpu_set_error_notifier(ch,
                        NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
                break;
        case TEGRA_VGPU_FIFO_INTR_MMU_FAULT:
                vgpu_fifo_set_ctx_mmu_error_ch_tsg(g, ch);
                gk20a_channel_abort(ch, false);
                break;
        default:
                WARN_ON(1);
                break;
        }

        gk20a_channel_put(ch);
        return 0;
}

int vgpu_fifo_nonstall_isr(struct gk20a *g,
                        struct tegra_vgpu_fifo_nonstall_intr_info *info)
{
        gk20a_dbg_fn("");

        switch (info->type) {
        case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL:
                gk20a_channel_semaphore_wakeup(g, false);
                break;
        default:
                WARN_ON(1);
                break;
        }

        return 0;
}

u32 vgpu_fifo_default_timeslice_us(struct gk20a *g)
{
        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

        return priv->constants.default_timeslice_us;
}