Diffstat (limited to 'include/os/linux/ioctl_channel.c')
-rw-r--r--  include/os/linux/ioctl_channel.c  1388
1 file changed, 0 insertions, 1388 deletions
diff --git a/include/os/linux/ioctl_channel.c b/include/os/linux/ioctl_channel.c
deleted file mode 100644
index 0f39cc7..0000000
--- a/include/os/linux/ioctl_channel.c
+++ /dev/null
@@ -1,1388 +0,0 @@
1/*
2 * GK20A Graphics channel
3 *
4 * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <trace/events/gk20a.h>
20#include <linux/file.h>
21#include <linux/anon_inodes.h>
22#include <linux/dma-buf.h>
23#include <linux/poll.h>
24#include <uapi/linux/nvgpu.h>
25
26#include <nvgpu/semaphore.h>
27#include <nvgpu/timers.h>
28#include <nvgpu/kmem.h>
29#include <nvgpu/log.h>
30#include <nvgpu/list.h>
31#include <nvgpu/debug.h>
32#include <nvgpu/enabled.h>
33#include <nvgpu/error_notifier.h>
34#include <nvgpu/barrier.h>
35#include <nvgpu/nvhost.h>
36#include <nvgpu/os_sched.h>
37#include <nvgpu/gk20a.h>
38#include <nvgpu/channel.h>
39#include <nvgpu/channel_sync.h>
40
41#include "gk20a/dbg_gpu_gk20a.h"
42#include "gk20a/fence_gk20a.h"
43
44#include "platform_gk20a.h"
45#include "ioctl_channel.h"
46#include "channel.h"
47#include "os_linux.h"
48#include "ctxsw_trace.h"
49
50/* the minimal size of the client snapshot buffer */
51#define CSS_MIN_CLIENT_SNAPSHOT_SIZE \
52 (sizeof(struct gk20a_cs_snapshot_fifo) + \
53 sizeof(struct gk20a_cs_snapshot_fifo_entry) * 256)
54
55static const char *gr_gk20a_graphics_preempt_mode_name(u32 graphics_preempt_mode)
56{
57 switch (graphics_preempt_mode) {
58 case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
59 return "WFI";
60 default:
61 return "?";
62 }
63}
64
65static const char *gr_gk20a_compute_preempt_mode_name(u32 compute_preempt_mode)
66{
67 switch (compute_preempt_mode) {
68 case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
69 return "WFI";
70 case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
71 return "CTA";
72 default:
73 return "?";
74 }
75}
76
77static void gk20a_channel_trace_sched_param(
78 void (*trace)(int chid, int tsgid, pid_t pid, u32 timeslice,
79 u32 timeout, const char *interleave,
80 const char *graphics_preempt_mode,
81 const char *compute_preempt_mode),
82 struct channel_gk20a *ch)
83{
84 struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
85
86 if (!tsg)
87 return;
88
89 (trace)(ch->chid, ch->tsgid, ch->pid,
 90 tsg->timeslice_us,
91 ch->timeout_ms_max,
92 gk20a_fifo_interleave_level_name(tsg->interleave_level),
93 gr_gk20a_graphics_preempt_mode_name(
94 tsg->gr_ctx.graphics_preempt_mode),
95 gr_gk20a_compute_preempt_mode_name(
96 tsg->gr_ctx.compute_preempt_mode));
97}
98
99/*
 100 * Although channels do have pointers back to the gk20a struct that they were
 101 * created under, that pointer can be bad in cases where the driver is killed.
102 * The channel memory can be freed before the release() function for a given
103 * channel is called. This happens when the driver dies and userspace doesn't
104 * get a chance to call release() until after the entire gk20a driver data is
105 * unloaded and freed.
106 */
107struct channel_priv {
108 struct gk20a *g;
109 struct channel_gk20a *c;
110};
111
112#if defined(CONFIG_GK20A_CYCLE_STATS)
113
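/* Unmap and release the channel's cycle stats dmabuf, if one is attached. */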
114void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch)
115{
116 struct nvgpu_channel_linux *priv = ch->os_priv;
117
118 /* disable existing cyclestats buffer */
119 nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
120 if (priv->cyclestate_buffer_handler) {
121 dma_buf_vunmap(priv->cyclestate_buffer_handler,
122 ch->cyclestate.cyclestate_buffer);
123 dma_buf_put(priv->cyclestate_buffer_handler);
124 priv->cyclestate_buffer_handler = NULL;
125 ch->cyclestate.cyclestate_buffer = NULL;
126 ch->cyclestate.cyclestate_buffer_size = 0;
127 }
128 nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
129}
130
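/* Attach a new cycle stats buffer when a dmabuf fd is supplied, or release the existing one when the fd is zero. */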
131int gk20a_channel_cycle_stats(struct channel_gk20a *ch, int dmabuf_fd)
132{
133 struct dma_buf *dmabuf;
134 void *virtual_address;
135 struct nvgpu_channel_linux *priv = ch->os_priv;
136
 137 /* are cycle stats supported on the current GPU? */
138 if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS))
139 return -ENOSYS;
140
141 if (dmabuf_fd && !priv->cyclestate_buffer_handler) {
142
143 /* set up new cyclestats buffer */
144 dmabuf = dma_buf_get(dmabuf_fd);
145 if (IS_ERR(dmabuf))
146 return PTR_ERR(dmabuf);
147 virtual_address = dma_buf_vmap(dmabuf);
148 if (!virtual_address)
149 return -ENOMEM;
150
151 priv->cyclestate_buffer_handler = dmabuf;
152 ch->cyclestate.cyclestate_buffer = virtual_address;
153 ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
154 return 0;
155
156 } else if (!dmabuf_fd && priv->cyclestate_buffer_handler) {
157 gk20a_channel_free_cycle_stats_buffer(ch);
158 return 0;
159
160 } else if (!dmabuf_fd && !priv->cyclestate_buffer_handler) {
161 /* no request from GL */
162 return 0;
163
164 } else {
165 pr_err("channel already has cyclestats buffer\n");
166 return -EINVAL;
167 }
168}
169
170int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
171{
172 int ret;
173
174 nvgpu_mutex_acquire(&ch->cs_client_mutex);
175 if (ch->cs_client)
176 ret = gr_gk20a_css_flush(ch, ch->cs_client);
177 else
178 ret = -EBADF;
179 nvgpu_mutex_release(&ch->cs_client_mutex);
180
181 return ret;
182}
183
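/* Map the snapshot dmabuf supplied by userspace and attach a cycle stats snapshot client to the channel. */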
184int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
185 u32 dmabuf_fd,
186 u32 perfmon_id_count,
187 u32 *perfmon_id_start)
188{
189 int ret = 0;
190 struct gk20a *g = ch->g;
191 struct gk20a_cs_snapshot_client_linux *client_linux;
192 struct gk20a_cs_snapshot_client *client;
193
194 nvgpu_mutex_acquire(&ch->cs_client_mutex);
195 if (ch->cs_client) {
196 nvgpu_mutex_release(&ch->cs_client_mutex);
197 return -EEXIST;
198 }
199
200 client_linux = nvgpu_kzalloc(g, sizeof(*client_linux));
201 if (!client_linux) {
202 ret = -ENOMEM;
203 goto err;
204 }
205
206 client_linux->dmabuf_fd = dmabuf_fd;
207 client_linux->dma_handler = dma_buf_get(client_linux->dmabuf_fd);
208 if (IS_ERR(client_linux->dma_handler)) {
209 ret = PTR_ERR(client_linux->dma_handler);
210 client_linux->dma_handler = NULL;
211 goto err_free;
212 }
213
214 client = &client_linux->cs_client;
215 client->snapshot_size = client_linux->dma_handler->size;
216 if (client->snapshot_size < CSS_MIN_CLIENT_SNAPSHOT_SIZE) {
217 ret = -ENOMEM;
218 goto err_put;
219 }
220
221 client->snapshot = (struct gk20a_cs_snapshot_fifo *)
222 dma_buf_vmap(client_linux->dma_handler);
223 if (!client->snapshot) {
224 ret = -ENOMEM;
225 goto err_put;
226 }
227
228 ch->cs_client = client;
229
230 ret = gr_gk20a_css_attach(ch,
231 perfmon_id_count,
232 perfmon_id_start,
233 ch->cs_client);
234
235 nvgpu_mutex_release(&ch->cs_client_mutex);
236
237 return ret;
238
239err_put:
240 dma_buf_put(client_linux->dma_handler);
241err_free:
242 nvgpu_kfree(g, client_linux);
243err:
244 nvgpu_mutex_release(&ch->cs_client_mutex);
245 return ret;
246}
247
248int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch)
249{
250 int ret;
251 struct gk20a_cs_snapshot_client_linux *client_linux;
252
253 nvgpu_mutex_acquire(&ch->cs_client_mutex);
254 if (!ch->cs_client) {
255 nvgpu_mutex_release(&ch->cs_client_mutex);
256 return 0;
257 }
258
259 client_linux = container_of(ch->cs_client,
260 struct gk20a_cs_snapshot_client_linux,
261 cs_client);
262
263 ret = gr_gk20a_css_detach(ch, ch->cs_client);
264
265 if (client_linux->dma_handler) {
266 if (ch->cs_client->snapshot)
267 dma_buf_vunmap(client_linux->dma_handler,
268 ch->cs_client->snapshot);
269 dma_buf_put(client_linux->dma_handler);
270 }
271
272 ch->cs_client = NULL;
273 nvgpu_kfree(ch->g, client_linux);
274
275 nvgpu_mutex_release(&ch->cs_client_mutex);
276
277 return ret;
278}
279#endif
280
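/* Enable or disable the channel watchdog and, if requested, update its timeout and debug-dump policy. */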
281static int gk20a_channel_set_wdt_status(struct channel_gk20a *ch,
282 struct nvgpu_channel_wdt_args *args)
283{
284 u32 status = args->wdt_status & (NVGPU_IOCTL_CHANNEL_DISABLE_WDT |
285 NVGPU_IOCTL_CHANNEL_ENABLE_WDT);
286
287 if (status == NVGPU_IOCTL_CHANNEL_DISABLE_WDT)
288 ch->timeout.enabled = false;
289 else if (status == NVGPU_IOCTL_CHANNEL_ENABLE_WDT)
290 ch->timeout.enabled = true;
291 else
292 return -EINVAL;
293
294 if (args->wdt_status & NVGPU_IOCTL_CHANNEL_WDT_FLAG_SET_TIMEOUT)
295 ch->timeout.limit_ms = args->timeout_ms;
296
297 ch->timeout.debug_dump = (args->wdt_status &
298 NVGPU_IOCTL_CHANNEL_WDT_FLAG_DISABLE_DUMP) == 0;
299
300 return 0;
301}
302
303static void gk20a_channel_free_error_notifiers(struct channel_gk20a *ch)
304{
305 struct nvgpu_channel_linux *priv = ch->os_priv;
306
307 nvgpu_mutex_acquire(&priv->error_notifier.mutex);
308 if (priv->error_notifier.dmabuf) {
309 dma_buf_vunmap(priv->error_notifier.dmabuf, priv->error_notifier.vaddr);
310 dma_buf_put(priv->error_notifier.dmabuf);
311 priv->error_notifier.dmabuf = NULL;
312 priv->error_notifier.notification = NULL;
313 priv->error_notifier.vaddr = NULL;
314 }
315 nvgpu_mutex_release(&priv->error_notifier.mutex);
316}
317
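/* Map the user-supplied dmabuf at the given offset and install it as the channel's error notifier. */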
318static int gk20a_init_error_notifier(struct channel_gk20a *ch,
319 struct nvgpu_set_error_notifier *args)
320{
321 struct dma_buf *dmabuf;
322 void *va;
323 u64 end = args->offset + sizeof(struct nvgpu_notification);
324 struct nvgpu_channel_linux *priv = ch->os_priv;
325
326 if (!args->mem) {
327 pr_err("gk20a_init_error_notifier: invalid memory handle\n");
328 return -EINVAL;
329 }
330
331 dmabuf = dma_buf_get(args->mem);
332
333 gk20a_channel_free_error_notifiers(ch);
334
335 if (IS_ERR(dmabuf)) {
336 pr_err("Invalid handle: %d\n", args->mem);
337 return -EINVAL;
338 }
339
340 if (end > dmabuf->size || end < sizeof(struct nvgpu_notification)) {
341 dma_buf_put(dmabuf);
342 nvgpu_err(ch->g, "gk20a_init_error_notifier: invalid offset");
343 return -EINVAL;
344 }
345
346 nvgpu_speculation_barrier();
347
348 /* map handle */
349 va = dma_buf_vmap(dmabuf);
350 if (!va) {
351 dma_buf_put(dmabuf);
352 pr_err("Cannot map notifier handle\n");
353 return -ENOMEM;
354 }
355
356 priv->error_notifier.notification = va + args->offset;
357 priv->error_notifier.vaddr = va;
358 memset(priv->error_notifier.notification, 0,
359 sizeof(struct nvgpu_notification));
360
361 /* set channel notifiers pointer */
362 nvgpu_mutex_acquire(&priv->error_notifier.mutex);
363 priv->error_notifier.dmabuf = dmabuf;
364 nvgpu_mutex_release(&priv->error_notifier.mutex);
365
366 return 0;
367}
368
369/*
370 * This returns the channel with a reference. The caller must
371 * gk20a_channel_put() the ref back after use.
372 *
373 * NULL is returned if the channel was not found.
374 */
375struct channel_gk20a *gk20a_get_channel_from_file(int fd)
376{
377 struct channel_gk20a *ch;
378 struct channel_priv *priv;
379 struct file *f = fget(fd);
380
381 if (!f)
382 return NULL;
383
384 if (f->f_op != &gk20a_channel_ops) {
385 fput(f);
386 return NULL;
387 }
388
389 priv = (struct channel_priv *)f->private_data;
390 ch = gk20a_channel_get(priv->c);
391 fput(f);
392 return ch;
393}
394
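/* Release callback for the channel file: close the channel, free its error notifier and drop the per-file private data. */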
395int gk20a_channel_release(struct inode *inode, struct file *filp)
396{
397 struct channel_priv *priv = filp->private_data;
398 struct channel_gk20a *ch;
399 struct gk20a *g;
400
401 int err;
402
403 /* We could still end up here even if the channel_open failed, e.g.
404 * if we ran out of hw channel IDs.
405 */
406 if (!priv)
407 return 0;
408
409 ch = priv->c;
410 g = priv->g;
411
412 err = gk20a_busy(g);
413 if (err) {
414 nvgpu_err(g, "failed to release a channel!");
415 goto channel_release;
416 }
417
418 trace_gk20a_channel_release(dev_name(dev_from_gk20a(g)));
419
420 gk20a_channel_close(ch);
421 gk20a_channel_free_error_notifiers(ch);
422
423 gk20a_idle(g);
424
425channel_release:
426 gk20a_put(g);
427 nvgpu_kfree(g, filp->private_data);
428 filp->private_data = NULL;
429 return 0;
430}
431
432/* note: runlist_id -1 is a synonym for the ENGINE_GR_GK20A runlist id */
433static int __gk20a_channel_open(struct gk20a *g,
434 struct file *filp, s32 runlist_id)
435{
436 int err;
437 struct channel_gk20a *ch;
438 struct channel_priv *priv;
439
440 nvgpu_log_fn(g, " ");
441
442 g = gk20a_get(g);
443 if (!g)
444 return -ENODEV;
445
446 trace_gk20a_channel_open(dev_name(dev_from_gk20a(g)));
447
448 priv = nvgpu_kzalloc(g, sizeof(*priv));
449 if (!priv) {
450 err = -ENOMEM;
451 goto free_ref;
452 }
453
454 err = gk20a_busy(g);
455 if (err) {
456 nvgpu_err(g, "failed to power on, %d", err);
457 goto fail_busy;
458 }
 459 /* All user space channels should be non-privileged */
460 ch = gk20a_open_new_channel(g, runlist_id, false,
461 nvgpu_current_pid(g), nvgpu_current_tid(g));
462 gk20a_idle(g);
463 if (!ch) {
464 nvgpu_err(g,
465 "failed to get f");
466 err = -ENOMEM;
467 goto fail_busy;
468 }
469
470 gk20a_channel_trace_sched_param(
471 trace_gk20a_channel_sched_defaults, ch);
472
473 priv->g = g;
474 priv->c = ch;
475
476 filp->private_data = priv;
477 return 0;
478
479fail_busy:
480 nvgpu_kfree(g, priv);
481free_ref:
482 gk20a_put(g);
483 return err;
484}
485
486int gk20a_channel_open(struct inode *inode, struct file *filp)
487{
488 struct nvgpu_os_linux *l = container_of(inode->i_cdev,
489 struct nvgpu_os_linux, channel.cdev);
490 struct gk20a *g = &l->g;
491 int ret;
492
493 nvgpu_log_fn(g, "start");
494 ret = __gk20a_channel_open(g, filp, -1);
495
496 nvgpu_log_fn(g, "end");
497 return ret;
498}
499
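/* Open a new channel on the requested runlist and return it to userspace as an anon-inode fd. */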
500int gk20a_channel_open_ioctl(struct gk20a *g,
501 struct nvgpu_channel_open_args *args)
502{
503 int err;
504 int fd;
505 struct file *file;
506 char name[64];
507 s32 runlist_id = args->in.runlist_id;
508 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
509
510 err = get_unused_fd_flags(O_RDWR);
511 if (err < 0)
512 return err;
513 fd = err;
514
515 snprintf(name, sizeof(name), "nvhost-%s-fd%d",
516 dev_name(dev_from_gk20a(g)), fd);
517
518 file = anon_inode_getfile(name, l->channel.cdev.ops, NULL, O_RDWR);
519 if (IS_ERR(file)) {
520 err = PTR_ERR(file);
521 goto clean_up;
522 }
523
524 err = __gk20a_channel_open(g, file, runlist_id);
525 if (err)
526 goto clean_up_file;
527
528 fd_install(fd, file);
529 args->out.channel_fd = fd;
530 return 0;
531
532clean_up_file:
533 fput(file);
534clean_up:
535 put_unused_fd(fd);
536 return err;
537}
538
539static u32 nvgpu_setup_bind_user_flags_to_common_flags(u32 user_flags)
540{
541 u32 flags = 0;
542
543 if (user_flags & NVGPU_CHANNEL_SETUP_BIND_FLAGS_VPR_ENABLED)
544 flags |= NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR;
545
546 if (user_flags & NVGPU_CHANNEL_SETUP_BIND_FLAGS_DETERMINISTIC)
547 flags |= NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC;
548
549 if (user_flags & NVGPU_CHANNEL_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE)
550 flags |= NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE;
551
552 if (user_flags & NVGPU_CHANNEL_SETUP_BIND_FLAGS_USERMODE_SUPPORT)
553 flags |= NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT;
554
555 return flags;
556}
557
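/* Translate the ioctl SETUP_BIND arguments into the common nvgpu_setup_bind_args layout. */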
558static void nvgpu_get_setup_bind_args(
559 struct nvgpu_channel_setup_bind_args *channel_setup_bind_args,
560 struct nvgpu_setup_bind_args *setup_bind_args)
561{
562 setup_bind_args->num_gpfifo_entries =
563 channel_setup_bind_args->num_gpfifo_entries;
564 setup_bind_args->num_inflight_jobs =
565 channel_setup_bind_args->num_inflight_jobs;
566 setup_bind_args->userd_dmabuf_fd =
567 channel_setup_bind_args->userd_dmabuf_fd;
568 setup_bind_args->userd_dmabuf_offset =
569 channel_setup_bind_args->userd_dmabuf_offset;
570 setup_bind_args->gpfifo_dmabuf_fd =
571 channel_setup_bind_args->gpfifo_dmabuf_fd;
572 setup_bind_args->gpfifo_dmabuf_offset =
573 channel_setup_bind_args->gpfifo_dmabuf_offset;
574 setup_bind_args->flags = nvgpu_setup_bind_user_flags_to_common_flags(
575 channel_setup_bind_args->flags);
576}
577
578static void nvgpu_get_gpfifo_ex_args(
579 struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args,
580 struct nvgpu_setup_bind_args *setup_bind_args)
581{
582 setup_bind_args->num_gpfifo_entries = alloc_gpfifo_ex_args->num_entries;
583 setup_bind_args->num_inflight_jobs =
584 alloc_gpfifo_ex_args->num_inflight_jobs;
585 setup_bind_args->flags = nvgpu_setup_bind_user_flags_to_common_flags(
586 alloc_gpfifo_ex_args->flags);
587}
588
589static void nvgpu_get_gpfifo_args(
590 struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args,
591 struct nvgpu_setup_bind_args *setup_bind_args)
592{
593 /*
 594 * The kernel can insert one extra gpfifo entry before the
 595 * user-submitted gpfifos and another one after, for internal use.
596 * Triple the requested size.
597 */
598 setup_bind_args->num_gpfifo_entries =
599 alloc_gpfifo_args->num_entries * 3;
600 setup_bind_args->num_inflight_jobs = 0;
601 setup_bind_args->flags = nvgpu_setup_bind_user_flags_to_common_flags(
602 alloc_gpfifo_args->flags);
603}
604
605static void nvgpu_get_fence_args(
606 struct nvgpu_fence *fence_args_in,
607 struct nvgpu_channel_fence *fence_args_out)
608{
609 fence_args_out->id = fence_args_in->id;
610 fence_args_out->value = fence_args_in->value;
611}
612
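/* Sleep until the semaphore word at the given dmabuf offset reaches the payload value, the channel times out, or the wait is interrupted. */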
613static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
614 ulong id, u32 offset,
615 u32 payload, u32 timeout)
616{
617 struct dma_buf *dmabuf;
618 void *data;
619 u32 *semaphore;
620 int ret = 0;
621
622 /* do not wait if channel has timed out */
623 if (gk20a_channel_check_timedout(ch)) {
624 return -ETIMEDOUT;
625 }
626
627 dmabuf = dma_buf_get(id);
628 if (IS_ERR(dmabuf)) {
629 nvgpu_err(ch->g, "invalid notifier nvmap handle 0x%lx", id);
630 return -EINVAL;
631 }
632
633 data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
634 if (!data) {
635 nvgpu_err(ch->g, "failed to map notifier memory");
636 ret = -EINVAL;
637 goto cleanup_put;
638 }
639
640 semaphore = data + (offset & ~PAGE_MASK);
641
642 ret = NVGPU_COND_WAIT_INTERRUPTIBLE(
643 &ch->semaphore_wq,
644 *semaphore == payload ||
645 gk20a_channel_check_timedout(ch),
646 timeout);
647
648 dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
649cleanup_put:
650 dma_buf_put(dmabuf);
651 return ret;
652}
653
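/* Handle NVGPU_IOCTL_CHANNEL_WAIT: wait on either a notifier or a semaphore, as selected by args->type. */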
654static int gk20a_channel_wait(struct channel_gk20a *ch,
655 struct nvgpu_wait_args *args)
656{
657 struct dma_buf *dmabuf;
658 struct gk20a *g = ch->g;
659 struct notification *notif;
660 struct timespec tv;
661 u64 jiffies;
662 ulong id;
663 u32 offset;
664 int remain, ret = 0;
665 u64 end;
666
667 nvgpu_log_fn(g, " ");
668
669 if (gk20a_channel_check_timedout(ch)) {
670 return -ETIMEDOUT;
671 }
672
673 switch (args->type) {
674 case NVGPU_WAIT_TYPE_NOTIFIER:
675 id = args->condition.notifier.dmabuf_fd;
676 offset = args->condition.notifier.offset;
677 end = offset + sizeof(struct notification);
678
679 dmabuf = dma_buf_get(id);
680 if (IS_ERR(dmabuf)) {
681 nvgpu_err(g, "invalid notifier nvmap handle 0x%lx",
682 id);
683 return -EINVAL;
684 }
685
686 if (end > dmabuf->size || end < sizeof(struct notification)) {
687 dma_buf_put(dmabuf);
688 nvgpu_err(g, "invalid notifier offset");
689 return -EINVAL;
690 }
691
692 nvgpu_speculation_barrier();
693
694 notif = dma_buf_vmap(dmabuf);
695 if (!notif) {
696 nvgpu_err(g, "failed to map notifier memory");
697 return -ENOMEM;
698 }
699
700 notif = (struct notification *)((uintptr_t)notif + offset);
701
 702 /* the user should set the notification status to pending
 703 * before calling this ioctl */
704 remain = NVGPU_COND_WAIT_INTERRUPTIBLE(
705 &ch->notifier_wq,
706 notif->status == 0 ||
707 gk20a_channel_check_timedout(ch),
708 args->timeout);
709
710 if (remain == 0 && notif->status != 0) {
711 ret = -ETIMEDOUT;
712 goto notif_clean_up;
713 } else if (remain < 0) {
714 ret = -EINTR;
715 goto notif_clean_up;
716 }
717
718 /* TBD: fill in correct information */
719 jiffies = get_jiffies_64();
720 jiffies_to_timespec(jiffies, &tv);
721 notif->timestamp.nanoseconds[0] = tv.tv_nsec;
722 notif->timestamp.nanoseconds[1] = tv.tv_sec;
723 notif->info32 = 0xDEADBEEF; /* should be object name */
724 notif->info16 = ch->chid; /* should be method offset */
725
726notif_clean_up:
727 dma_buf_vunmap(dmabuf, notif);
728 return ret;
729
730 case NVGPU_WAIT_TYPE_SEMAPHORE:
731 ret = gk20a_channel_wait_semaphore(ch,
732 args->condition.semaphore.dmabuf_fd,
733 args->condition.semaphore.offset,
734 args->condition.semaphore.payload,
735 args->timeout);
736
737 break;
738
739 default:
740 ret = -EINVAL;
741 break;
742 }
743
744 return ret;
745}
746
747static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
748 struct nvgpu_zcull_bind_args *args)
749{
750 struct gk20a *g = ch->g;
751 struct gr_gk20a *gr = &g->gr;
752
753 nvgpu_log_fn(gr->g, " ");
754
755 return g->ops.gr.bind_ctxsw_zcull(g, gr, ch,
756 args->gpu_va, args->mode);
757}
758
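/* Handle NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO: submit the user gpfifo entries and return the post fence as a sync fd or a syncpoint id/value pair. */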
759static int gk20a_ioctl_channel_submit_gpfifo(
760 struct channel_gk20a *ch,
761 struct nvgpu_submit_gpfifo_args *args)
762{
763 struct nvgpu_channel_fence fence;
764 struct gk20a_fence *fence_out;
765 struct fifo_profile_gk20a *profile = NULL;
766 u32 submit_flags = 0;
767 int fd = -1;
768 struct gk20a *g = ch->g;
769 struct nvgpu_gpfifo_userdata userdata;
770
771 int ret = 0;
772 nvgpu_log_fn(g, " ");
773
774 profile = gk20a_fifo_profile_acquire(ch->g);
775 gk20a_fifo_profile_snapshot(profile, PROFILE_IOCTL_ENTRY);
776
777 if (gk20a_channel_check_timedout(ch)) {
778 return -ETIMEDOUT;
779 }
780
781 nvgpu_get_fence_args(&args->fence, &fence);
782 submit_flags =
783 nvgpu_submit_gpfifo_user_flags_to_common_flags(args->flags);
784
 785 /* Try to allocate an fd here */
786 if ((args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)
787 && (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE)) {
788 fd = get_unused_fd_flags(O_RDWR);
789 if (fd < 0)
790 return fd;
791 }
792
793 userdata.entries = (struct nvgpu_gpfifo_entry __user *)
794 (uintptr_t)args->gpfifo;
795 userdata.context = NULL;
796
797 ret = nvgpu_submit_channel_gpfifo_user(ch,
798 userdata, args->num_entries,
799 submit_flags, &fence, &fence_out, profile);
800
801 if (ret) {
802 if (fd != -1)
803 put_unused_fd(fd);
804 goto clean_up;
805 }
806
807 /* Convert fence_out to something we can pass back to user space. */
808 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
809 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
810 ret = gk20a_fence_install_fd(fence_out, fd);
811 if (ret)
812 put_unused_fd(fd);
813 else
814 args->fence.id = fd;
815 } else {
816 args->fence.id = fence_out->syncpt_id;
817 args->fence.value = fence_out->syncpt_value;
818 }
819 }
820 gk20a_fence_put(fence_out);
821
822 gk20a_fifo_profile_snapshot(profile, PROFILE_IOCTL_EXIT);
823 if (profile)
824 gk20a_fifo_profile_release(ch->g, profile);
825
826clean_up:
827 return ret;
828}
829
830/*
 831 * Convert the Linux-specific runlist level of the form NVGPU_RUNLIST_INTERLEAVE_LEVEL_*
 832 * to the common runlist level of the form NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_*
833 */
834u32 nvgpu_get_common_runlist_level(u32 level)
835{
836 nvgpu_speculation_barrier();
837 switch (level) {
838 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW:
839 return NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW;
840 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
841 return NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM;
842 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH:
843 return NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH;
844 default:
845 pr_err("%s: incorrect runlist level\n", __func__);
846 }
847
848 return level;
849}
850
851static u32 nvgpu_obj_ctx_user_flags_to_common_flags(u32 user_flags)
852{
853 u32 flags = 0;
854
855 if (user_flags & NVGPU_ALLOC_OBJ_FLAGS_GFXP)
856 flags |= NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP;
857
858 if (user_flags & NVGPU_ALLOC_OBJ_FLAGS_CILP)
859 flags |= NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP;
860
861 return flags;
862}
863
864static int nvgpu_ioctl_channel_alloc_obj_ctx(struct channel_gk20a *ch,
865 u32 class_num, u32 user_flags)
866{
867 return ch->g->ops.gr.alloc_obj_ctx(ch, class_num,
868 nvgpu_obj_ctx_user_flags_to_common_flags(user_flags));
869}
870
871/*
872 * Convert common preemption mode flags of the form NVGPU_PREEMPTION_MODE_GRAPHICS_*
873 * into linux preemption mode flags of the form NVGPU_GRAPHICS_PREEMPTION_MODE_*
874 */
875u32 nvgpu_get_ioctl_graphics_preempt_mode_flags(u32 graphics_preempt_mode_flags)
876{
877 u32 flags = 0;
878
879 if (graphics_preempt_mode_flags & NVGPU_PREEMPTION_MODE_GRAPHICS_WFI)
880 flags |= NVGPU_GRAPHICS_PREEMPTION_MODE_WFI;
881 if (graphics_preempt_mode_flags & NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP)
882 flags |= NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
883
884 return flags;
885}
886
887/*
888 * Convert common preemption mode flags of the form NVGPU_PREEMPTION_MODE_COMPUTE_*
889 * into linux preemption mode flags of the form NVGPU_COMPUTE_PREEMPTION_MODE_*
890 */
891u32 nvgpu_get_ioctl_compute_preempt_mode_flags(u32 compute_preempt_mode_flags)
892{
893 u32 flags = 0;
894
895 if (compute_preempt_mode_flags & NVGPU_PREEMPTION_MODE_COMPUTE_WFI)
896 flags |= NVGPU_COMPUTE_PREEMPTION_MODE_WFI;
897 if (compute_preempt_mode_flags & NVGPU_PREEMPTION_MODE_COMPUTE_CTA)
898 flags |= NVGPU_COMPUTE_PREEMPTION_MODE_CTA;
899 if (compute_preempt_mode_flags & NVGPU_PREEMPTION_MODE_COMPUTE_CILP)
900 flags |= NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
901
902 return flags;
903}
904
905/*
906 * Convert common preemption modes of the form NVGPU_PREEMPTION_MODE_GRAPHICS_*
907 * into linux preemption modes of the form NVGPU_GRAPHICS_PREEMPTION_MODE_*
908 */
909u32 nvgpu_get_ioctl_graphics_preempt_mode(u32 graphics_preempt_mode)
910{
911 switch (graphics_preempt_mode) {
912 case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
913 return NVGPU_GRAPHICS_PREEMPTION_MODE_WFI;
914 case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
915 return NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
916 }
917
918 return graphics_preempt_mode;
919}
920
921/*
922 * Convert common preemption modes of the form NVGPU_PREEMPTION_MODE_COMPUTE_*
923 * into linux preemption modes of the form NVGPU_COMPUTE_PREEMPTION_MODE_*
924 */
925u32 nvgpu_get_ioctl_compute_preempt_mode(u32 compute_preempt_mode)
926{
927 switch (compute_preempt_mode) {
928 case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
929 return NVGPU_COMPUTE_PREEMPTION_MODE_WFI;
930 case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
931 return NVGPU_COMPUTE_PREEMPTION_MODE_CTA;
932 case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
933 return NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
934 }
935
936 return compute_preempt_mode;
937}
938
939/*
940 * Convert linux preemption modes of the form NVGPU_GRAPHICS_PREEMPTION_MODE_*
941 * into common preemption modes of the form NVGPU_PREEMPTION_MODE_GRAPHICS_*
942 */
943static u32 nvgpu_get_common_graphics_preempt_mode(u32 graphics_preempt_mode)
944{
945 nvgpu_speculation_barrier();
946 switch (graphics_preempt_mode) {
947 case NVGPU_GRAPHICS_PREEMPTION_MODE_WFI:
948 return NVGPU_PREEMPTION_MODE_GRAPHICS_WFI;
949 case NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP:
950 return NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
951 }
952
953 return graphics_preempt_mode;
954}
955
956/*
957 * Convert linux preemption modes of the form NVGPU_COMPUTE_PREEMPTION_MODE_*
958 * into common preemption modes of the form NVGPU_PREEMPTION_MODE_COMPUTE_*
959 */
960static u32 nvgpu_get_common_compute_preempt_mode(u32 compute_preempt_mode)
961{
962 nvgpu_speculation_barrier();
963 switch (compute_preempt_mode) {
964 case NVGPU_COMPUTE_PREEMPTION_MODE_WFI:
965 return NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
966 case NVGPU_COMPUTE_PREEMPTION_MODE_CTA:
967 return NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
968 case NVGPU_COMPUTE_PREEMPTION_MODE_CILP:
969 return NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
970 }
971
972 return compute_preempt_mode;
973}
974
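/* Convert the ioctl preemption modes to their common equivalents and apply them through the gr HAL. */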
975static int nvgpu_ioctl_channel_set_preemption_mode(struct channel_gk20a *ch,
976 u32 graphics_preempt_mode, u32 compute_preempt_mode)
977{
978 int err;
979
980 if (ch->g->ops.gr.set_preemption_mode) {
981 err = gk20a_busy(ch->g);
982 if (err) {
983 nvgpu_err(ch->g, "failed to power on, %d", err);
984 return err;
985 }
986 err = ch->g->ops.gr.set_preemption_mode(ch,
987 nvgpu_get_common_graphics_preempt_mode(graphics_preempt_mode),
988 nvgpu_get_common_compute_preempt_mode(compute_preempt_mode));
989 gk20a_idle(ch->g);
990 } else {
991 err = -EINVAL;
992 }
993
994 return err;
995}
996
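/* Create the channel's user sync object on first use and report its syncpoint id, current max value and, when supported, its GPU VA. */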
997static int nvgpu_ioctl_channel_get_user_syncpoint(struct channel_gk20a *ch,
998 struct nvgpu_get_user_syncpoint_args *args)
999{
1000#ifdef CONFIG_TEGRA_GK20A_NVHOST
1001 struct gk20a *g = ch->g;
1002 int err;
1003
1004 if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT)) {
1005 nvgpu_err(g, "user syncpoints not supported");
1006 return -EINVAL;
1007 }
1008
1009 if (!nvgpu_has_syncpoints(g)) {
1010 nvgpu_err(g, "syncpoints not supported");
1011 return -EINVAL;
1012 }
1013
1014 if (g->aggressive_sync_destroy_thresh) {
1015 nvgpu_err(g, "sufficient syncpoints not available");
1016 return -EINVAL;
1017 }
1018
1019 nvgpu_mutex_acquire(&ch->sync_lock);
1020 if (ch->user_sync) {
1021 nvgpu_mutex_release(&ch->sync_lock);
1022 } else {
1023 ch->user_sync = nvgpu_channel_sync_create(ch, true);
1024 if (!ch->user_sync) {
1025 nvgpu_mutex_release(&ch->sync_lock);
1026 return -ENOMEM;
1027 }
1028 nvgpu_mutex_release(&ch->sync_lock);
1029
1030 if (g->ops.fifo.resetup_ramfc) {
1031 err = g->ops.fifo.resetup_ramfc(ch);
1032 if (err)
1033 return err;
1034 }
1035 }
1036
1037 args->syncpoint_id = ch->user_sync->syncpt_id(ch->user_sync);
1038 args->syncpoint_max = nvgpu_nvhost_syncpt_read_maxval(g->nvhost_dev,
1039 args->syncpoint_id);
1040 if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS))
1041 args->gpu_va = ch->user_sync->syncpt_address(ch->user_sync);
1042 else
1043 args->gpu_va = 0;
1044
1045 return 0;
1046#else
1047 return -EINVAL;
1048#endif
1049}
1050
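/* Top-level ioctl dispatcher for the channel device: copies args in/out and serializes against other channel ioctls. */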
1051long gk20a_channel_ioctl(struct file *filp,
1052 unsigned int cmd, unsigned long arg)
1053{
1054 struct channel_priv *priv = filp->private_data;
1055 struct channel_gk20a *ch = priv->c;
1056 struct device *dev = dev_from_gk20a(ch->g);
1057 u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
1058 int err = 0;
1059 struct gk20a *g = ch->g;
1060
1061 nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
1062
1063 if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
1064 (_IOC_NR(cmd) == 0) ||
1065 (_IOC_NR(cmd) > NVGPU_IOCTL_CHANNEL_LAST) ||
1066 (_IOC_SIZE(cmd) > NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE))
1067 return -EINVAL;
1068
1069 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1070 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1071 return -EFAULT;
1072 }
1073
1074 /* take a ref or return timeout if channel refs can't be taken */
1075 ch = gk20a_channel_get(ch);
1076 if (!ch)
1077 return -ETIMEDOUT;
1078
 1079 /* protect our sanity for threaded userspace - most of the channel code
 1080 * is not thread-safe */
1081 nvgpu_mutex_acquire(&ch->ioctl_lock);
1082
1083 /* this ioctl call keeps a ref to the file which keeps a ref to the
1084 * channel */
1085
1086 nvgpu_speculation_barrier();
1087 switch (cmd) {
1088 case NVGPU_IOCTL_CHANNEL_OPEN:
1089 err = gk20a_channel_open_ioctl(ch->g,
1090 (struct nvgpu_channel_open_args *)buf);
1091 break;
1092 case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
1093 break;
1094 case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
1095 {
1096 struct nvgpu_alloc_obj_ctx_args *args =
1097 (struct nvgpu_alloc_obj_ctx_args *)buf;
1098
1099 err = gk20a_busy(ch->g);
1100 if (err) {
1101 dev_err(dev,
1102 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1103 __func__, cmd);
1104 break;
1105 }
1106 err = nvgpu_ioctl_channel_alloc_obj_ctx(ch, args->class_num, args->flags);
1107 gk20a_idle(ch->g);
1108 break;
1109 }
1110 case NVGPU_IOCTL_CHANNEL_SETUP_BIND:
1111 {
1112 struct nvgpu_channel_setup_bind_args *channel_setup_bind_args =
1113 (struct nvgpu_channel_setup_bind_args *)buf;
1114 struct nvgpu_setup_bind_args setup_bind_args;
1115
1116 nvgpu_get_setup_bind_args(channel_setup_bind_args,
1117 &setup_bind_args);
1118
1119 err = gk20a_busy(ch->g);
1120 if (err) {
1121 dev_err(dev,
1122 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1123 __func__, cmd);
1124 break;
1125 }
1126
1127 if (!is_power_of_2(setup_bind_args.num_gpfifo_entries)) {
1128 err = -EINVAL;
1129 gk20a_idle(ch->g);
1130 break;
1131 }
1132 err = nvgpu_channel_setup_bind(ch, &setup_bind_args);
1133 channel_setup_bind_args->work_submit_token =
1134 setup_bind_args.work_submit_token;
1135 gk20a_idle(ch->g);
1136 break;
1137 }
1138 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX:
1139 {
1140 struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args =
1141 (struct nvgpu_alloc_gpfifo_ex_args *)buf;
1142 struct nvgpu_setup_bind_args setup_bind_args;
1143
1144 nvgpu_get_gpfifo_ex_args(alloc_gpfifo_ex_args, &setup_bind_args);
1145
1146 err = gk20a_busy(ch->g);
1147 if (err) {
1148 dev_err(dev,
1149 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1150 __func__, cmd);
1151 break;
1152 }
1153
1154 if (!is_power_of_2(alloc_gpfifo_ex_args->num_entries)) {
1155 err = -EINVAL;
1156 gk20a_idle(ch->g);
1157 break;
1158 }
1159 err = nvgpu_channel_setup_bind(ch, &setup_bind_args);
1160 gk20a_idle(ch->g);
1161 break;
1162 }
1163 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
1164 {
1165 struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args =
1166 (struct nvgpu_alloc_gpfifo_args *)buf;
1167 struct nvgpu_setup_bind_args setup_bind_args;
1168
1169 nvgpu_get_gpfifo_args(alloc_gpfifo_args, &setup_bind_args);
1170
1171 err = gk20a_busy(ch->g);
1172 if (err) {
1173 dev_err(dev,
1174 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1175 __func__, cmd);
1176 break;
1177 }
1178
1179 err = nvgpu_channel_setup_bind(ch, &setup_bind_args);
1180 gk20a_idle(ch->g);
1181 break;
1182 }
1183 case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO:
1184 err = gk20a_ioctl_channel_submit_gpfifo(ch,
1185 (struct nvgpu_submit_gpfifo_args *)buf);
1186 break;
1187 case NVGPU_IOCTL_CHANNEL_WAIT:
1188 err = gk20a_busy(ch->g);
1189 if (err) {
1190 dev_err(dev,
1191 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1192 __func__, cmd);
1193 break;
1194 }
1195
 1196 /* waiting is thread-safe; not dropping this mutex could
 1197 * deadlock under certain conditions */
1198 nvgpu_mutex_release(&ch->ioctl_lock);
1199
1200 err = gk20a_channel_wait(ch,
1201 (struct nvgpu_wait_args *)buf);
1202
1203 nvgpu_mutex_acquire(&ch->ioctl_lock);
1204
1205 gk20a_idle(ch->g);
1206 break;
1207 case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
1208 err = gk20a_busy(ch->g);
1209 if (err) {
1210 dev_err(dev,
1211 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1212 __func__, cmd);
1213 break;
1214 }
1215 err = gk20a_channel_zcull_bind(ch,
1216 (struct nvgpu_zcull_bind_args *)buf);
1217 gk20a_idle(ch->g);
1218 break;
1219 case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
1220 err = gk20a_busy(ch->g);
1221 if (err) {
1222 dev_err(dev,
1223 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1224 __func__, cmd);
1225 break;
1226 }
1227 err = gk20a_init_error_notifier(ch,
1228 (struct nvgpu_set_error_notifier *)buf);
1229 gk20a_idle(ch->g);
1230 break;
1231 case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT:
1232 {
1233 u32 timeout =
1234 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
1235 nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1236 timeout, ch->chid);
1237 ch->timeout_ms_max = timeout;
1238 gk20a_channel_trace_sched_param(
1239 trace_gk20a_channel_set_timeout, ch);
1240 break;
1241 }
1242 case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1243 {
1244 u32 timeout =
1245 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
1246 bool timeout_debug_dump = !((u32)
1247 ((struct nvgpu_set_timeout_ex_args *)buf)->flags &
1248 (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
1249 nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1250 timeout, ch->chid);
1251 ch->timeout_ms_max = timeout;
1252 ch->timeout_debug_dump = timeout_debug_dump;
1253 gk20a_channel_trace_sched_param(
1254 trace_gk20a_channel_set_timeout, ch);
1255 break;
1256 }
1257 case NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT:
1258 ((struct nvgpu_get_param_args *)buf)->value =
1259 gk20a_channel_check_timedout(ch);
1260 break;
1261 case NVGPU_IOCTL_CHANNEL_ENABLE:
1262 err = gk20a_busy(ch->g);
1263 if (err) {
1264 dev_err(dev,
1265 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1266 __func__, cmd);
1267 break;
1268 }
1269 if (ch->g->ops.fifo.enable_channel)
1270 ch->g->ops.fifo.enable_channel(ch);
1271 else
1272 err = -ENOSYS;
1273 gk20a_idle(ch->g);
1274 break;
1275 case NVGPU_IOCTL_CHANNEL_DISABLE:
1276 err = gk20a_busy(ch->g);
1277 if (err) {
1278 dev_err(dev,
1279 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1280 __func__, cmd);
1281 break;
1282 }
1283 if (ch->g->ops.fifo.disable_channel)
1284 ch->g->ops.fifo.disable_channel(ch);
1285 else
1286 err = -ENOSYS;
1287 gk20a_idle(ch->g);
1288 break;
1289 case NVGPU_IOCTL_CHANNEL_PREEMPT:
1290 err = gk20a_busy(ch->g);
1291 if (err) {
1292 dev_err(dev,
1293 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1294 __func__, cmd);
1295 break;
1296 }
1297 err = gk20a_fifo_preempt(ch->g, ch);
1298 gk20a_idle(ch->g);
1299 break;
1300 case NVGPU_IOCTL_CHANNEL_RESCHEDULE_RUNLIST:
1301 if (!capable(CAP_SYS_NICE)) {
1302 err = -EPERM;
1303 break;
1304 }
1305 if (!ch->g->ops.fifo.reschedule_runlist) {
1306 err = -ENOSYS;
1307 break;
1308 }
1309 err = gk20a_busy(ch->g);
1310 if (err) {
1311 dev_err(dev,
1312 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1313 __func__, cmd);
1314 break;
1315 }
1316 err = ch->g->ops.fifo.reschedule_runlist(ch,
1317 NVGPU_RESCHEDULE_RUNLIST_PREEMPT_NEXT &
1318 ((struct nvgpu_reschedule_runlist_args *)buf)->flags);
1319 gk20a_idle(ch->g);
1320 break;
1321 case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
1322 err = gk20a_busy(ch->g);
1323 if (err) {
1324 dev_err(dev,
1325 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1326 __func__, cmd);
1327 break;
1328 }
1329 err = ch->g->ops.fifo.force_reset_ch(ch,
1330 NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR, true);
1331 gk20a_idle(ch->g);
1332 break;
1333 case NVGPU_IOCTL_CHANNEL_WDT:
1334 err = gk20a_channel_set_wdt_status(ch,
1335 (struct nvgpu_channel_wdt_args *)buf);
1336 break;
1337 case NVGPU_IOCTL_CHANNEL_SET_PREEMPTION_MODE:
1338 err = nvgpu_ioctl_channel_set_preemption_mode(ch,
1339 ((struct nvgpu_preemption_mode_args *)buf)->graphics_preempt_mode,
1340 ((struct nvgpu_preemption_mode_args *)buf)->compute_preempt_mode);
1341 break;
1342 case NVGPU_IOCTL_CHANNEL_SET_BOOSTED_CTX:
1343 if (ch->g->ops.gr.set_boosted_ctx) {
1344 bool boost =
1345 ((struct nvgpu_boosted_ctx_args *)buf)->boost;
1346
1347 err = gk20a_busy(ch->g);
1348 if (err) {
1349 dev_err(dev,
1350 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1351 __func__, cmd);
1352 break;
1353 }
1354 err = ch->g->ops.gr.set_boosted_ctx(ch, boost);
1355 gk20a_idle(ch->g);
1356 } else {
1357 err = -EINVAL;
1358 }
1359 break;
1360 case NVGPU_IOCTL_CHANNEL_GET_USER_SYNCPOINT:
1361 err = gk20a_busy(ch->g);
1362 if (err) {
1363 dev_err(dev,
1364 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1365 __func__, cmd);
1366 break;
1367 }
1368 err = nvgpu_ioctl_channel_get_user_syncpoint(ch,
1369 (struct nvgpu_get_user_syncpoint_args *)buf);
1370 gk20a_idle(ch->g);
1371 break;
1372 default:
1373 dev_dbg(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1374 err = -ENOTTY;
1375 break;
1376 }
1377
1378 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1379 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1380
1381 nvgpu_mutex_release(&ch->ioctl_lock);
1382
1383 gk20a_channel_put(ch);
1384
1385 nvgpu_log_fn(g, "end");
1386
1387 return err;
1388}