Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_channel.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c  1452
1 file changed, 1452 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
new file mode 100644
index 00000000..31651795
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -0,0 +1,1452 @@
1/*
2 * GK20A Graphics channel
3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <trace/events/gk20a.h>
20#include <linux/file.h>
21#include <linux/anon_inodes.h>
22#include <linux/dma-buf.h>
23#include <linux/poll.h>
24#include <uapi/linux/nvgpu.h>
25#include <uapi/linux/nvgpu-t18x.h>
26
27#include <nvgpu/semaphore.h>
28#include <nvgpu/timers.h>
29#include <nvgpu/kmem.h>
30#include <nvgpu/log.h>
31#include <nvgpu/list.h>
32#include <nvgpu/debug.h>
33#include <nvgpu/enabled.h>
34
35#include "gk20a/gk20a.h"
36#include "gk20a/dbg_gpu_gk20a.h"
37#include "gk20a/fence_gk20a.h"
38
39#include "platform_gk20a.h"
40#include "ioctl_channel.h"
41#include "channel.h"
42#include "os_linux.h"
43#include "ctxsw_trace.h"
44
45static const char *gr_gk20a_graphics_preempt_mode_name(u32 graphics_preempt_mode)
46{
47 switch (graphics_preempt_mode) {
48 case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
49 return "WFI";
50 default:
51 return "?";
52 }
53}
54
55static const char *gr_gk20a_compute_preempt_mode_name(u32 compute_preempt_mode)
56{
57 switch (compute_preempt_mode) {
58 case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
59 return "WFI";
60 case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
61 return "CTA";
62 default:
63 return "?";
64 }
65}
66
67static void gk20a_channel_trace_sched_param(
68 void (*trace)(int chid, int tsgid, pid_t pid, u32 timeslice,
69 u32 timeout, const char *interleave,
70 const char *graphics_preempt_mode,
71 const char *compute_preempt_mode),
72 struct channel_gk20a *ch)
73{
74 (trace)(ch->chid, ch->tsgid, ch->pid,
75 gk20a_is_channel_marked_as_tsg(ch) ?
76 tsg_gk20a_from_ch(ch)->timeslice_us : ch->timeslice_us,
77 ch->timeout_ms_max,
78 gk20a_fifo_interleave_level_name(ch->interleave_level),
79 gr_gk20a_graphics_preempt_mode_name(ch->ch_ctx.gr_ctx ?
80 ch->ch_ctx.gr_ctx->graphics_preempt_mode : 0),
81 gr_gk20a_compute_preempt_mode_name(ch->ch_ctx.gr_ctx ?
82 ch->ch_ctx.gr_ctx->compute_preempt_mode : 0));
83}
84
85/*
86 * Although channels do have pointers back to the gk20a struct that they were
87 * created under, in cases where the driver is killed that pointer can be bad.
88 * The channel memory can be freed before the release() function for a given
89 * channel is called. This happens when the driver dies and userspace doesn't
90 * get a chance to call release() until after the entire gk20a driver data is
91 * unloaded and freed.
92 */
93struct channel_priv {
94 struct gk20a *g;
95 struct channel_gk20a *c;
96};
97
98#if defined(CONFIG_GK20A_CYCLE_STATS)
99
100static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
101 struct nvgpu_cycle_stats_args *args)
102{
103 struct dma_buf *dmabuf;
104 void *virtual_address;
105
106	/* is it allowed to handle calls for the current GPU? */
107 if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS))
108 return -ENOSYS;
109
110 if (args->dmabuf_fd && !ch->cyclestate.cyclestate_buffer_handler) {
111
112 /* set up new cyclestats buffer */
113 dmabuf = dma_buf_get(args->dmabuf_fd);
114 if (IS_ERR(dmabuf))
115 return PTR_ERR(dmabuf);
116 virtual_address = dma_buf_vmap(dmabuf);
117		if (!virtual_address) {
			dma_buf_put(dmabuf);
118			return -ENOMEM;
		}
119
120 ch->cyclestate.cyclestate_buffer_handler = dmabuf;
121 ch->cyclestate.cyclestate_buffer = virtual_address;
122 ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
123 return 0;
124
125 } else if (!args->dmabuf_fd &&
126 ch->cyclestate.cyclestate_buffer_handler) {
127 gk20a_channel_free_cycle_stats_buffer(ch);
128 return 0;
129
130 } else if (!args->dmabuf_fd &&
131 !ch->cyclestate.cyclestate_buffer_handler) {
132		/* no request from GL */
133 return 0;
134
135 } else {
136 pr_err("channel already has cyclestats buffer\n");
137 return -EINVAL;
138 }
139}
140
141static int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
142{
143 int ret;
144
145 nvgpu_mutex_acquire(&ch->cs_client_mutex);
146 if (ch->cs_client)
147 ret = gr_gk20a_css_flush(ch, ch->cs_client);
148 else
149 ret = -EBADF;
150 nvgpu_mutex_release(&ch->cs_client_mutex);
151
152 return ret;
153}
154
155static int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
156 u32 dmabuf_fd,
157 u32 perfmon_id_count,
158 u32 *perfmon_id_start)
159{
160 int ret;
161
162 nvgpu_mutex_acquire(&ch->cs_client_mutex);
163 if (ch->cs_client) {
164 ret = -EEXIST;
165 } else {
166 ret = gr_gk20a_css_attach(ch,
167 dmabuf_fd,
168 perfmon_id_count,
169 perfmon_id_start,
170 &ch->cs_client);
171 }
172 nvgpu_mutex_release(&ch->cs_client_mutex);
173
174 return ret;
175}
176
177static int gk20a_channel_cycle_stats_snapshot(struct channel_gk20a *ch,
178 struct nvgpu_cycle_stats_snapshot_args *args)
179{
180 int ret;
181
182 /* is it allowed to handle calls for current GPU? */
183 if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT))
184 return -ENOSYS;
185
186 if (!args->dmabuf_fd)
187 return -EINVAL;
188
189 /* handle the command (most frequent cases first) */
190 switch (args->cmd) {
191 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_FLUSH:
192 ret = gk20a_flush_cycle_stats_snapshot(ch);
193 args->extra = 0;
194 break;
195
196 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_ATTACH:
197 ret = gk20a_attach_cycle_stats_snapshot(ch,
198 args->dmabuf_fd,
199 args->extra,
200 &args->extra);
201 break;
202
203 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_DETACH:
204 ret = gk20a_channel_free_cycle_stats_snapshot(ch);
205 args->extra = 0;
206 break;
207
208 default:
209 pr_err("cyclestats: unknown command %u\n", args->cmd);
210 ret = -EINVAL;
211 break;
212 }
213
214 return ret;
215}
216#endif
217
218static int gk20a_channel_set_wdt_status(struct channel_gk20a *ch,
219 struct nvgpu_channel_wdt_args *args)
220{
221 if (args->wdt_status == NVGPU_IOCTL_CHANNEL_DISABLE_WDT)
222 ch->wdt_enabled = false;
223 else if (args->wdt_status == NVGPU_IOCTL_CHANNEL_ENABLE_WDT)
224 ch->wdt_enabled = true;
225
226 return 0;
227}
228
229static void gk20a_channel_free_error_notifiers(struct channel_gk20a *ch)
230{
231 nvgpu_mutex_acquire(&ch->error_notifier_mutex);
232 if (ch->error_notifier_ref) {
233 dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va);
234 dma_buf_put(ch->error_notifier_ref);
235 ch->error_notifier_ref = NULL;
236 ch->error_notifier = NULL;
237 ch->error_notifier_va = NULL;
238 }
239 nvgpu_mutex_release(&ch->error_notifier_mutex);
240}
241
242static int gk20a_init_error_notifier(struct channel_gk20a *ch,
243 struct nvgpu_set_error_notifier *args)
244{
245 struct dma_buf *dmabuf;
246 void *va;
247 u64 end = args->offset + sizeof(struct nvgpu_notification);
248
249 if (!args->mem) {
250 pr_err("gk20a_init_error_notifier: invalid memory handle\n");
251 return -EINVAL;
252 }
253
254 dmabuf = dma_buf_get(args->mem);
255
256 gk20a_channel_free_error_notifiers(ch);
257
258 if (IS_ERR(dmabuf)) {
259 pr_err("Invalid handle: %d\n", args->mem);
260 return -EINVAL;
261 }
262
263 if (end > dmabuf->size || end < sizeof(struct nvgpu_notification)) {
264 dma_buf_put(dmabuf);
265 nvgpu_err(ch->g, "gk20a_init_error_notifier: invalid offset");
266 return -EINVAL;
267 }
268
269 /* map handle */
270 va = dma_buf_vmap(dmabuf);
271 if (!va) {
272 dma_buf_put(dmabuf);
273 pr_err("Cannot map notifier handle\n");
274 return -ENOMEM;
275 }
276
277 ch->error_notifier = va + args->offset;
278 ch->error_notifier_va = va;
279 memset(ch->error_notifier, 0, sizeof(struct nvgpu_notification));
280
281 /* set channel notifiers pointer */
282 nvgpu_mutex_acquire(&ch->error_notifier_mutex);
283 ch->error_notifier_ref = dmabuf;
284 nvgpu_mutex_release(&ch->error_notifier_mutex);
285
286 return 0;
287}
288
289/*
290 * This returns the channel with a reference. The caller must
291 * gk20a_channel_put() the ref back after use.
292 *
293 * NULL is returned if the channel was not found.
294 */
295struct channel_gk20a *gk20a_get_channel_from_file(int fd)
296{
297 struct channel_gk20a *ch;
298 struct channel_priv *priv;
299 struct file *f = fget(fd);
300
301 if (!f)
302 return NULL;
303
304 if (f->f_op != &gk20a_channel_ops) {
305 fput(f);
306 return NULL;
307 }
308
309 priv = (struct channel_priv *)f->private_data;
310 ch = gk20a_channel_get(priv->c);
311 fput(f);
312 return ch;
313}
314
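/*
 * Illustrative sketch (not part of the original file) of the usage contract
 * documented above: every channel returned by gk20a_get_channel_from_file()
 * must be released with gk20a_channel_put(), the same way the ioctl path at
 * the bottom of this file balances gk20a_channel_get(). The helper name below
 * is hypothetical.
 */
#if 0	/* example only */
static int example_with_channel_fd(int fd)
{
	struct channel_gk20a *ch = gk20a_get_channel_from_file(fd);

	if (!ch)
		return -EINVAL;

	/* ... operate on ch while the reference is held ... */

	gk20a_channel_put(ch);
	return 0;
}
#endif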
315int gk20a_channel_release(struct inode *inode, struct file *filp)
316{
317 struct channel_priv *priv = filp->private_data;
318 struct channel_gk20a *ch;
319 struct gk20a *g;
320
321 int err;
322
323 /* We could still end up here even if the channel_open failed, e.g.
324 * if we ran out of hw channel IDs.
325 */
326 if (!priv)
327 return 0;
328
329 ch = priv->c;
330 g = priv->g;
331
332 err = gk20a_busy(g);
333 if (err) {
334 nvgpu_err(g, "failed to release a channel!");
335 goto channel_release;
336 }
337
338 trace_gk20a_channel_release(dev_name(dev_from_gk20a(g)));
339
340 gk20a_channel_close(ch);
341 gk20a_channel_free_error_notifiers(ch);
342
343 gk20a_idle(g);
344
345channel_release:
346 gk20a_put(g);
347 nvgpu_kfree(g, filp->private_data);
348 filp->private_data = NULL;
349 return 0;
350}
351
352/* note: runlist_id -1 is a synonym for the ENGINE_GR_GK20A runlist id */
353static int __gk20a_channel_open(struct gk20a *g,
354 struct file *filp, s32 runlist_id)
355{
356 int err;
357 struct channel_gk20a *ch;
358 struct channel_priv *priv;
359
360 gk20a_dbg_fn("");
361
362 g = gk20a_get(g);
363 if (!g)
364 return -ENODEV;
365
366 trace_gk20a_channel_open(dev_name(dev_from_gk20a(g)));
367
368 priv = nvgpu_kzalloc(g, sizeof(*priv));
369 if (!priv) {
370 err = -ENOMEM;
371 goto free_ref;
372 }
373
374 err = gk20a_busy(g);
375 if (err) {
376 nvgpu_err(g, "failed to power on, %d", err);
377 goto fail_busy;
378 }
379	/* All user space channels should be non-privileged */
380 ch = gk20a_open_new_channel(g, runlist_id, false);
381 gk20a_idle(g);
382 if (!ch) {
383		nvgpu_err(g,
384			"failed to open a new channel");
385 err = -ENOMEM;
386 goto fail_busy;
387 }
388
389 gk20a_channel_trace_sched_param(
390 trace_gk20a_channel_sched_defaults, ch);
391
392 priv->g = g;
393 priv->c = ch;
394
395 filp->private_data = priv;
396 return 0;
397
398fail_busy:
399 nvgpu_kfree(g, priv);
400free_ref:
401 gk20a_put(g);
402 return err;
403}
404
405int gk20a_channel_open(struct inode *inode, struct file *filp)
406{
407 struct nvgpu_os_linux *l = container_of(inode->i_cdev,
408 struct nvgpu_os_linux, channel.cdev);
409 struct gk20a *g = &l->g;
410 int ret;
411
412 gk20a_dbg_fn("start");
413 ret = __gk20a_channel_open(g, filp, -1);
414
415 gk20a_dbg_fn("end");
416 return ret;
417}
418
419int gk20a_channel_open_ioctl(struct gk20a *g,
420 struct nvgpu_channel_open_args *args)
421{
422 int err;
423 int fd;
424 struct file *file;
425 char name[64];
426 s32 runlist_id = args->in.runlist_id;
427 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
428
429 err = get_unused_fd_flags(O_RDWR);
430 if (err < 0)
431 return err;
432 fd = err;
433
434 snprintf(name, sizeof(name), "nvhost-%s-fd%d",
435 dev_name(dev_from_gk20a(g)), fd);
436
437 file = anon_inode_getfile(name, l->channel.cdev.ops, NULL, O_RDWR);
438 if (IS_ERR(file)) {
439 err = PTR_ERR(file);
440 goto clean_up;
441 }
442
443 err = __gk20a_channel_open(g, file, runlist_id);
444 if (err)
445 goto clean_up_file;
446
447 fd_install(fd, file);
448 args->out.channel_fd = fd;
449 return 0;
450
451clean_up_file:
452 fput(file);
453clean_up:
454 put_unused_fd(fd);
455 return err;
456}
457
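/*
 * Illustrative userspace sketch (not part of the original file): the
 * NVGPU_IOCTL_CHANNEL_OPEN path above allocates a new channel and installs
 * its fd in args->out.channel_fd; a runlist_id of -1 selects the default
 * ENGINE_GR_GK20A runlist (see the note before __gk20a_channel_open()).
 * The nvgpu_channel_open_args field names follow this file; the exact uapi
 * layout in <linux/nvgpu.h> and the helper name are assumptions.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int open_sibling_channel(int channel_fd, int32_t runlist_id)
{
	struct nvgpu_channel_open_args args;
	int err;

	memset(&args, 0, sizeof(args));
	args.in.runlist_id = runlist_id;	/* -1 = default GR runlist */

	err = ioctl(channel_fd, NVGPU_IOCTL_CHANNEL_OPEN, &args);
	if (err)
		return err;

	return args.out.channel_fd;		/* fd installed by the driver */
}
#endif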
458static u32 nvgpu_gpfifo_user_flags_to_common_flags(u32 user_flags)
459{
460 u32 flags = 0;
461
462 if (user_flags & NVGPU_ALLOC_GPFIFO_EX_FLAGS_VPR_ENABLED)
463 flags |= NVGPU_GPFIFO_FLAGS_SUPPORT_VPR;
464
465 if (user_flags & NVGPU_ALLOC_GPFIFO_EX_FLAGS_DETERMINISTIC)
466 flags |= NVGPU_GPFIFO_FLAGS_SUPPORT_DETERMINISTIC;
467
468 return flags;
469}
470
471static int nvgpu_channel_ioctl_alloc_gpfifo(struct channel_gk20a *c,
472 unsigned int num_entries,
473 unsigned int num_inflight_jobs,
474 u32 user_flags)
475{
476 return gk20a_channel_alloc_gpfifo(c, num_entries,
477 num_inflight_jobs,
478 nvgpu_gpfifo_user_flags_to_common_flags(user_flags));
479}
480
481
482static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
483 ulong id, u32 offset,
484 u32 payload, u32 timeout)
485{
486 struct dma_buf *dmabuf;
487 void *data;
488 u32 *semaphore;
489 int ret = 0;
490
491 /* do not wait if channel has timed out */
492 if (ch->has_timedout)
493 return -ETIMEDOUT;
494
495 dmabuf = dma_buf_get(id);
496 if (IS_ERR(dmabuf)) {
497 nvgpu_err(ch->g, "invalid notifier nvmap handle 0x%lx", id);
498 return -EINVAL;
499 }
500
501 data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
502 if (!data) {
503 nvgpu_err(ch->g, "failed to map notifier memory");
504 ret = -EINVAL;
505 goto cleanup_put;
506 }
507
508 semaphore = data + (offset & ~PAGE_MASK);
509
510 ret = NVGPU_COND_WAIT_INTERRUPTIBLE(
511 &ch->semaphore_wq,
512 *semaphore == payload || ch->has_timedout,
513 timeout);
514
515 dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
516cleanup_put:
517 dma_buf_put(dmabuf);
518 return ret;
519}
520
521static int gk20a_channel_wait(struct channel_gk20a *ch,
522 struct nvgpu_wait_args *args)
523{
524 struct dma_buf *dmabuf;
525 struct gk20a *g = ch->g;
526 struct notification *notif;
527 struct timespec tv;
528 u64 jiffies;
529 ulong id;
530 u32 offset;
531 int remain, ret = 0;
532 u64 end;
533
534 gk20a_dbg_fn("");
535
536 if (ch->has_timedout)
537 return -ETIMEDOUT;
538
539 switch (args->type) {
540 case NVGPU_WAIT_TYPE_NOTIFIER:
541 id = args->condition.notifier.dmabuf_fd;
542 offset = args->condition.notifier.offset;
543 end = offset + sizeof(struct notification);
544
545 dmabuf = dma_buf_get(id);
546 if (IS_ERR(dmabuf)) {
547 nvgpu_err(g, "invalid notifier nvmap handle 0x%lx",
548 id);
549 return -EINVAL;
550 }
551
552 if (end > dmabuf->size || end < sizeof(struct notification)) {
553 dma_buf_put(dmabuf);
554 nvgpu_err(g, "invalid notifier offset");
555 return -EINVAL;
556 }
557
558 notif = dma_buf_vmap(dmabuf);
559		if (!notif) {
560			nvgpu_err(g, "failed to map notifier memory");
			dma_buf_put(dmabuf);
561			return -ENOMEM;
562		}
563
564 notif = (struct notification *)((uintptr_t)notif + offset);
565
566 /* user should set status pending before
567 * calling this ioctl */
568 remain = NVGPU_COND_WAIT_INTERRUPTIBLE(
569 &ch->notifier_wq,
570 notif->status == 0 || ch->has_timedout,
571 args->timeout);
572
573 if (remain == 0 && notif->status != 0) {
574 ret = -ETIMEDOUT;
575 goto notif_clean_up;
576 } else if (remain < 0) {
577 ret = -EINTR;
578 goto notif_clean_up;
579 }
580
581 /* TBD: fill in correct information */
582 jiffies = get_jiffies_64();
583 jiffies_to_timespec(jiffies, &tv);
584 notif->timestamp.nanoseconds[0] = tv.tv_nsec;
585 notif->timestamp.nanoseconds[1] = tv.tv_sec;
586 notif->info32 = 0xDEADBEEF; /* should be object name */
587 notif->info16 = ch->chid; /* should be method offset */
588
589notif_clean_up:
590		dma_buf_vunmap(dmabuf, notif);
		dma_buf_put(dmabuf);
591		return ret;
592
593 case NVGPU_WAIT_TYPE_SEMAPHORE:
594 ret = gk20a_channel_wait_semaphore(ch,
595 args->condition.semaphore.dmabuf_fd,
596 args->condition.semaphore.offset,
597 args->condition.semaphore.payload,
598 args->timeout);
599
600 break;
601
602 default:
603 ret = -EINVAL;
604 break;
605 }
606
607 return ret;
608}
609
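/*
 * Illustrative userspace sketch (not part of the original file) of the
 * notifier-wait contract noted above: the notification's status field in the
 * shared dma-buf must be set to a non-zero "pending" value before issuing
 * NVGPU_IOCTL_CHANNEL_WAIT, and the wait completes once the driver observes
 * it cleared to zero (or the timeout expires). Field and macro names follow
 * the references in this file; the exact uapi layout in <linux/nvgpu.h>, the
 * millisecond timeout unit and the helper name are assumptions.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int wait_for_notifier(int channel_fd, int notifier_dmabuf_fd,
			     uint32_t offset_in_buf, uint32_t timeout_ms)
{
	struct nvgpu_wait_args args;

	/*
	 * Caller must already have marked the notification at offset_in_buf
	 * as pending (non-zero status) through its own CPU mapping.
	 */
	memset(&args, 0, sizeof(args));
	args.type = NVGPU_WAIT_TYPE_NOTIFIER;
	args.timeout = timeout_ms;
	args.condition.notifier.dmabuf_fd = notifier_dmabuf_fd;
	args.condition.notifier.offset = offset_in_buf;

	return ioctl(channel_fd, NVGPU_IOCTL_CHANNEL_WAIT, &args);
}
#endif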
610static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
611{
612 unsigned int mask = 0;
613 struct gk20a_event_id_data *event_id_data = filep->private_data;
614 struct gk20a *g = event_id_data->g;
615 u32 event_id = event_id_data->event_id;
616
617 gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
618
619 poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
620
621 nvgpu_mutex_acquire(&event_id_data->lock);
622
623 if (event_id_data->is_tsg) {
624 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
625
626 if (event_id_data->event_posted) {
627 gk20a_dbg_info(
628 "found pending event_id=%d on TSG=%d\n",
629 event_id, tsg->tsgid);
630 mask = (POLLPRI | POLLIN);
631 event_id_data->event_posted = false;
632 }
633 } else {
634 struct channel_gk20a *ch = g->fifo.channel
635 + event_id_data->id;
636
637 if (event_id_data->event_posted) {
638 gk20a_dbg_info(
639 "found pending event_id=%d on chid=%d\n",
640 event_id, ch->chid);
641 mask = (POLLPRI | POLLIN);
642 event_id_data->event_posted = false;
643 }
644 }
645
646 nvgpu_mutex_release(&event_id_data->lock);
647
648 return mask;
649}
650
651static int gk20a_event_id_release(struct inode *inode, struct file *filp)
652{
653 struct gk20a_event_id_data *event_id_data = filp->private_data;
654 struct gk20a *g = event_id_data->g;
655
656 if (event_id_data->is_tsg) {
657 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
658
659 nvgpu_mutex_acquire(&tsg->event_id_list_lock);
660 nvgpu_list_del(&event_id_data->event_id_node);
661 nvgpu_mutex_release(&tsg->event_id_list_lock);
662 } else {
663 struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
664
665 nvgpu_mutex_acquire(&ch->event_id_list_lock);
666 nvgpu_list_del(&event_id_data->event_id_node);
667 nvgpu_mutex_release(&ch->event_id_list_lock);
668 }
669
670 nvgpu_mutex_destroy(&event_id_data->lock);
671 gk20a_put(g);
672 nvgpu_kfree(g, event_id_data);
673 filp->private_data = NULL;
674
675 return 0;
676}
677
678const struct file_operations gk20a_event_id_ops = {
679 .owner = THIS_MODULE,
680 .poll = gk20a_event_id_poll,
681 .release = gk20a_event_id_release,
682};
683
684static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
685 u32 event_id,
686 struct gk20a_event_id_data **event_id_data)
687{
688 struct gk20a_event_id_data *local_event_id_data;
689 bool event_found = false;
690
691 nvgpu_mutex_acquire(&ch->event_id_list_lock);
692	nvgpu_list_for_each_entry(local_event_id_data, &ch->event_id_list,
693				gk20a_event_id_data, event_id_node) {
694 if (local_event_id_data->event_id == event_id) {
695 event_found = true;
696 break;
697 }
698 }
699 nvgpu_mutex_release(&ch->event_id_list_lock);
700
701 if (event_found) {
702 *event_id_data = local_event_id_data;
703 return 0;
704 } else {
705 return -1;
706 }
707}
708
709/*
710 * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
711 * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
712 */
713u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
714{
715 switch (event_id) {
716 case NVGPU_EVENT_ID_BPT_INT:
717 return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
718 case NVGPU_EVENT_ID_BPT_PAUSE:
719 return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
720 case NVGPU_EVENT_ID_BLOCKING_SYNC:
721 return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
722 case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
723 return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
724 case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
725 return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
726 case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
727 return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
728 }
729
730 return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
731}
732
733void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
734 u32 __event_id)
735{
736 struct gk20a_event_id_data *event_id_data;
737 u32 event_id;
738 int err = 0;
739
740 event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
741 if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
742 return;
743
744 err = gk20a_channel_get_event_data_from_id(ch, event_id,
745 &event_id_data);
746 if (err)
747 return;
748
749 nvgpu_mutex_acquire(&event_id_data->lock);
750
751 gk20a_dbg_info(
752 "posting event for event_id=%d on ch=%d\n",
753 event_id, ch->chid);
754 event_id_data->event_posted = true;
755
756 nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
757
758 nvgpu_mutex_release(&event_id_data->lock);
759}
760
761static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
762 int event_id,
763 int *fd)
764{
765 struct gk20a *g;
766 int err = 0;
767 int local_fd;
768 struct file *file;
769 char name[64];
770 struct gk20a_event_id_data *event_id_data;
771
772 g = gk20a_get(ch->g);
773 if (!g)
774 return -ENODEV;
775
776 err = gk20a_channel_get_event_data_from_id(ch,
777 event_id, &event_id_data);
778 if (err == 0) {
779 /* We already have event enabled */
780 err = -EINVAL;
781 goto free_ref;
782 }
783
784 err = get_unused_fd_flags(O_RDWR);
785 if (err < 0)
786 goto free_ref;
787 local_fd = err;
788
789 snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
790 event_id, local_fd);
791 file = anon_inode_getfile(name, &gk20a_event_id_ops,
792 NULL, O_RDWR);
793 if (IS_ERR(file)) {
794 err = PTR_ERR(file);
795 goto clean_up;
796 }
797
798 event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
799 if (!event_id_data) {
800 err = -ENOMEM;
801 goto clean_up_file;
802 }
803 event_id_data->g = g;
804 event_id_data->id = ch->chid;
805 event_id_data->is_tsg = false;
806 event_id_data->event_id = event_id;
807
808 nvgpu_cond_init(&event_id_data->event_id_wq);
809 err = nvgpu_mutex_init(&event_id_data->lock);
810 if (err)
811 goto clean_up_free;
812 nvgpu_init_list_node(&event_id_data->event_id_node);
813
814 nvgpu_mutex_acquire(&ch->event_id_list_lock);
815 nvgpu_list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
816 nvgpu_mutex_release(&ch->event_id_list_lock);
817
818 fd_install(local_fd, file);
819 file->private_data = event_id_data;
820
821 *fd = local_fd;
822
823 return 0;
824
825clean_up_free:
826 nvgpu_kfree(g, event_id_data);
827clean_up_file:
828 fput(file);
829clean_up:
830 put_unused_fd(local_fd);
831free_ref:
832 gk20a_put(g);
833 return err;
834}
835
836static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
837 struct nvgpu_event_id_ctrl_args *args)
838{
839 int err = 0;
840 int fd = -1;
841
842 if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
843 return -EINVAL;
844
845 if (gk20a_is_channel_marked_as_tsg(ch))
846 return -EINVAL;
847
848 switch (args->cmd) {
849 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
850 err = gk20a_channel_event_id_enable(ch, args->event_id, &fd);
851 if (!err)
852 args->event_fd = fd;
853 break;
854
855 default:
856 nvgpu_err(ch->g,
857 "unrecognized channel event id cmd: 0x%x",
858 args->cmd);
859 err = -EINVAL;
860 break;
861 }
862
863 return err;
864}
865
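/*
 * Illustrative userspace sketch (not part of the original file): enabling a
 * channel event id returns an anonymous fd whose poll() reports
 * (POLLPRI | POLLIN) once the event is posted (see gk20a_event_id_poll()
 * above). The nvgpu_event_id_ctrl_args field names follow this file; the
 * exact uapi layout in <linux/nvgpu.h> and the helper name are assumptions.
 */
#if 0	/* example only, userspace */
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int wait_for_blocking_sync_event(int channel_fd)
{
	struct nvgpu_event_id_ctrl_args args;
	struct pollfd pfd;
	int err;

	memset(&args, 0, sizeof(args));
	args.cmd = NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE;
	args.event_id = NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;

	err = ioctl(channel_fd, NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL, &args);
	if (err)
		return err;

	pfd.fd = args.event_fd;		/* installed by the enable cmd */
	pfd.events = POLLPRI | POLLIN;

	return poll(&pfd, 1, -1);	/* block until the event is posted */
}
#endif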
866static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
867 struct nvgpu_zcull_bind_args *args)
868{
869 struct gk20a *g = ch->g;
870 struct gr_gk20a *gr = &g->gr;
871
872 gk20a_dbg_fn("");
873
874 return g->ops.gr.bind_ctxsw_zcull(g, gr, ch,
875 args->gpu_va, args->mode);
876}
877
878static int gk20a_ioctl_channel_submit_gpfifo(
879 struct channel_gk20a *ch,
880 struct nvgpu_submit_gpfifo_args *args)
881{
882 struct gk20a_fence *fence_out;
883 struct fifo_profile_gk20a *profile = NULL;
884
885 int ret = 0;
886 gk20a_dbg_fn("");
887
888#ifdef CONFIG_DEBUG_FS
889 profile = gk20a_fifo_profile_acquire(ch->g);
890
891 if (profile)
892 profile->timestamp[PROFILE_IOCTL_ENTRY] = sched_clock();
893#endif
894 if (ch->has_timedout)
895 return -ETIMEDOUT;
896
897 if ((NVGPU_SUBMIT_GPFIFO_FLAGS_RESCHEDULE_RUNLIST & args->flags) &&
898 !capable(CAP_SYS_NICE))
899 return -EPERM;
900
901 ret = gk20a_submit_channel_gpfifo(ch, NULL, args, args->num_entries,
902 args->flags, &args->fence,
903 &fence_out, false, profile);
904
905 if (ret)
906 goto clean_up;
907
908 /* Convert fence_out to something we can pass back to user space. */
909 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
910 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
911 int fd = gk20a_fence_install_fd(fence_out);
912 if (fd < 0)
913 ret = fd;
914 else
915 args->fence.id = fd;
916 } else {
917 args->fence.id = fence_out->syncpt_id;
918 args->fence.value = fence_out->syncpt_value;
919 }
920 }
921 gk20a_fence_put(fence_out);
922#ifdef CONFIG_DEBUG_FS
923 if (profile) {
924 profile->timestamp[PROFILE_IOCTL_EXIT] = sched_clock();
925 gk20a_fifo_profile_release(ch->g, profile);
926 }
927#endif
928clean_up:
929 return ret;
930}
931
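/*
 * Illustrative userspace sketch (not part of the original file): asking the
 * submit path above for a post-submit sync fence. With FENCE_GET | SYNC_FENCE
 * set, the fd produced by gk20a_fence_install_fd() comes back in fence.id;
 * without SYNC_FENCE a raw syncpoint id/value pair is returned instead.
 * Field names follow this file; the gpfifo pointer field, the exact uapi
 * layout in <linux/nvgpu.h> and the helper name are assumptions.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int submit_and_get_sync_fd(int channel_fd, uint64_t gpfifo_user_va,
				  uint32_t num_entries)
{
	struct nvgpu_submit_gpfifo_args args;
	int err;

	memset(&args, 0, sizeof(args));
	args.gpfifo = gpfifo_user_va;		/* assumed uapi field name */
	args.num_entries = num_entries;
	args.flags = NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET |
		     NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE;

	err = ioctl(channel_fd, NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO, &args);
	if (err)
		return err;

	return args.fence.id;	/* sync fence fd; close() when no longer needed */
}
#endif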
932/*
933 * Convert linux specific runlist level of the form NVGPU_RUNLIST_INTERLEAVE_LEVEL_*
934 * to common runlist level of the form NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_*
935 */
936u32 nvgpu_get_common_runlist_level(u32 level)
937{
938 switch (level) {
939 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW:
940 return NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW;
941 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
942 return NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM;
943 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH:
944 return NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH;
945 default:
946 pr_err("%s: incorrect runlist level\n", __func__);
947 }
948
949 return level;
950}
951
952static int gk20a_ioctl_channel_set_runlist_interleave(struct channel_gk20a *ch,
953 u32 level)
954{
955 int err = 0;
956
957 err = gk20a_busy(ch->g);
958 if (err) {
959 nvgpu_err(ch->g, "failed to power on, %d", err);
960 goto fail;
961 }
962
963 level = nvgpu_get_common_runlist_level(level);
964 err = gk20a_channel_set_runlist_interleave(ch, level);
965
966 gk20a_idle(ch->g);
967 gk20a_channel_trace_sched_param(
968 trace_gk20a_channel_set_runlist_interleave, ch);
969
970fail:
971 return err;
972}
973
974static u32 nvgpu_obj_ctx_user_flags_to_common_flags(u32 user_flags)
975{
976 u32 flags = 0;
977
978 if (user_flags & NVGPU_ALLOC_OBJ_FLAGS_GFXP)
979 flags |= NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP;
980
981 if (user_flags & NVGPU_ALLOC_OBJ_FLAGS_CILP)
982 flags |= NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP;
983
984 return flags;
985}
986
987static int nvgpu_ioctl_channel_alloc_obj_ctx(struct channel_gk20a *ch,
988 u32 class_num, u32 user_flags)
989{
990 return ch->g->ops.gr.alloc_obj_ctx(ch, class_num,
991 nvgpu_obj_ctx_user_flags_to_common_flags(user_flags));
992}
993
994/*
995 * Convert common preemption mode flags of the form NVGPU_PREEMPTION_MODE_GRAPHICS_*
996 * into linux preemption mode flags of the form NVGPU_GRAPHICS_PREEMPTION_MODE_*
997 */
998u32 nvgpu_get_ioctl_graphics_preempt_mode_flags(u32 graphics_preempt_mode_flags)
999{
1000 u32 flags = 0;
1001
1002 if (graphics_preempt_mode_flags & NVGPU_PREEMPTION_MODE_GRAPHICS_WFI)
1003 flags |= NVGPU_GRAPHICS_PREEMPTION_MODE_WFI;
1004 if (graphics_preempt_mode_flags & NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP)
1005 flags |= NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
1006
1007 return flags;
1008}
1009
1010/*
1011 * Convert common preemption mode flags of the form NVGPU_PREEMPTION_MODE_COMPUTE_*
1012 * into linux preemption mode flags of the form NVGPU_COMPUTE_PREEMPTION_MODE_*
1013 */
1014u32 nvgpu_get_ioctl_compute_preempt_mode_flags(u32 compute_preempt_mode_flags)
1015{
1016 u32 flags = 0;
1017
1018 if (compute_preempt_mode_flags & NVGPU_PREEMPTION_MODE_COMPUTE_WFI)
1019 flags |= NVGPU_COMPUTE_PREEMPTION_MODE_WFI;
1020 if (compute_preempt_mode_flags & NVGPU_PREEMPTION_MODE_COMPUTE_CTA)
1021 flags |= NVGPU_COMPUTE_PREEMPTION_MODE_CTA;
1022 if (compute_preempt_mode_flags & NVGPU_PREEMPTION_MODE_COMPUTE_CILP)
1023 flags |= NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
1024
1025 return flags;
1026}
1027
1028/*
1029 * Convert common preemption modes of the form NVGPU_PREEMPTION_MODE_GRAPHICS_*
1030 * into linux preemption modes of the form NVGPU_GRAPHICS_PREEMPTION_MODE_*
1031 */
1032u32 nvgpu_get_ioctl_graphics_preempt_mode(u32 graphics_preempt_mode)
1033{
1034 switch (graphics_preempt_mode) {
1035 case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
1036 return NVGPU_GRAPHICS_PREEMPTION_MODE_WFI;
1037 case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
1038 return NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
1039 }
1040
1041 return graphics_preempt_mode;
1042}
1043
1044/*
1045 * Convert common preemption modes of the form NVGPU_PREEMPTION_MODE_COMPUTE_*
1046 * into linux preemption modes of the form NVGPU_COMPUTE_PREEMPTION_MODE_*
1047 */
1048u32 nvgpu_get_ioctl_compute_preempt_mode(u32 compute_preempt_mode)
1049{
1050 switch (compute_preempt_mode) {
1051 case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
1052 return NVGPU_COMPUTE_PREEMPTION_MODE_WFI;
1053 case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
1054 return NVGPU_COMPUTE_PREEMPTION_MODE_CTA;
1055 case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
1056 return NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
1057 }
1058
1059 return compute_preempt_mode;
1060}
1061
1062/*
1063 * Convert linux preemption modes of the form NVGPU_GRAPHICS_PREEMPTION_MODE_*
1064 * into common preemption modes of the form NVGPU_PREEMPTION_MODE_GRAPHICS_*
1065 */
1066static u32 nvgpu_get_common_graphics_preempt_mode(u32 graphics_preempt_mode)
1067{
1068 switch (graphics_preempt_mode) {
1069 case NVGPU_GRAPHICS_PREEMPTION_MODE_WFI:
1070 return NVGPU_PREEMPTION_MODE_GRAPHICS_WFI;
1071 case NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP:
1072 return NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
1073 }
1074
1075 return graphics_preempt_mode;
1076}
1077
1078/*
1079 * Convert linux preemption modes of the form NVGPU_COMPUTE_PREEMPTION_MODE_*
1080 * into common preemption modes of the form NVGPU_PREEMPTION_MODE_COMPUTE_*
1081 */
1082static u32 nvgpu_get_common_compute_preempt_mode(u32 compute_preempt_mode)
1083{
1084 switch (compute_preempt_mode) {
1085 case NVGPU_COMPUTE_PREEMPTION_MODE_WFI:
1086 return NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
1087 case NVGPU_COMPUTE_PREEMPTION_MODE_CTA:
1088 return NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
1089 case NVGPU_COMPUTE_PREEMPTION_MODE_CILP:
1090 return NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
1091 }
1092
1093 return compute_preempt_mode;
1094}
1095
1096static int nvgpu_ioctl_channel_set_preemption_mode(struct channel_gk20a *ch,
1097 u32 graphics_preempt_mode, u32 compute_preempt_mode)
1098{
1099 int err;
1100
1101 if (ch->g->ops.gr.set_preemption_mode) {
1102 err = gk20a_busy(ch->g);
1103 if (err) {
1104 nvgpu_err(ch->g, "failed to power on, %d", err);
1105 return err;
1106 }
1107 err = ch->g->ops.gr.set_preemption_mode(ch,
1108 nvgpu_get_common_graphics_preempt_mode(graphics_preempt_mode),
1109 nvgpu_get_common_compute_preempt_mode(compute_preempt_mode));
1110 gk20a_idle(ch->g);
1111 } else {
1112 err = -EINVAL;
1113 }
1114
1115 return err;
1116}
1117
1118long gk20a_channel_ioctl(struct file *filp,
1119 unsigned int cmd, unsigned long arg)
1120{
1121 struct channel_priv *priv = filp->private_data;
1122 struct channel_gk20a *ch = priv->c;
1123 struct device *dev = dev_from_gk20a(ch->g);
1124 u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
1125 int err = 0;
1126
1127 gk20a_dbg_fn("start %d", _IOC_NR(cmd));
1128
1129 if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
1130 (_IOC_NR(cmd) == 0) ||
1131 (_IOC_NR(cmd) > NVGPU_IOCTL_CHANNEL_LAST) ||
1132 (_IOC_SIZE(cmd) > NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE))
1133 return -EINVAL;
1134
1135 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1136 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1137 return -EFAULT;
1138 }
1139
1140 /* take a ref or return timeout if channel refs can't be taken */
1141 ch = gk20a_channel_get(ch);
1142 if (!ch)
1143 return -ETIMEDOUT;
1144
1145 /* protect our sanity for threaded userspace - most of the channel is
1146 * not thread safe */
1147 nvgpu_mutex_acquire(&ch->ioctl_lock);
1148
1149 /* this ioctl call keeps a ref to the file which keeps a ref to the
1150 * channel */
1151
1152 switch (cmd) {
1153 case NVGPU_IOCTL_CHANNEL_OPEN:
1154 err = gk20a_channel_open_ioctl(ch->g,
1155 (struct nvgpu_channel_open_args *)buf);
1156 break;
1157 case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
1158 break;
1159 case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
1160 {
1161 struct nvgpu_alloc_obj_ctx_args *args =
1162 (struct nvgpu_alloc_obj_ctx_args *)buf;
1163
1164 err = gk20a_busy(ch->g);
1165 if (err) {
1166 dev_err(dev,
1167 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1168 __func__, cmd);
1169 break;
1170 }
1171 err = nvgpu_ioctl_channel_alloc_obj_ctx(ch, args->class_num, args->flags);
1172 gk20a_idle(ch->g);
1173 break;
1174 }
1175 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX:
1176 {
1177 struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args =
1178 (struct nvgpu_alloc_gpfifo_ex_args *)buf;
1179
1180 err = gk20a_busy(ch->g);
1181 if (err) {
1182 dev_err(dev,
1183 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1184 __func__, cmd);
1185 break;
1186 }
1187
1188 if (!is_power_of_2(alloc_gpfifo_ex_args->num_entries)) {
1189 err = -EINVAL;
1190 gk20a_idle(ch->g);
1191 break;
1192 }
1193 err = nvgpu_channel_ioctl_alloc_gpfifo(ch,
1194 alloc_gpfifo_ex_args->num_entries,
1195 alloc_gpfifo_ex_args->num_inflight_jobs,
1196 alloc_gpfifo_ex_args->flags);
1197 gk20a_idle(ch->g);
1198 break;
1199 }
1200 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
1201 {
1202 struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args =
1203 (struct nvgpu_alloc_gpfifo_args *)buf;
1204
1205 err = gk20a_busy(ch->g);
1206 if (err) {
1207 dev_err(dev,
1208 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1209 __func__, cmd);
1210 break;
1211 }
1212
1213 /*
1214 * Kernel can insert one extra gpfifo entry before user
1215 * submitted gpfifos and another one after, for internal usage.
1216 * Triple the requested size.
1217 */
1218 err = nvgpu_channel_ioctl_alloc_gpfifo(ch,
1219 alloc_gpfifo_args->num_entries * 3,
1220 0,
1221 alloc_gpfifo_args->flags);
1222 gk20a_idle(ch->g);
1223 break;
1224 }
1225 case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO:
1226 err = gk20a_ioctl_channel_submit_gpfifo(ch,
1227 (struct nvgpu_submit_gpfifo_args *)buf);
1228 break;
1229 case NVGPU_IOCTL_CHANNEL_WAIT:
1230 err = gk20a_busy(ch->g);
1231 if (err) {
1232 dev_err(dev,
1233 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1234 __func__, cmd);
1235 break;
1236 }
1237
1238 /* waiting is thread-safe, not dropping this mutex could
1239 * deadlock in certain conditions */
1240 nvgpu_mutex_release(&ch->ioctl_lock);
1241
1242 err = gk20a_channel_wait(ch,
1243 (struct nvgpu_wait_args *)buf);
1244
1245 nvgpu_mutex_acquire(&ch->ioctl_lock);
1246
1247 gk20a_idle(ch->g);
1248 break;
1249 case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
1250 err = gk20a_busy(ch->g);
1251 if (err) {
1252 dev_err(dev,
1253 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1254 __func__, cmd);
1255 break;
1256 }
1257 err = gk20a_channel_zcull_bind(ch,
1258 (struct nvgpu_zcull_bind_args *)buf);
1259 gk20a_idle(ch->g);
1260 break;
1261 case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
1262 err = gk20a_busy(ch->g);
1263 if (err) {
1264 dev_err(dev,
1265 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1266 __func__, cmd);
1267 break;
1268 }
1269 err = gk20a_init_error_notifier(ch,
1270 (struct nvgpu_set_error_notifier *)buf);
1271 gk20a_idle(ch->g);
1272 break;
1273#ifdef CONFIG_GK20A_CYCLE_STATS
1274 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS:
1275 err = gk20a_busy(ch->g);
1276 if (err) {
1277 dev_err(dev,
1278 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1279 __func__, cmd);
1280 break;
1281 }
1282 err = gk20a_channel_cycle_stats(ch,
1283 (struct nvgpu_cycle_stats_args *)buf);
1284 gk20a_idle(ch->g);
1285 break;
1286#endif
1287 case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT:
1288 {
1289 u32 timeout =
1290 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
1291 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1292 timeout, ch->chid);
1293 ch->timeout_ms_max = timeout;
1294 gk20a_channel_trace_sched_param(
1295 trace_gk20a_channel_set_timeout, ch);
1296 break;
1297 }
1298 case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1299 {
1300 u32 timeout =
1301 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
1302 bool timeout_debug_dump = !((u32)
1303 ((struct nvgpu_set_timeout_ex_args *)buf)->flags &
1304 (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
1305 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1306 timeout, ch->chid);
1307 ch->timeout_ms_max = timeout;
1308 ch->timeout_debug_dump = timeout_debug_dump;
1309 gk20a_channel_trace_sched_param(
1310 trace_gk20a_channel_set_timeout, ch);
1311 break;
1312 }
1313 case NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT:
1314 ((struct nvgpu_get_param_args *)buf)->value =
1315 ch->has_timedout;
1316 break;
1317 case NVGPU_IOCTL_CHANNEL_ENABLE:
1318 err = gk20a_busy(ch->g);
1319 if (err) {
1320 dev_err(dev,
1321 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1322 __func__, cmd);
1323 break;
1324 }
1325 if (ch->g->ops.fifo.enable_channel)
1326 ch->g->ops.fifo.enable_channel(ch);
1327 else
1328 err = -ENOSYS;
1329 gk20a_idle(ch->g);
1330 break;
1331 case NVGPU_IOCTL_CHANNEL_DISABLE:
1332 err = gk20a_busy(ch->g);
1333 if (err) {
1334 dev_err(dev,
1335 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1336 __func__, cmd);
1337 break;
1338 }
1339 if (ch->g->ops.fifo.disable_channel)
1340 ch->g->ops.fifo.disable_channel(ch);
1341 else
1342 err = -ENOSYS;
1343 gk20a_idle(ch->g);
1344 break;
1345 case NVGPU_IOCTL_CHANNEL_PREEMPT:
1346 err = gk20a_busy(ch->g);
1347 if (err) {
1348 dev_err(dev,
1349 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1350 __func__, cmd);
1351 break;
1352 }
1353 err = gk20a_fifo_preempt(ch->g, ch);
1354 gk20a_idle(ch->g);
1355 break;
1356 case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
1357 err = gk20a_busy(ch->g);
1358 if (err) {
1359 dev_err(dev,
1360 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1361 __func__, cmd);
1362 break;
1363 }
1364 err = ch->g->ops.fifo.force_reset_ch(ch,
1365 NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR, true);
1366 gk20a_idle(ch->g);
1367 break;
1368 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
1369 err = gk20a_channel_event_id_ctrl(ch,
1370 (struct nvgpu_event_id_ctrl_args *)buf);
1371 break;
1372#ifdef CONFIG_GK20A_CYCLE_STATS
1373 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
1374 err = gk20a_busy(ch->g);
1375 if (err) {
1376 dev_err(dev,
1377 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1378 __func__, cmd);
1379 break;
1380 }
1381 err = gk20a_channel_cycle_stats_snapshot(ch,
1382 (struct nvgpu_cycle_stats_snapshot_args *)buf);
1383 gk20a_idle(ch->g);
1384 break;
1385#endif
1386 case NVGPU_IOCTL_CHANNEL_WDT:
1387 err = gk20a_channel_set_wdt_status(ch,
1388 (struct nvgpu_channel_wdt_args *)buf);
1389 break;
1390 case NVGPU_IOCTL_CHANNEL_SET_RUNLIST_INTERLEAVE:
1391 err = gk20a_ioctl_channel_set_runlist_interleave(ch,
1392 ((struct nvgpu_runlist_interleave_args *)buf)->level);
1393 break;
1394 case NVGPU_IOCTL_CHANNEL_SET_TIMESLICE:
1395 err = gk20a_busy(ch->g);
1396 if (err) {
1397 dev_err(dev,
1398 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1399 __func__, cmd);
1400 break;
1401 }
1402 err = ch->g->ops.fifo.channel_set_timeslice(ch,
1403 ((struct nvgpu_timeslice_args *)buf)->timeslice_us);
1404
1405 gk20a_idle(ch->g);
1406 gk20a_channel_trace_sched_param(
1407 trace_gk20a_channel_set_timeslice, ch);
1408 break;
1409 case NVGPU_IOCTL_CHANNEL_GET_TIMESLICE:
1410 ((struct nvgpu_timeslice_args *)buf)->timeslice_us =
1411 gk20a_channel_get_timeslice(ch);
1412 break;
1413 case NVGPU_IOCTL_CHANNEL_SET_PREEMPTION_MODE:
1414 err = nvgpu_ioctl_channel_set_preemption_mode(ch,
1415 ((struct nvgpu_preemption_mode_args *)buf)->graphics_preempt_mode,
1416 ((struct nvgpu_preemption_mode_args *)buf)->compute_preempt_mode);
1417 break;
1418 case NVGPU_IOCTL_CHANNEL_SET_BOOSTED_CTX:
1419 if (ch->g->ops.gr.set_boosted_ctx) {
1420 bool boost =
1421 ((struct nvgpu_boosted_ctx_args *)buf)->boost;
1422
1423 err = gk20a_busy(ch->g);
1424 if (err) {
1425 dev_err(dev,
1426 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1427 __func__, cmd);
1428 break;
1429 }
1430 err = ch->g->ops.gr.set_boosted_ctx(ch, boost);
1431 gk20a_idle(ch->g);
1432 } else {
1433 err = -EINVAL;
1434 }
1435 break;
1436 default:
1437 dev_dbg(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1438 err = -ENOTTY;
1439 break;
1440 }
1441
1442	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ) &&
	    copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
1443		err = -EFAULT;
1444
1445 nvgpu_mutex_release(&ch->ioctl_lock);
1446
1447 gk20a_channel_put(ch);
1448
1449 gk20a_dbg_fn("end");
1450
1451 return err;
1452}