author     Terje Bergstrom <tbergstrom@nvidia.com>              2017-03-23 17:19:01 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-04 13:24:44 -0400
commit     a07e10f494c158ae31d6187e9be3db409528a507 (patch)
tree       6ea3d49390aea1d1fb5f074d30027b184aba0cee /drivers/gpu/nvgpu/common/linux/ioctl_channel.c
parent     f116320137b0eb835bcbf704d34fc8f7880595d2 (diff)
gpu: nvgpu: Move channel IOCTL code to Linux module
Move channel IOCTL specific code to Linux module. This clears some Linux
dependencies from channel_gk20a.c.

JIRA NVGPU-32

Change-Id: I41817d612b959709365bcabff9c8a15f2bfe4c60
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1330804
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
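As context for review, here is a minimal userspace sketch of the IOCTL interface this file implements. It is only an illustration, not part of the patch: the device node name /dev/nvhost-gpu and the header location <linux/nvgpu.h> are platform-dependent assumptions; the ioctl number and argument struct are the ones handled by gk20a_channel_ioctl() in the new file below.

/* channel_timeout_sketch.c - hypothetical userspace example, not part of this patch */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* assumed location of the nvgpu UAPI definitions */

int main(void)
{
	struct nvgpu_set_timeout_args args = { 0 };

	/* Assumed device node; opening it creates a channel via gk20a_channel_open(). */
	int fd = open("/dev/nvhost-gpu", O_RDWR);
	if (fd < 0) {
		perror("open channel node");
		return 1;
	}

	/* Dispatched by gk20a_channel_ioctl(), case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT. */
	args.timeout = 3000;	/* channel timeout in milliseconds */
	if (ioctl(fd, NVGPU_IOCTL_CHANNEL_SET_TIMEOUT, &args) < 0)
		perror("NVGPU_IOCTL_CHANNEL_SET_TIMEOUT");

	close(fd);
	return 0;
}

The remaining channel setup (ALLOC_OBJ_CTX, ALLOC_GPFIFO_EX, SUBMIT_GPFIFO) is omitted; the sketch only shows how an argument struct is copied in by the _IOC_WRITE path at the top of gk20a_channel_ioctl().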
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_channel.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c | 1179
1 file changed, 1179 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
new file mode 100644
index 00000000..4a9531de
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -0,0 +1,1179 @@
1/*
2 * GK20A Graphics channel
3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/list.h>
20#include <trace/events/gk20a.h>
21#include <linux/file.h>
22#include <linux/anon_inodes.h>
23#include <linux/dma-buf.h>
24
25#include <nvgpu/semaphore.h>
26#include <nvgpu/timers.h>
27#include <nvgpu/kmem.h>
28
29#include "gk20a/gk20a.h"
30#include "gk20a/debug_gk20a.h"
31#include "gk20a/ctxsw_trace_gk20a.h"
32#include "gk20a/dbg_gpu_gk20a.h"
33#include "gk20a/fence_gk20a.h"
34
35/*
36 * Although channels do have pointers back to the gk20a struct that they were
37 * created under, in cases where the driver is killed that pointer can be bad.
38 * The channel memory can be freed before the release() function for a given
39 * channel is called. This happens when the driver dies and userspace doesn't
40 * get a chance to call release() until after the entire gk20a driver data is
41 * unloaded and freed.
42 */
43struct channel_priv {
44 struct gk20a *g;
45 struct channel_gk20a *c;
46};
47
48#if defined(CONFIG_GK20A_CYCLE_STATS)
49
50static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
51 struct nvgpu_cycle_stats_args *args)
52{
53 struct dma_buf *dmabuf;
54 void *virtual_address;
55
56 /* is the current GPU allowed to handle cycle stats calls? */
57 if (0 == (ch->g->gpu_characteristics.flags &
58 NVGPU_GPU_FLAGS_SUPPORT_CYCLE_STATS))
59 return -ENOSYS;
60
61 if (args->dmabuf_fd && !ch->cyclestate.cyclestate_buffer_handler) {
62
63 /* set up new cyclestats buffer */
64 dmabuf = dma_buf_get(args->dmabuf_fd);
65 if (IS_ERR(dmabuf))
66 return PTR_ERR(dmabuf);
67 virtual_address = dma_buf_vmap(dmabuf);
68 if (!virtual_address)
69 return -ENOMEM;
70
71 ch->cyclestate.cyclestate_buffer_handler = dmabuf;
72 ch->cyclestate.cyclestate_buffer = virtual_address;
73 ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
74 return 0;
75
76 } else if (!args->dmabuf_fd &&
77 ch->cyclestate.cyclestate_buffer_handler) {
78 gk20a_channel_free_cycle_stats_buffer(ch);
79 return 0;
80
81 } else if (!args->dmabuf_fd &&
82 !ch->cyclestate.cyclestate_buffer_handler) {
83 /* no request from GL */
84 return 0;
85
86 } else {
87 pr_err("channel already has cyclestats buffer\n");
88 return -EINVAL;
89 }
90}
91
92static int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
93{
94 int ret;
95
96 nvgpu_mutex_acquire(&ch->cs_client_mutex);
97 if (ch->cs_client)
98 ret = gr_gk20a_css_flush(ch, ch->cs_client);
99 else
100 ret = -EBADF;
101 nvgpu_mutex_release(&ch->cs_client_mutex);
102
103 return ret;
104}
105
106static int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
107 u32 dmabuf_fd,
108 u32 perfmon_id_count,
109 u32 *perfmon_id_start)
110{
111 int ret;
112
113 nvgpu_mutex_acquire(&ch->cs_client_mutex);
114 if (ch->cs_client) {
115 ret = -EEXIST;
116 } else {
117 ret = gr_gk20a_css_attach(ch,
118 dmabuf_fd,
119 perfmon_id_count,
120 perfmon_id_start,
121 &ch->cs_client);
122 }
123 nvgpu_mutex_release(&ch->cs_client_mutex);
124
125 return ret;
126}
127
128static int gk20a_channel_cycle_stats_snapshot(struct channel_gk20a *ch,
129 struct nvgpu_cycle_stats_snapshot_args *args)
130{
131 int ret;
132
133 /* is the current GPU allowed to handle cycle stats snapshot calls? */
134 if (0 == (ch->g->gpu_characteristics.flags &
135 NVGPU_GPU_FLAGS_SUPPORT_CYCLE_STATS_SNAPSHOT))
136 return -ENOSYS;
137
138 if (!args->dmabuf_fd)
139 return -EINVAL;
140
141 /* handle the command (most frequent cases first) */
142 switch (args->cmd) {
143 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_FLUSH:
144 ret = gk20a_flush_cycle_stats_snapshot(ch);
145 args->extra = 0;
146 break;
147
148 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_ATTACH:
149 ret = gk20a_attach_cycle_stats_snapshot(ch,
150 args->dmabuf_fd,
151 args->extra,
152 &args->extra);
153 break;
154
155 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT_CMD_DETACH:
156 ret = gk20a_channel_free_cycle_stats_snapshot(ch);
157 args->extra = 0;
158 break;
159
160 default:
161 pr_err("cyclestats: unknown command %u\n", args->cmd);
162 ret = -EINVAL;
163 break;
164 }
165
166 return ret;
167}
168#endif
169
170static int gk20a_channel_set_wdt_status(struct channel_gk20a *ch,
171 struct nvgpu_channel_wdt_args *args)
172{
173 if (args->wdt_status == NVGPU_IOCTL_CHANNEL_DISABLE_WDT)
174 ch->wdt_enabled = false;
175 else if (args->wdt_status == NVGPU_IOCTL_CHANNEL_ENABLE_WDT)
176 ch->wdt_enabled = true;
177
178 return 0;
179}
180
181static int gk20a_init_error_notifier(struct channel_gk20a *ch,
182 struct nvgpu_set_error_notifier *args)
183{
184 struct device *dev = dev_from_gk20a(ch->g);
185 struct dma_buf *dmabuf;
186 void *va;
187 u64 end = args->offset + sizeof(struct nvgpu_notification);
188
189 if (!args->mem) {
190 pr_err("gk20a_init_error_notifier: invalid memory handle\n");
191 return -EINVAL;
192 }
193
194 dmabuf = dma_buf_get(args->mem);
195
196 gk20a_channel_free_error_notifiers(ch);
197
198 if (IS_ERR(dmabuf)) {
199 pr_err("Invalid handle: %d\n", args->mem);
200 return -EINVAL;
201 }
202
203 if (end > dmabuf->size || end < sizeof(struct nvgpu_notification)) {
204 dma_buf_put(dmabuf);
205 gk20a_err(dev, "gk20a_init_error_notifier: invalid offset\n");
206 return -EINVAL;
207 }
208
209 /* map handle */
210 va = dma_buf_vmap(dmabuf);
211 if (!va) {
212 dma_buf_put(dmabuf);
213 pr_err("Cannot map notifier handle\n");
214 return -ENOMEM;
215 }
216
217 ch->error_notifier = va + args->offset;
218 ch->error_notifier_va = va;
219 memset(ch->error_notifier, 0, sizeof(struct nvgpu_notification));
220
221 /* set channel notifiers pointer */
222 nvgpu_mutex_acquire(&ch->error_notifier_mutex);
223 ch->error_notifier_ref = dmabuf;
224 nvgpu_mutex_release(&ch->error_notifier_mutex);
225
226 return 0;
227}
228
229struct channel_gk20a *gk20a_get_channel_from_file(int fd)
230{
231 struct channel_priv *priv;
232 struct file *f = fget(fd);
233
234 if (!f)
235 return NULL;
236
237 if (f->f_op != &gk20a_channel_ops) {
238 fput(f);
239 return NULL;
240 }
241
242 priv = (struct channel_priv *)f->private_data;
243 fput(f);
244 return priv->c;
245}
246
247int gk20a_channel_release(struct inode *inode, struct file *filp)
248{
249 struct channel_priv *priv = filp->private_data;
250 struct channel_gk20a *ch = priv->c;
251 struct gk20a *g = priv->g;
252
253 int err;
254
255 err = gk20a_busy(g);
256 if (err) {
257 gk20a_err(dev_from_gk20a(g), "failed to release a channel!");
258 goto channel_release;
259 }
260
261 trace_gk20a_channel_release(dev_name(g->dev));
262
263 gk20a_channel_close(ch);
264 gk20a_idle(g);
265
266channel_release:
267 gk20a_put(g);
268 nvgpu_kfree(g, filp->private_data);
269 filp->private_data = NULL;
270 return 0;
271}
272
273/* note: runlist_id -1 is a synonym for the ENGINE_GR_GK20A runlist id */
274static int __gk20a_channel_open(struct gk20a *g,
275 struct file *filp, s32 runlist_id)
276{
277 int err;
278 struct channel_gk20a *ch;
279 struct channel_priv *priv;
280
281 gk20a_dbg_fn("");
282
283 g = gk20a_get(g);
284 if (!g)
285 return -ENODEV;
286
287 trace_gk20a_channel_open(dev_name(g->dev));
288
289 priv = nvgpu_kzalloc(g, sizeof(*priv));
290 if (!priv) {
291 err = -ENOMEM;
292 goto free_ref;
293 }
294
295 err = gk20a_busy(g);
296 if (err) {
297 gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
298 goto fail_busy;
299 }
300 /* All user space channels should be non-privileged */
301 ch = gk20a_open_new_channel(g, runlist_id, false);
302 gk20a_idle(g);
303 if (!ch) {
304 gk20a_err(dev_from_gk20a(g),
305 "failed to get f");
306 err = -ENOMEM;
307 goto fail_busy;
308 }
309
310 gk20a_channel_trace_sched_param(
311 trace_gk20a_channel_sched_defaults, ch);
312
313 priv->g = g;
314 priv->c = ch;
315
316 filp->private_data = priv;
317 return 0;
318
319fail_busy:
320 nvgpu_kfree(g, priv);
321free_ref:
322 gk20a_put(g);
323 return err;
324}
325
326int gk20a_channel_open(struct inode *inode, struct file *filp)
327{
328 struct gk20a *g = container_of(inode->i_cdev,
329 struct gk20a, channel.cdev);
330 int ret;
331
332 gk20a_dbg_fn("start");
333 ret = __gk20a_channel_open(g, filp, -1);
334
335 gk20a_dbg_fn("end");
336 return ret;
337}
338
339int gk20a_channel_open_ioctl(struct gk20a *g,
340 struct nvgpu_channel_open_args *args)
341{
342 int err;
343 int fd;
344 struct file *file;
345 char name[64];
346 s32 runlist_id = args->in.runlist_id;
347
348 err = get_unused_fd_flags(O_RDWR);
349 if (err < 0)
350 return err;
351 fd = err;
352
353 snprintf(name, sizeof(name), "nvhost-%s-fd%d",
354 dev_name(g->dev), fd);
355
356 file = anon_inode_getfile(name, g->channel.cdev.ops, NULL, O_RDWR);
357 if (IS_ERR(file)) {
358 err = PTR_ERR(file);
359 goto clean_up;
360 }
361
362 err = __gk20a_channel_open(g, file, runlist_id);
363 if (err)
364 goto clean_up_file;
365
366 fd_install(fd, file);
367 args->out.channel_fd = fd;
368 return 0;
369
370clean_up_file:
371 fput(file);
372clean_up:
373 put_unused_fd(fd);
374 return err;
375}
376
377int nvgpu_channel_ioctl_alloc_gpfifo(struct channel_gk20a *c,
378 struct nvgpu_alloc_gpfifo_ex_args *args)
379{
380 return gk20a_channel_alloc_gpfifo(c, args->num_entries,
381 args->num_inflight_jobs,
382 args->flags);
383}
384
385
386static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
387 ulong id, u32 offset,
388 u32 payload, long timeout)
389{
390 struct device *dev = ch->g->dev;
391 struct dma_buf *dmabuf;
392 void *data;
393 u32 *semaphore;
394 int ret = 0;
395 long remain;
396
397 /* do not wait if channel has timed out */
398 if (ch->has_timedout)
399 return -ETIMEDOUT;
400
401 dmabuf = dma_buf_get(id);
402 if (IS_ERR(dmabuf)) {
403 gk20a_err(dev, "invalid notifier nvmap handle 0x%lx", id);
404 return -EINVAL;
405 }
406
407 data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
408 if (!data) {
409 gk20a_err(dev, "failed to map notifier memory");
410 ret = -EINVAL;
411 goto cleanup_put;
412 }
413
414 semaphore = data + (offset & ~PAGE_MASK);
415
416 remain = wait_event_interruptible_timeout(
417 ch->semaphore_wq,
418 *semaphore == payload || ch->has_timedout,
419 timeout);
420
421 if (remain == 0 && *semaphore != payload)
422 ret = -ETIMEDOUT;
423 else if (remain < 0)
424 ret = remain;
425
426 dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
427cleanup_put:
428 dma_buf_put(dmabuf);
429 return ret;
430}
431
432static int gk20a_channel_wait(struct channel_gk20a *ch,
433 struct nvgpu_wait_args *args)
434{
435 struct device *d = dev_from_gk20a(ch->g);
436 struct dma_buf *dmabuf;
437 struct notification *notif;
438 struct timespec tv;
439 u64 jiffies;
440 ulong id;
441 u32 offset;
442 unsigned long timeout;
443 int remain, ret = 0;
444 u64 end;
445
446 gk20a_dbg_fn("");
447
448 if (ch->has_timedout)
449 return -ETIMEDOUT;
450
451 if (args->timeout == NVGPU_NO_TIMEOUT)
452 timeout = MAX_SCHEDULE_TIMEOUT;
453 else
454 timeout = (u32)msecs_to_jiffies(args->timeout);
455
456 switch (args->type) {
457 case NVGPU_WAIT_TYPE_NOTIFIER:
458 id = args->condition.notifier.dmabuf_fd;
459 offset = args->condition.notifier.offset;
460 end = offset + sizeof(struct notification);
461
462 dmabuf = dma_buf_get(id);
463 if (IS_ERR(dmabuf)) {
464 gk20a_err(d, "invalid notifier nvmap handle 0x%lx",
465 id);
466 return -EINVAL;
467 }
468
469 if (end > dmabuf->size || end < sizeof(struct notification)) {
470 dma_buf_put(dmabuf);
471 gk20a_err(d, "invalid notifier offset\n");
472 return -EINVAL;
473 }
474
475 notif = dma_buf_vmap(dmabuf);
476 if (!notif) {
477 gk20a_err(d, "failed to map notifier memory");
478 return -ENOMEM;
479 }
480
481 notif = (struct notification *)((uintptr_t)notif + offset);
482
483 /* user should set status pending before
484 * calling this ioctl */
485 remain = wait_event_interruptible_timeout(
486 ch->notifier_wq,
487 notif->status == 0 || ch->has_timedout,
488 timeout);
489
490 if (remain == 0 && notif->status != 0) {
491 ret = -ETIMEDOUT;
492 goto notif_clean_up;
493 } else if (remain < 0) {
494 ret = -EINTR;
495 goto notif_clean_up;
496 }
497
498 /* TBD: fill in correct information */
499 jiffies = get_jiffies_64();
500 jiffies_to_timespec(jiffies, &tv);
501 notif->timestamp.nanoseconds[0] = tv.tv_nsec;
502 notif->timestamp.nanoseconds[1] = tv.tv_sec;
503 notif->info32 = 0xDEADBEEF; /* should be object name */
504 notif->info16 = ch->hw_chid; /* should be method offset */
505
506notif_clean_up:
507 dma_buf_vunmap(dmabuf, notif);
508 return ret;
509
510 case NVGPU_WAIT_TYPE_SEMAPHORE:
511 ret = gk20a_channel_wait_semaphore(ch,
512 args->condition.semaphore.dmabuf_fd,
513 args->condition.semaphore.offset,
514 args->condition.semaphore.payload,
515 timeout);
516
517 break;
518
519 default:
520 ret = -EINVAL;
521 break;
522 }
523
524 return ret;
525}
526
527static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
528{
529 unsigned int mask = 0;
530 struct gk20a_event_id_data *event_id_data = filep->private_data;
531 struct gk20a *g = event_id_data->g;
532 u32 event_id = event_id_data->event_id;
533
534 gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
535
536 poll_wait(filep, &event_id_data->event_id_wq, wait);
537
538 nvgpu_mutex_acquire(&event_id_data->lock);
539
540 if (event_id_data->is_tsg) {
541 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
542
543 if (event_id_data->event_posted) {
544 gk20a_dbg_info(
545 "found pending event_id=%d on TSG=%d\n",
546 event_id, tsg->tsgid);
547 mask = (POLLPRI | POLLIN);
548 event_id_data->event_posted = false;
549 }
550 } else {
551 struct channel_gk20a *ch = g->fifo.channel
552 + event_id_data->id;
553
554 if (event_id_data->event_posted) {
555 gk20a_dbg_info(
556 "found pending event_id=%d on chid=%d\n",
557 event_id, ch->hw_chid);
558 mask = (POLLPRI | POLLIN);
559 event_id_data->event_posted = false;
560 }
561 }
562
563 nvgpu_mutex_release(&event_id_data->lock);
564
565 return mask;
566}
567
568static int gk20a_event_id_release(struct inode *inode, struct file *filp)
569{
570 struct gk20a_event_id_data *event_id_data = filp->private_data;
571 struct gk20a *g = event_id_data->g;
572
573 if (event_id_data->is_tsg) {
574 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
575
576 nvgpu_mutex_acquire(&tsg->event_id_list_lock);
577 list_del_init(&event_id_data->event_id_node);
578 nvgpu_mutex_release(&tsg->event_id_list_lock);
579 } else {
580 struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
581
582 nvgpu_mutex_acquire(&ch->event_id_list_lock);
583 list_del_init(&event_id_data->event_id_node);
584 nvgpu_mutex_release(&ch->event_id_list_lock);
585 }
586
587 nvgpu_mutex_destroy(&event_id_data->lock);
588 gk20a_put(g);
589 nvgpu_kfree(g, event_id_data);
590 filp->private_data = NULL;
591
592 return 0;
593}
594
595const struct file_operations gk20a_event_id_ops = {
596 .owner = THIS_MODULE,
597 .poll = gk20a_event_id_poll,
598 .release = gk20a_event_id_release,
599};
600
601static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
602 u32 event_id,
603 struct gk20a_event_id_data **event_id_data)
604{
605 struct gk20a_event_id_data *local_event_id_data;
606 bool event_found = false;
607
608 nvgpu_mutex_acquire(&ch->event_id_list_lock);
609 list_for_each_entry(local_event_id_data, &ch->event_id_list,
610 event_id_node) {
611 if (local_event_id_data->event_id == event_id) {
612 event_found = true;
613 break;
614 }
615 }
616 nvgpu_mutex_release(&ch->event_id_list_lock);
617
618 if (event_found) {
619 *event_id_data = local_event_id_data;
620 return 0;
621 } else {
622 return -1;
623 }
624}
625
626void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
627 u32 event_id)
628{
629 struct gk20a_event_id_data *event_id_data;
630 int err = 0;
631
632 err = gk20a_channel_get_event_data_from_id(ch, event_id,
633 &event_id_data);
634 if (err)
635 return;
636
637 nvgpu_mutex_acquire(&event_id_data->lock);
638
639 gk20a_dbg_info(
640 "posting event for event_id=%d on ch=%d\n",
641 event_id, ch->hw_chid);
642 event_id_data->event_posted = true;
643
644 wake_up_interruptible_all(&event_id_data->event_id_wq);
645
646 nvgpu_mutex_release(&event_id_data->lock);
647}
648
649static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
650 int event_id,
651 int *fd)
652{
653 struct gk20a *g;
654 int err = 0;
655 int local_fd;
656 struct file *file;
657 char name[64];
658 struct gk20a_event_id_data *event_id_data;
659
660 g = gk20a_get(ch->g);
661 if (!g)
662 return -ENODEV;
663
664 err = gk20a_channel_get_event_data_from_id(ch,
665 event_id, &event_id_data);
666 if (err == 0) {
667 /* We already have event enabled */
668 err = -EINVAL;
669 goto free_ref;
670 }
671
672 err = get_unused_fd_flags(O_RDWR);
673 if (err < 0)
674 goto free_ref;
675 local_fd = err;
676
677 snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
678 event_id, local_fd);
679 file = anon_inode_getfile(name, &gk20a_event_id_ops,
680 NULL, O_RDWR);
681 if (IS_ERR(file)) {
682 err = PTR_ERR(file);
683 goto clean_up;
684 }
685
686 event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
687 if (!event_id_data) {
688 err = -ENOMEM;
689 goto clean_up_file;
690 }
691 event_id_data->g = g;
692 event_id_data->id = ch->hw_chid;
693 event_id_data->is_tsg = false;
694 event_id_data->event_id = event_id;
695
696 init_waitqueue_head(&event_id_data->event_id_wq);
697 err = nvgpu_mutex_init(&event_id_data->lock);
698 if (err)
699 goto clean_up_free;
700 INIT_LIST_HEAD(&event_id_data->event_id_node);
701
702 nvgpu_mutex_acquire(&ch->event_id_list_lock);
703 list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
704 nvgpu_mutex_release(&ch->event_id_list_lock);
705
706 fd_install(local_fd, file);
707 file->private_data = event_id_data;
708
709 *fd = local_fd;
710
711 return 0;
712
713clean_up_free:
714 nvgpu_kfree(g, event_id_data);
715clean_up_file:
716 fput(file);
717clean_up:
718 put_unused_fd(local_fd);
719free_ref:
720 gk20a_put(g);
721 return err;
722}
723
724static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
725 struct nvgpu_event_id_ctrl_args *args)
726{
727 int err = 0;
728 int fd = -1;
729
730 if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
731 return -EINVAL;
732
733 if (gk20a_is_channel_marked_as_tsg(ch))
734 return -EINVAL;
735
736 switch (args->cmd) {
737 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
738 err = gk20a_channel_event_id_enable(ch, args->event_id, &fd);
739 if (!err)
740 args->event_fd = fd;
741 break;
742
743 default:
744 gk20a_err(dev_from_gk20a(ch->g),
745 "unrecognized channel event id cmd: 0x%x",
746 args->cmd);
747 err = -EINVAL;
748 break;
749 }
750
751 return err;
752}
753
754static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
755 struct nvgpu_zcull_bind_args *args)
756{
757 struct gk20a *g = ch->g;
758 struct gr_gk20a *gr = &g->gr;
759
760 gk20a_dbg_fn("");
761
762 return g->ops.gr.bind_ctxsw_zcull(g, gr, ch,
763 args->gpu_va, args->mode);
764}
765
766static int gk20a_ioctl_channel_submit_gpfifo(
767 struct channel_gk20a *ch,
768 struct nvgpu_submit_gpfifo_args *args)
769{
770 struct gk20a_fence *fence_out;
771 struct fifo_profile_gk20a *profile = NULL;
772
773 int ret = 0;
774 gk20a_dbg_fn("");
775
776#ifdef CONFIG_DEBUG_FS
777 profile = gk20a_fifo_profile_acquire(ch->g);
778
779 if (profile)
780 profile->timestamp[PROFILE_IOCTL_ENTRY] = sched_clock();
781#endif
782 if (ch->has_timedout)
783 return -ETIMEDOUT;
784 ret = gk20a_submit_channel_gpfifo(ch, NULL, args, args->num_entries,
785 args->flags, &args->fence,
786 &fence_out, false, profile);
787
788 if (ret)
789 goto clean_up;
790
791 /* Convert fence_out to something we can pass back to user space. */
792 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
793 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
794 int fd = gk20a_fence_install_fd(fence_out);
795 if (fd < 0)
796 ret = fd;
797 else
798 args->fence.id = fd;
799 } else {
800 args->fence.id = fence_out->syncpt_id;
801 args->fence.value = fence_out->syncpt_value;
802 }
803 }
804 gk20a_fence_put(fence_out);
805#ifdef CONFIG_DEBUG_FS
806 if (profile) {
807 profile->timestamp[PROFILE_IOCTL_EXIT] = sched_clock();
808 gk20a_fifo_profile_release(ch->g, profile);
809 }
810#endif
811clean_up:
812 return ret;
813}
814
815long gk20a_channel_ioctl(struct file *filp,
816 unsigned int cmd, unsigned long arg)
817{
818 struct channel_priv *priv = filp->private_data;
819 struct channel_gk20a *ch = priv->c;
820 struct device *dev = ch->g->dev;
821 u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
822 int err = 0;
823
824 gk20a_dbg_fn("start %d", _IOC_NR(cmd));
825
826 if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
827 (_IOC_NR(cmd) == 0) ||
828 (_IOC_NR(cmd) > NVGPU_IOCTL_CHANNEL_LAST) ||
829 (_IOC_SIZE(cmd) > NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE))
830 return -EINVAL;
831
832 if (_IOC_DIR(cmd) & _IOC_WRITE) {
833 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
834 return -EFAULT;
835 }
836
837 /* take a ref or return timeout if channel refs can't be taken */
838 ch = gk20a_channel_get(ch);
839 if (!ch)
840 return -ETIMEDOUT;
841
842 /* protect our sanity for threaded userspace - most of the channel is
843 * not thread safe */
844 nvgpu_mutex_acquire(&ch->ioctl_lock);
845
846 /* this ioctl call keeps a ref to the file which keeps a ref to the
847 * channel */
848
849 switch (cmd) {
850 case NVGPU_IOCTL_CHANNEL_OPEN:
851 err = gk20a_channel_open_ioctl(ch->g,
852 (struct nvgpu_channel_open_args *)buf);
853 break;
854 case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
855 break;
856 case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
857 err = gk20a_busy(ch->g);
858 if (err) {
859 dev_err(dev,
860 "%s: failed to host gk20a for ioctl cmd: 0x%x",
861 __func__, cmd);
862 break;
863 }
864 err = ch->g->ops.gr.alloc_obj_ctx(ch,
865 (struct nvgpu_alloc_obj_ctx_args *)buf);
866 gk20a_idle(ch->g);
867 break;
868 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX:
869 {
870 struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args =
871 (struct nvgpu_alloc_gpfifo_ex_args *)buf;
872
873 err = gk20a_busy(ch->g);
874 if (err) {
875 dev_err(dev,
876 "%s: failed to host gk20a for ioctl cmd: 0x%x",
877 __func__, cmd);
878 break;
879 }
880
881 if (!is_power_of_2(alloc_gpfifo_ex_args->num_entries)) {
882 err = -EINVAL;
883 gk20a_idle(ch->g);
884 break;
885 }
886 err = gk20a_channel_alloc_gpfifo(ch,
887 alloc_gpfifo_ex_args->num_entries,
888 alloc_gpfifo_ex_args->num_inflight_jobs,
889 alloc_gpfifo_ex_args->flags);
890 gk20a_idle(ch->g);
891 break;
892 }
893 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
894 {
895 struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args =
896 (struct nvgpu_alloc_gpfifo_args *)buf;
897
898 err = gk20a_busy(ch->g);
899 if (err) {
900 dev_err(dev,
901 "%s: failed to host gk20a for ioctl cmd: 0x%x",
902 __func__, cmd);
903 break;
904 }
905
906 /*
907 * Kernel can insert one extra gpfifo entry before user
908 * submitted gpfifos and another one after, for internal usage.
909 * Triple the requested size.
910 */
911 err = gk20a_channel_alloc_gpfifo(ch,
912 alloc_gpfifo_args->num_entries * 3,
913 0,
914 alloc_gpfifo_args->flags);
915 gk20a_idle(ch->g);
916 break;
917 }
918 case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO:
919 err = gk20a_ioctl_channel_submit_gpfifo(ch,
920 (struct nvgpu_submit_gpfifo_args *)buf);
921 break;
922 case NVGPU_IOCTL_CHANNEL_WAIT:
923 err = gk20a_busy(ch->g);
924 if (err) {
925 dev_err(dev,
926 "%s: failed to host gk20a for ioctl cmd: 0x%x",
927 __func__, cmd);
928 break;
929 }
930
931 /* waiting is thread-safe, not dropping this mutex could
932 * deadlock in certain conditions */
933 nvgpu_mutex_release(&ch->ioctl_lock);
934
935 err = gk20a_channel_wait(ch,
936 (struct nvgpu_wait_args *)buf);
937
938 nvgpu_mutex_acquire(&ch->ioctl_lock);
939
940 gk20a_idle(ch->g);
941 break;
942 case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
943 err = gk20a_busy(ch->g);
944 if (err) {
945 dev_err(dev,
946 "%s: failed to host gk20a for ioctl cmd: 0x%x",
947 __func__, cmd);
948 break;
949 }
950 err = gk20a_channel_zcull_bind(ch,
951 (struct nvgpu_zcull_bind_args *)buf);
952 gk20a_idle(ch->g);
953 break;
954 case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
955 err = gk20a_busy(ch->g);
956 if (err) {
957 dev_err(dev,
958 "%s: failed to host gk20a for ioctl cmd: 0x%x",
959 __func__, cmd);
960 break;
961 }
962 err = gk20a_init_error_notifier(ch,
963 (struct nvgpu_set_error_notifier *)buf);
964 gk20a_idle(ch->g);
965 break;
966#ifdef CONFIG_GK20A_CYCLE_STATS
967 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS:
968 err = gk20a_busy(ch->g);
969 if (err) {
970 dev_err(dev,
971 "%s: failed to host gk20a for ioctl cmd: 0x%x",
972 __func__, cmd);
973 break;
974 }
975 err = gk20a_channel_cycle_stats(ch,
976 (struct nvgpu_cycle_stats_args *)buf);
977 gk20a_idle(ch->g);
978 break;
979#endif
980 case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT:
981 {
982 u32 timeout =
983 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
984 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
985 timeout, ch->hw_chid);
986 ch->timeout_ms_max = timeout;
987 gk20a_channel_trace_sched_param(
988 trace_gk20a_channel_set_timeout, ch);
989 break;
990 }
991 case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT_EX:
992 {
993 u32 timeout =
994 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
995 bool timeout_debug_dump = !((u32)
996 ((struct nvgpu_set_timeout_ex_args *)buf)->flags &
997 (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
998 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
999 timeout, ch->hw_chid);
1000 ch->timeout_ms_max = timeout;
1001 ch->timeout_debug_dump = timeout_debug_dump;
1002 gk20a_channel_trace_sched_param(
1003 trace_gk20a_channel_set_timeout, ch);
1004 break;
1005 }
1006 case NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT:
1007 ((struct nvgpu_get_param_args *)buf)->value =
1008 ch->has_timedout;
1009 break;
1010 case NVGPU_IOCTL_CHANNEL_SET_PRIORITY:
1011 err = gk20a_busy(ch->g);
1012 if (err) {
1013 dev_err(dev,
1014 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1015 __func__, cmd);
1016 break;
1017 }
1018 err = ch->g->ops.fifo.channel_set_priority(ch,
1019 ((struct nvgpu_set_priority_args *)buf)->priority);
1020
1021 gk20a_idle(ch->g);
1022 gk20a_channel_trace_sched_param(
1023 trace_gk20a_channel_set_priority, ch);
1024 break;
1025 case NVGPU_IOCTL_CHANNEL_ENABLE:
1026 err = gk20a_busy(ch->g);
1027 if (err) {
1028 dev_err(dev,
1029 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1030 __func__, cmd);
1031 break;
1032 }
1033 if (ch->g->ops.fifo.enable_channel)
1034 ch->g->ops.fifo.enable_channel(ch);
1035 else
1036 err = -ENOSYS;
1037 gk20a_idle(ch->g);
1038 break;
1039 case NVGPU_IOCTL_CHANNEL_DISABLE:
1040 err = gk20a_busy(ch->g);
1041 if (err) {
1042 dev_err(dev,
1043 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1044 __func__, cmd);
1045 break;
1046 }
1047 if (ch->g->ops.fifo.disable_channel)
1048 ch->g->ops.fifo.disable_channel(ch);
1049 else
1050 err = -ENOSYS;
1051 gk20a_idle(ch->g);
1052 break;
1053 case NVGPU_IOCTL_CHANNEL_PREEMPT:
1054 err = gk20a_busy(ch->g);
1055 if (err) {
1056 dev_err(dev,
1057 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1058 __func__, cmd);
1059 break;
1060 }
1061 err = gk20a_fifo_preempt(ch->g, ch);
1062 gk20a_idle(ch->g);
1063 break;
1064 case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
1065 err = gk20a_busy(ch->g);
1066 if (err) {
1067 dev_err(dev,
1068 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1069 __func__, cmd);
1070 break;
1071 }
1072 err = ch->g->ops.fifo.force_reset_ch(ch,
1073 NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR, true);
1074 gk20a_idle(ch->g);
1075 break;
1076 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
1077 err = gk20a_channel_event_id_ctrl(ch,
1078 (struct nvgpu_event_id_ctrl_args *)buf);
1079 break;
1080#ifdef CONFIG_GK20A_CYCLE_STATS
1081 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
1082 err = gk20a_busy(ch->g);
1083 if (err) {
1084 dev_err(dev,
1085 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1086 __func__, cmd);
1087 break;
1088 }
1089 err = gk20a_channel_cycle_stats_snapshot(ch,
1090 (struct nvgpu_cycle_stats_snapshot_args *)buf);
1091 gk20a_idle(ch->g);
1092 break;
1093#endif
1094 case NVGPU_IOCTL_CHANNEL_WDT:
1095 err = gk20a_channel_set_wdt_status(ch,
1096 (struct nvgpu_channel_wdt_args *)buf);
1097 break;
1098 case NVGPU_IOCTL_CHANNEL_SET_RUNLIST_INTERLEAVE:
1099 err = gk20a_busy(ch->g);
1100 if (err) {
1101 dev_err(dev,
1102 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1103 __func__, cmd);
1104 break;
1105 }
1106 err = gk20a_channel_set_runlist_interleave(ch,
1107 ((struct nvgpu_runlist_interleave_args *)buf)->level);
1108
1109 gk20a_idle(ch->g);
1110 gk20a_channel_trace_sched_param(
1111 trace_gk20a_channel_set_runlist_interleave, ch);
1112 break;
1113 case NVGPU_IOCTL_CHANNEL_SET_TIMESLICE:
1114 err = gk20a_busy(ch->g);
1115 if (err) {
1116 dev_err(dev,
1117 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1118 __func__, cmd);
1119 break;
1120 }
1121 err = ch->g->ops.fifo.channel_set_timeslice(ch,
1122 ((struct nvgpu_timeslice_args *)buf)->timeslice_us);
1123
1124 gk20a_idle(ch->g);
1125 gk20a_channel_trace_sched_param(
1126 trace_gk20a_channel_set_timeslice, ch);
1127 break;
1128 case NVGPU_IOCTL_CHANNEL_SET_PREEMPTION_MODE:
1129 if (ch->g->ops.gr.set_preemption_mode) {
1130 err = gk20a_busy(ch->g);
1131 if (err) {
1132 dev_err(dev,
1133 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1134 __func__, cmd);
1135 break;
1136 }
1137 err = ch->g->ops.gr.set_preemption_mode(ch,
1138 ((struct nvgpu_preemption_mode_args *)buf)->graphics_preempt_mode,
1139 ((struct nvgpu_preemption_mode_args *)buf)->compute_preempt_mode);
1140 gk20a_idle(ch->g);
1141 } else {
1142 err = -EINVAL;
1143 }
1144 break;
1145 case NVGPU_IOCTL_CHANNEL_SET_BOOSTED_CTX:
1146 if (ch->g->ops.gr.set_boosted_ctx) {
1147 bool boost =
1148 ((struct nvgpu_boosted_ctx_args *)buf)->boost;
1149
1150 err = gk20a_busy(ch->g);
1151 if (err) {
1152 dev_err(dev,
1153 "%s: failed to host gk20a for ioctl cmd: 0x%x",
1154 __func__, cmd);
1155 break;
1156 }
1157 err = ch->g->ops.gr.set_boosted_ctx(ch, boost);
1158 gk20a_idle(ch->g);
1159 } else {
1160 err = -EINVAL;
1161 }
1162 break;
1163 default:
1164 dev_dbg(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1165 err = -ENOTTY;
1166 break;
1167 }
1168
1169 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1170 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1171
1172 nvgpu_mutex_release(&ch->ioctl_lock);
1173
1174 gk20a_channel_put(ch);
1175
1176 gk20a_dbg_fn("end");
1177
1178 return err;
1179}