author	Joshua Bakita <bakitajoshua@gmail.com>	2024-09-25 16:09:09 -0400
committer	Joshua Bakita <bakitajoshua@gmail.com>	2024-09-25 16:09:09 -0400
commit	f347fde22f1297e4f022600d201780d5ead78114 (patch)
tree	76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/linux/ioctl_tsg.c
parent	8340d234d78a7d0f46c11a584de538148b78b7cb (diff)
Delete no-longer-needed nvgpu headers (HEAD, master, jbakita-wip)

The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/ioctl_tsg.c')
-rw-r--r--	include/os/linux/ioctl_tsg.c	750
1 file changed, 0 insertions(+), 750 deletions(-)
diff --git a/include/os/linux/ioctl_tsg.c b/include/os/linux/ioctl_tsg.c
deleted file mode 100644
index 296b02b..0000000
--- a/include/os/linux/ioctl_tsg.c
+++ /dev/null
@@ -1,750 +0,0 @@
/*
 * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <uapi/linux/nvgpu.h>
#include <linux/anon_inodes.h>

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/os_sched.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/tsg.h>

#include "gv11b/fifo_gv11b.h"
#include "platform_gk20a.h"
#include "ioctl_tsg.h"
#include "ioctl_channel.h"
#include "os_linux.h"

struct tsg_private {
	struct gk20a *g;
	struct tsg_gk20a *tsg;
};

static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
{
	struct channel_gk20a *ch;
	int err;

	ch = gk20a_get_channel_from_file(ch_fd);
	if (!ch)
		return -EINVAL;

	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);

	gk20a_channel_put(ch);
	return err;
}
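
/*
 * A minimal userspace sketch of the plain bind path above, included for
 * illustration only (hence the #if 0 guard). It assumes the exported uapi
 * header is installed as <linux/nvgpu.h>. NVGPU_TSG_IOCTL_BIND_CHANNEL
 * takes the channel fd by pointer; the kernel side resolves it through
 * gk20a_get_channel_from_file(), so any fd that is not an nvgpu channel
 * fails with -EINVAL.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* exported uapi ioctl definitions */

static int bind_channel_to_tsg(int tsg_fd, int ch_fd)
{
	/* Returns 0 on success, -1 with errno set on failure. */
	return ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL, &ch_fd);
}
#endif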

static int gk20a_tsg_ioctl_bind_channel_ex(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_tsg_bind_channel_ex_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	struct channel_gk20a *ch;
	struct gr_gk20a *gr = &g->gr;
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto mutex_release;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto mutex_release;
	}

	ch = gk20a_get_channel_from_file(arg->channel_fd);
	if (!ch) {
		err = -EINVAL;
		goto idle;
	}

	if (arg->tpc_pg_enabled && (!tsg->tpc_num_initialized)) {
		if ((arg->num_active_tpcs > gr->max_tpc_count) ||
				!(arg->num_active_tpcs)) {
			nvgpu_err(g, "Invalid num of active TPCs");
			err = -EINVAL;
			goto ch_put;
		}
		tsg->tpc_num_initialized = true;
		tsg->num_active_tpcs = arg->num_active_tpcs;
		tsg->tpc_pg_enabled = true;
	} else {
		tsg->tpc_pg_enabled = false;
		nvgpu_log(g, gpu_dbg_info, "dynamic TPC-PG not enabled");
	}

	if (arg->subcontext_id < g->fifo.max_subctx_count) {
		ch->subctx_id = arg->subcontext_id;
	} else {
		err = -EINVAL;
		goto ch_put;
	}

	nvgpu_log(g, gpu_dbg_info, "channel id : %d : subctx: %d",
			ch->chid, ch->subctx_id);

	/* Use runqueue selector 1 for all ASYNC ids */
	if (ch->subctx_id > CHANNEL_INFO_VEID0)
		ch->runqueue_sel = 1;

	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
ch_put:
	gk20a_channel_put(ch);
idle:
	gk20a_idle(g);
mutex_release:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}
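
/*
 * Sketch of driving the extended bind above from userspace. The field
 * names mirror the arg-> accesses in gk20a_tsg_ioctl_bind_channel_ex();
 * the chosen values are illustrative assumptions, not requirements.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int bind_channel_ex(int tsg_fd, int ch_fd)
{
	struct nvgpu_tsg_bind_channel_ex_args args;

	memset(&args, 0, sizeof(args));
	args.channel_fd = ch_fd;
	args.subcontext_id = 0;		/* must be < max_subctx_count */
	args.tpc_pg_enabled = 0;	/* skip the dynamic TPC-PG setup */
	args.num_active_tpcs = 0;	/* only read when tpc_pg_enabled */

	/* -EINVAL on a bad fd, subcontext id, or TPC count;
	 * -EPERM while a sched-control session holds control_lock. */
	return ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL_EX, &args);
}
#endif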

static int gk20a_tsg_unbind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
{
	struct channel_gk20a *ch;
	int err = 0;

	ch = gk20a_get_channel_from_file(ch_fd);
	if (!ch)
		return -EINVAL;

	if (ch->tsgid != tsg->tsgid) {
		err = -EINVAL;
		goto out;
	}

	err = gk20a_tsg_unbind_channel(ch, false);
	if (err == -EAGAIN) {
		goto out;
	}

	/*
	 * Mark the channel timed out: a channel unbound from its TSG
	 * has no context of its own, so it cannot serve any job.
	 */
	gk20a_channel_set_timedout(ch);

out:
	gk20a_channel_put(ch);
	return err;
}

static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
				unsigned int event_id,
				struct gk20a_event_id_data **event_id_data)
{
	struct gk20a_event_id_data *local_event_id_data;
	bool event_found = false;

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_for_each_entry(local_event_id_data, &tsg->event_id_list,
			gk20a_event_id_data, event_id_node) {
		if (local_event_id_data->event_id == event_id) {
			event_found = true;
			break;
		}
	}
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	if (event_found) {
		*event_id_data = local_event_id_data;
		return 0;
	} else {
		return -1;
	}
}

/*
 * Convert a common event_id of the form NVGPU_EVENT_ID_* to the
 * Linux-specific event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_*,
 * which is used in IOCTLs.
 */
static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
{
	switch (event_id) {
	case NVGPU_EVENT_ID_BPT_INT:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
	case NVGPU_EVENT_ID_BPT_PAUSE:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
	case NVGPU_EVENT_ID_BLOCKING_SYNC:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
	}

	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
}

void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
				       int __event_id)
{
	struct gk20a_event_id_data *event_id_data;
	u32 event_id;
	int err = 0;
	struct gk20a *g = tsg->g;

	event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
	if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
		return;

	err = gk20a_tsg_get_event_data_from_id(tsg, event_id,
						&event_id_data);
	if (err)
		return;

	nvgpu_mutex_acquire(&event_id_data->lock);

	nvgpu_log_info(g,
		"posting event for event_id=%d on tsg=%d\n",
		event_id, tsg->tsgid);
	event_id_data->event_posted = true;

	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);

	nvgpu_mutex_release(&event_id_data->lock);
}

static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
{
	unsigned int mask = 0;
	struct gk20a_event_id_data *event_id_data = filep->private_data;
	struct gk20a *g = event_id_data->g;
	u32 event_id = event_id_data->event_id;
	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, " ");

	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);

	nvgpu_mutex_acquire(&event_id_data->lock);

	if (event_id_data->event_posted) {
		nvgpu_log_info(g,
			"found pending event_id=%d on TSG=%d\n",
			event_id, tsg->tsgid);
		mask = (POLLPRI | POLLIN);
		event_id_data->event_posted = false;
	}

	nvgpu_mutex_release(&event_id_data->lock);

	return mask;
}

static int gk20a_event_id_release(struct inode *inode, struct file *filp)
{
	struct gk20a_event_id_data *event_id_data = filp->private_data;
	struct gk20a *g = event_id_data->g;
	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_del(&event_id_data->event_id_node);
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	nvgpu_mutex_destroy(&event_id_data->lock);
	gk20a_put(g);
	nvgpu_kfree(g, event_id_data);
	filp->private_data = NULL;

	return 0;
}

const struct file_operations gk20a_event_id_ops = {
	.owner = THIS_MODULE,
	.poll = gk20a_event_id_poll,
	.release = gk20a_event_id_release,
};

static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
				     int event_id,
				     int *fd)
{
	int err = 0;
	int local_fd;
	struct file *file;
	char name[64];
	struct gk20a_event_id_data *event_id_data;
	struct gk20a *g;

	g = gk20a_get(tsg->g);
	if (!g)
		return -ENODEV;

	err = gk20a_tsg_get_event_data_from_id(tsg,
				event_id, &event_id_data);
	if (err == 0) {
		/* We already have event enabled */
		err = -EINVAL;
		goto free_ref;
	}

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		goto free_ref;
	local_fd = err;

	snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
		 event_id, local_fd);

	event_id_data = nvgpu_kzalloc(tsg->g, sizeof(*event_id_data));
	if (!event_id_data) {
		err = -ENOMEM;
		goto clean_up;
	}
	event_id_data->g = g;
	event_id_data->id = tsg->tsgid;
	event_id_data->event_id = event_id;

	nvgpu_cond_init(&event_id_data->event_id_wq);
	err = nvgpu_mutex_init(&event_id_data->lock);
	if (err)
		goto clean_up_free;

	nvgpu_init_list_node(&event_id_data->event_id_node);

	file = anon_inode_getfile(name, &gk20a_event_id_ops,
				  event_id_data, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up_free;
	}

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list);
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	fd_install(local_fd, file);

	*fd = local_fd;

	return 0;

clean_up_free:
	nvgpu_kfree(g, event_id_data);
clean_up:
	put_unused_fd(local_fd);
free_ref:
	gk20a_put(g);
	return err;
}

static int gk20a_tsg_event_id_ctrl(struct gk20a *g, struct tsg_gk20a *tsg,
		struct nvgpu_event_id_ctrl_args *args)
{
	int err = 0;
	int fd = -1;

	if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
		return -EINVAL;

	nvgpu_speculation_barrier();
	switch (args->cmd) {
	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
		err = gk20a_tsg_event_id_enable(tsg, args->event_id, &fd);
		if (!err)
			args->event_fd = fd;
		break;

	default:
		nvgpu_err(tsg->g, "unrecognized tsg event id cmd: 0x%x",
			  args->cmd);
		err = -EINVAL;
		break;
	}

	return err;
}
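
/*
 * Sketch of the full event round trip from userspace: enable an event id
 * on the TSG, then poll() the returned anon-inode fd, which lands in
 * gk20a_event_id_poll() above. Illustration only; assumes the uapi header
 * is installed as <linux/nvgpu.h>.
 */
#if 0
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int wait_for_blocking_sync(int tsg_fd)
{
	struct nvgpu_event_id_ctrl_args args = {
		.cmd = NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE,
		.event_id = NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC,
	};
	struct pollfd pfd;

	/* Enabling the same event id twice fails with -EINVAL. */
	if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_EVENT_ID_CTRL, &args))
		return -1;

	pfd.fd = args.event_fd;
	pfd.events = POLLIN | POLLPRI;	/* the mask the driver posts */
	return poll(&pfd, 1, -1);	/* block until the event posts */
}
#endif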

int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
{
	struct tsg_private *priv;
	struct tsg_gk20a *tsg;
	struct device *dev;
	int err;

	g = gk20a_get(g);
	if (!g)
		return -ENODEV;

	dev = dev_from_gk20a(g);

	nvgpu_log(g, gpu_dbg_fn, "tsg: %s", dev_name(dev));

	priv = nvgpu_kmalloc(g, sizeof(*priv));
	if (!priv) {
		err = -ENOMEM;
		goto free_ref;
	}

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on, %d", err);
		goto free_mem;
	}

	tsg = gk20a_tsg_open(g, nvgpu_current_pid(g));
	gk20a_idle(g);
	if (!tsg) {
		err = -ENOMEM;
		goto free_mem;
	}

	priv->g = g;
	priv->tsg = tsg;
	filp->private_data = priv;

	gk20a_sched_ctrl_tsg_added(g, tsg);

	return 0;

free_mem:
	nvgpu_kfree(g, priv);
free_ref:
	gk20a_put(g);
	return err;
}

int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l;
	struct gk20a *g;
	int ret;

	l = container_of(inode->i_cdev,
			 struct nvgpu_os_linux, tsg.cdev);
	g = &l->g;

	nvgpu_log_fn(g, " ");

	ret = gk20a_busy(g);
	if (ret) {
		nvgpu_err(g, "failed to power on, %d", ret);
		return ret;
	}

	ret = nvgpu_ioctl_tsg_open(&l->g, filp);

	gk20a_idle(g);
	nvgpu_log_fn(g, "done");
	return ret;
}

void nvgpu_ioctl_tsg_release(struct nvgpu_ref *ref)
{
	struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount);
	struct gk20a *g = tsg->g;

	gk20a_sched_ctrl_tsg_removed(g, tsg);

	gk20a_tsg_release(ref);
	gk20a_put(g);
}

int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp)
{
	struct tsg_private *priv = filp->private_data;
	struct tsg_gk20a *tsg;

	if (!priv) {
		/* open failed, never got a tsg for this file */
		return 0;
	}

	tsg = priv->tsg;

	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
	nvgpu_kfree(tsg->g, priv);
	return 0;
}
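
/*
 * Lifecycle sketch: open() lands in nvgpu_ioctl_tsg_dev_open() above and
 * allocates a fresh TSG; close() drops this file's reference through
 * nvgpu_ioctl_tsg_dev_release(). The device node name is an assumption;
 * it is platform-dependent (/dev/nvhost-tsg-gpu is typical on Tegra).
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int with_tsg(int (*body)(int tsg_fd))
{
	int tsg_fd = open("/dev/nvhost-tsg-gpu", O_RDWR);
	int err;

	if (tsg_fd < 0)
		return -1;
	err = body(tsg_fd);
	close(tsg_fd);	/* drops this file's TSG reference */
	return err;
}
#endif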

static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_runlist_interleave_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	u32 level = arg->level;
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto done;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto done;
	}

	level = nvgpu_get_common_runlist_level(level);
	err = gk20a_tsg_set_runlist_interleave(tsg, level);

	gk20a_idle(g);
done:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}

static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto done;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto done;
	}
	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
	gk20a_idle(g);
done:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}

static int gk20a_tsg_ioctl_get_timeslice(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
{
	arg->timeslice_us = gk20a_tsg_get_timeslice(tsg);
	return 0;
}
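
/*
 * Sketch of the timeslice pair above from userspace: set a new timeslice,
 * then read back the effective value. Illustration only; assumes the uapi
 * header is installed as <linux/nvgpu.h>.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static long set_then_get_timeslice_us(int tsg_fd, unsigned int us)
{
	struct nvgpu_timeslice_args args = { .timeslice_us = us };

	/* -EPERM while a sched-control session holds control_lock. */
	if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_SET_TIMESLICE, &args))
		return -1;

	args.timeslice_us = 0;
	if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_GET_TIMESLICE, &args))
		return -1;
	return args.timeslice_us;
}
#endif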

static int gk20a_tsg_ioctl_read_single_sm_error_state(struct gk20a *g,
		struct tsg_gk20a *tsg,
		struct nvgpu_tsg_read_single_sm_error_state_args *args)
{
	struct gr_gk20a *gr = &g->gr;
	struct nvgpu_tsg_sm_error_state *sm_error_state;
	struct nvgpu_tsg_sm_error_state_record sm_error_state_record;
	u32 sm_id;
	int err = 0;

	sm_id = args->sm_id;
	if (sm_id >= gr->no_of_sm)
		return -EINVAL;

	nvgpu_speculation_barrier();

	sm_error_state = tsg->sm_error_states + sm_id;
	sm_error_state_record.global_esr =
		sm_error_state->hww_global_esr;
	sm_error_state_record.warp_esr =
		sm_error_state->hww_warp_esr;
	sm_error_state_record.warp_esr_pc =
		sm_error_state->hww_warp_esr_pc;
	sm_error_state_record.global_esr_report_mask =
		sm_error_state->hww_global_esr_report_mask;
	sm_error_state_record.warp_esr_report_mask =
		sm_error_state->hww_warp_esr_report_mask;

	if (args->record_size > 0) {
		/* Size of the record actually copied out, not of the
		 * internal state struct it is marshalled from. */
		size_t write_size = sizeof(sm_error_state_record);

		nvgpu_speculation_barrier();
		if (write_size > args->record_size)
			write_size = args->record_size;

		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
		if (copy_to_user((void __user *)(uintptr_t)
				 args->record_mem,
				 &sm_error_state_record,
				 write_size))
			err = -EFAULT;
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		if (err) {
			nvgpu_err(g, "copy_to_user failed!");
			return err;
		}

		args->record_size = write_size;
	}

	return 0;
}
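
/*
 * Sketch of reading one SM's error state from userspace. record_size acts
 * as both input capacity and output byte count: the kernel clamps the copy
 * to the smaller of the two, as the function above shows. Illustration
 * only; assumes the uapi header is installed as <linux/nvgpu.h>.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int read_sm_error_state(int tsg_fd, uint32_t sm_id,
			       struct nvgpu_tsg_sm_error_state_record *rec)
{
	struct nvgpu_tsg_read_single_sm_error_state_args args = {
		.sm_id = sm_id,			/* must be < no_of_sm */
		.record_mem = (uint64_t)(uintptr_t)rec,
		.record_size = sizeof(*rec),
	};

	return ioctl(tsg_fd, NVGPU_TSG_IOCTL_READ_SINGLE_SM_ERROR_STATE,
		     &args);
}
#endif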

long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct tsg_private *priv = filp->private_data;
	struct tsg_gk20a *tsg = priv->tsg;
	struct gk20a *g = tsg->g;
	u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));

	if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_TSG_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_TSG_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->sw_ready) {
		err = gk20a_busy(g);
		if (err)
			return err;

		gk20a_idle(g);
	}

	switch (cmd) {
	case NVGPU_TSG_IOCTL_BIND_CHANNEL:
	{
		int ch_fd = *(int *)buf;

		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
		break;
	}

	case NVGPU_TSG_IOCTL_BIND_CHANNEL_EX:
	{
		err = gk20a_tsg_ioctl_bind_channel_ex(g, tsg,
			(struct nvgpu_tsg_bind_channel_ex_args *)buf);
		break;
	}

	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
	{
		int ch_fd = *(int *)buf;

		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
				"failed to host gk20a for ioctl cmd: 0x%x", cmd);
			break;
		}
		err = gk20a_tsg_unbind_channel_fd(tsg, ch_fd);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_ENABLE:
	{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
				"failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		g->ops.fifo.enable_tsg(tsg);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_DISABLE:
	{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
				"failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		g->ops.fifo.disable_tsg(tsg);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_PREEMPT:
	{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
				"failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		/* preempt TSG */
		err = g->ops.fifo.preempt_tsg(g, tsg);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_EVENT_ID_CTRL:
	{
		err = gk20a_tsg_event_id_ctrl(g, tsg,
			(struct nvgpu_event_id_ctrl_args *)buf);
		break;
	}

	case NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
		err = gk20a_tsg_ioctl_set_runlist_interleave(g, tsg,
			(struct nvgpu_runlist_interleave_args *)buf);
		break;

	case NVGPU_IOCTL_TSG_SET_TIMESLICE:
	{
		err = gk20a_tsg_ioctl_set_timeslice(g, tsg,
			(struct nvgpu_timeslice_args *)buf);
		break;
	}
	case NVGPU_IOCTL_TSG_GET_TIMESLICE:
	{
		err = gk20a_tsg_ioctl_get_timeslice(g, tsg,
			(struct nvgpu_timeslice_args *)buf);
		break;
	}

	case NVGPU_TSG_IOCTL_READ_SINGLE_SM_ERROR_STATE:
	{
		err = gk20a_tsg_ioctl_read_single_sm_error_state(g, tsg,
			(struct nvgpu_tsg_read_single_sm_error_state_args *)buf);
		break;
	}

	default:
		nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x",
			  cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
		/* Don't leak copy_to_user()'s raw byte count to userspace. */
		if (copy_to_user((void __user *)arg,
				 buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}
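
/*
 * Sketch of the no-argument control ioctls handled by the dispatcher
 * above: disable the TSG, preempt it off the GPU, then re-enable it.
 * Illustration only; assumes the uapi header is installed as
 * <linux/nvgpu.h>.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int restart_tsg(int tsg_fd)
{
	if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_DISABLE, 0))
		return -1;
	/* preempt_tsg() can fail even after power-on succeeds */
	if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_PREEMPT, 0))
		return -1;
	return ioctl(tsg_fd, NVGPU_IOCTL_TSG_ENABLE, 0);
}
#endif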