author		Terje Bergstrom <tbergstrom@nvidia.com>	2018-04-18 15:59:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-06-15 20:47:31 -0400
commit		2a2c16af5f9f1ccfc93a13e820d5381e5c881e92 (patch)
tree		2e5d7b042270a649978e5bb540857012c85fb5b5 /drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
parent		98d996f4ffb0137d119b5849cae46d7b7e5693e1 (diff)
gpu: nvgpu: Move Linux files away from common
Move all Linux source code files to drivers/gpu/nvgpu/os/linux from
drivers/gpu/nvgpu/common/linux. This changes the meaning of common to
be OS-independent.

JIRA NVGPU-598
JIRA NVGPU-601

Change-Id: Ib7f2a43d3688bb0d0b7dcc48469a6783fd988ce9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1747714
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/ioctl_tsg.c')
-rw-r--r--	drivers/gpu/nvgpu/os/linux/ioctl_tsg.c	677
1 file changed, 677 insertions(+), 0 deletions(-)
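For context on the interface this file implements: userspace drives these
ioctls through a TSG device node. A minimal sketch of creating a TSG and
binding an existing channel fd to it follows; the device path
/dev/nvhost-tsg-gpu and the <linux/nvgpu.h> install location of the uapi
header are assumptions for illustration, not part of this commit.

	/* Sketch: open a TSG and bind a channel fd to it (paths assumed). */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nvgpu.h>	/* assumed install path of uapi/linux/nvgpu.h */

	int bind_channel_to_new_tsg(int ch_fd)
	{
		int tsg_fd = open("/dev/nvhost-tsg-gpu", O_RDWR);	/* assumed node */

		if (tsg_fd < 0)
			return -1;
		/* The kernel copies in _IOC_SIZE(cmd) bytes, so the channel
		 * fd is passed by pointer. */
		if (ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL, &ch_fd) != 0) {
			close(tsg_fd);
			return -1;
		}
		return tsg_fd;	/* releasing this fd tears the TSG down */
	}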
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
new file mode 100644
index 00000000..4ef99ded
--- /dev/null
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
@@ -0,0 +1,677 @@
/*
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <uapi/linux/nvgpu.h>
#include <linux/anon_inodes.h>

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/os_sched.h>

#include "gk20a/gk20a.h"
#include "gk20a/tsg_gk20a.h"
#include "gv11b/fifo_gv11b.h"
#include "platform_gk20a.h"
#include "ioctl_tsg.h"
#include "ioctl_channel.h"
#include "os_linux.h"

struct tsg_private {
	struct gk20a *g;
	struct tsg_gk20a *tsg;
};

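/*
 * Look up the channel behind ch_fd and bind it to the TSG.
 * gk20a_get_channel_from_file() takes a reference on the channel, which
 * is dropped with gk20a_channel_put() on every path out.
 */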
static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
{
	struct channel_gk20a *ch;
	int err;

	ch = gk20a_get_channel_from_file(ch_fd);
	if (!ch)
		return -EINVAL;

	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);

	gk20a_channel_put(ch);
	return err;
}

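/*
 * Extended bind: in addition to the plain bind, this configures the
 * TSG's TPC power-gating state and the channel's subcontext (VEID).
 * It is refused with -EPERM while userspace holds the scheduler
 * control lock.
 */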
static int gk20a_tsg_ioctl_bind_channel_ex(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_tsg_bind_channel_ex_args *arg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	struct channel_gk20a *ch;
	struct gr_gk20a *gr = &g->gr;
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto mutex_release;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto mutex_release;
	}

	ch = gk20a_get_channel_from_file(arg->channel_fd);
	if (!ch) {
		err = -EINVAL;
		goto idle;
	}

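	/*
	 * TPC power-gating: the active-TPC count is validated and latched
	 * by the first bind that requests it; tpc_num_initialized keeps
	 * later binds from re-initializing it.
	 */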
	if (arg->tpc_pg_enabled && (!tsg->tpc_num_initialized)) {
		if ((arg->num_active_tpcs > gr->max_tpc_count) ||
				!(arg->num_active_tpcs)) {
			nvgpu_err(g, "Invalid num of active TPCs");
			err = -EINVAL;
			goto ch_put;
		}
		tsg->tpc_num_initialized = true;
		tsg->num_active_tpcs = arg->num_active_tpcs;
		tsg->tpc_pg_enabled = true;
	} else {
		tsg->tpc_pg_enabled = false;
		nvgpu_log(g, gpu_dbg_info, "dynamic TPC-PG not enabled");
	}

	if (arg->subcontext_id < g->fifo.max_subctx_count) {
		ch->subctx_id = arg->subcontext_id;
	} else {
		err = -EINVAL;
		goto ch_put;
	}

	nvgpu_log(g, gpu_dbg_info, "channel id : %d : subctx: %d",
			ch->chid, ch->subctx_id);

	/* Use runqueue selector 1 for all ASYNC ids */
	if (ch->subctx_id > CHANNEL_INFO_VEID0)
		ch->runqueue_sel = 1;

	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
ch_put:
	gk20a_channel_put(ch);
idle:
	gk20a_idle(g);
mutex_release:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}

static int gk20a_tsg_unbind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
{
	struct channel_gk20a *ch;
	int err = 0;

	ch = gk20a_get_channel_from_file(ch_fd);
	if (!ch)
		return -EINVAL;

	if (ch->tsgid != tsg->tsgid) {
		err = -EINVAL;
		goto out;
	}

	err = gk20a_tsg_unbind_channel(ch);

	/*
	 * Mark the channel as timed out: a channel unbound from its TSG
	 * has no context of its own, so it cannot serve any more jobs.
	 */
	ch->has_timedout = true;

out:
	gk20a_channel_put(ch);
	return err;
}

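/*
 * Find the gk20a_event_id_data enabled on this TSG for event_id.
 * Walks tsg->event_id_list under event_id_list_lock; returns 0 and
 * fills *event_id_data on a hit, -1 if the event is not enabled.
 */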
static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
				unsigned int event_id,
				struct gk20a_event_id_data **event_id_data)
{
	struct gk20a_event_id_data *local_event_id_data;
	bool event_found = false;

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_for_each_entry(local_event_id_data, &tsg->event_id_list,
			gk20a_event_id_data, event_id_node) {
		if (local_event_id_data->event_id == event_id) {
			event_found = true;
			break;
		}
	}
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	if (event_found) {
		*event_id_data = local_event_id_data;
		return 0;
	} else {
		return -1;
	}
}

/*
 * Convert a common event_id of the form NVGPU_EVENT_ID_* to the
 * Linux-specific event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_*,
 * which is used in IOCTLs.
 */
static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
{
	switch (event_id) {
	case NVGPU_EVENT_ID_BPT_INT:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
	case NVGPU_EVENT_ID_BPT_PAUSE:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
	case NVGPU_EVENT_ID_BLOCKING_SYNC:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
	}

	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
}

void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
				       int __event_id)
{
	struct gk20a_event_id_data *event_id_data;
	u32 event_id;
	int err = 0;
	struct gk20a *g = tsg->g;

	event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
	if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
		return;

	err = gk20a_tsg_get_event_data_from_id(tsg, event_id,
				&event_id_data);
	if (err)
		return;

	nvgpu_mutex_acquire(&event_id_data->lock);

	nvgpu_log_info(g,
		"posting event for event_id=%d on tsg=%d\n",
		event_id, tsg->tsgid);
	event_id_data->event_posted = true;

	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);

	nvgpu_mutex_release(&event_id_data->lock);
}

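/*
 * poll() on an event fd: report (POLLPRI | POLLIN) once per posted
 * event and clear event_posted, so each posting is consumed exactly
 * once.
 */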
static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
{
	unsigned int mask = 0;
	struct gk20a_event_id_data *event_id_data = filep->private_data;
	struct gk20a *g = event_id_data->g;
	u32 event_id = event_id_data->event_id;
	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, " ");

	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);

	nvgpu_mutex_acquire(&event_id_data->lock);

	if (event_id_data->event_posted) {
		nvgpu_log_info(g,
			"found pending event_id=%d on TSG=%d\n",
			event_id, tsg->tsgid);
		mask = (POLLPRI | POLLIN);
		event_id_data->event_posted = false;
	}

	nvgpu_mutex_release(&event_id_data->lock);

	return mask;
}

static int gk20a_event_id_release(struct inode *inode, struct file *filp)
{
	struct gk20a_event_id_data *event_id_data = filp->private_data;
	struct gk20a *g = event_id_data->g;
	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_del(&event_id_data->event_id_node);
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	nvgpu_mutex_destroy(&event_id_data->lock);
	gk20a_put(g);
	nvgpu_kfree(g, event_id_data);
	filp->private_data = NULL;

	return 0;
}

const struct file_operations gk20a_event_id_ops = {
	.owner = THIS_MODULE,
	.poll = gk20a_event_id_poll,
	.release = gk20a_event_id_release,
};

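/*
 * Enable an event on the TSG by handing userspace an anonymous-inode
 * fd. The fd number is reserved first but only fd_install()ed after
 * the file, the event data and its lock are fully set up, so no
 * half-initialized fd ever becomes visible to userspace.
 */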
static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
			 int event_id,
			 int *fd)
{
	int err = 0;
	int local_fd;
	struct file *file;
	char name[64];
	struct gk20a_event_id_data *event_id_data;
	struct gk20a *g;

	g = gk20a_get(tsg->g);
	if (!g)
		return -ENODEV;

	err = gk20a_tsg_get_event_data_from_id(tsg,
				event_id, &event_id_data);
	if (err == 0) {
		/* The event is already enabled */
		err = -EINVAL;
		goto free_ref;
	}

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		goto free_ref;
	local_fd = err;

	snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
		 event_id, local_fd);

	file = anon_inode_getfile(name, &gk20a_event_id_ops,
				  NULL, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}

	event_id_data = nvgpu_kzalloc(tsg->g, sizeof(*event_id_data));
	if (!event_id_data) {
		err = -ENOMEM;
		goto clean_up_file;
	}
	event_id_data->g = g;
	event_id_data->id = tsg->tsgid;
	event_id_data->event_id = event_id;

	nvgpu_cond_init(&event_id_data->event_id_wq);
	err = nvgpu_mutex_init(&event_id_data->lock);
	if (err)
		goto clean_up_free;

	nvgpu_init_list_node(&event_id_data->event_id_node);

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list);
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	fd_install(local_fd, file);
	file->private_data = event_id_data;

	*fd = local_fd;

	return 0;

clean_up_free:
	nvgpu_kfree(g, event_id_data);
clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(local_fd);
free_ref:
	gk20a_put(g);
	return err;
}

static int gk20a_tsg_event_id_ctrl(struct gk20a *g, struct tsg_gk20a *tsg,
		struct nvgpu_event_id_ctrl_args *args)
{
	int err = 0;
	int fd = -1;

	if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
		return -EINVAL;

	switch (args->cmd) {
	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
		err = gk20a_tsg_event_id_enable(tsg, args->event_id, &fd);
		if (!err)
			args->event_fd = fd;
		break;

	default:
		nvgpu_err(tsg->g, "unrecognized tsg event id cmd: 0x%x",
			args->cmd);
		err = -EINVAL;
		break;
	}

	return err;
}

int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
{
	struct tsg_private *priv;
	struct tsg_gk20a *tsg;
	struct device *dev;
	int err;

	g = gk20a_get(g);
	if (!g)
		return -ENODEV;

	dev = dev_from_gk20a(g);

	nvgpu_log(g, gpu_dbg_fn, "tsg: %s", dev_name(dev));

	priv = nvgpu_kmalloc(g, sizeof(*priv));
	if (!priv) {
		err = -ENOMEM;
		goto free_ref;
	}

	tsg = gk20a_tsg_open(g, nvgpu_current_pid(g));
	if (!tsg) {
		nvgpu_kfree(g, priv);
		err = -ENOMEM;
		goto free_ref;
	}

	priv->g = g;
	priv->tsg = tsg;
	filp->private_data = priv;

	gk20a_sched_ctrl_tsg_added(g, tsg);

	return 0;

free_ref:
	gk20a_put(g);
	return err;
}

int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l;
	struct gk20a *g;
	int ret;

	l = container_of(inode->i_cdev,
			 struct nvgpu_os_linux, tsg.cdev);
	g = &l->g;

	nvgpu_log_fn(g, " ");

	ret = gk20a_busy(g);
	if (ret) {
		nvgpu_err(g, "failed to power on, %d", ret);
		return ret;
	}

	ret = nvgpu_ioctl_tsg_open(&l->g, filp);

	gk20a_idle(g);
	nvgpu_log_fn(g, "done");
	return ret;
}

void nvgpu_ioctl_tsg_release(struct nvgpu_ref *ref)
{
	struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount);
	struct gk20a *g = tsg->g;

	gk20a_sched_ctrl_tsg_removed(g, tsg);

	gk20a_tsg_release(ref);
	gk20a_put(g);
}

int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp)
{
	struct tsg_private *priv = filp->private_data;
	struct tsg_gk20a *tsg = priv->tsg;

	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
	nvgpu_kfree(tsg->g, priv);
	return 0;
}

static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_runlist_interleave_args *arg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	u32 level = arg->level;
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto done;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto done;
	}

	level = nvgpu_get_common_runlist_level(level);
	err = gk20a_tsg_set_runlist_interleave(tsg, level);

	gk20a_idle(g);
done:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}

static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto done;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto done;
	}
	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
	gk20a_idle(g);
done:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}

static int gk20a_tsg_ioctl_get_timeslice(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
{
	arg->timeslice_us = gk20a_tsg_get_timeslice(tsg);
	return 0;
}

long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	struct tsg_private *priv = filp->private_data;
	struct tsg_gk20a *tsg = priv->tsg;
	struct gk20a *g = tsg->g;
	u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));

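	/*
	 * Validate the command before touching the argument buffer:
	 * magic, command number range and argument size are all checked
	 * up front.
	 */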
	if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_TSG_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_TSG_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

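	/*
	 * If SW initialization has not completed yet, run a busy/idle
	 * cycle to finalize it before dispatching the command.
	 */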
	if (!g->sw_ready) {
		err = gk20a_busy(g);
		if (err)
			return err;

		gk20a_idle(g);
	}

	switch (cmd) {
	case NVGPU_TSG_IOCTL_BIND_CHANNEL:
	{
		int ch_fd = *(int *)buf;

		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
		break;
	}

	case NVGPU_TSG_IOCTL_BIND_CHANNEL_EX:
	{
		err = gk20a_tsg_ioctl_bind_channel_ex(g, tsg,
			(struct nvgpu_tsg_bind_channel_ex_args *)buf);
		break;
	}

	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
	{
		int ch_fd = *(int *)buf;

		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
			    "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			break;
		}
		err = gk20a_tsg_unbind_channel_fd(tsg, ch_fd);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_ENABLE:
	{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
			    "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		g->ops.fifo.enable_tsg(tsg);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_DISABLE:
	{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
			    "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		g->ops.fifo.disable_tsg(tsg);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_PREEMPT:
	{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
			    "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		/* preempt TSG */
		err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
		gk20a_idle(g);
		break;
	}

	case NVGPU_IOCTL_TSG_EVENT_ID_CTRL:
	{
		err = gk20a_tsg_event_id_ctrl(g, tsg,
			(struct nvgpu_event_id_ctrl_args *)buf);
		break;
	}

	case NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
		err = gk20a_tsg_ioctl_set_runlist_interleave(g, tsg,
			(struct nvgpu_runlist_interleave_args *)buf);
		break;

	case NVGPU_IOCTL_TSG_SET_TIMESLICE:
	{
		err = gk20a_tsg_ioctl_set_timeslice(g, tsg,
			(struct nvgpu_timeslice_args *)buf);
		break;
	}
	case NVGPU_IOCTL_TSG_GET_TIMESLICE:
	{
		err = gk20a_tsg_ioctl_get_timeslice(g, tsg,
			(struct nvgpu_timeslice_args *)buf);
		break;
	}

	default:
		nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x",
			   cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg,
				   buf, _IOC_SIZE(cmd));

	return err;
}
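The event-id plumbing above pairs with a userspace wait loop along these
lines; a hedged sketch reusing the argument layout visible in this file
(device-node and header paths are again assumptions):

	/* Sketch: enable the BPT_INT event on a TSG fd and block until it fires. */
	#include <poll.h>
	#include <sys/ioctl.h>
	#include <linux/nvgpu.h>

	int wait_for_bpt_int(int tsg_fd)
	{
		struct nvgpu_event_id_ctrl_args args = {
			.cmd = NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE,
			.event_id = NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT,
		};
		struct pollfd pfd;

		/* gk20a_tsg_event_id_enable() hands back an anon-inode fd */
		if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_EVENT_ID_CTRL, &args) != 0)
			return -1;

		pfd.fd = args.event_fd;
		pfd.events = POLLPRI | POLLIN;
		/* gk20a_event_id_poll() clears event_posted when it reports,
		 * so each posting is observed once. */
		return poll(&pfd, 1, -1);
	}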