Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_tsg.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_tsg.c	492
1 file changed, 492 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
new file mode 100644
index 00000000..b17d7e74
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
@@ -0,0 +1,492 @@
/*
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <uapi/linux/nvgpu.h>
#include <linux/anon_inodes.h>

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>

#include "gk20a/gk20a.h"
#include "gk20a/tsg_gk20a.h"
#include "platform_gk20a.h"
#include "ioctl_tsg.h"
#include "ioctl_channel.h"
#include "os_linux.h"
#ifdef CONFIG_TEGRA_19x_GPU
#include "common/linux/ioctl_tsg_t19x.h"
#endif

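/*
 * Per-open private state for a TSG file descriptor: the GPU instance
 * and the TSG object this fd refers to.
 */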
struct tsg_private {
	struct gk20a *g;
	struct tsg_gk20a *tsg;
};

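/*
 * Resolve a channel fd to a channel, bind it to the TSG through the
 * per-chip fifo op, and drop the channel reference again.
 */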
static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
{
	struct channel_gk20a *ch;
	int err;

	ch = gk20a_get_channel_from_file(ch_fd);
	if (!ch)
		return -EINVAL;

	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);

	gk20a_channel_put(ch);
	return err;
}

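/*
 * Look up the event descriptor registered for @event_id on this TSG.
 * Returns 0 and sets *event_id_data on success, or -1 if no such event
 * has been enabled.
 */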
static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
				unsigned int event_id,
				struct gk20a_event_id_data **event_id_data)
{
	struct gk20a_event_id_data *local_event_id_data;
	bool event_found = false;

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_for_each_entry(local_event_id_data, &tsg->event_id_list,
			gk20a_event_id_data, event_id_node) {
		if (local_event_id_data->event_id == event_id) {
			event_found = true;
			break;
		}
	}
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	if (event_found) {
		*event_id_data = local_event_id_data;
		return 0;
	} else {
		return -1;
	}
}

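/*
 * Mark an event as posted on the TSG and wake up anyone waiting on the
 * corresponding event fd. Event ids that do not map to a valid ioctl
 * channel event id, or that have not been enabled, are silently ignored.
 */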
void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
				       int __event_id)
{
	struct gk20a_event_id_data *event_id_data;
	u32 event_id;
	int err = 0;

	event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
	if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
		return;

	err = gk20a_tsg_get_event_data_from_id(tsg, event_id,
						&event_id_data);
	if (err)
		return;

	nvgpu_mutex_acquire(&event_id_data->lock);

	gk20a_dbg_info(
		"posting event for event_id=%d on tsg=%d\n",
		event_id, tsg->tsgid);
	event_id_data->event_posted = true;

	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);

	nvgpu_mutex_release(&event_id_data->lock);
}

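/*
 * Allocate an anonymous-inode fd that user space can poll for @event_id
 * on this TSG. Note that file->private_data must be valid before the fd
 * is installed, since user space may start using the fd as soon as
 * fd_install() returns.
 */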
static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
					 int event_id,
					 int *fd)
{
	int err = 0;
	int local_fd;
	struct file *file;
	char name[64];
	struct gk20a_event_id_data *event_id_data;
	struct gk20a *g;

	g = gk20a_get(tsg->g);
	if (!g)
		return -ENODEV;

	err = gk20a_tsg_get_event_data_from_id(tsg,
				event_id, &event_id_data);
	if (err == 0) {
		/* We already have the event enabled */
		err = -EINVAL;
		goto free_ref;
	}

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		goto free_ref;
	local_fd = err;

	snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
		 event_id, local_fd);

	file = anon_inode_getfile(name, &gk20a_event_id_ops,
				  NULL, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}

	event_id_data = nvgpu_kzalloc(tsg->g, sizeof(*event_id_data));
	if (!event_id_data) {
		err = -ENOMEM;
		goto clean_up_file;
	}
	event_id_data->g = g;
	event_id_data->id = tsg->tsgid;
	event_id_data->is_tsg = true;
	event_id_data->event_id = event_id;

	nvgpu_cond_init(&event_id_data->event_id_wq);
	err = nvgpu_mutex_init(&event_id_data->lock);
	if (err)
		goto clean_up_free;

	nvgpu_init_list_node(&event_id_data->event_id_node);

	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list);
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	/* publish private_data before the fd becomes visible to user space */
	file->private_data = event_id_data;
	fd_install(local_fd, file);

	*fd = local_fd;

	return 0;

clean_up_free:
	nvgpu_kfree(g, event_id_data);
clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(local_fd);
free_ref:
	gk20a_put(g);
	return err;
}

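/*
 * Handle NVGPU_IOCTL_TSG_EVENT_ID_CTRL: currently only the ENABLE
 * sub-command is supported, which hands a new event fd back to user
 * space through args->event_fd.
 */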
static int gk20a_tsg_event_id_ctrl(struct gk20a *g, struct tsg_gk20a *tsg,
		struct nvgpu_event_id_ctrl_args *args)
{
	int err = 0;
	int fd = -1;

	if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
		return -EINVAL;

	switch (args->cmd) {
	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
		err = gk20a_tsg_event_id_enable(tsg, args->event_id, &fd);
		if (!err)
			args->event_fd = fd;
		break;

	default:
		nvgpu_err(tsg->g, "unrecognized tsg event id cmd: 0x%x",
			args->cmd);
		err = -EINVAL;
		break;
	}

	return err;
}

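/*
 * Common open path: take a GPU reference, allocate the per-fd private
 * data and a fresh TSG, and register the TSG with the scheduler control
 * interface.
 */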
int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
{
	struct tsg_private *priv;
	struct tsg_gk20a *tsg;
	struct device *dev;
	int err;

	g = gk20a_get(g);
	if (!g)
		return -ENODEV;

	dev = dev_from_gk20a(g);

	gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev));

	priv = nvgpu_kmalloc(g, sizeof(*priv));
	if (!priv) {
		err = -ENOMEM;
		goto free_ref;
	}

	tsg = gk20a_tsg_open(g);
	if (!tsg) {
		nvgpu_kfree(g, priv);
		err = -ENOMEM;
		goto free_ref;
	}

	priv->g = g;
	priv->tsg = tsg;
	filp->private_data = priv;

	gk20a_sched_ctrl_tsg_added(g, tsg);

	return 0;

free_ref:
	gk20a_put(g);
	return err;
}

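/*
 * open() entry point for the TSG device node. The GPU is powered on
 * around the open so that TSG setup can touch hardware, then idled
 * again; the TSG itself holds its own reference on the GPU.
 */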
int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l;
	struct gk20a *g;
	int ret;

	gk20a_dbg_fn("");

	l = container_of(inode->i_cdev,
			 struct nvgpu_os_linux, tsg.cdev);
	g = &l->g;

	ret = gk20a_busy(g);
	if (ret) {
		nvgpu_err(g, "failed to power on, %d", ret);
		return ret;
	}

	ret = nvgpu_ioctl_tsg_open(&l->g, filp);

	gk20a_idle(g);
	gk20a_dbg_fn("done");
	return ret;
}

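/*
 * Refcount release callback: unregister the TSG from scheduler control
 * before handing final teardown to gk20a_tsg_release().
 */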
void nvgpu_ioctl_tsg_release(struct nvgpu_ref *ref)
{
	struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount);
	struct gk20a *g = tsg->g;

	gk20a_sched_ctrl_tsg_removed(g, tsg);

	gk20a_tsg_release(ref);
}

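/*
 * release() entry point: drop the fd's TSG reference and free the
 * per-fd private data.
 */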
int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp)
{
	struct tsg_private *priv = filp->private_data;
	struct tsg_gk20a *tsg = priv->tsg;

	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
	nvgpu_kfree(tsg->g, priv);
	return 0;
}

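/*
 * Set the runlist interleave level for the TSG. Fails with -EPERM while
 * an external scheduler has locked scheduling parameters through the
 * sched control fd.
 */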
static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_runlist_interleave_args *arg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	u32 level = arg->level;
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto done;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto done;
	}

	level = nvgpu_get_common_runlist_level(level);
	err = gk20a_tsg_set_runlist_interleave(tsg, level);

	gk20a_idle(g);
done:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}

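/*
 * Set the TSG timeslice in microseconds, subject to the same scheduler
 * control lock as the interleave setting.
 */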
static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto done;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto done;
	}
	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
	gk20a_idle(g);
done:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}

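/*
 * Read back the current TSG timeslice in microseconds. This is a pure
 * software query, so no power or control locking is needed.
 */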
static int gk20a_tsg_ioctl_get_timeslice(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
{
	arg->timeslice_us = gk20a_tsg_get_timeslice(tsg);
	return 0;
}

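/*
 * Main ioctl dispatcher for TSG fds. The command is validated against
 * the TSG ioctl magic, number range and argument size, copied into a
 * stack buffer for _IOC_WRITE commands, and copied back out for
 * _IOC_READ commands.
 *
 * A typical user-space bind, sketched with hypothetical fds:
 *
 *	int ch_fd = ...;	// fd of an open nvgpu channel
 *	ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL, &ch_fd);
 */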
long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct tsg_private *priv = filp->private_data;
	struct tsg_gk20a *tsg = priv->tsg;
	struct gk20a *g = tsg->g;
	u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	gk20a_dbg_fn("start %d", _IOC_NR(cmd));

	if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_TSG_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_TSG_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->gr.sw_ready) {
		err = gk20a_busy(g);
		if (err)
			return err;

		gk20a_idle(g);
	}

	switch (cmd) {
	case NVGPU_TSG_IOCTL_BIND_CHANNEL:
		{
		int ch_fd = *(int *)buf;
		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
		break;
		}

	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
		/* We do not support explicitly unbinding a channel from a
		 * TSG. A channel is unbound from its TSG when the channel
		 * is closed.
		 */
		break;

	case NVGPU_IOCTL_TSG_ENABLE:
		{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		g->ops.fifo.enable_tsg(tsg);
		gk20a_idle(g);
		break;
		}

	case NVGPU_IOCTL_TSG_DISABLE:
		{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		g->ops.fifo.disable_tsg(tsg);
		gk20a_idle(g);
		break;
		}

	case NVGPU_IOCTL_TSG_PREEMPT:
		{
		err = gk20a_busy(g);
		if (err) {
			nvgpu_err(g,
			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		/* preempt TSG */
		err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
		gk20a_idle(g);
		break;
		}

	case NVGPU_IOCTL_TSG_EVENT_ID_CTRL:
		{
		err = gk20a_tsg_event_id_ctrl(g, tsg,
			(struct nvgpu_event_id_ctrl_args *)buf);
		break;
		}

	case NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
		err = gk20a_tsg_ioctl_set_runlist_interleave(g, tsg,
			(struct nvgpu_runlist_interleave_args *)buf);
		break;

	case NVGPU_IOCTL_TSG_SET_TIMESLICE:
		{
		err = gk20a_tsg_ioctl_set_timeslice(g, tsg,
			(struct nvgpu_timeslice_args *)buf);
		break;
		}
	case NVGPU_IOCTL_TSG_GET_TIMESLICE:
		{
		err = gk20a_tsg_ioctl_get_timeslice(g, tsg,
			(struct nvgpu_timeslice_args *)buf);
		break;
		}

	default:
#ifdef CONFIG_TEGRA_19x_GPU
		err = t19x_tsg_ioctl_handler(g, tsg, cmd, buf);
#else
		nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x",
			cmd);
		err = -ENOTTY;
#endif
		break;
	}

	/* copy_to_user() returns the number of uncopied bytes, not an
	 * error code, so map any failure to -EFAULT explicitly.
	 */
	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
		if (copy_to_user((void __user *)arg,
				 buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}