summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_tsg.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/ioctl_tsg.c470
1 files changed, 470 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
new file mode 100644
index 00000000..75231c71
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
@@ -0,0 +1,470 @@
1/*
2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/fs.h>
18#include <linux/file.h>
19#include <linux/cdev.h>
20#include <linux/uaccess.h>
21#include <linux/nvhost.h>
22#include <uapi/linux/nvgpu.h>
23#include <linux/anon_inodes.h>
24
25#include <nvgpu/kmem.h>
26
27#include "gk20a/gk20a.h"
28#include "gk20a/tsg_gk20a.h"
29#include "ioctl_channel.h"
30
/*
 * Per-open-file state for a TSG device fd: ties the file to its
 * gk20a device instance and to the TSG it represents.
 */
struct tsg_private {
	struct gk20a *g;	/* device ref taken via gk20a_get() in open */
	struct tsg_gk20a *tsg;	/* TSG created by gk20a_tsg_open() */
};
35
36static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
37{
38 struct channel_gk20a *ch;
39 int err;
40
41 ch = gk20a_get_channel_from_file(ch_fd);
42 if (!ch)
43 return -EINVAL;
44
45 err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
46 return err;
47}
48
49static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
50 unsigned int event_id,
51 struct gk20a_event_id_data **event_id_data)
52{
53 struct gk20a_event_id_data *local_event_id_data;
54 bool event_found = false;
55
56 nvgpu_mutex_acquire(&tsg->event_id_list_lock);
57 list_for_each_entry(local_event_id_data, &tsg->event_id_list,
58 event_id_node) {
59 if (local_event_id_data->event_id == event_id) {
60 event_found = true;
61 break;
62 }
63 }
64 nvgpu_mutex_release(&tsg->event_id_list_lock);
65
66 if (event_found) {
67 *event_id_data = local_event_id_data;
68 return 0;
69 } else {
70 return -1;
71 }
72}
73
/*
 * Post @event_id on @tsg: mark the matching event descriptor as
 * posted and wake all waiters on its waitqueue. Quietly returns if
 * no descriptor for @event_id has been enabled on this TSG.
 */
void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
				       int event_id)
{
	struct gk20a_event_id_data *event_id_data;
	int err = 0;

	err = gk20a_tsg_get_event_data_from_id(tsg, event_id,
						&event_id_data);
	if (err)
		return;

	/* Flag and wakeup are done under the descriptor's own lock so
	 * readers see a consistent event_posted value. */
	nvgpu_mutex_acquire(&event_id_data->lock);

	gk20a_dbg_info(
		"posting event for event_id=%d on tsg=%d\n",
		event_id, tsg->tsgid);
	event_id_data->event_posted = true;

	wake_up_interruptible_all(&event_id_data->event_id_wq);

	nvgpu_mutex_release(&event_id_data->lock);
}
96
97static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
98 int event_id,
99 int *fd)
100{
101 int err = 0;
102 int local_fd;
103 struct file *file;
104 char name[64];
105 struct gk20a_event_id_data *event_id_data;
106 struct gk20a *g;
107
108 g = gk20a_get(tsg->g);
109 if (!g)
110 return -ENODEV;
111
112 err = gk20a_tsg_get_event_data_from_id(tsg,
113 event_id, &event_id_data);
114 if (err == 0) {
115 /* We already have event enabled */
116 err = -EINVAL;
117 goto free_ref;
118 }
119
120 err = get_unused_fd_flags(O_RDWR);
121 if (err < 0)
122 goto free_ref;
123 local_fd = err;
124
125 snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
126 event_id, local_fd);
127
128 file = anon_inode_getfile(name, &gk20a_event_id_ops,
129 NULL, O_RDWR);
130 if (IS_ERR(file)) {
131 err = PTR_ERR(file);
132 goto clean_up;
133 }
134
135 event_id_data = nvgpu_kzalloc(tsg->g, sizeof(*event_id_data));
136 if (!event_id_data) {
137 err = -ENOMEM;
138 goto clean_up_file;
139 }
140 event_id_data->g = g;
141 event_id_data->id = tsg->tsgid;
142 event_id_data->is_tsg = true;
143 event_id_data->event_id = event_id;
144
145 init_waitqueue_head(&event_id_data->event_id_wq);
146 err = nvgpu_mutex_init(&event_id_data->lock);
147 if (err)
148 goto clean_up_free;
149
150 INIT_LIST_HEAD(&event_id_data->event_id_node);
151
152 nvgpu_mutex_acquire(&tsg->event_id_list_lock);
153 list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list);
154 nvgpu_mutex_release(&tsg->event_id_list_lock);
155
156 fd_install(local_fd, file);
157 file->private_data = event_id_data;
158
159 *fd = local_fd;
160
161 return 0;
162
163clean_up_free:
164 kfree(event_id_data);
165clean_up_file:
166 fput(file);
167clean_up:
168 put_unused_fd(local_fd);
169free_ref:
170 gk20a_put(g);
171 return err;
172}
173
174static int gk20a_tsg_event_id_ctrl(struct gk20a *g, struct tsg_gk20a *tsg,
175 struct nvgpu_event_id_ctrl_args *args)
176{
177 int err = 0;
178 int fd = -1;
179
180 if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
181 return -EINVAL;
182
183 switch (args->cmd) {
184 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
185 err = gk20a_tsg_event_id_enable(tsg, args->event_id, &fd);
186 if (!err)
187 args->event_fd = fd;
188 break;
189
190 default:
191 gk20a_err(dev_from_gk20a(tsg->g),
192 "unrecognized tsg event id cmd: 0x%x",
193 args->cmd);
194 err = -EINVAL;
195 break;
196 }
197
198 return err;
199}
200
201int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
202{
203 struct tsg_private *priv;
204 struct tsg_gk20a *tsg;
205 struct device *dev;
206 int err;
207
208 g = gk20a_get(g);
209 if (!g)
210 return -ENODEV;
211
212 dev = dev_from_gk20a(g);
213
214 gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev));
215
216 priv = nvgpu_kmalloc(g, sizeof(*priv));
217 if (!priv) {
218 err = -ENOMEM;
219 goto free_ref;
220 }
221
222 tsg = gk20a_tsg_open(g);
223 if (!tsg) {
224 nvgpu_kfree(g, priv);
225 err = -ENOMEM;
226 goto free_ref;
227 }
228
229 priv->g = g;
230 priv->tsg = tsg;
231 filp->private_data = priv;
232
233 return 0;
234
235free_ref:
236 gk20a_put(g);
237 return err;
238}
239
240int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
241{
242 struct gk20a *g;
243 int ret;
244
245 g = container_of(inode->i_cdev,
246 struct gk20a, tsg.cdev);
247 gk20a_dbg_fn("");
248 ret = nvgpu_ioctl_tsg_open(g, filp);
249 gk20a_dbg_fn("done");
250 return ret;
251}
252
253int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp)
254{
255 struct tsg_private *priv = filp->private_data;
256 struct tsg_gk20a *tsg = priv->tsg;
257
258 kref_put(&tsg->refcount, gk20a_tsg_release);
259 nvgpu_kfree(tsg->g, priv);
260 return 0;
261}
262
263static int gk20a_tsg_ioctl_set_priority(struct gk20a *g,
264 struct tsg_gk20a *tsg, struct nvgpu_set_priority_args *arg)
265{
266 struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
267 int err;
268
269 nvgpu_mutex_acquire(&sched->control_lock);
270 if (sched->control_locked) {
271 err = -EPERM;
272 goto done;
273 }
274
275 err = gk20a_busy(g);
276 if (err) {
277 gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
278 goto done;
279 }
280
281 err = gk20a_tsg_set_priority(g, tsg, arg->priority);
282
283 gk20a_idle(g);
284done:
285 nvgpu_mutex_release(&sched->control_lock);
286 return err;
287}
288
289static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
290 struct tsg_gk20a *tsg, struct nvgpu_runlist_interleave_args *arg)
291{
292 struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
293 int err;
294
295 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
296
297 nvgpu_mutex_acquire(&sched->control_lock);
298 if (sched->control_locked) {
299 err = -EPERM;
300 goto done;
301 }
302 err = gk20a_busy(g);
303 if (err) {
304 gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
305 goto done;
306 }
307
308 err = gk20a_tsg_set_runlist_interleave(tsg, arg->level);
309
310 gk20a_idle(g);
311done:
312 nvgpu_mutex_release(&sched->control_lock);
313 return err;
314}
315
316static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
317 struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
318{
319 struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
320 int err;
321
322 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
323
324 nvgpu_mutex_acquire(&sched->control_lock);
325 if (sched->control_locked) {
326 err = -EPERM;
327 goto done;
328 }
329 err = gk20a_busy(g);
330 if (err) {
331 gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
332 goto done;
333 }
334 err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
335 gk20a_idle(g);
336done:
337 nvgpu_mutex_release(&sched->control_lock);
338 return err;
339}
340
341
342long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
343 unsigned long arg)
344{
345 struct tsg_private *priv = filp->private_data;
346 struct tsg_gk20a *tsg = priv->tsg;
347 struct gk20a *g = tsg->g;
348 u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
349 int err = 0;
350
351 gk20a_dbg(gpu_dbg_fn, "");
352
353 if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
354 (_IOC_NR(cmd) == 0) ||
355 (_IOC_NR(cmd) > NVGPU_TSG_IOCTL_LAST) ||
356 (_IOC_SIZE(cmd) > NVGPU_TSG_IOCTL_MAX_ARG_SIZE))
357 return -EINVAL;
358
359 memset(buf, 0, sizeof(buf));
360 if (_IOC_DIR(cmd) & _IOC_WRITE) {
361 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
362 return -EFAULT;
363 }
364
365 if (!g->gr.sw_ready) {
366 err = gk20a_busy(g);
367 if (err)
368 return err;
369
370 gk20a_idle(g);
371 }
372
373 switch (cmd) {
374 case NVGPU_TSG_IOCTL_BIND_CHANNEL:
375 {
376 int ch_fd = *(int *)buf;
377 if (ch_fd < 0) {
378 err = -EINVAL;
379 break;
380 }
381 err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
382 break;
383 }
384
385 case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
386 /* We do not support explicitly unbinding channel from TSG.
387 * Channel will be unbounded from TSG when it is closed.
388 */
389 break;
390
391 case NVGPU_IOCTL_TSG_ENABLE:
392 {
393 err = gk20a_busy(g);
394 if (err) {
395 gk20a_err(g->dev,
396 "failed to host gk20a for ioctl cmd: 0x%x", cmd);
397 return err;
398 }
399 gk20a_enable_tsg(tsg);
400 gk20a_idle(g);
401 break;
402 }
403
404 case NVGPU_IOCTL_TSG_DISABLE:
405 {
406 err = gk20a_busy(g);
407 if (err) {
408 gk20a_err(g->dev,
409 "failed to host gk20a for ioctl cmd: 0x%x", cmd);
410 return err;
411 }
412 gk20a_disable_tsg(tsg);
413 gk20a_idle(g);
414 break;
415 }
416
417 case NVGPU_IOCTL_TSG_PREEMPT:
418 {
419 err = gk20a_busy(g);
420 if (err) {
421 gk20a_err(g->dev,
422 "failed to host gk20a for ioctl cmd: 0x%x", cmd);
423 return err;
424 }
425 /* preempt TSG */
426 err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
427 gk20a_idle(g);
428 break;
429 }
430
431 case NVGPU_IOCTL_TSG_SET_PRIORITY:
432 {
433 err = gk20a_tsg_ioctl_set_priority(g, tsg,
434 (struct nvgpu_set_priority_args *)buf);
435 break;
436 }
437
438 case NVGPU_IOCTL_TSG_EVENT_ID_CTRL:
439 {
440 err = gk20a_tsg_event_id_ctrl(g, tsg,
441 (struct nvgpu_event_id_ctrl_args *)buf);
442 break;
443 }
444
445 case NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
446 err = gk20a_tsg_ioctl_set_runlist_interleave(g, tsg,
447 (struct nvgpu_runlist_interleave_args *)buf);
448 break;
449
450 case NVGPU_IOCTL_TSG_SET_TIMESLICE:
451 {
452 err = gk20a_tsg_ioctl_set_timeslice(g, tsg,
453 (struct nvgpu_timeslice_args *)buf);
454 break;
455 }
456
457 default:
458 gk20a_err(dev_from_gk20a(g),
459 "unrecognized tsg gpu ioctl cmd: 0x%x",
460 cmd);
461 err = -ENOTTY;
462 break;
463 }
464
465 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
466 err = copy_to_user((void __user *)arg,
467 buf, _IOC_SIZE(cmd));
468
469 return err;
470}