path: root/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-03-23 17:19:01 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-04 13:24:49 -0400
commit	06fe28567d45c8fb1c2a04f0f007fa5d750b849d (patch)
tree	8da57e0a1ce577536e50a9e69ccc63641e3996db /drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
parent	a07e10f494c158ae31d6187e9be3db409528a507 (diff)
gpu: nvgpu: Move TSG IOCTL code to Linux module
Move TSG IOCTL-specific code to the Linux module. This clears most
Linux dependencies from tsg_gk20a.c. Also move the remaining
file_operations declarations from channel_gk20a.h to ioctl_channel.h.

JIRA NVGPU-32

Change-Id: Idcc2a525ebe12b30db46c3893a2735509c41ff39
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1330805
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.c	459
1 file changed, 12 insertions, 447 deletions
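The Linux-specific pieces deleted below (struct tsg_private, the cdev open hook, the release hook, and the ioctl dispatcher) move into the Linux ioctl module, while gk20a_tsg_open() becomes a common-code helper that returns the TSG itself. As a rough sketch of how the relocated Linux-side open presumably wraps the new common entry point, reconstructed from the code removed in this diff (the destination file and any name changes it makes are assumptions, since the new module is not shown in this hunk set):

/* Sketch only: Linux-side open wrapping the new common gk20a_tsg_open();
 * reconstructed from the code removed below, not taken from the new module. */
struct tsg_private {
	struct gk20a *g;
	struct tsg_gk20a *tsg;
};

int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
{
	struct gk20a *g;
	struct tsg_private *priv;
	struct tsg_gk20a *tsg;

	/* The device reference and per-file state now live in the Linux layer. */
	g = gk20a_get(container_of(inode->i_cdev, struct gk20a, tsg.cdev));
	if (!g)
		return -ENODEV;

	priv = nvgpu_kmalloc(g, sizeof(*priv));
	if (!priv) {
		gk20a_put(g);
		return -ENOMEM;
	}

	tsg = gk20a_tsg_open(g);	/* common code; returns NULL on failure */
	if (!tsg) {
		nvgpu_kfree(g, priv);
		gk20a_put(g);
		return -ENOMEM;
	}

	priv->g = g;
	priv->tsg = tsg;
	filp->private_data = priv;
	return 0;
}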
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index cc3d94e4..70e40099 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -14,24 +14,10 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-#include <linux/nvhost.h>
-#include <uapi/linux/nvgpu.h>
-#include <linux/anon_inodes.h>
-
 #include <nvgpu/kmem.h>
 
 #include "gk20a.h"
-
-#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
-
-struct tsg_private {
-	struct gk20a *g;
-	struct tsg_gk20a *tsg;
-};
+#include "tsg_gk20a.h"
 
 bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
 {
@@ -81,19 +67,6 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 	return false;
 }
 
-static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
-{
-	struct channel_gk20a *ch;
-	int err;
-
-	ch = gk20a_get_channel_from_file(ch_fd);
-	if (!ch)
-		return -EINVAL;
-
-	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
-	return err;
-}
-
 /*
  * API to mark channel as part of TSG
  *
@@ -181,7 +154,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 	return 0;
 }
 
-static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
+int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 		u32 priority)
 {
 	u32 timeslice_us;
@@ -204,158 +177,6 @@ static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 	return gk20a_tsg_set_timeslice(tsg, timeslice_us);
 }
 
-static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
-				unsigned int event_id,
-				struct gk20a_event_id_data **event_id_data)
-{
-	struct gk20a_event_id_data *local_event_id_data;
-	bool event_found = false;
-
-	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
-	list_for_each_entry(local_event_id_data, &tsg->event_id_list,
-			event_id_node) {
-		if (local_event_id_data->event_id == event_id) {
-			event_found = true;
-			break;
-		}
-	}
-	nvgpu_mutex_release(&tsg->event_id_list_lock);
-
-	if (event_found) {
-		*event_id_data = local_event_id_data;
-		return 0;
-	} else {
-		return -1;
-	}
-}
-
-void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
-		int event_id)
-{
-	struct gk20a_event_id_data *event_id_data;
-	int err = 0;
-
-	err = gk20a_tsg_get_event_data_from_id(tsg, event_id,
-			&event_id_data);
-	if (err)
-		return;
-
-	nvgpu_mutex_acquire(&event_id_data->lock);
-
-	gk20a_dbg_info(
-		"posting event for event_id=%d on tsg=%d\n",
-		event_id, tsg->tsgid);
-	event_id_data->event_posted = true;
-
-	wake_up_interruptible_all(&event_id_data->event_id_wq);
-
-	nvgpu_mutex_release(&event_id_data->lock);
-}
-
-static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
-		int event_id,
-		int *fd)
-{
-	int err = 0;
-	int local_fd;
-	struct file *file;
-	char name[64];
-	struct gk20a_event_id_data *event_id_data;
-	struct gk20a *g;
-
-	g = gk20a_get(tsg->g);
-	if (!g)
-		return -ENODEV;
-
-	err = gk20a_tsg_get_event_data_from_id(tsg,
-			event_id, &event_id_data);
-	if (err == 0) {
-		/* We already have event enabled */
-		err = -EINVAL;
-		goto free_ref;
-	}
-
-	err = get_unused_fd_flags(O_RDWR);
-	if (err < 0)
-		goto free_ref;
-	local_fd = err;
-
-	snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
-		 event_id, local_fd);
-
-	file = anon_inode_getfile(name, &gk20a_event_id_ops,
-			NULL, O_RDWR);
-	if (IS_ERR(file)) {
-		err = PTR_ERR(file);
-		goto clean_up;
-	}
-
-	event_id_data = nvgpu_kzalloc(tsg->g, sizeof(*event_id_data));
-	if (!event_id_data) {
-		err = -ENOMEM;
-		goto clean_up_file;
-	}
-	event_id_data->g = g;
-	event_id_data->id = tsg->tsgid;
-	event_id_data->is_tsg = true;
-	event_id_data->event_id = event_id;
-
-	init_waitqueue_head(&event_id_data->event_id_wq);
-	err = nvgpu_mutex_init(&event_id_data->lock);
-	if (err)
-		goto clean_up_free;
-
-	INIT_LIST_HEAD(&event_id_data->event_id_node);
-
-	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
-	list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list);
-	nvgpu_mutex_release(&tsg->event_id_list_lock);
-
-	fd_install(local_fd, file);
-	file->private_data = event_id_data;
-
-	*fd = local_fd;
-
-	return 0;
-
-clean_up_free:
-	kfree(event_id_data);
-clean_up_file:
-	fput(file);
-clean_up:
-	put_unused_fd(local_fd);
-free_ref:
-	gk20a_put(g);
-	return err;
-}
-
-static int gk20a_tsg_event_id_ctrl(struct gk20a *g, struct tsg_gk20a *tsg,
-		struct nvgpu_event_id_ctrl_args *args)
-{
-	int err = 0;
-	int fd = -1;
-
-	if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-		return -EINVAL;
-
-	switch (args->cmd) {
-	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
-		err = gk20a_tsg_event_id_enable(tsg, args->event_id, &fd);
-		if (!err)
-			args->event_fd = fd;
-		break;
-
-	default:
-		gk20a_err(dev_from_gk20a(tsg->g),
-			"unrecognized tsg event id cmd: 0x%x",
-			args->cmd);
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
 int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
 {
 	struct gk20a *g = tsg->g;
@@ -396,7 +217,7 @@ static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
 	nvgpu_mutex_release(&f->tsg_inuse_mutex);
 }
 
-static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f)
+static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct fifo_gk20a *f)
 {
 	struct tsg_gk20a *tsg = NULL;
 	unsigned int tsgid;
@@ -414,33 +235,14 @@ static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f)
 	return tsg;
 }
 
-int gk20a_tsg_open(struct gk20a *g, struct file *filp)
+struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g)
 {
-	struct tsg_private *priv;
 	struct tsg_gk20a *tsg;
-	struct device *dev;
 	int err;
 
-	g = gk20a_get(g);
-	if (!g)
-		return -ENODEV;
-
-	dev = dev_from_gk20a(g);
-
-	gk20a_dbg(gpu_dbg_fn, "tsg: %s", g->name);
-
-	priv = nvgpu_kmalloc(g, sizeof(*priv));
-	if (!priv) {
-		err = -ENOMEM;
-		goto free_ref;
-	}
-
-	tsg = acquire_unused_tsg(&g->fifo);
-	if (!tsg) {
-		nvgpu_kfree(g, priv);
-		err = -ENOMEM;
-		goto free_ref;
-	}
+	tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo);
+	if (!tsg)
+		return NULL;
 
 	tsg->g = g;
 	tsg->num_active_channels = 0;
@@ -455,15 +257,12 @@ int gk20a_tsg_open(struct gk20a *g, struct file *filp)
 	tsg->runlist_id = ~0;
 	tsg->tgid = current->tgid;
 
-	priv->g = g;
-	priv->tsg = tsg;
-	filp->private_data = priv;
-
 	if (g->ops.fifo.tsg_open) {
 		err = g->ops.fifo.tsg_open(tsg);
 		if (err) {
-			gk20a_err(dev, "tsg %d fifo open failed %d",
-				tsg->tsgid, err);
+			gk20a_err(dev_from_gk20a(g),
+				"tsg %d fifo open failed %d",
+				tsg->tsgid, err);
 			goto clean_up;
 		}
 	}
@@ -472,26 +271,11 @@ int gk20a_tsg_open(struct gk20a *g, struct file *filp)
 
 	gk20a_sched_ctrl_tsg_added(g, tsg);
 
-	return 0;
+	return tsg;
 
 clean_up:
 	kref_put(&tsg->refcount, gk20a_tsg_release);
-free_ref:
-	gk20a_put(g);
-	return err;
-}
-
-int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
-{
-	struct gk20a *g;
-	int ret;
-
-	g = container_of(inode->i_cdev,
-			struct gk20a, tsg.cdev);
-	gk20a_dbg_fn("");
-	ret = gk20a_tsg_open(g, filp);
-	gk20a_dbg_fn("done");
-	return ret;
+	return NULL;
 }
 
 void gk20a_tsg_release(struct kref *ref)
@@ -528,225 +312,6 @@ void gk20a_tsg_release(struct kref *ref)
 	gk20a_put(g);
 }
 
-int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
-{
-	struct tsg_private *priv = filp->private_data;
-	struct tsg_gk20a *tsg = priv->tsg;
-
-	kref_put(&tsg->refcount, gk20a_tsg_release);
-	nvgpu_kfree(tsg->g, priv);
-	return 0;
-}
-
-static int gk20a_tsg_ioctl_set_priority(struct gk20a *g,
-	struct tsg_gk20a *tsg, struct nvgpu_set_priority_args *arg)
-{
-	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
-	int err;
-
-	nvgpu_mutex_acquire(&sched->control_lock);
-	if (sched->control_locked) {
-		err = -EPERM;
-		goto done;
-	}
-
-	err = gk20a_busy(g);
-	if (err) {
-		gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
-		goto done;
-	}
-
-	err = gk20a_tsg_set_priority(g, tsg, arg->priority);
-
-	gk20a_idle(g);
-done:
-	nvgpu_mutex_release(&sched->control_lock);
-	return err;
-}
-
-static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
-	struct tsg_gk20a *tsg, struct nvgpu_runlist_interleave_args *arg)
-{
-	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
-	int err;
-
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
-
-	nvgpu_mutex_acquire(&sched->control_lock);
-	if (sched->control_locked) {
-		err = -EPERM;
-		goto done;
-	}
-	err = gk20a_busy(g);
-	if (err) {
-		gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
-		goto done;
-	}
-
-	err = gk20a_tsg_set_runlist_interleave(tsg, arg->level);
-
-	gk20a_idle(g);
-done:
-	nvgpu_mutex_release(&sched->control_lock);
-	return err;
-}
-
-static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
-	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
-{
-	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
-	int err;
-
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
-
-	nvgpu_mutex_acquire(&sched->control_lock);
-	if (sched->control_locked) {
-		err = -EPERM;
-		goto done;
-	}
-	err = gk20a_busy(g);
-	if (err) {
-		gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
-		goto done;
-	}
-	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
-	gk20a_idle(g);
-done:
-	nvgpu_mutex_release(&sched->control_lock);
-	return err;
-}
-
-
-long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
-	unsigned long arg)
-{
-	struct tsg_private *priv = filp->private_data;
-	struct tsg_gk20a *tsg = priv->tsg;
-	struct gk20a *g = tsg->g;
-	u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
-	int err = 0;
-
-	gk20a_dbg(gpu_dbg_fn, "");
-
-	if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
-	    (_IOC_NR(cmd) == 0) ||
-	    (_IOC_NR(cmd) > NVGPU_TSG_IOCTL_LAST) ||
-	    (_IOC_SIZE(cmd) > NVGPU_TSG_IOCTL_MAX_ARG_SIZE))
-		return -EINVAL;
-
-	memset(buf, 0, sizeof(buf));
-	if (_IOC_DIR(cmd) & _IOC_WRITE) {
-		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
-			return -EFAULT;
-	}
-
-	if (!g->gr.sw_ready) {
-		err = gk20a_busy(g);
-		if (err)
-			return err;
-
-		gk20a_idle(g);
-	}
-
-	switch (cmd) {
-	case NVGPU_TSG_IOCTL_BIND_CHANNEL:
-		{
-		int ch_fd = *(int *)buf;
-		if (ch_fd < 0) {
-			err = -EINVAL;
-			break;
-		}
-		err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
-		break;
-		}
-
-	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
-		/* We do not support explicitly unbinding channel from TSG.
-		 * Channel will be unbounded from TSG when it is closed.
-		 */
-		break;
-
-	case NVGPU_IOCTL_TSG_ENABLE:
-		{
-		err = gk20a_busy(g);
-		if (err) {
-			gk20a_err(g->dev,
-			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
-			return err;
-		}
-		gk20a_enable_tsg(tsg);
-		gk20a_idle(g);
-		break;
-		}
-
-	case NVGPU_IOCTL_TSG_DISABLE:
-		{
-		err = gk20a_busy(g);
-		if (err) {
-			gk20a_err(g->dev,
-			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
-			return err;
-		}
-		gk20a_disable_tsg(tsg);
-		gk20a_idle(g);
-		break;
-		}
-
-	case NVGPU_IOCTL_TSG_PREEMPT:
-		{
-		err = gk20a_busy(g);
-		if (err) {
-			gk20a_err(g->dev,
-			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
-			return err;
-		}
-		/* preempt TSG */
-		err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
-		gk20a_idle(g);
-		break;
-		}
-
-	case NVGPU_IOCTL_TSG_SET_PRIORITY:
-		{
-		err = gk20a_tsg_ioctl_set_priority(g, tsg,
-			(struct nvgpu_set_priority_args *)buf);
-		break;
-		}
-
-	case NVGPU_IOCTL_TSG_EVENT_ID_CTRL:
-		{
-		err = gk20a_tsg_event_id_ctrl(g, tsg,
-			(struct nvgpu_event_id_ctrl_args *)buf);
-		break;
-		}
-
-	case NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
-		err = gk20a_tsg_ioctl_set_runlist_interleave(g, tsg,
-			(struct nvgpu_runlist_interleave_args *)buf);
-		break;
-
-	case NVGPU_IOCTL_TSG_SET_TIMESLICE:
-		{
-		err = gk20a_tsg_ioctl_set_timeslice(g, tsg,
-			(struct nvgpu_timeslice_args *)buf);
-		break;
-		}
-
-	default:
-		gk20a_err(dev_from_gk20a(g),
-			"unrecognized tsg gpu ioctl cmd: 0x%x",
-			cmd);
-		err = -ENOTTY;
-		break;
-	}
-
-	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
-		err = copy_to_user((void __user *)arg,
-			buf, _IOC_SIZE(cmd));
-
-	return err;
-}
-
 void gk20a_init_tsg_ops(struct gpu_ops *gops)
 {
 	gops->fifo.tsg_bind_channel = gk20a_tsg_bind_channel;
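
For reference, the ioctl surface moved out of this file is driven from userspace roughly as follows; this is a hypothetical sketch, and the device node path, header location, and exact argument layout of NVGPU_TSG_IOCTL_BIND_CHANNEL are assumptions, not confirmed by this diff:

/* Hypothetical userspace caller of the TSG ioctls handled above. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* assumed install location of the uapi header */

int bind_channel_to_tsg(int tsg_fd, int ch_fd)
{
	/* The (now relocated) BIND_CHANNEL handler reads a single channel fd
	 * from the argument buffer, resolves it to a channel via
	 * gk20a_get_channel_from_file(), and calls
	 * g->ops.fifo.tsg_bind_channel(). */
	return ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL, &ch_fd);
}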