author		Thomas Fleury <tfleury@nvidia.com>	2016-05-10 12:05:45 -0400
committer	Vijayakumar Subbu <vsubbu@nvidia.com>	2016-07-19 02:12:51 -0400
commit		c8ffe0fdecfa110a9f9beb1b7e0298d3c3c64cc2 (patch)
tree		08054741c436ab6a783e710a9efa87fc7a0b71df	/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
parent		90988af81237d3b56c063b750c32efcbee9ab9cc (diff)
gpu: nvgpu: add sched control API
Added a dedicated device node to allow an app manager to control TSG
scheduling parameters:
- Get list of TSGs
- Get list of recent TSGs
- Get list of TSGs per pid
- Get TSG current scheduling parameters
- Set TSG timeslice
- Set TSG runlist interleave

Jira VFND-1586
Change-Id: I014c9d1534bce0eaea6c25ad114cf0cff317af79
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: http://git-master/r/1160384
(cherry picked from commit 75ca739517cc7f7f76714b5f6a1a57c39b8cb38e)
Reviewed-on: http://git-master/r/1167021
Reviewed-by: Richard Zhao <rizhao@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
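For context, the sketch below shows how an app manager might drive the TSG scheduling ioctls that this change routes through the new gk20a_tsg_ioctl_* wrappers. It is a minimal, hedged example and not part of this commit: the uapi header name, the /dev/nvhost-tsg-gpu node path, and the userspace visibility of NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH are assumptions; the ioctl names and argument structs (nvgpu_timeslice_args, nvgpu_runlist_interleave_args) are the ones used in the diff below. Opening the TSG node allocates a fresh TSG, and the ioctls then apply to it.

/*
 * Hedged userspace sketch (not from this commit). Assumptions:
 * header <linux/nvgpu.h>, node /dev/nvhost-tsg-gpu, and the
 * interleave-level constant being exported to userspace.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvgpu.h>	/* assumed uapi header for NVGPU_IOCTL_TSG_* */

int main(void)
{
	/* Opening the TSG device node allocates a new TSG for this fd. */
	int fd = open("/dev/nvhost-tsg-gpu", O_RDWR);	/* assumed node path */
	if (fd < 0) {
		perror("open tsg node");
		return 1;
	}

	/* Request a 2 ms timeslice; may fail with EPERM if a scheduler
	 * client holds the sched control lock. */
	struct nvgpu_timeslice_args ts = { .timeslice_us = 2000 };
	if (ioctl(fd, NVGPU_IOCTL_TSG_SET_TIMESLICE, &ts) < 0)
		perror("NVGPU_IOCTL_TSG_SET_TIMESLICE");

	/* Raise the runlist interleave level for this TSG. */
	struct nvgpu_runlist_interleave_args il = {
		.level = NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH,	/* assumed in uapi */
	};
	if (ioctl(fd, NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE, &il) < 0)
		perror("NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE");

	close(fd);
	return 0;
}

Note the behavioral change visible in the diff: when a sched control client has locked scheduling (sched->control_locked), these ioctls now return -EPERM rather than modifying the TSG parameters.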
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.c	115
1 file changed, 91 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 0fa93da9..af8f0f7b 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -338,7 +338,7 @@ static int gk20a_tsg_event_id_ctrl(struct gk20a *g, struct tsg_gk20a *tsg,
 	return err;
 }
 
-static int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
+int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
 {
 	struct gk20a *g = tsg->g;
 	int ret;
@@ -349,6 +349,8 @@ static int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
 	case NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH:
 		ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
 				true, 0, level);
+		if (!ret)
+			tsg->interleave_level = level;
 		break;
 	default:
 		ret = -EINVAL;
@@ -358,7 +360,7 @@ static int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
 	return ret ? ret : g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
 }
 
-static int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
+int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 {
 	struct gk20a *g = tsg->g;
 
@@ -369,6 +371,8 @@ static int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 	gk20a_channel_get_timescale_from_timeslice(g, timeslice,
 		&tsg->timeslice_timeout, &tsg->timeslice_scale);
 
+	tsg->timeslice_us = timeslice;
+
 	return g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
 }
 
@@ -421,11 +425,14 @@ int gk20a_tsg_open(struct gk20a *g, struct file *filp)
 	tsg->timeslice_timeout = 0;
 	tsg->timeslice_scale = 0;
 	tsg->runlist_id = ~0;
+	tsg->tgid = current->tgid;
 
 	filp->private_data = tsg;
 
 	gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);
 
+	gk20a_sched_ctrl_tsg_added(g, tsg);
+
 	return 0;
 }
 
@@ -456,6 +463,7 @@ static void gk20a_tsg_release(struct kref *ref)
 		tsg->vm = NULL;
 	}
 
+	gk20a_sched_ctrl_tsg_removed(g, tsg);
 	release_used_tsg(&g->fifo, tsg);
 
 	tsg->runlist_id = ~0;
@@ -470,6 +478,81 @@ int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static int gk20a_tsg_ioctl_set_priority(struct gk20a *g,
+	struct tsg_gk20a *tsg, struct nvgpu_set_priority_args *arg)
+{
+	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
+	int err;
+
+	mutex_lock(&sched->control_lock);
+	if (sched->control_locked) {
+		err = -EPERM;
+		goto done;
+	}
+
+	err = gk20a_busy(g->dev);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
+		goto done;
+	}
+
+	err = gk20a_tsg_set_priority(g, tsg, arg->priority);
+
+	gk20a_idle(g->dev);
+done:
+	mutex_unlock(&sched->control_lock);
+	return err;
+}
+
+static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
+	struct tsg_gk20a *tsg, struct nvgpu_runlist_interleave_args *arg)
+{
+	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
+	int err;
+
+	mutex_lock(&sched->control_lock);
+	if (sched->control_locked) {
+		err = -EPERM;
+		goto done;
+	}
+	err = gk20a_busy(g->dev);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
+		goto done;
+	}
+
+	err = gk20a_tsg_set_runlist_interleave(tsg, arg->level);
+
+	gk20a_idle(g->dev);
+done:
+	mutex_unlock(&sched->control_lock);
+	return err;
+}
+
+static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
+	struct tsg_gk20a *tsg, struct nvgpu_timeslice_args *arg)
+{
+	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
+	int err;
+
+	mutex_lock(&sched->control_lock);
+	if (sched->control_locked) {
+		err = -EPERM;
+		goto done;
+	}
+	err = gk20a_busy(g->dev);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
+		goto done;
+	}
+	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
+	gk20a_idle(g->dev);
+done:
+	mutex_unlock(&sched->control_lock);
+	return err;
+}
+
+
 long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 	unsigned long arg)
 {
@@ -561,8 +644,8 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 
 	case NVGPU_IOCTL_TSG_SET_PRIORITY:
 	{
-		err = gk20a_tsg_set_priority(g, tsg,
-			((struct nvgpu_set_priority_args *)buf)->priority);
+		err = gk20a_tsg_ioctl_set_priority(g, tsg,
+			(struct nvgpu_set_priority_args *)buf);
 		break;
 	}
 
@@ -574,30 +657,14 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 	}
 
 	case NVGPU_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
-	{
-		err = gk20a_busy(g->dev);
-		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"failed to host gk20a for ioctl cmd: 0x%x", cmd);
-			return err;
-		}
-		err = gk20a_tsg_set_runlist_interleave(tsg,
-			((struct nvgpu_runlist_interleave_args *)buf)->level);
-		gk20a_idle(g->dev);
+		err = gk20a_tsg_ioctl_set_runlist_interleave(g, tsg,
+			(struct nvgpu_runlist_interleave_args *)buf);
 		break;
-	}
 
 	case NVGPU_IOCTL_TSG_SET_TIMESLICE:
 	{
-		err = gk20a_busy(g->dev);
-		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"failed to host gk20a for ioctl cmd: 0x%x", cmd);
-			return err;
-		}
-		err = g->ops.fifo.tsg_set_timeslice(tsg,
-			((struct nvgpu_timeslice_args *)buf)->timeslice_us);
-		gk20a_idle(g->dev);
+		err = gk20a_tsg_ioctl_set_timeslice(g, tsg,
+			(struct nvgpu_timeslice_args *)buf);
 		break;
 	}
 