author		Thomas Fleury <tfleury@nvidia.com>	2017-01-09 13:54:36 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-01-16 15:15:23 -0500
commit		f6a634ff24194ee83579f90ab399a08d91df233a (patch)
tree		56e87309729a5e926ef2fd30bb8ede2abbd23da7 /drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
parent		bb5a9e1c6cfbe69b8a984439a092db7a1a75a405 (diff)
gpu: nvgpu: use HAL to set TSG timeslice
Setting timeslice for the virtualized case was not effective, because both
ioctls NVGPU_TSG_IOCTL_SET_TIMESLICE and NVGPU_SCHED_IOCTL_TSG_SET_TIMESLICE
were calling the native function to set the TSG timeslice.
- Fixed wrapper function to call HAL
- Defined HAL function for "native" set TSG timeslice
- Also, properly update timeout_us in TSG context in the virtualized case

This change also moves the min/max bounds checking for the TSG timeslice
into the native function implementation. There is no sysfs node for these
parameters for vgpu, as the RM server is ultimately responsible for this
check.

Bug 200263575

Change-Id: Ibceab9427561ad58ec28abfff0c96ca8f592bdb9
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: http://git-master/r/1283180
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
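The "native" set-TSG-timeslice HAL function mentioned above is defined outside this file, so it does not appear in the diff below. As a minimal sketch, it plausibly absorbs the bounds check, timescale conversion, and runlist update that this change removes from gk20a_tsg_set_timeslice; the name gk20a_fifo_tsg_set_timeslice and its placement in fifo_gk20a.c are assumptions, not shown by this patch:

/* Sketch of the native HAL target (assumed name/location, not part of
 * this diff): it mirrors the lines removed from gk20a_tsg_set_timeslice. */
static int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
{
	struct gk20a *g = tsg->g;

	/* min/max bounds check now lives in the native implementation */
	if (timeslice < g->min_timeslice_us ||
	    timeslice > g->max_timeslice_us)
		return -EINVAL;

	gk20a_channel_get_timescale_from_timeslice(g, timeslice,
			&tsg->timeslice_timeout, &tsg->timeslice_scale);

	tsg->timeslice_us = timeslice;

	/* resubmit the runlist so the new timeslice takes effect */
	return g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
}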
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.c	33
1 file changed, 14 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 81a4b78e..96d6873d 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -177,27 +177,24 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 		u32 priority)
 {
+	u32 timeslice_us;
+
 	switch (priority) {
 	case NVGPU_PRIORITY_LOW:
-		tsg->timeslice_us = g->timeslice_low_priority_us;
+		timeslice_us = g->timeslice_low_priority_us;
 		break;
 	case NVGPU_PRIORITY_MEDIUM:
-		tsg->timeslice_us = g->timeslice_medium_priority_us;
+		timeslice_us = g->timeslice_medium_priority_us;
 		break;
 	case NVGPU_PRIORITY_HIGH:
-		tsg->timeslice_us = g->timeslice_high_priority_us;
+		timeslice_us = g->timeslice_high_priority_us;
 		break;
 	default:
 		pr_err("Unsupported priority");
 		return -EINVAL;
 	}
 
-	gk20a_channel_get_timescale_from_timeslice(g, tsg->timeslice_us,
-			&tsg->timeslice_timeout, &tsg->timeslice_scale);
-
-	g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
-
-	return 0;
+	return gk20a_tsg_set_timeslice(tsg, timeslice_us);
 }
 
 static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
@@ -343,6 +340,8 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
 	struct gk20a *g = tsg->g;
 	int ret;
 
+	gk20a_dbg(gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);
+
 	switch (level) {
 	case NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW:
 	case NVGPU_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
@@ -364,16 +363,9 @@ int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 {
 	struct gk20a *g = tsg->g;
 
-	if (timeslice < g->min_timeslice_us ||
-		timeslice > g->max_timeslice_us)
-		return -EINVAL;
-
-	gk20a_channel_get_timescale_from_timeslice(g, timeslice,
-		&tsg->timeslice_timeout, &tsg->timeslice_scale);
-
-	tsg->timeslice_us = timeslice;
+	gk20a_dbg(gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice);
 
-	return g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
+	return g->ops.fifo.tsg_set_timeslice(tsg, timeslice);
 }
 
 static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
@@ -551,6 +543,8 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
 	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
 	int err;
 
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
+
 	mutex_lock(&sched->control_lock);
 	if (sched->control_locked) {
 		err = -EPERM;
@@ -576,6 +570,8 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
 	struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
 	int err;
 
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
+
 	mutex_lock(&sched->control_lock);
 	if (sched->control_locked) {
 		err = -EPERM;
@@ -728,5 +724,4 @@ void gk20a_init_tsg_ops(struct gpu_ops *gops)
 {
 	gops->fifo.tsg_bind_channel = gk20a_tsg_bind_channel;
 	gops->fifo.tsg_unbind_channel = gk20a_tsg_unbind_channel;
-	gops->fifo.tsg_set_timeslice = gk20a_tsg_set_timeslice;
 }
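For the virtualized case, the commit message only states the intent: the RM server enforces the bounds, and timeout_us is now properly updated in the TSG context. The following is a rough sketch under those assumptions; vgpu_tsg_set_timeslice is a plausible HAL counterpart and vgpu_send_tsg_set_timeslice_cmd is a hypothetical RPC helper standing in for whatever transport the vgpu code actually uses:

/* Hypothetical vgpu-side counterpart (not part of this diff).
 * Bounds checking is deferred to the RM server; on success the local TSG
 * context is updated so later queries report the timeslice in effect. */
static int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
{
	int err;

	/* forward the request to the RM server, which validates the value */
	err = vgpu_send_tsg_set_timeslice_cmd(tsg->g, tsg->tsgid, timeslice);
	if (err)
		return err;

	/* keep the TSG context in sync ("timeout_us" in the commit message) */
	tsg->timeslice_us = timeslice;

	return 0;
}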