author     Deepak Nibade <dnibade@nvidia.com>       2015-12-10 03:58:32 -0500
committer  Terje Bergstrom <tbergstrom@nvidia.com>  2015-12-10 11:39:42 -0500
commit     c4ac1ed369cb5737de10924908d97be9f11ec875 (patch)
tree       294d2bc504f16cad8653413d63a6b47c6753adaa /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent     54f76d1ac6cf0ace524e073076578c891d1b3f79 (diff)
gpu: nvgpu: preempt before adjusting fences
The current sequence in gk20a_disable_channel() is:
- disable channel in gk20a_channel_abort()
- adjust pending fences in gk20a_channel_abort()
- preempt channel

This can leave the syncpoint with min > max.

To fix this, change the sequence in gk20a_disable_channel() to:
- disable channel in gk20a_channel_abort()
- preempt channel in gk20a_channel_abort()
- adjust pending fences in gk20a_channel_abort()

If gk20a_channel_abort() is called from another API where preemption is
not needed, pass the new channel_preempt flag as false so the channel is
not preempted in those cases.

Bug 1683059

Change-Id: I4d46d4294cf8597ae5f05f79dfe1b95c4187f2e3
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/921290
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
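For clarity, here is a minimal C sketch of the reordered abort path described
above. It is not the full driver code: it only mirrors the ordering shown in
the diff below, the fence adjustment and job cleanup that follow the preempt
are elided, and the fifo ops are the ones this file already uses.

void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
{
	/* 1. Disable the channel so no new work can be scheduled on it. */
	ch->g->ops.fifo.disable_channel(ch);

	/*
	 * 2. Preempt the channel off the engine *before* touching fences.
	 *    Adjusting fences first can push the syncpoint min above max
	 *    while the hardware is still running. Callers that do not need
	 *    preemption pass channel_preempt = false.
	 */
	if (channel_preempt)
		ch->g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);

	/*
	 * 3. Only now adjust the pending fences, under ch->sync_lock,
	 *    as in the hunk at line 404 of the diff below (details elided).
	 */
}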
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 7ec5ade4..b480c80a 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -392,7 +392,7 @@ void channel_gk20a_disable(struct channel_gk20a *ch)
 		ccsr_channel_enable_clr_true_f());
 }
 
-void gk20a_channel_abort(struct channel_gk20a *ch)
+void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 {
 	struct channel_gk20a_job *job, *n;
 	bool released_job_semaphore = false;
@@ -404,6 +404,9 @@ void gk20a_channel_abort(struct channel_gk20a *ch)
 
 	ch->g->ops.fifo.disable_channel(ch);
 
+	if (channel_preempt)
+		ch->g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
+
 	/* ensure no fences are pending */
 	mutex_lock(&ch->sync_lock);
 	if (ch->sync)
@@ -455,8 +458,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 
 void gk20a_disable_channel(struct channel_gk20a *ch)
 {
-	gk20a_channel_abort(ch);
-	ch->g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
+	gk20a_channel_abort(ch, true);
 	channel_gk20a_update_runlist(ch, false);
 }
 
@@ -1621,7 +1623,7 @@ static void gk20a_channel_timeout_handler(struct work_struct *work)
 			gk20a_fifo_abort_tsg(g, ch->tsgid);
 		} else {
 			gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
-			gk20a_channel_abort(ch);
+			gk20a_channel_abort(ch, false);
 		}
 	} else {
 		/* If failing engine, trigger recovery */
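Because the signature of gk20a_channel_abort() now takes a bool, every caller
must state explicitly whether the channel should be preempted. The two call
sites touched by this patch illustrate the intent (a sketch restating the diff
above, not additional code):

	/* Normal disable path: preempt before the fences are adjusted. */
	gk20a_channel_abort(ch, true);

	/*
	 * Timeout / MMU-error recovery path: per the commit message this is
	 * an API where preemption is not needed, so the extra preempt is
	 * skipped here.
	 */
	gk20a_channel_abort(ch, false);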