path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
author		Deepak Nibade <dnibade@nvidia.com>	2015-12-10 03:58:32 -0500
committer	Terje Bergstrom <tbergstrom@nvidia.com>	2015-12-10 11:39:42 -0500
commit		c4ac1ed369cb5737de10924908d97be9f11ec875 (patch)
tree		294d2bc504f16cad8653413d63a6b47c6753adaa	/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent		54f76d1ac6cf0ace524e073076578c891d1b3f79 (diff)
gpu: nvgpu: preempt before adjusting fences
The current sequence in gk20a_disable_channel() is
- disable channel in gk20a_channel_abort()
- adjust pending fence in gk20a_channel_abort()
- preempt channel

But this leads to scenarios where the syncpoint has min > max.

Hence, to fix this, make the sequence in gk20a_disable_channel()
- disable channel in gk20a_channel_abort()
- preempt channel in gk20a_channel_abort()
- adjust pending fence in gk20a_channel_abort()

If gk20a_channel_abort() is called from another API where preemption
is not needed, pass the channel_preempt flag as false and do not
preempt the channel in those cases.

Bug 1683059

Change-Id: I4d46d4294cf8597ae5f05f79dfe1b95c4187f2e3
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/921290
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
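The reordering is easiest to see in code. Below is a minimal sketch of the
flag-controlled sequence the commit message describes. Only the
gk20a_channel_abort(ch, channel_preempt) signature comes from this patch;
the three helpers (channel_disable_hw(), channel_preempt_hw(),
channel_adjust_pending_fences()) are hypothetical placeholders, not the
actual nvgpu helpers.

/*
 * Sketch of the reordered abort sequence. The helpers below are
 * hypothetical stand-ins for the real nvgpu internals.
 */
void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
{
	/* 1. Disable the channel so no new work can be submitted. */
	channel_disable_hw(ch);

	/*
	 * 2. Preempt before touching fences: once the channel is off
	 *    the engine, the syncpoint can no longer advance, so the
	 *    fast-forward in step 3 cannot leave min > max.
	 */
	if (channel_preempt)
		channel_preempt_hw(ch);

	/* 3. Only now fast-forward the pending fences. */
	channel_adjust_pending_fences(ch);
}

Per the commit message, the three call sites changed in this file all pass
false because they sit in recovery/teardown paths where a separate preempt
is not needed; the preempting variant is used by gk20a_disable_channel().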
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 11fcc805..1727cf1d 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -894,7 +894,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid)
 	mutex_lock(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
-			gk20a_channel_abort(ch);
+			gk20a_channel_abort(ch, false);
 			gk20a_channel_put(ch);
 		}
 	}
@@ -1064,7 +1064,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 	if (referenced_channel) {
 		if (!g->fifo.deferred_reset_pending)
 			verbose = gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
-		gk20a_channel_abort(ch);
+		gk20a_channel_abort(ch, false);
 		gk20a_channel_put(ch);
 	} else {
 		gk20a_err(dev_from_gk20a(g),
@@ -1217,7 +1217,7 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
 	struct channel_gk20a *ch = &g->fifo.channel[hw_chid];
 
 	if (gk20a_channel_get(ch)) {
-		gk20a_channel_abort(ch);
+		gk20a_channel_abort(ch, false);
 
 		if (gk20a_fifo_set_ctx_mmu_error_ch(g, ch))
 			gk20a_debug_dump(g->dev);
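For contrast, the gk20a_disable_channel() path described in the commit
message would pass true, so the preempt lands between the disable and the
fence adjustment. A hedged one-line sketch; the real function body is
elided and the exact call shape here is an assumption, not the patch's code:

/* Hedged sketch: not the full gk20a_disable_channel() body. */
void gk20a_disable_channel(struct channel_gk20a *ch)
{
	/* disable -> preempt -> adjust fences, all inside the abort */
	gk20a_channel_abort(ch, true);
}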