diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b')
-rw-r--r-- | drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 47 |
1 file changed, 25 insertions, 22 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c index b3c59f84..3c2de4f2 100644 --- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | |||
@@ -1024,6 +1024,11 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, | |||
1024 | u32 num_runlists = 0; | 1024 | u32 num_runlists = 0; |
1025 | unsigned long runlist_served_pbdmas; | 1025 | unsigned long runlist_served_pbdmas; |
1026 | 1026 | ||
1027 | bool deferred_reset_pending = false; | ||
1028 | |||
1029 | nvgpu_log_info(g, "acquire engines_reset_mutex"); | ||
1030 | nvgpu_mutex_acquire(&g->fifo.engines_reset_mutex); | ||
1031 | |||
1027 | nvgpu_log_fn(g, "acquire runlist_lock for all runlists"); | 1032 | nvgpu_log_fn(g, "acquire runlist_lock for all runlists"); |
1028 | for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { | 1033 | for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { |
1029 | nvgpu_mutex_acquire(&f->runlist_info[rlid]. | 1034 | nvgpu_mutex_acquire(&f->runlist_info[rlid]. |
@@ -1094,8 +1099,6 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, | |||
1094 | /* Disable runlist scheduler */ | 1099 | /* Disable runlist scheduler */ |
1095 | gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED); | 1100 | gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED); |
1096 | 1101 | ||
1097 | g->fifo.deferred_reset_pending = false; | ||
1098 | |||
1099 | /* Disable power management */ | 1102 | /* Disable power management */ |
1100 | if (g->support_pmu) { | 1103 | if (g->support_pmu) { |
1101 | if (nvgpu_cg_pg_disable(g) != 0) { | 1104 | if (nvgpu_cg_pg_disable(g) != 0) { |
@@ -1143,6 +1146,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, | |||
1143 | } | 1146 | } |
1144 | } | 1147 | } |
1145 | 1148 | ||
1149 | nvgpu_mutex_acquire(&f->deferred_reset_mutex); | ||
1150 | g->fifo.deferred_reset_pending = false; | ||
1151 | nvgpu_mutex_release(&f->deferred_reset_mutex); | ||
1152 | |||
1146 | /* check if engine reset should be deferred */ | 1153 | /* check if engine reset should be deferred */ |
1147 | for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { | 1154 | for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { |
1148 | 1155 | ||
@@ -1159,28 +1166,21 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, | |||
1159 | gk20a_fifo_should_defer_engine_reset(g, | 1166 | gk20a_fifo_should_defer_engine_reset(g, |
1160 | engine_id, client_type, false)) { | 1167 | engine_id, client_type, false)) { |
1161 | 1168 | ||
1162 | g->fifo.deferred_fault_engines |= | 1169 | g->fifo.deferred_fault_engines |= |
1163 | BIT(engine_id); | 1170 | BIT(engine_id); |
1164 | 1171 | ||
1165 | /* handled during channel free */ | 1172 | /* handled during channel free */ |
1166 | g->fifo.deferred_reset_pending = true; | 1173 | nvgpu_mutex_acquire(&f->deferred_reset_mutex); |
1167 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, | 1174 | g->fifo.deferred_reset_pending = true; |
1168 | "sm debugger attached," | 1175 | nvgpu_mutex_release(&f->deferred_reset_mutex); |
1169 | " deferring channel recovery to channel free"); | 1176 | |
1177 | deferred_reset_pending = true; | ||
1178 | |||
1179 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, | ||
1180 | "sm debugger attached," | ||
1181 | " deferring channel recovery to channel free"); | ||
1170 | } else { | 1182 | } else { |
1171 | /* | 1183 | gk20a_fifo_reset_engine(g, engine_id); |
1172 | * if lock is already taken, a reset is | ||
1173 | * taking place so no need to repeat | ||
1174 | */ | ||
1175 | if (nvgpu_mutex_tryacquire( | ||
1176 | &g->fifo.gr_reset_mutex)) { | ||
1177 | |||
1178 | gk20a_fifo_reset_engine(g, | ||
1179 | engine_id); | ||
1180 | |||
1181 | nvgpu_mutex_release( | ||
1182 | &g->fifo.gr_reset_mutex); | ||
1183 | } | ||
1184 | } | 1184 | } |
1185 | } | 1185 | } |
1186 | } | 1186 | } |
@@ -1191,7 +1191,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, | |||
1191 | gk20a_ctxsw_trace_tsg_reset(g, tsg); | 1191 | gk20a_ctxsw_trace_tsg_reset(g, tsg); |
1192 | #endif | 1192 | #endif |
1193 | if (tsg) { | 1193 | if (tsg) { |
1194 | if (g->fifo.deferred_reset_pending) { | 1194 | if (deferred_reset_pending) { |
1195 | gk20a_disable_tsg(tsg); | 1195 | gk20a_disable_tsg(tsg); |
1196 | } else { | 1196 | } else { |
1197 | if (rc_type == RC_TYPE_MMU_FAULT) { | 1197 | if (rc_type == RC_TYPE_MMU_FAULT) { |
@@ -1228,6 +1228,9 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, | |||
1228 | runlist_lock); | 1228 | runlist_lock); |
1229 | } | 1229 | } |
1230 | } | 1230 | } |
1231 | |||
1232 | nvgpu_log_info(g, "release engines_reset_mutex"); | ||
1233 | nvgpu_mutex_release(&g->fifo.engines_reset_mutex); | ||
1231 | } | 1234 | } |
1232 | 1235 | ||
1233 | void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f) | 1236 | void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f) |