summary | refs | log | tree | commit | diff | stats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c     8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sched_gk20a.c    8
3 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 3dda1cbf..376a64b0 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -906,7 +906,6 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
906 struct dbg_session_gk20a *dbg_s; 906 struct dbg_session_gk20a *dbg_s;
907 struct dbg_session_data *session_data, *tmp_s; 907 struct dbg_session_data *session_data, *tmp_s;
908 struct dbg_session_channel_data *ch_data, *tmp; 908 struct dbg_session_channel_data *ch_data, *tmp;
909 bool was_reset;
910 909
911 gk20a_dbg_fn(""); 910 gk20a_dbg_fn("");
912 911
@@ -953,13 +952,12 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
953 if (g->fifo.deferred_reset_pending) { 952 if (g->fifo.deferred_reset_pending) {
954 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was" 953 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
955 " deferred, running now"); 954 " deferred, running now");
956 was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex);
957 mutex_lock(&g->fifo.gr_reset_mutex);
958 /* if lock is already taken, a reset is taking place 955 /* if lock is already taken, a reset is taking place
959 so no need to repeat */ 956 so no need to repeat */
960 if (!was_reset) 957 if (mutex_trylock(&g->fifo.gr_reset_mutex)) {
961 gk20a_fifo_deferred_reset(g, ch); 958 gk20a_fifo_deferred_reset(g, ch);
962 mutex_unlock(&g->fifo.gr_reset_mutex); 959 mutex_unlock(&g->fifo.gr_reset_mutex);
960 }
963 } 961 }
964 mutex_unlock(&f->deferred_reset_mutex); 962 mutex_unlock(&f->deferred_reset_mutex);
965 963
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c6b444f9..4a32194c 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1401,7 +1401,6 @@ static bool gk20a_fifo_handle_mmu_fault(
1401 struct channel_gk20a *ch = NULL; 1401 struct channel_gk20a *ch = NULL;
1402 struct tsg_gk20a *tsg = NULL; 1402 struct tsg_gk20a *tsg = NULL;
1403 struct channel_gk20a *referenced_channel = NULL; 1403 struct channel_gk20a *referenced_channel = NULL;
1404 bool was_reset;
1405 /* read and parse engine status */ 1404 /* read and parse engine status */
1406 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id)); 1405 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
1407 u32 ctx_status = fifo_engine_status_ctx_status_v(status); 1406 u32 ctx_status = fifo_engine_status_ctx_status_v(status);
@@ -1486,13 +1485,12 @@ static bool gk20a_fifo_handle_mmu_fault(
1486 "sm debugger attached," 1485 "sm debugger attached,"
1487 " deferring channel recovery to channel free"); 1486 " deferring channel recovery to channel free");
1488 } else if (engine_id != FIFO_INVAL_ENGINE_ID) { 1487 } else if (engine_id != FIFO_INVAL_ENGINE_ID) {
1489 was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex);
1490 mutex_lock(&g->fifo.gr_reset_mutex);
1491 /* if lock is already taken, a reset is taking place 1488 /* if lock is already taken, a reset is taking place
1492 so no need to repeat */ 1489 so no need to repeat */
1493 if (!was_reset) 1490 if (mutex_trylock(&g->fifo.gr_reset_mutex)) {
1494 gk20a_fifo_reset_engine(g, engine_id); 1491 gk20a_fifo_reset_engine(g, engine_id);
1495 mutex_unlock(&g->fifo.gr_reset_mutex); 1492 mutex_unlock(&g->fifo.gr_reset_mutex);
1493 }
1496 } 1494 }
1497 1495
1498 if (ch) 1496 if (ch)
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
index 54dbcfd1..20cd1232 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
@@ -520,6 +520,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
520 struct device *dev = s->private; 520 struct device *dev = s->private;
521 struct gk20a *g = gk20a_get_platform(dev)->g; 521 struct gk20a *g = gk20a_get_platform(dev)->g;
522 struct gk20a_sched_ctrl *sched = &g->sched_ctrl; 522 struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
523 bool sched_busy = true;
523 524
524 int n = sched->bitmap_size / sizeof(u64); 525 int n = sched->bitmap_size / sizeof(u64);
525 int i; 526 int i;
@@ -529,8 +530,13 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
529 if (err) 530 if (err)
530 return err; 531 return err;
531 532
533 if (mutex_trylock(&sched->busy_lock)) {
534 sched_busy = false;
535 mutex_unlock(&sched->busy_lock);
536 }
537
532 seq_printf(s, "control_locked=%d\n", sched->control_locked); 538 seq_printf(s, "control_locked=%d\n", sched->control_locked);
533 seq_printf(s, "busy=%d\n", mutex_is_locked(&sched->busy_lock)); 539 seq_printf(s, "busy=%d\n", sched_busy);
534 seq_printf(s, "bitmap_size=%zu\n", sched->bitmap_size); 540 seq_printf(s, "bitmap_size=%zu\n", sched->bitmap_size);
535 541
536 mutex_lock(&sched->status_lock); 542 mutex_lock(&sched->status_lock);