author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-03-15 15:45:08 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-21 15:08:13 -0400
commit	467d0b1a19fd703ee10bc183c15ffef83f71a644 (patch)
tree	59e5a8bc4b615185af9953de3db2d03cfbc02541
parent	51f3f542fabb31527024eba5b8f52bf87cc30659 (diff)
gpu: nvgpu: Don't query engine id for inval engine
When we get a PBDMA MMU fault, we are not able to map the MMU id to an engine id for reset. We still pass FIFO_INVAL_ENGINE_ID to gk20a_fifo_should_defer_engine_reset(), which causes unnecessary debug spew. Check for FIFO_INVAL_ENGINE_ID before calling gk20a_fifo_should_defer_engine_reset().

Change-Id: I6f4a49be194cbc6070c1a1c667059de2ea79790f
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1321492
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
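The fix boils down to guarding the defer-reset query with the invalid-engine sentinel, so a PBDMA fault (which never mapped to a real engine) skips the lookup that would only produce debug noise. Below is a minimal standalone C sketch of that guard pattern; the helper names and the sentinel's stub definition are illustrative, not the actual nvgpu driver symbols.

/* Minimal sketch of the guard pattern described above; all names are
 * illustrative stubs, not the nvgpu driver's actual symbols. */
#include <stdbool.h>
#include <stdio.h>

#define FIFO_INVAL_ENGINE_ID (~0U)

/* Stand-in for the defer-decision helper: in the driver this inspects
 * engine state and emits debug output, which is pure noise when handed
 * an id that never mapped to a real engine. */
static bool should_defer_engine_reset(unsigned int engine_id)
{
	printf("querying engine %u for deferred reset\n", engine_id);
	return false;
}

static void reset_engine(unsigned int engine_id)
{
	printf("resetting engine %u\n", engine_id);
}

/* Check the sentinel first, so a fault with no engine mapping
 * skips both the query and the reset path. */
static void handle_fault(unsigned int engine_id)
{
	if (engine_id == FIFO_INVAL_ENGINE_ID) {
		printf("PBDMA fault: no engine to reset\n");
		return;
	}

	if (should_defer_engine_reset(engine_id))
		printf("deferring reset of engine %u to channel free\n",
		       engine_id);
	else
		reset_engine(engine_id);
}

int main(void)
{
	handle_fault(0);                    /* ordinary engine fault */
	handle_fault(FIFO_INVAL_ENGINE_ID); /* PBDMA fault: query is skipped */
	return 0;
}

Running the sketch, the first call reaches the query and reset, while the second returns right after the sentinel check, mirroring the control flow the patch introduces.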
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	34
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index de2c3f9e..2fa939b9 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1566,21 +1566,25 @@ static bool gk20a_fifo_handle_mmu_fault(
 			tsg = &g->fifo.tsg[ch->tsgid];
 
 		/* check if engine reset should be deferred */
-		if ((ch || tsg) && gk20a_fifo_should_defer_engine_reset(g,
-				engine_id, f.engine_subid_v, fake_fault)) {
-			g->fifo.deferred_fault_engines |= BIT(engine_id);
-
-			/* handled during channel free */
-			g->fifo.deferred_reset_pending = true;
-			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
-				"sm debugger attached,"
-				" deferring channel recovery to channel free");
-		} else if (engine_id != FIFO_INVAL_ENGINE_ID) {
-			/* if lock is already taken, a reset is taking place
-			so no need to repeat */
-			if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) {
-				gk20a_fifo_reset_engine(g, engine_id);
-				nvgpu_mutex_release(&g->fifo.gr_reset_mutex);
+		if (engine_id != FIFO_INVAL_ENGINE_ID) {
+			bool defer = gk20a_fifo_should_defer_engine_reset(g,
+					engine_id, f.engine_subid_v,
+					fake_fault);
+			if ((ch || tsg) && defer) {
+				g->fifo.deferred_fault_engines |= BIT(engine_id);
+
+				/* handled during channel free */
+				g->fifo.deferred_reset_pending = true;
+				gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+					"sm debugger attached,"
+					" deferring channel recovery to channel free");
+			} else {
+				/* if lock is already taken, a reset is taking place
+				so no need to repeat */
+				if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) {
+					gk20a_fifo_reset_engine(g, engine_id);
+					nvgpu_mutex_release(&g->fifo.gr_reset_mutex);
+				}
 			}
 		}
 