From 5cda5a3074e4c7dae1857e1dfdf55017b0450786 Mon Sep 17 00:00:00 2001
From: Seema Khowala
Date: Tue, 27 Jun 2017 22:10:49 -0700
Subject: gpu: nvgpu: gv11b: add a function to reset pbdma and eng faulted

When Host receives a page fault signal from a CE, Host will immediately
set the _ENG_FAULTED bit in the NV_PCCSR_CHANNEL register for the channel
and will trigger a preempt of the TSG/channel.

A channel will only be scheduled when _ENABLED=1, _ENG_FAULTED=0 and
_PBDMA_FAULTED=0 in the NV_PCCSR_CHANNEL register for the channel. If a
TSG has a faulted channel, Host will not schedule the entire TSG again
until all _FAULTED bits of the channels in the TSG are cleared by SW.

This function will be required for CE page fault handling.

JIRA GPUT19X-46
JIRA GPUT19X-12

Change-Id: Ib58dff7aa24aa144e970f11b5261877dec03f3e6
Signed-off-by: Seema Khowala
Reviewed-on: https://git-master/r/1509776
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 38 +++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 3e73a29e..a1f6d258 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -564,6 +564,29 @@ static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg)
 	up_read(&tsg->ch_list_lock);
 }
 
+void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
+			struct channel_gk20a *refch,
+			u32 faulted_pbdma, u32 faulted_engine)
+{
+	struct tsg_gk20a *tsg;
+
+	nvgpu_log(g, gpu_dbg_intr, "reset faulted pbdma:0x%x eng:0x%x",
+			faulted_pbdma, faulted_engine);
+
+	if (gk20a_is_channel_marked_as_tsg(refch)) {
+		tsg = &g->fifo.tsg[refch->tsgid];
+		if (faulted_pbdma != FIFO_INVAL_PBDMA_ID)
+			gv11b_reset_pbdma_faulted_tsg(tsg);
+		if (faulted_engine != FIFO_INVAL_ENGINE_ID)
+			gv11b_reset_eng_faulted_tsg(tsg);
+	} else {
+		if (faulted_pbdma != FIFO_INVAL_PBDMA_ID)
+			gv11b_reset_pbdma_faulted_ch(g, refch->chid);
+		if (faulted_engine != FIFO_INVAL_ENGINE_ID)
+			gv11b_reset_eng_faulted_ch(g, refch->chid);
+	}
+}
+
 static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
 			u32 id, unsigned int id_type, unsigned int rc_type,
 			struct mmu_fault_info *mmfault)
@@ -928,18 +951,11 @@ static void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	if (rc_type == RC_TYPE_MMU_FAULT && mmfault && mmfault->refch) {
 		refch = mmfault->refch;
 		client_type = mmfault->client_type;
-		if (gk20a_is_channel_marked_as_tsg(refch)) {
+		if (gk20a_is_channel_marked_as_tsg(refch))
 			tsg = &g->fifo.tsg[refch->tsgid];
-			if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
-				gv11b_reset_pbdma_faulted_tsg(tsg);
-			if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
-				gv11b_reset_eng_faulted_tsg(tsg);
-		} else {
-			if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
-				gv11b_reset_pbdma_faulted_ch(g, refch->chid);
-			if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
-				gv11b_reset_eng_faulted_ch(g, refch->chid);
-		}
+		gv11b_fifo_reset_pbdma_and_eng_faulted(g, refch,
+				mmfault->faulted_pbdma,
+				mmfault->faulted_engine);
 	} else {
 		if (id_type == ID_TYPE_TSG)
 			tsg = &g->fifo.tsg[id];
-- 
cgit v1.2.2
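
For context, below is a minimal caller sketch (not part of the patch above) showing how a CE page-fault handling path might use the new helper once an mmu_fault_info has been decoded. Only gv11b_fifo_reset_pbdma_and_eng_faulted and the mmu_fault_info fields come from the diff; the wrapper function itself is a hypothetical illustration.

/*
 * Hypothetical caller sketch, assuming the nvgpu types used in the diff
 * above (struct gk20a, struct mmu_fault_info). Clearing the _PBDMA_FAULTED
 * and _ENG_FAULTED bits is what allows Host to schedule the channel (or
 * its TSG) again.
 */
static void example_ce_fault_cleanup(struct gk20a *g,
		struct mmu_fault_info *mmfault)
{
	if (mmfault == NULL || mmfault->refch == NULL)
		return;

	/* Clear the NV_PCCSR_CHANNEL faulted bits for the referenced
	 * channel, or for the channels of its TSG if it is in one. */
	gv11b_fifo_reset_pbdma_and_eng_faulted(g, mmfault->refch,
			mmfault->faulted_pbdma,
			mmfault->faulted_engine);
}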