From e039dcbc9dd7d0c47895bdbb49cdc3e1d11a3cae Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Mon, 16 Oct 2017 14:58:17 -0700
Subject: gpu: nvgpu: Use nvgpu_rwsem as TSG channel lock

Use abstract nvgpu_rwsem as TSG channel list lock instead of the Linux
specific rw_semaphore.

JIRA NVGPU-259

Change-Id: I41a38b29d4651838b1962d69f102af1384e12cb6
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1579935
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 38 ++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 9c41c7ef..b636679e 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1356,14 +1356,14 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
 	struct channel_gk20a *ch = NULL;
 	bool verbose = false;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			verbose |= gk20a_fifo_error_ch(g, ch);
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return verbose;
 
@@ -1386,14 +1386,14 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
 	nvgpu_err(g, "TSG %d generated a mmu fault", tsg->tsgid);
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 }
 
@@ -1409,7 +1409,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 	if (preempt)
 		g->ops.fifo.preempt_tsg(g, tsgid);
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			ch->has_timedout = true;
@@ -1417,7 +1417,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
 
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
@@ -1906,7 +1906,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
@@ -1915,7 +1915,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 			}
 		}
 
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
 	} else {
 		gk20a_set_error_notifier(ch, err_code);
@@ -1971,9 +1971,9 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 		goto fail_enable_tsg;
 
 	/* Remove channel from TSG and re-enable rest of the channels */
-	down_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_del(&ch->ch_entry);
-	up_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	g->ops.fifo.enable_tsg(tsg);
 
@@ -2084,7 +2084,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	*verbose = false;
 	*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 	/* check if there was some progress on any of the TSG channels.
 	 * fifo recovery is needed if at least one channel reached the
@@ -2140,7 +2140,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 * of them has reached the timeout, there is nothing more to do:
 	 * timeout_accumulated_ms has been updated for all of them.
 	 */
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return recover;
 }
@@ -2470,7 +2470,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
 			struct tsg_gk20a *tsg = &f->tsg[id];
 			struct channel_gk20a *ch = NULL;
 
-			down_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 				if (gk20a_channel_get(ch)) {
 					gk20a_set_error_notifier(ch,
@@ -2478,7 +2478,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
 					gk20a_channel_put(ch);
 				}
 			}
-			up_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 			gk20a_fifo_recover_tsg(g, id, true);
 		}
 	}
@@ -2599,7 +2599,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		nvgpu_err(g, "preempt TSG %d timeout", id);
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 			if (!gk20a_channel_get(ch))
 				continue;
@@ -2607,7 +2607,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 				NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
 			gk20a_channel_put(ch);
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		gk20a_fifo_recover_tsg(g, id, true);
 	} else {
 		struct channel_gk20a *ch = &g->fifo.channel[id];
@@ -3095,7 +3095,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 			count++;
 			(*entries_left)--;
 
-			down_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 			/* add runnable channels bound to this TSG */
 			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 				if (!test_bit(ch->chid,
@@ -3103,7 +3103,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 					continue;
 
 				if (!(*entries_left)) {
-					up_read(&tsg->ch_list_lock);
+					nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 					return NULL;
 				}
 
@@ -3117,7 +3117,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 				runlist_entry += runlist_entry_words;
 				(*entries_left)--;
 			}
-			up_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		}
 
 		/* append entries from higher level if this level is empty */
-- 
cgit v1.2.2
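Note on the abstraction used above: the patch only swaps the call sites in fifo_gk20a.c; the nvgpu_rwsem type itself lives in the nvgpu OS-abstraction layer, where on Linux it is expected to be a thin wrapper around the kernel's rw_semaphore. The following is a minimal, hypothetical sketch of such a Linux backend. Only the down_read/up_read/down_write/up_write entry points are confirmed by the diff; the struct layout, the nvgpu_rwsem_init() helper, and the header placement are assumptions for illustration and may not match the actual nvgpu headers.

/*
 * Hypothetical Linux backend for the nvgpu_rwsem abstraction
 * (illustrative sketch only; the real definition in the nvgpu
 * OS-abstraction headers may differ).
 */
#include <linux/rwsem.h>

struct nvgpu_rwsem {
	struct rw_semaphore rwsem;	/* Linux-specific backing lock */
};

/* Assumed init helper; not part of the diff above. */
static inline void nvgpu_rwsem_init(struct nvgpu_rwsem *rwsem)
{
	init_rwsem(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_down_read(struct nvgpu_rwsem *rwsem)
{
	down_read(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_read(struct nvgpu_rwsem *rwsem)
{
	up_read(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_down_write(struct nvgpu_rwsem *rwsem)
{
	down_write(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_write(struct nvgpu_rwsem *rwsem)
{
	up_write(&rwsem->rwsem);
}

With a wrapper of this shape, common code such as fifo_gk20a.c no longer depends on the Linux-specific rw_semaphore directly, which is the motivation stated in the commit message: a non-Linux build can supply a different definition of struct nvgpu_rwsem behind the same down/up calls without touching the call sites.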