author	Debarshi Dutta <ddutta@nvidia.com>	2018-11-27 00:35:56 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2019-02-11 11:18:36 -0500
commit	ef9de9e9925573b691d78760e42334ad24c5797f (patch)
tree	068bc5b4bb01de77136ed1da0e5da10d883c164a /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent	5b8ecbc51fe2e94a233c2c42d497b05c2eccdaf5 (diff)
gpu: nvgpu: replace input parameter tsgid with pointer to struct tsg_gk20a
gv11b_fifo_preempt_tsg needs to access the runlist_id of the tsg and to pass the tsg pointer on to other public functions such as gk20a_fifo_disable_tsg_sched. This qualifies preempt_tsg to take a pointer to a struct tsg_gk20a instead of just the tsgid.

Jira NVGPU-1461

Change-Id: I01fbd2370b5746c2a597a0351e0301b0f7d25175
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1959068
(cherry picked from commit 1e78d47f15ff050edbb10a88550012178d353288 in rel-32)
Reviewed-on: https://git-master.nvidia.com/r/2013725
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
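For context, a minimal sketch of the shape of the gv11b-side caller this enables. The body is illustrative only (the gv11b implementation is not part of this diff); the identifiers gv11b_fifo_preempt_tsg, tsg->runlist_id, and gk20a_fifo_disable_tsg_sched come from the commit message above, everything else is elided:

	/* Illustrative sketch, not the real gv11b implementation: with a
	 * struct tsg_gk20a pointer the function can read tsg->runlist_id
	 * directly and hand the same pointer to other public helpers,
	 * instead of re-resolving state from a raw tsgid.
	 */
	int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
	{
		u32 runlist_id = tsg->runlist_id;

		/* ... lock and preempt the runlist selected by runlist_id ... */
		gk20a_fifo_disable_tsg_sched(g, tsg);
		/* ... poll for preempt completion, re-enable scheduling ... */
		return 0;
	}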
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 9ed78640..a2ebb720 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1562,7 +1562,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 	g->ops.fifo.disable_tsg(tsg);
 
 	if (preempt) {
-		g->ops.fifo.preempt_tsg(g, tsg->tsgid);
+		g->ops.fifo.preempt_tsg(g, tsg);
 	}
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -2194,8 +2194,8 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 	/* Disable TSG and examine status before unbinding channel */
 	g->ops.fifo.disable_tsg(tsg);
 
-	err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
-	if (err) {
+	err = g->ops.fifo.preempt_tsg(g, tsg);
+	if (err != 0) {
 		goto fail_enable_tsg;
 	}
 
@@ -3000,7 +3000,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	return ret;
 }
 
-int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u32 ret = 0;
@@ -3008,10 +3008,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 i;
 
-	nvgpu_log_fn(g, "tsgid: %d", tsgid);
-	if (tsgid == FIFO_INVAL_TSG_ID) {
-		return 0;
-	}
+	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++) {
@@ -3020,7 +3017,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsgid, true);
+	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
 	if (!mutex_ret) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3033,9 +3030,11 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
 			nvgpu_err(g, "preempt timed out for tsgid: %u, "
-			"ctxsw timeout will trigger recovery if needed", tsgid);
+			"ctxsw timeout will trigger recovery if needed",
+			tsg->tsgid);
 		} else {
-			gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+			gk20a_fifo_preempt_timeout_rc(g,
+				tsg->tsgid, ID_TYPE_TSG);
 		}
 	}
 
@@ -3045,9 +3044,10 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
 {
 	int err;
+	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
+	if (tsg != NULL) {
+		err = g->ops.fifo.preempt_tsg(ch->g, tsg);
 	} else {
 		err = g->ops.fifo.preempt_channel(ch->g, ch->chid);
 	}
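Since the FIFO_INVAL_TSG_ID early-return was dropped from gk20a_fifo_preempt_tsg(), a caller that still holds only a raw tsgid now has to validate and resolve it before calling through the op. A minimal sketch of that pattern, assuming the usual nvgpu lookup through g->fifo.tsg[]; the helper name preempt_by_tsgid is hypothetical:

	/* Hypothetical helper: resolve a raw tsgid to its tsg_gk20a before
	 * calling the pointer-based op; the FIFO_INVAL_TSG_ID check that
	 * used to live inside gk20a_fifo_preempt_tsg() moves to the caller.
	 */
	static int preempt_by_tsgid(struct gk20a *g, u32 tsgid)
	{
		struct tsg_gk20a *tsg;

		if (tsgid == FIFO_INVAL_TSG_ID) {
			return 0;
		}

		tsg = &g->fifo.tsg[tsgid];
		return g->ops.fifo.preempt_tsg(g, tsg);
	}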