From bf0379997799e7f83514e974cd02aaaab85a4101 Mon Sep 17 00:00:00 2001
From: Seema Khowala
Date: Mon, 8 Jan 2018 15:41:39 -0800
Subject: gpu: nvgpu: rename mutex to runlist_lock

Rename mutex to runlist_lock in fifo_runlist_info_gk20a struct.
This is good to have for code readability.

Bug 2065990
Bug 2043838

Change-Id: I716685e3fad538458181d2a9fe592410401862b9
Signed-off-by: Seema Khowala
Reviewed-on: https://git-master.nvidia.com/r/1662587
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 20 ++++++++++----------
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.h |  3 ++-
 drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 12 ++++++------
 drivers/gpu/nvgpu/vgpu/fifo_vgpu.c   |  6 +++---
 4 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 709631d4..48982a04 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -522,7 +522,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
                 nvgpu_kfree(g, runlist->active_tsgs);
                 runlist->active_tsgs = NULL;
 
-                nvgpu_mutex_destroy(&runlist->mutex);
+                nvgpu_mutex_destroy(&runlist->runlist_lock);
 
         }
         memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
@@ -716,7 +716,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                         goto clean_up_runlist;
                 }
         }
-        nvgpu_mutex_init(&runlist->mutex);
+        nvgpu_mutex_init(&runlist->runlist_lock);
 
         /* None of buffers is pinned if this value doesn't change.
            Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -2771,7 +2771,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 
         /* we have no idea which runlist we are using. lock all */
         for (i = 0; i < g->fifo.max_runlists; i++)
-                nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
+                nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 
         mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2781,7 +2781,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
         for (i = 0; i < g->fifo.max_runlists; i++)
-                nvgpu_mutex_release(&f->runlist_info[i].mutex);
+                nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 
         return ret;
 }
@@ -2798,7 +2798,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
         /* we have no idea which runlist we are using. lock all */
         for (i = 0; i < g->fifo.max_runlists; i++)
-                nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
+                nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 
         mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2808,7 +2808,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
         for (i = 0; i < g->fifo.max_runlists; i++)
-                nvgpu_mutex_release(&f->runlist_info[i].mutex);
+                nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 
         return ret;
 }
@@ -3385,7 +3385,7 @@ int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id)
         int ret = 0;
 
         runlist = &g->fifo.runlist_info[runlist_id];
-        if (nvgpu_mutex_tryacquire(&runlist->mutex)) {
+        if (nvgpu_mutex_tryacquire(&runlist->runlist_lock)) {
                 mutex_ret = nvgpu_pmu_mutex_acquire(
                         &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -3396,7 +3396,7 @@ int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id)
                 if (!mutex_ret)
                         nvgpu_pmu_mutex_release(
                                 &g->pmu, PMU_MUTEX_ID_FIFO, &token);
-                nvgpu_mutex_release(&runlist->mutex);
+                nvgpu_mutex_release(&runlist->runlist_lock);
         } else {
                 /* someone else is writing fifo_runlist_r so not needed here */
                 ret = -EBUSY;
         }
@@ -3421,7 +3421,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 
         runlist = &f->runlist_info[runlist_id];
 
-        nvgpu_mutex_acquire(&runlist->mutex);
+        nvgpu_mutex_acquire(&runlist->runlist_lock);
 
         mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -3431,7 +3431,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
         if (!mutex_ret)
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-        nvgpu_mutex_release(&runlist->mutex);
+        nvgpu_mutex_release(&runlist->runlist_lock);
 
         return ret;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index ea9f4528..c6d34945 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -91,7 +91,8 @@ struct fifo_runlist_info_gk20a {
         u32  reset_eng_bitmask; /* engines to be reset during recovery */
         bool stopped;
         bool support_tsg;
-        struct nvgpu_mutex mutex; /* protect channel preempt and runlist update */
+        /* protect ch/tsg/runlist preempt & runlist update */
+        struct nvgpu_mutex runlist_lock;
 };
 
 enum {
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 9be29f45..d8976608 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -817,7 +817,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         runlist_id = f->tsg[tsgid].runlist_id;
         gk20a_dbg_fn("runlist_id %d", runlist_id);
 
-        nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
+        nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
         mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -826,7 +826,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         if (!mutex_ret)
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-        nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
+        nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
         return ret;
 }
@@ -844,7 +844,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
         for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
                 if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
                         nvgpu_mutex_acquire(&g->fifo.
-                                runlist_info[runlist_id].mutex);
+                                runlist_info[runlist_id].runlist_lock);
         }
 
         mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -861,7 +861,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
                         g->fifo.runlist_info[runlist_id].reset_eng_bitmask =
                                 g->fifo.runlist_info[runlist_id].eng_bitmask;
                         nvgpu_mutex_release(&g->fifo.
-                                runlist_info[runlist_id].mutex);
+                                runlist_info[runlist_id].runlist_lock);
                 }
         }
 
@@ -916,7 +916,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 
         gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);
 
-        nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
+        nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
         mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -925,7 +925,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
         if (!mutex_ret)
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-        nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
+        nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
         return ret;
 
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index d077f329..3ea326b8 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -249,7 +249,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                         goto clean_up_runlist;
                 }
         }
-        nvgpu_mutex_init(&runlist->mutex);
+        nvgpu_mutex_init(&runlist->runlist_lock);
 
         /* None of buffers is pinned if this value doesn't change.
            Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -585,12 +585,12 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 
         runlist = &f->runlist_info[runlist_id];
 
-        nvgpu_mutex_acquire(&runlist->mutex);
+        nvgpu_mutex_acquire(&runlist->runlist_lock);
 
         ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
                                         wait_for_finish);
 
-        nvgpu_mutex_release(&runlist->mutex);
+        nvgpu_mutex_release(&runlist->runlist_lock);
 
         return ret;
 }
-- 
cgit v1.2.2
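
Note (not part of the patch): the hunks above all repeat the same locking order around runlist changes, which the renamed runlist_lock field makes easier to spot. The sketch below is a minimal illustration of that pattern as it appears in gk20a_fifo_update_runlist(); it assumes the usual nvgpu driver headers, and __do_runlist_update() is a hypothetical placeholder for the locked update step, not an existing function.

/*
 * Minimal sketch of the locking order used in the diff above:
 * take the per-runlist runlist_lock first, then try the PMU FIFO
 * mutex, do the update, and release both in reverse order.
 */
static int runlist_update_sketch(struct gk20a *g, u32 runlist_id)
{
        struct fifo_runlist_info_gk20a *runlist =
                &g->fifo.runlist_info[runlist_id];
        u32 token = PMU_INVALID_MUTEX_OWNER_ID;
        u32 mutex_ret;
        int ret;

        nvgpu_mutex_acquire(&runlist->runlist_lock);

        /* the PMU mutex is best effort; failing to get it is not fatal */
        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

        ret = __do_runlist_update(g, runlist_id);   /* hypothetical helper */

        if (!mutex_ret)
                nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

        nvgpu_mutex_release(&runlist->runlist_lock);

        return ret;
}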