summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSeema Khowala <seemaj@nvidia.com>2018-01-08 18:41:39 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2018-05-04 00:42:57 -0400
commitbf0379997799e7f83514e974cd02aaaab85a4101 (patch)
tree218537b6b64305565a34a8956bf7940f9e64d6ac
parentea926889657b255e753baf10a96c7c12facd831c (diff)
gpu: nvgpu: rename mutex to runlist_lock
Rename mutex to runlist_lock in fifo_runlist_info_gk20a struct. This is good to have for code readability.

Bug 2065990
Bug 2043838

Change-Id: I716685e3fad538458181d2a9fe592410401862b9
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1662587
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c20
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.h3
-rw-r--r--drivers/gpu/nvgpu/gv11b/fifo_gv11b.c12
-rw-r--r--drivers/gpu/nvgpu/vgpu/fifo_vgpu.c6
4 files changed, 21 insertions, 20 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 709631d4..48982a04 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -522,7 +522,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
522 nvgpu_kfree(g, runlist->active_tsgs); 522 nvgpu_kfree(g, runlist->active_tsgs);
523 runlist->active_tsgs = NULL; 523 runlist->active_tsgs = NULL;
524 524
525 nvgpu_mutex_destroy(&runlist->mutex); 525 nvgpu_mutex_destroy(&runlist->runlist_lock);
526 526
527 } 527 }
528 memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) * 528 memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
@@ -716,7 +716,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
716 goto clean_up_runlist; 716 goto clean_up_runlist;
717 } 717 }
718 } 718 }
719 nvgpu_mutex_init(&runlist->mutex); 719 nvgpu_mutex_init(&runlist->runlist_lock);
720 720
721 /* None of buffers is pinned if this value doesn't change. 721 /* None of buffers is pinned if this value doesn't change.
722 Otherwise, one of them (cur_buffer) must have been pinned. */ 722 Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -2771,7 +2771,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
2771 2771
2772 /* we have no idea which runlist we are using. lock all */ 2772 /* we have no idea which runlist we are using. lock all */
2773 for (i = 0; i < g->fifo.max_runlists; i++) 2773 for (i = 0; i < g->fifo.max_runlists; i++)
2774 nvgpu_mutex_acquire(&f->runlist_info[i].mutex); 2774 nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
2775 2775
2776 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2776 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2777 2777
@@ -2781,7 +2781,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
2781 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2781 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2782 2782
2783 for (i = 0; i < g->fifo.max_runlists; i++) 2783 for (i = 0; i < g->fifo.max_runlists; i++)
2784 nvgpu_mutex_release(&f->runlist_info[i].mutex); 2784 nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
2785 2785
2786 return ret; 2786 return ret;
2787} 2787}
@@ -2798,7 +2798,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
2798 2798
2799 /* we have no idea which runlist we are using. lock all */ 2799 /* we have no idea which runlist we are using. lock all */
2800 for (i = 0; i < g->fifo.max_runlists; i++) 2800 for (i = 0; i < g->fifo.max_runlists; i++)
2801 nvgpu_mutex_acquire(&f->runlist_info[i].mutex); 2801 nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
2802 2802
2803 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2803 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2804 2804
@@ -2808,7 +2808,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
2808 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2808 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2809 2809
2810 for (i = 0; i < g->fifo.max_runlists; i++) 2810 for (i = 0; i < g->fifo.max_runlists; i++)
2811 nvgpu_mutex_release(&f->runlist_info[i].mutex); 2811 nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
2812 2812
2813 return ret; 2813 return ret;
2814} 2814}
@@ -3385,7 +3385,7 @@ int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id)
3385 int ret = 0; 3385 int ret = 0;
3386 3386
3387 runlist = &g->fifo.runlist_info[runlist_id]; 3387 runlist = &g->fifo.runlist_info[runlist_id];
3388 if (nvgpu_mutex_tryacquire(&runlist->mutex)) { 3388 if (nvgpu_mutex_tryacquire(&runlist->runlist_lock)) {
3389 mutex_ret = nvgpu_pmu_mutex_acquire( 3389 mutex_ret = nvgpu_pmu_mutex_acquire(
3390 &g->pmu, PMU_MUTEX_ID_FIFO, &token); 3390 &g->pmu, PMU_MUTEX_ID_FIFO, &token);
3391 3391
@@ -3396,7 +3396,7 @@ int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id)
3396 if (!mutex_ret) 3396 if (!mutex_ret)
3397 nvgpu_pmu_mutex_release( 3397 nvgpu_pmu_mutex_release(
3398 &g->pmu, PMU_MUTEX_ID_FIFO, &token); 3398 &g->pmu, PMU_MUTEX_ID_FIFO, &token);
3399 nvgpu_mutex_release(&runlist->mutex); 3399 nvgpu_mutex_release(&runlist->runlist_lock);
3400 } else { 3400 } else {
3401 /* someone else is writing fifo_runlist_r so not needed here */ 3401 /* someone else is writing fifo_runlist_r so not needed here */
3402 ret = -EBUSY; 3402 ret = -EBUSY;
@@ -3421,7 +3421,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
3421 3421
3422 runlist = &f->runlist_info[runlist_id]; 3422 runlist = &f->runlist_info[runlist_id];
3423 3423
3424 nvgpu_mutex_acquire(&runlist->mutex); 3424 nvgpu_mutex_acquire(&runlist->runlist_lock);
3425 3425
3426 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3426 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
3427 3427
@@ -3431,7 +3431,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
3431 if (!mutex_ret) 3431 if (!mutex_ret)
3432 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3432 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
3433 3433
3434 nvgpu_mutex_release(&runlist->mutex); 3434 nvgpu_mutex_release(&runlist->runlist_lock);
3435 return ret; 3435 return ret;
3436} 3436}
3437 3437
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index ea9f4528..c6d34945 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -91,7 +91,8 @@ struct fifo_runlist_info_gk20a {
91 u32 reset_eng_bitmask; /* engines to be reset during recovery */ 91 u32 reset_eng_bitmask; /* engines to be reset during recovery */
92 bool stopped; 92 bool stopped;
93 bool support_tsg; 93 bool support_tsg;
94 struct nvgpu_mutex mutex; /* protect channel preempt and runlist update */ 94 /* protect ch/tsg/runlist preempt & runlist update */
95 struct nvgpu_mutex runlist_lock;
95}; 96};
96 97
97enum { 98enum {
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 9be29f45..d8976608 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -817,7 +817,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
817 runlist_id = f->tsg[tsgid].runlist_id; 817 runlist_id = f->tsg[tsgid].runlist_id;
818 gk20a_dbg_fn("runlist_id %d", runlist_id); 818 gk20a_dbg_fn("runlist_id %d", runlist_id);
819 819
820 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex); 820 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
821 821
822 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 822 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
823 823
@@ -826,7 +826,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
826 if (!mutex_ret) 826 if (!mutex_ret)
827 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 827 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
828 828
829 nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex); 829 nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
830 830
831 return ret; 831 return ret;
832} 832}
@@ -844,7 +844,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
844 for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) { 844 for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
845 if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id)) 845 if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
846 nvgpu_mutex_acquire(&g->fifo. 846 nvgpu_mutex_acquire(&g->fifo.
847 runlist_info[runlist_id].mutex); 847 runlist_info[runlist_id].runlist_lock);
848 } 848 }
849 849
850 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 850 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -861,7 +861,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
861 g->fifo.runlist_info[runlist_id].reset_eng_bitmask = 861 g->fifo.runlist_info[runlist_id].reset_eng_bitmask =
862 g->fifo.runlist_info[runlist_id].eng_bitmask; 862 g->fifo.runlist_info[runlist_id].eng_bitmask;
863 nvgpu_mutex_release(&g->fifo. 863 nvgpu_mutex_release(&g->fifo.
864 runlist_info[runlist_id].mutex); 864 runlist_info[runlist_id].runlist_lock);
865 } 865 }
866 } 866 }
867 867
@@ -916,7 +916,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
916 916
917 gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id); 917 gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);
918 918
919 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex); 919 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
920 920
921 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 921 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
922 922
@@ -925,7 +925,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
925 if (!mutex_ret) 925 if (!mutex_ret)
926 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 926 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
927 927
928 nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex); 928 nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
929 929
930 return ret; 930 return ret;
931 931
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index d077f329..3ea326b8 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -249,7 +249,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
249 goto clean_up_runlist; 249 goto clean_up_runlist;
250 } 250 }
251 } 251 }
252 nvgpu_mutex_init(&runlist->mutex); 252 nvgpu_mutex_init(&runlist->runlist_lock);
253 253
254 /* None of buffers is pinned if this value doesn't change. 254 /* None of buffers is pinned if this value doesn't change.
255 Otherwise, one of them (cur_buffer) must have been pinned. */ 255 Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -585,12 +585,12 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
585 585
586 runlist = &f->runlist_info[runlist_id]; 586 runlist = &f->runlist_info[runlist_id];
587 587
588 nvgpu_mutex_acquire(&runlist->mutex); 588 nvgpu_mutex_acquire(&runlist->runlist_lock);
589 589
590 ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add, 590 ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
591 wait_for_finish); 591 wait_for_finish);
592 592
593 nvgpu_mutex_release(&runlist->mutex); 593 nvgpu_mutex_release(&runlist->runlist_lock);
594 return ret; 594 return ret;
595} 595}
596 596