author	Seema Khowala <seemaj@nvidia.com>	2018-01-02 17:07:06 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-01-11 20:37:55 -0500
commit	ffa5231d2cf38d133e0ca323494be3e579cc820d (patch)
tree	e440d71f9c27813e391c75018d14418d14613195 /drivers/gpu
parent	e7102cf90bb78795a25864d4bc5299189fbfcc53 (diff)
gpu: nvgpu: runlist info mutex not needed for runlist_state
runlist_info mutex for the runlist being enabled or disabled in
fifo_sched_disable_r is not needed to be acquired

Bug 2043838

Change-Id: Ia9839ab7effbe7daf353c3a54f25a2b4914af5e8
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1630345
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/fifo_gk20a.c   49
-rw-r--r--   drivers/gpu/nvgpu/gk20a/fifo_gk20a.h    4
-rw-r--r--   drivers/gpu/nvgpu/gv11b/fifo_gv11b.c    6
3 files changed, 11 insertions(+), 48 deletions(-)
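In short, the patch drops the is_runlist_info_mutex_locked flag (and the per-runlist nvgpu_mutex acquire/release loops it guarded) from gk20a_fifo_set_runlist_state(); callers now pass only the runlist mask and the desired state, as the hunks below show. The following is a minimal, standalone C sketch of the new call pattern; the stub struct, stub mask helper, and printf body are purely illustrative stand-ins for the real driver code and are not part of this change.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define RUNLIST_DISABLED	0
#define RUNLIST_ENABLED		1

struct gk20a { int unused; };	/* illustrative stand-in for the device struct */

/* Illustrative stand-in for the hardware mask helper used in fifo_gk20a.c. */
static u32 fifo_sched_disable_runlist_m(u32 runlist_id)
{
	return 1u << runlist_id;
}

/* New, simplified signature: no is_runlist_info_mutex_locked argument. */
static void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
		u32 runlist_state)
{
	(void)g;
	printf("runlist mask = 0x%08x state = 0x%08x\n",
			runlists_mask, runlist_state);
}

int main(void)
{
	struct gk20a g = { 0 };
	u32 runlist_id = 0;	/* hypothetical runlist, for illustration only */

	/* Mirrors the updated callers, e.g. gk20a_fifo_enable_tsg_sched(). */
	gk20a_fifo_set_runlist_state(&g,
			fifo_sched_disable_runlist_m(runlist_id),
			RUNLIST_ENABLED);
	gk20a_fifo_set_runlist_state(&g,
			fifo_sched_disable_runlist_m(runlist_id),
			RUNLIST_DISABLED);
	return 0;
}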
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index b14b2a27..884e4a02 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2787,25 +2787,13 @@ static void gk20a_fifo_sched_disable_rw(struct gk20a *g, u32 runlists_mask,
 }
 
 void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
-		u32 runlist_state,
-		int is_runlist_info_mutex_locked)
+		u32 runlist_state)
 {
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	u32 mutex_ret;
-	u32 runlist_id;
-
-	gk20a_dbg_fn("");
 
-	if (!is_runlist_info_mutex_locked) {
-		gk20a_dbg_info("acquire runlist_info mutex");
-		for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
-			runlist_id++) {
-			if (runlists_mask &
-				fifo_sched_disable_runlist_m(runlist_id))
-				nvgpu_mutex_acquire(&g->fifo.
-					runlist_info[runlist_id].mutex);
-		}
-	}
+	nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
+			runlists_mask, runlist_state);
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2813,48 +2801,28 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-
-	if (!is_runlist_info_mutex_locked) {
-		gk20a_dbg_info("release runlist_info mutex");
-		for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
-			runlist_id++) {
-			if (runlists_mask &
-				fifo_sched_disable_runlist_m(runlist_id))
-
-				nvgpu_mutex_release(&g->fifo.
-					runlist_info[runlist_id].mutex);
-		}
-	}
-
-	gk20a_dbg_fn("done");
 }
 
 void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
-			tsg->runlist_id), RUNLIST_ENABLED,
-			!RUNLIST_INFO_MUTEX_LOCKED);
+			tsg->runlist_id), RUNLIST_ENABLED);
 
 }
 
 void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
-			tsg->runlist_id), RUNLIST_DISABLED,
-			!RUNLIST_INFO_MUTEX_LOCKED);
+			tsg->runlist_id), RUNLIST_DISABLED);
 }
 
 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
 			struct fifo_engine_info_gk20a *eng_info)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log(g, gpu_dbg_info, "start");
 
 	gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
-			eng_info->runlist_id), RUNLIST_ENABLED,
-			!RUNLIST_INFO_MUTEX_LOCKED);
-
-	gk20a_dbg_fn("done");
-
 	return 0;
 }
 
@@ -2899,8 +2867,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
-			eng_info->runlist_id), RUNLIST_DISABLED,
-			!RUNLIST_INFO_MUTEX_LOCKED);
+			eng_info->runlist_id), RUNLIST_DISABLED);
 
 	/* chid from pbdma status */
 	pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id));
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 7fdd3b6b..af0630d2 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -83,8 +83,6 @@ enum {
 #define RUNLIST_DISABLED	0
 #define RUNLIST_ENABLED		1
 
-#define RUNLIST_INFO_MUTEX_LOCKED	1
-
 /* generally corresponds to the "pbdma" engine */
 
 struct fifo_runlist_info_gk20a {
@@ -336,7 +334,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f);
 void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist);
 void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist);
 void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
-		u32 runlist_state, int runlist_mutex_state);
+		u32 runlist_state);
 
 u32 gk20a_fifo_userd_gp_get(struct gk20a *g, struct channel_gk20a *c);
 void gk20a_fifo_userd_gp_put(struct gk20a *g, struct channel_gk20a *c);
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 2fb1ee70..f0440a4c 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -948,8 +948,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	runlists_mask = gv11b_fifo_get_runlists_mask(g, act_eng_bitmask, id,
 			id_type, rc_type, mmfault);
 
-	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED,
-			!RUNLIST_INFO_MUTEX_LOCKED);
+	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED);
 
 	g->fifo.deferred_reset_pending = false;
 
@@ -1080,8 +1079,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 		}
 	}
 
-	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED,
-			!RUNLIST_INFO_MUTEX_LOCKED);
+	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED);
 
 	/* It is safe to enable ELPG again. */
 	if (g->support_pmu && g->elpg_enabled)