summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
diff options
context:
space:
mode:
author	Deepak Goyal <dgoyal@nvidia.com>	2018-02-28 06:09:57 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-03-06 00:18:20 -0500
commit	26b91946031a88293c7ce563ff923802af6509ce (patch)
tree	9b7ad56e7feb381b903f0c07a988114cdec0302f /drivers/gpu/nvgpu/common/pmu/pmu_pg.c
parent	848af2ce6de6140323a6ffe3075bf8021e119434 (diff)
gpu: nvgpu: gv11b: Correct PMU PG enabled masks.
PMU ucode records supported feature list for a particular chip as support mask
sent via PMU_PG_PARAM_CMD_GR_INIT_PARAM. It then enables selective feature
list through enable mask sent via PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE cmd.

Right now only ELPG state machine mask was enabled. Only ELPG state machine
was getting executed but other crucial steps in ELPG entry/exit sequence were
getting skipped.

Bug 200392620.
Bug 200296076.

Change-Id: I5e1800980990c146c731537290cb7d4c07e937c3
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1665767
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Tested-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_pg.c')
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_pg.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index bf39ce19..2d0fc499 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -89,7 +89,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
-				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
+				NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
 			pmu->initialized = true;
 			nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
 				true);
@@ -116,7 +116,7 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
-				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
+				NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
 			if (g->ops.pmu.pmu_lpwr_enable_pg)
 				status = g->ops.pmu.pmu_lpwr_enable_pg(g,
 					true);
@@ -126,7 +126,7 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
-				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
+				NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
 			if (g->ops.pmu.pmu_lpwr_disable_pg)
 				status = g->ops.pmu.pmu_lpwr_disable_pg(g,
 					true);