summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
diff options
context:
space:
mode:
authorMahantesh Kumbar <mkumbar@nvidia.com>2016-11-03 11:40:24 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2016-12-26 01:20:09 -0500
commit71fbfdb2b84a4f778f19e44421a66e28e5aadf8d (patch)
tree5bbc2e22682e73b64fa6492bdab27301f254d362 /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent66ed536fb5e57ad73ffbaf24f9c02f0655e7d6cc (diff)
gpu: nvgpu: MSCG support
- Added enable_mscg, mscg_enabled & mscg_stat flags; the mscg_enabled flag can be used to control mscg enable/disable at runtime along with the mscg_stat flag. - Added defines & interface to support ms/mclk-change/post-init-param - Added defines for lpwr tables read from vbios. - HAL to support post init param which is required to set up the clockgating interface in PMU & interfaces used during the mscg state machine. - gk20a_pmu_pg_global_enable() can be called when pg support is required to enable/disable; this also checks & waits if a pstate switch is in progress till it completes - pg_mutex to protect PG-RPPG/MSCG enable/disable JIRA DNVGPU-71 Change-Id: If312cefc888a4de0a5c96898baeaac1a76e53e46 Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com> Reviewed-on: http://git-master/r/1247554 (cherry picked from commit e6c94948b8058ba642ea56677ad798fc56b8a28a) Reviewed-on: http://git-master/r/1270971 GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c106
1 file changed, 75 insertions, 31 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index c70f9876..2b847008 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -1364,6 +1364,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
1364 struct pmu_v *pv = &g->ops.pmu_ver; 1364 struct pmu_v *pv = &g->ops.pmu_ver;
1365 1365
1366 mutex_init(&pmu->elpg_mutex); 1366 mutex_init(&pmu->elpg_mutex);
1367 mutex_init(&pmu->pg_mutex);
1367 mutex_init(&pmu->isr_mutex); 1368 mutex_init(&pmu->isr_mutex);
1368 mutex_init(&pmu->pmu_copy_lock); 1369 mutex_init(&pmu->pmu_copy_lock);
1369 mutex_init(&pmu->pmu_seq_lock); 1370 mutex_init(&pmu->pmu_seq_lock);
@@ -3298,6 +3299,9 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
3298 gops->pmu.pmu_pg_init_param = NULL; 3299 gops->pmu.pmu_pg_init_param = NULL;
3299 gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list; 3300 gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
3300 gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list; 3301 gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
3302 gops->pmu.pmu_lpwr_enable_pg = NULL;
3303 gops->pmu.pmu_lpwr_disable_pg = NULL;
3304 gops->pmu.pmu_pg_param_post_init = NULL;
3301 gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL; 3305 gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
3302 gops->pmu.dump_secure_fuses = NULL; 3306 gops->pmu.dump_secure_fuses = NULL;
3303 gops->pmu.is_lazy_bootstrap = NULL; 3307 gops->pmu.is_lazy_bootstrap = NULL;
@@ -3378,6 +3382,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
3378 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) { 3382 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
3379 pmu->initialized = true; 3383 pmu->initialized = true;
3380 pmu->pmu_state = PMU_STATE_STARTED; 3384 pmu->pmu_state = PMU_STATE_STARTED;
3385 pmu->mscg_stat = PMU_MSCG_DISABLED;
3381 } else 3386 } else
3382 schedule_work(&pmu->pg_init); 3387 schedule_work(&pmu->pg_init);
3383 } 3388 }
@@ -3506,6 +3511,9 @@ static int pmu_init_powergating(struct gk20a *g)
3506 } 3511 }
3507 } 3512 }
3508 3513
3514 if (g->ops.pmu.pmu_pg_param_post_init)
3515 g->ops.pmu.pmu_pg_param_post_init(g);
3516
3509 return 0; 3517 return 0;
3510} 3518}
3511 3519
@@ -4693,44 +4701,62 @@ clean_up:
4693 return err; 4701 return err;
4694} 4702}
4695 4703
4696static int gk20a_pmu_enable_elpg_locked(struct gk20a *g) 4704int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
4705{
4706 u32 status = 0;
4707
4708 if (enable_pg == true) {
4709 if (g->ops.pmu.pmu_pg_engines_feature_list &&
4710 g->ops.pmu.pmu_pg_engines_feature_list(g,
4711 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
4712 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
4713 if (g->ops.pmu.pmu_lpwr_enable_pg)
4714 status = g->ops.pmu.pmu_lpwr_enable_pg(g,
4715 true);
4716 } else if (support_gk20a_pmu(g->dev))
4717 status = gk20a_pmu_enable_elpg(g);
4718 } else if (enable_pg == false) {
4719 if (g->ops.pmu.pmu_pg_engines_feature_list &&
4720 g->ops.pmu.pmu_pg_engines_feature_list(g,
4721 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
4722 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
4723 if (g->ops.pmu.pmu_lpwr_disable_pg)
4724 status = g->ops.pmu.pmu_lpwr_disable_pg(g,
4725 true);
4726 } else if (support_gk20a_pmu(g->dev))
4727 status = gk20a_pmu_disable_elpg(g);
4728 }
4729
4730 return status;
4731}
4732
4733static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
4697{ 4734{
4698 struct pmu_gk20a *pmu = &g->pmu; 4735 struct pmu_gk20a *pmu = &g->pmu;
4699 struct pmu_cmd cmd; 4736 struct pmu_cmd cmd;
4700 u32 seq, status; 4737 u32 seq, status;
4701 u32 pg_engine_id;
4702 u32 pg_engine_id_list = 0;
4703 4738
4704 gk20a_dbg_fn(""); 4739 gk20a_dbg_fn("");
4705 4740
4706 if (g->ops.pmu.pmu_pg_supported_engines_list) 4741 memset(&cmd, 0, sizeof(struct pmu_cmd));
4707 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g); 4742 cmd.hdr.unit_id = PMU_UNIT_PG;
4708 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS; 4743 cmd.hdr.size = PMU_CMD_HDR_SIZE +
4709 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE; 4744 sizeof(struct pmu_pg_cmd_elpg_cmd);
4710 pg_engine_id++) { 4745 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
4711 4746 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
4712 if (BIT(pg_engine_id) & pg_engine_id_list) { 4747 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
4713 memset(&cmd, 0, sizeof(struct pmu_cmd));
4714 cmd.hdr.unit_id = PMU_UNIT_PG;
4715 cmd.hdr.size = PMU_CMD_HDR_SIZE +
4716 sizeof(struct pmu_pg_cmd_elpg_cmd);
4717 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
4718 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
4719 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
4720 4748
4721 /* no need to wait ack for ELPG enable but set 4749 /* no need to wait ack for ELPG enable but set
4722 * pending to sync with follow up ELPG disable 4750 * pending to sync with follow up ELPG disable
4723 */ 4751 */
4724 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) 4752 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
4725 pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING; 4753 pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
4726 4754
4727 gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_ALLOW"); 4755 gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_ALLOW");
4728 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, 4756 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
4729 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg, 4757 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
4730 pmu, &seq, ~0); 4758 pmu, &seq, ~0);
4731 WARN_ON(status != 0); 4759 WARN_ON(status != 0);
4732 }
4733 }
4734 4760
4735 gk20a_dbg_fn("done"); 4761 gk20a_dbg_fn("done");
4736 return 0; 4762 return 0;
@@ -4740,12 +4766,13 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
4740{ 4766{
4741 struct pmu_gk20a *pmu = &g->pmu; 4767 struct pmu_gk20a *pmu = &g->pmu;
4742 struct gr_gk20a *gr = &g->gr; 4768 struct gr_gk20a *gr = &g->gr;
4769 u32 pg_engine_id;
4770 u32 pg_engine_id_list = 0;
4743 4771
4744 int ret = 0; 4772 int ret = 0;
4745 4773
4746 gk20a_dbg_fn(""); 4774 gk20a_dbg_fn("");
4747 4775
4748
4749 if (!support_gk20a_pmu(g->dev)) 4776 if (!support_gk20a_pmu(g->dev))
4750 return ret; 4777 return ret;
4751 4778
@@ -4772,7 +4799,20 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
4772 if (pmu->elpg_stat != PMU_ELPG_STAT_OFF) 4799 if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
4773 goto exit_unlock; 4800 goto exit_unlock;
4774 4801
4775 ret = gk20a_pmu_enable_elpg_locked(g); 4802 if (g->ops.pmu.pmu_pg_supported_engines_list)
4803 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
4804
4805 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
4806 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
4807 pg_engine_id++) {
4808
4809 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
4810 pmu->mscg_stat == PMU_MSCG_DISABLED)
4811 continue;
4812
4813 if (BIT(pg_engine_id) & pg_engine_id_list)
4814 ret = gk20a_pmu_enable_elpg_locked(g, pg_engine_id);
4815 }
4776 4816
4777exit_unlock: 4817exit_unlock:
4778 mutex_unlock(&pmu->elpg_mutex); 4818 mutex_unlock(&pmu->elpg_mutex);
@@ -4845,6 +4885,10 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
4845 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE; 4885 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
4846 pg_engine_id++) { 4886 pg_engine_id++) {
4847 4887
4888 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
4889 pmu->mscg_stat == PMU_MSCG_DISABLED)
4890 continue;
4891
4848 if (BIT(pg_engine_id) & pg_engine_id_list) { 4892 if (BIT(pg_engine_id) & pg_engine_id_list) {
4849 memset(&cmd, 0, sizeof(struct pmu_cmd)); 4893 memset(&cmd, 0, sizeof(struct pmu_cmd));
4850 cmd.hdr.unit_id = PMU_UNIT_PG; 4894 cmd.hdr.unit_id = PMU_UNIT_PG;