-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c     7
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h              4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/platform_gk20a.h     3
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_api.h           63
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c        106
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.h          7
-rw-r--r--  drivers/gpu/nvgpu/gm206/bios_gm206.h         3
-rw-r--r--  drivers/gpu/nvgpu/gm206/pmu_gm206.c          3
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.c          3
-rw-r--r--  drivers/gpu/nvgpu/nvgpu_common.c             2
10 files changed, 166 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index cd3ab0c2..f86a7377 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -1169,8 +1169,8 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 		return -EPERM;
 
 	/*do elpg disable before clock gating */
-	if (support_gk20a_pmu(g->dev))
-		gk20a_pmu_disable_elpg(g);
+	gk20a_pmu_pg_global_enable(g, false);
+
 	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
 		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
 				false);
@@ -1216,8 +1216,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
 				g->slcg_enabled);
 
-	if (support_gk20a_pmu(g->dev))
-		gk20a_pmu_enable_elpg(g);
+	gk20a_pmu_pg_global_enable(g, true);
 
 	gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
 	gk20a_idle(dbg_s->dev);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index a1a8bf36..782469df 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -610,6 +610,9 @@ struct gpu_ops {
 		u32 (*pmu_pg_supported_engines_list)(struct gk20a *g);
 		u32 (*pmu_pg_engines_feature_list)(struct gk20a *g,
 			u32 pg_engine_id);
+		int (*pmu_lpwr_enable_pg)(struct gk20a *g, bool pstate_lock);
+		int (*pmu_lpwr_disable_pg)(struct gk20a *g, bool pstate_lock);
+		u32 (*pmu_pg_param_post_init)(struct gk20a *g);
 		int (*send_lrf_tex_ltc_dram_overide_en_dis_cmd)
 			(struct gk20a *g, u32 mask);
 		void (*dump_secure_fuses)(struct gk20a *g);
@@ -847,6 +850,7 @@ struct gk20a {
 	bool elcg_enabled;
 	bool elpg_enabled;
 	bool aelpg_enabled;
+	bool mscg_enabled;
 	bool forced_idle;
 	bool forced_reset;
 	bool allow_all;
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
index 3d5cd1b2..2b17d32a 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
@@ -93,6 +93,9 @@ struct gk20a_platform {
 	/* Adaptative ELPG: true = enable flase = disable */
 	bool enable_aelpg;
 
+	/* Memory System Clock Gating: true = enable flase = disable*/
+	bool enable_mscg;
+
 	/* Timeout for per-channel watchdog (in mS) */
 	u32 ch_wdt_timeout_ms;
 
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_api.h b/drivers/gpu/nvgpu/gk20a/pmu_api.h
index 2fdd1333..def7bbea 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_api.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_api.h
@@ -526,17 +526,77 @@ enum {
 };
 
 #define PMU_PG_PARAM_CMD_GR_INIT_PARAM 0x0
+#define PMU_PG_PARAM_CMD_MS_INIT_PARAM 0x01
+#define PMU_PG_PARAM_CMD_MCLK_CHANGE 0x04
+#define PMU_PG_PARAM_CMD_POST_INIT 0x06
 
 #define PMU_PG_FEATURE_GR_SDIV_SLOWDOWN_ENABLED (1 << 0)
 #define PMU_PG_FEATURE_GR_POWER_GATING_ENABLED (1 << 2)
 #define PMU_PG_FEATURE_GR_RPPG_ENABLED (1 << 3)
 
+#define NVGPU_PMU_GR_FEATURE_MASK_RPPG (1 << 3)
+#define NVGPU_PMU_GR_FEATURE_MASK_ALL \
+	( \
+	NVGPU_PMU_GR_FEATURE_MASK_RPPG \
+	)
+
+#define NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING (1 << 0)
+#define NVGPU_PMU_MS_FEATURE_MASK_SW_ASR (1 << 1)
+#define NVGPU_PMU_MS_FEATURE_MASK_RPPG (1 << 8)
+#define NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING (1 << 5)
+
+#define NVGPU_PMU_MS_FEATURE_MASK_ALL \
+	( \
+	NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING |\
+	NVGPU_PMU_MS_FEATURE_MASK_SW_ASR |\
+	NVGPU_PMU_MS_FEATURE_MASK_RPPG |\
+	NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING \
+	)
+
+#define PG_REQUEST_TYPE_GLOBAL 0x0
+#define PG_REQUEST_TYPE_PSTATE 0x1
+
 struct pmu_pg_cmd_gr_init_param {
 	u8 cmd_type;
 	u16 sub_cmd_id;
 	u8 featuremask;
 };
 
+struct pmu_pg_cmd_ms_init_param {
+	u8 cmd_type;
+	u16 cmd_id;
+	u8 psi;
+	u8 idle_flipped_test_enabled;
+	u16 psiSettleTimeUs;
+	u8 rsvd[2];
+	u32 support_mask;
+	u32 abort_timeout_us;
+};
+
+struct pmu_pg_cmd_mclk_change {
+	u8 cmd_type;
+	u16 cmd_id;
+	u8 rsvd;
+	u32 data;
+};
+
+#define PG_VOLT_RAIL_IDX_MAX 2
+
+struct pmu_pg_volt_rail {
+	u8 volt_rail_idx;
+	u8 sleep_volt_dev_idx;
+	u8 sleep_vfe_idx;
+	u32 sleep_voltage_uv;
+	u32 therm_vid0_cache;
+	u32 therm_vid1_cache;
+};
+
+struct pmu_pg_cmd_post_init_param {
+	u8 cmd_type;
+	u16 cmd_id;
+	struct pmu_pg_volt_rail pg_volt_rail[PG_VOLT_RAIL_IDX_MAX];
+};
+
 struct pmu_pg_cmd_stat {
 	u8 cmd_type;
 	u8 engine_id;
@@ -553,6 +613,9 @@ struct pmu_pg_cmd {
 	struct pmu_pg_cmd_eng_buf_load_v2 eng_buf_load_v2;
 	struct pmu_pg_cmd_stat stat;
 	struct pmu_pg_cmd_gr_init_param gr_init_param;
+	struct pmu_pg_cmd_ms_init_param ms_init_param;
+	struct pmu_pg_cmd_mclk_change mclk_change;
+	struct pmu_pg_cmd_post_init_param post_init;
 	/* TBD: other pg commands */
 	union pmu_ap_cmd ap_cmd;
 	struct nv_pmu_rppg_cmd rppg_cmd;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index c70f9876..2b847008 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -1364,6 +1364,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
 	struct pmu_v *pv = &g->ops.pmu_ver;
 
 	mutex_init(&pmu->elpg_mutex);
+	mutex_init(&pmu->pg_mutex);
 	mutex_init(&pmu->isr_mutex);
 	mutex_init(&pmu->pmu_copy_lock);
 	mutex_init(&pmu->pmu_seq_lock);
@@ -3298,6 +3299,9 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_pg_init_param = NULL;
 	gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
 	gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
+	gops->pmu.pmu_lpwr_enable_pg = NULL;
+	gops->pmu.pmu_lpwr_disable_pg = NULL;
+	gops->pmu.pmu_pg_param_post_init = NULL;
 	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
 	gops->pmu.dump_secure_fuses = NULL;
 	gops->pmu.is_lazy_bootstrap = NULL;
@@ -3378,6 +3382,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
 			pmu->initialized = true;
 			pmu->pmu_state = PMU_STATE_STARTED;
+			pmu->mscg_stat = PMU_MSCG_DISABLED;
 		} else
 			schedule_work(&pmu->pg_init);
 	}
@@ -3506,6 +3511,9 @@ static int pmu_init_powergating(struct gk20a *g)
 		}
 	}
 
+	if (g->ops.pmu.pmu_pg_param_post_init)
+		g->ops.pmu.pmu_pg_param_post_init(g);
+
 	return 0;
 }
 
@@ -4693,44 +4701,62 @@ clean_up:
 	return err;
 }
 
-static int gk20a_pmu_enable_elpg_locked(struct gk20a *g)
+int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
+{
+	u32 status = 0;
+
+	if (enable_pg == true) {
+		if (g->ops.pmu.pmu_pg_engines_feature_list &&
+			g->ops.pmu.pmu_pg_engines_feature_list(g,
+				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
+			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
+			if (g->ops.pmu.pmu_lpwr_enable_pg)
+				status = g->ops.pmu.pmu_lpwr_enable_pg(g,
+						true);
+		} else if (support_gk20a_pmu(g->dev))
+			status = gk20a_pmu_enable_elpg(g);
+	} else if (enable_pg == false) {
+		if (g->ops.pmu.pmu_pg_engines_feature_list &&
+			g->ops.pmu.pmu_pg_engines_feature_list(g,
+				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
+			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
+			if (g->ops.pmu.pmu_lpwr_disable_pg)
+				status = g->ops.pmu.pmu_lpwr_disable_pg(g,
+						true);
+		} else if (support_gk20a_pmu(g->dev))
+			status = gk20a_pmu_disable_elpg(g);
+	}
+
+	return status;
+}
+
+static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq, status;
-	u32 pg_engine_id;
-	u32 pg_engine_id_list = 0;
 
 	gk20a_dbg_fn("");
 
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
-		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
-	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
-		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
-		pg_engine_id++) {
-
-		if (BIT(pg_engine_id) & pg_engine_id_list) {
-			memset(&cmd, 0, sizeof(struct pmu_cmd));
-			cmd.hdr.unit_id = PMU_UNIT_PG;
-			cmd.hdr.size = PMU_CMD_HDR_SIZE +
-				sizeof(struct pmu_pg_cmd_elpg_cmd);
-			cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
-			cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
-			cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	cmd.hdr.unit_id = PMU_UNIT_PG;
+	cmd.hdr.size = PMU_CMD_HDR_SIZE +
+		sizeof(struct pmu_pg_cmd_elpg_cmd);
+	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
+	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
+	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
 
-			/* no need to wait ack for ELPG enable but set
-			 * pending to sync with follow up ELPG disable
-			 */
-			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
-				pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
+	/* no need to wait ack for ELPG enable but set
+	 * pending to sync with follow up ELPG disable
+	 */
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+		pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
 
-			gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_ALLOW");
-			status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
-				PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
-				pmu, &seq, ~0);
-			WARN_ON(status != 0);
-		}
-	}
+	gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_ALLOW");
+	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
+		PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
+		pmu, &seq, ~0);
+	WARN_ON(status != 0);
 
 	gk20a_dbg_fn("done");
 	return 0;
@@ -4740,12 +4766,13 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
 	struct gr_gk20a *gr = &g->gr;
+	u32 pg_engine_id;
+	u32 pg_engine_id_list = 0;
 
 	int ret = 0;
 
 	gk20a_dbg_fn("");
 
-
 	if (!support_gk20a_pmu(g->dev))
 		return ret;
 
@@ -4772,7 +4799,20 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 	if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
 		goto exit_unlock;
 
-	ret = gk20a_pmu_enable_elpg_locked(g);
+	if (g->ops.pmu.pmu_pg_supported_engines_list)
+		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+
+	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
+		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
+		pg_engine_id++) {
+
+		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
+			pmu->mscg_stat == PMU_MSCG_DISABLED)
+			continue;
+
+		if (BIT(pg_engine_id) & pg_engine_id_list)
+			ret = gk20a_pmu_enable_elpg_locked(g, pg_engine_id);
+	}
 
 exit_unlock:
 	mutex_unlock(&pmu->elpg_mutex);
@@ -4845,6 +4885,10 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
 		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
 		pg_engine_id++) {
 
+		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
+			pmu->mscg_stat == PMU_MSCG_DISABLED)
+			continue;
+
 		if (BIT(pg_engine_id) & pg_engine_id_list) {
 			memset(&cmd, 0, sizeof(struct pmu_cmd));
 			cmd.hdr.unit_id = PMU_UNIT_PG;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index 78652bcb..56300dc8 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -629,6 +629,9 @@ struct pmu_pg_stats {
 #define PMU_ELPG_STAT_OFF_ON_PENDING	4 /* elpg is off, caller has requested on, but ALLOW
 					     cmd hasn't been sent due to ENABLE_ALLOW delay */
 
+#define PMU_MSCG_DISABLED 0
+#define PMU_MSCG_ENABLED 1
+
 /* Falcon Register index */
 #define PMU_FALCON_REG_R0		(0)
 #define PMU_FALCON_REG_R1		(1)
@@ -716,10 +719,13 @@ struct pmu_gk20a {
 
 	u32 elpg_stat;
 
+	u32 mscg_stat;
+
 	int pmu_state;
 
 #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC	1 /* msec */
 	struct work_struct pg_init;
+	struct mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */
 	struct mutex elpg_mutex; /* protect elpg enable/disable */
 	int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */
 
@@ -774,6 +780,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, struct pmu_msg *msg
 
 int gk20a_pmu_enable_elpg(struct gk20a *g);
 int gk20a_pmu_disable_elpg(struct gk20a *g);
+int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);
 
 u32 gk20a_pmu_pg_engines_list(struct gk20a *g);
 u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
diff --git a/drivers/gpu/nvgpu/gm206/bios_gm206.h b/drivers/gpu/nvgpu/gm206/bios_gm206.h
index 1d813df5..6fe19fb0 100644
--- a/drivers/gpu/nvgpu/gm206/bios_gm206.h
+++ b/drivers/gpu/nvgpu/gm206/bios_gm206.h
@@ -44,6 +44,9 @@ enum {
 	VOLTAGE_RAIL_TABLE = 26,
 	VOLTAGE_DEVICE_TABLE,
 	VOLTAGE_POLICY_TABLE,
+	LOWPOWER_TABLE,
+	LOWPOWER_GR_TABLE = 32,
+	LOWPOWER_MS_TABLE = 33,
 };
 
 enum {
diff --git a/drivers/gpu/nvgpu/gm206/pmu_gm206.c b/drivers/gpu/nvgpu/gm206/pmu_gm206.c
index d109be97..1aff6ea6 100644
--- a/drivers/gpu/nvgpu/gm206/pmu_gm206.c
+++ b/drivers/gpu/nvgpu/gm206/pmu_gm206.c
@@ -159,6 +159,9 @@ void gm206_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_pg_init_param = NULL;
 	gops->pmu.pmu_pg_supported_engines_list = NULL;
 	gops->pmu.pmu_pg_engines_feature_list = NULL;
+	gops->pmu.pmu_lpwr_enable_pg = NULL;
+	gops->pmu.pmu_lpwr_disable_pg = NULL;
+	gops->pmu.pmu_pg_param_post_init = NULL;
 	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
 	gops->pmu.dump_secure_fuses = NULL;
 	gops->pmu.reset = gk20a_pmu_reset;
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 517c92a1..868e824a 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -288,6 +288,9 @@ void gm20b_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_pg_init_param = NULL;
 	gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
 	gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
+	gops->pmu.pmu_lpwr_enable_pg = NULL;
+	gops->pmu.pmu_lpwr_disable_pg = NULL;
+	gops->pmu.pmu_pg_param_post_init = NULL;
 	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
 	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b;
 	gops->pmu.reset = gk20a_pmu_reset;
diff --git a/drivers/gpu/nvgpu/nvgpu_common.c b/drivers/gpu/nvgpu/nvgpu_common.c
index 4f0e883f..179464d8 100644
--- a/drivers/gpu/nvgpu/nvgpu_common.c
+++ b/drivers/gpu/nvgpu/nvgpu_common.c
@@ -89,6 +89,8 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 		tegra_platform_is_silicon() ? platform->enable_elpg : false;
 	g->aelpg_enabled =
 		tegra_platform_is_silicon() ? platform->enable_aelpg : false;
+	g->mscg_enabled =
+		tegra_platform_is_silicon() ? platform->enable_mscg : false;
 
 	/* set default values to aelpg parameters */
 	g->pmu.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;