author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-06-07 12:56:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-13 16:19:47 -0400
commit	c18364d0c4b3fb6581f937c018cd01fc329601bb (patch)
tree	923ab682435379dc8bad7852c49725bf7f0f5286 /drivers/gpu/nvgpu/gk20a
parent	45355f00e7de9068f403682044f550026fa7e86e (diff)
gpu: nvgpu: moved pg out from pmu_gk20a.c/h
- Moved PG-related code to pmu_pg.c under the common/pmu folder:
  - PG state machine support methods
  - PG ACK handlers
  - AELPG methods
  - PG enable/disable methods
- Prepended nvgpu_ to the global ELPG/AELPG methods, replacing the gk20a_ prefix.

JIRA NVGPU-97

Change-Id: I2148a69ff86b5c5d43c521ff6e241db84afafd82
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1498363
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
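For reference, the rename only changes the prefix of the global ELPG entry points; call sites keep the same disable/enable bracketing seen in the fifo and gr hunks below. A minimal sketch, assuming the usual nvgpu struct gk20a *g driver context; do_engine_reset() is a hypothetical stand-in for the work done while power gating is disallowed:

	/* Disallow ELPG around an engine reset, using the renamed helpers. */
	static void reset_gr_with_elpg_disallowed(struct gk20a *g)
	{
		if (g->support_pmu && g->can_elpg)
			nvgpu_pmu_disable_elpg(g);	/* was gk20a_pmu_disable_elpg() */

		do_engine_reset(g);			/* hypothetical protected work */

		if (g->support_pmu && g->can_elpg)
			nvgpu_pmu_enable_elpg(g);	/* was gk20a_pmu_enable_elpg() */
	}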
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c	20
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.h	6
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	692
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.h	50
7 files changed, 34 insertions, 750 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index f018ef89..77890da8 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -1308,7 +1308,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 			return err;
 
 		/*do elpg disable before clock gating */
-		gk20a_pmu_pg_global_enable(g, false);
+		nvgpu_pmu_pg_global_enable(g, false);
 
 		if (g->ops.clock_gating.slcg_gr_load_gating_prod)
 			g->ops.clock_gating.slcg_gr_load_gating_prod(g,
@@ -1355,7 +1355,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 			g->ops.clock_gating.slcg_gr_load_gating_prod(g,
 				g->slcg_enabled);
 
-		gk20a_pmu_pg_global_enable(g, true);
+		nvgpu_pmu_pg_global_enable(g, true);
 
 		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
 		gk20a_idle(g);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 00b26cf4..5a571dc8 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1248,7 +1248,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 
 	if (engine_enum == ENGINE_GR_GK20A) {
 		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_disable_elpg(g);
+			nvgpu_pmu_disable_elpg(g);
 		/* resetting engine will alter read/write index.
 		 * need to flush circular buffer before re-enabling FECS.
 		 */
@@ -1261,7 +1261,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 		   enough, we do full init sequence */
 		gk20a_gr_reset(g);
 		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_enable_elpg(g);
+			nvgpu_pmu_enable_elpg(g);
 	}
 	if ((engine_enum == ENGINE_GRCE_GK20A) ||
 		(engine_enum == ENGINE_ASYNC_CE_GK20A)) {
@@ -1496,7 +1496,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 	/* Disable power management */
 	if (g->support_pmu && g->can_elpg)
-		gk20a_pmu_disable_elpg(g);
+		nvgpu_pmu_disable_elpg(g);
 	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
 		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
 				false);
@@ -1699,7 +1699,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 	/* It is safe to enable ELPG again. */
 	if (g->support_pmu && g->can_elpg)
-		gk20a_pmu_enable_elpg(g);
+		nvgpu_pmu_enable_elpg(g);
 
 	return verbose;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
index 4a79a142..275b663f 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
@@ -456,18 +456,18 @@ static ssize_t elpg_enable_store(struct device *dev,
 	 */
 	if (val && !g->elpg_enabled) {
 		g->elpg_enabled = true;
-		gk20a_pmu_pg_global_enable(g, true);
+		nvgpu_pmu_pg_global_enable(g, true);
 
 	} else if (!val && g->elpg_enabled) {
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
 			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
 			g->elpg_enabled = false;
 		} else {
 			g->elpg_enabled = false;
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
 		}
 	}
 	gk20a_idle(g);
@@ -524,13 +524,13 @@ static ssize_t mscg_enable_store(struct device *dev,
 	} else if (!val && g->mscg_enabled) {
 		if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
 				PMU_PG_LPWR_FEATURE_MSCG)) {
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
 			WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
 			/* make status visible */
 			smp_mb();
 			g->mscg_enabled = false;
 			if (g->elpg_enabled)
-				gk20a_pmu_pg_global_enable(g, true);
+				nvgpu_pmu_pg_global_enable(g, true);
 		}
 		g->mscg_enabled = false;
 	}
@@ -584,11 +584,11 @@ static ssize_t aelpg_param_store(struct device *dev,
 		/* Disable AELPG */
 		ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
 		ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-		status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+		status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
 
 		/* Enable AELPG */
-		gk20a_aelpg_init(g);
-		gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
+		nvgpu_aelpg_init(g);
+		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
 	}
 
 	return count;
@@ -630,13 +630,13 @@ static ssize_t aelpg_enable_store(struct device *dev,
 			/* Enable AELPG */
 			ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
 			ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-			status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
 		} else if (!val && g->aelpg_enabled) {
 			g->aelpg_enabled = false;
 			/* Disable AELPG */
 			ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
 			ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-			status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
 		}
 	} else {
 		dev_info(dev, "PMU is not ready, AELPG request failed\n");
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index f56702dc..7631decf 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -3235,7 +3235,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	u32 lockboost;
 
 	if (g->support_pmu) {
-		err = gk20a_pmu_disable_elpg(g);
+		err = nvgpu_pmu_disable_elpg(g);
 		if (err) {
 			nvgpu_err(g,
 				"failed to set disable elpg");
@@ -3285,7 +3285,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 		args->flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;
 
 		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_enable_elpg(g);
+			nvgpu_pmu_enable_elpg(g);
 	}
 
 	/* init golden image, ELPG enabled after this is done */
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index deb8ea9c..de80c5e3 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -547,14 +547,14 @@ void gk20a_gr_clear_sm_hww(struct gk20a *g,
 	({ \
 		int err = 0; \
 		if (g->support_pmu && g->elpg_enabled) {\
-			err = gk20a_pmu_disable_elpg(g); \
+			err = nvgpu_pmu_disable_elpg(g); \
 			if (err) \
-				gk20a_pmu_enable_elpg(g); \
+				nvgpu_pmu_enable_elpg(g); \
 		} \
 		if (!err) { \
 			err = func; \
 			if (g->support_pmu && g->elpg_enabled) \
-				gk20a_pmu_enable_elpg(g); \
+				nvgpu_pmu_enable_elpg(g); \
 		} \
 		err; \
 	})
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 247b38a5..32303c6e 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -41,10 +41,6 @@
 #define PMU_MEM_SCRUBBING_TIMEOUT_MAX		1000
 #define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT	10
 
-static void ap_callback_init_and_enable_ctrl(
-	struct gk20a *g, struct pmu_msg *msg,
-	void *param, u32 seq_desc, u32 status);
-
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 {
 	u32 i = 0, j = strlen(strings);
@@ -488,6 +484,14 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 	return 0;
 }
 
+void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id)
+{
+	gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id),
+		PMU_PG_IDLE_THRESHOLD);
+	gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id),
+		PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
+}
+
 int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -692,30 +696,6 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
 		pwr_pmu_msgq_tail_val_f(*tail));
 }
 
-static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
-			void *param, u32 handle, u32 status)
-{
-	struct nvgpu_pmu *pmu = param;
-	struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat;
-
-	gk20a_dbg_fn("");
-
-	gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
-	if (status != 0) {
-		nvgpu_err(g, "PGENG cmd aborted");
-		/* TBD: disable ELPG */
-		return;
-	}
-
-	pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
-	if ((!pmu->buf_loaded) &&
-		(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
-		nvgpu_err(g, "failed to load PGENG buffer");
-	else {
-		nvgpu_pmu_state_change(g, pmu->pmu_state, true);
-	}
-}
-
 static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
@@ -750,80 +730,6 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 
 }
 
-int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 desc;
-	int err = 0;
-	u32 gr_engine_id;
-
-	gk20a_dbg_fn("");
-
-	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
-
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE +
-		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
-		PMU_PG_CMD_ID_ENG_BUF_LOAD);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
-		gr_engine_id);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
-		PMU_PGENG_GR_BUFFER_IDX_FECS);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
-		pmu->pg_buf.size);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
-		u64_lo32(pmu->pg_buf.gpu_va));
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-		(u8)(pmu->pg_buf.gpu_va & 0xFF));
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
-		PMU_DMAIDX_VIRT);
-
-	pmu->buf_loaded = false;
-	gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
-			pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
-	return err;
-}
-
-void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 desc;
-	u32 gr_engine_id;
-
-	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
-
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE +
-		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
-		PMU_PG_CMD_ID_ENG_BUF_LOAD);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
-		gr_engine_id);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
-		PMU_PGENG_GR_BUFFER_IDX_ZBC);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
-		pmu->seq_buf.size);
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
-		u64_lo32(pmu->seq_buf.gpu_va));
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-		(u8)(pmu->seq_buf.gpu_va & 0xFF));
-	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
-		PMU_DMAIDX_VIRT);
-
-	pmu->buf_loaded = false;
-	gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
-			pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
-}
-
 static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
 {
 	gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
@@ -896,184 +802,6 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.reset = gk20a_pmu_reset;
 }
 
-static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
-	void *param, u32 handle, u32 status)
-{
-	struct nvgpu_pmu *pmu = param;
-	struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;
-
-	gk20a_dbg_fn("");
-
-	if (status != 0) {
-		nvgpu_err(g, "ELPG cmd aborted");
-		/* TBD: disable ELPG */
-		return;
-	}
-
-	switch (elpg_msg->msg) {
-	case PMU_PG_ELPG_MSG_INIT_ACK:
-		gk20a_dbg_pmu("INIT_PG is ack from PMU, eng - %d",
-			elpg_msg->engine_id);
-		break;
-	case PMU_PG_ELPG_MSG_ALLOW_ACK:
-		gk20a_dbg_pmu("ALLOW is ack from PMU, eng - %d",
-			elpg_msg->engine_id);
-		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
-			pmu->elpg_stat = PMU_ELPG_STAT_ON;
-		else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
-			pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
-		break;
-	case PMU_PG_ELPG_MSG_DISALLOW_ACK:
-		gk20a_dbg_pmu("DISALLOW is ack from PMU, eng - %d",
-			elpg_msg->engine_id);
-
-		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
-			pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-		else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
-			pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
-
-		if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
-			if (g->ops.pmu.pmu_pg_engines_feature_list &&
-				g->ops.pmu.pmu_pg_engines_feature_list(g,
-					PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
-				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
-				pmu->initialized = true;
-				nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
-					false);
-				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
-				/* make status visible */
-				smp_mb();
-			} else
-				nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
-					true);
-		}
-		break;
-	default:
-		nvgpu_err(g,
-			"unsupported ELPG message : 0x%04x", elpg_msg->msg);
-	}
-
-	return;
-}
-
-static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
-	void *param, u32 handle, u32 status)
-{
-	struct nvgpu_pmu *pmu = param;
-
-	gk20a_dbg_fn("");
-
-	if (status != 0) {
-		nvgpu_err(g, "ELPG cmd aborted");
-		/* TBD: disable ELPG */
-		return;
-	}
-
-	switch (msg->msg.pg.stat.sub_msg_id) {
-	case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
-		gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
-		pmu->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
-			msg->msg.pg.stat.data;
-		break;
-	default:
-		break;
-	}
-}
-
-static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 seq;
-
-	gk20a_dbg_fn("");
-
-	gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id),
-		PMU_PG_IDLE_THRESHOLD);
-	gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id),
-		PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
-
-	if (g->ops.pmu.pmu_pg_init_param)
-		g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);
-
-	/* init ELPG */
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
-	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
-	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
-	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;
-
-	gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_INIT");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
-
-	/* alloc dmem for powergating state log */
-	pmu->stat_dmem_offset[pg_engine_id] = 0;
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
-	cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
-	cmd.cmd.pg.stat.engine_id = pg_engine_id;
-	cmd.cmd.pg.stat.sub_cmd_id = PMU_PG_STAT_CMD_ALLOC_DMEM;
-	cmd.cmd.pg.stat.data = 0;
-
-	gk20a_dbg_pmu("cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
-		pmu_handle_pg_stat_msg, pmu, &seq, ~0);
-
-	/* disallow ELPG initially
-	   PMU ucode requires a disallow cmd before allow cmd */
-	/* set for wait_event PMU_ELPG_STAT_OFF */
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
-		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
-		pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
-	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
-	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
-	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
-
-	gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_DISALLOW");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
-
-	return 0;
-}
-
-int nvgpu_pmu_init_powergating(struct gk20a *g)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	u32 pg_engine_id;
-	u32 pg_engine_id_list = 0;
-
-	gk20a_dbg_fn("");
-
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
-		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
-
-	gk20a_gr_wait_initialized(g);
-
-	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
-		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
-		pg_engine_id++) {
-
-		if (BIT(pg_engine_id) & pg_engine_id_list) {
-			pmu_pg_init_send(g, pg_engine_id);
-			if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
-				nvgpu_pmu_state_change(g,
-					PMU_STATE_ELPG_BOOTING, false);
-		}
-	}
-
-	if (g->ops.pmu.pmu_pg_param_post_init)
-		g->ops.pmu.pmu_pg_param_post_init(g);
-
-	return 0;
-}
-
 static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -1355,7 +1083,7 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 	return 0;
 }
 
-static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
+void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_pg_stats stats;
@@ -1631,238 +1359,6 @@ void gk20a_pmu_isr(struct gk20a *g)
 	nvgpu_mutex_release(&pmu->isr_mutex);
 }
 
-int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
-{
-	u32 status = 0;
-
-	if (enable_pg == true) {
-		if (g->ops.pmu.pmu_pg_engines_feature_list &&
-			g->ops.pmu.pmu_pg_engines_feature_list(g,
-				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
-			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
-			if (g->ops.pmu.pmu_lpwr_enable_pg)
-				status = g->ops.pmu.pmu_lpwr_enable_pg(g,
-					true);
-		} else if (g->support_pmu && g->can_elpg)
-			status = gk20a_pmu_enable_elpg(g);
-	} else if (enable_pg == false) {
-		if (g->ops.pmu.pmu_pg_engines_feature_list &&
-			g->ops.pmu.pmu_pg_engines_feature_list(g,
-				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
-			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
-			if (g->ops.pmu.pmu_lpwr_disable_pg)
-				status = g->ops.pmu.pmu_lpwr_disable_pg(g,
-					true);
-		} else if (g->support_pmu && g->can_elpg)
-			status = gk20a_pmu_disable_elpg(g);
-	}
-
-	return status;
-}
-
-static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 seq, status;
-
-	gk20a_dbg_fn("");
-
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE +
-		sizeof(struct pmu_pg_cmd_elpg_cmd);
-	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
-	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
-	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
-
-	/* no need to wait ack for ELPG enable but set
-	 * pending to sync with follow up ELPG disable
-	 */
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
-		pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
-
-	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
-		pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
-
-	gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_ALLOW");
-	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
-		PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
-		pmu, &seq, ~0);
-	WARN_ON(status != 0);
-
-	gk20a_dbg_fn("done");
-	return 0;
-}
-
-int gk20a_pmu_enable_elpg(struct gk20a *g)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct gr_gk20a *gr = &g->gr;
-	u32 pg_engine_id;
-	u32 pg_engine_id_list = 0;
-
-	int ret = 0;
-
-	gk20a_dbg_fn("");
-
-	if (!g->support_pmu)
-		return ret;
-
-	nvgpu_mutex_acquire(&pmu->elpg_mutex);
-
-	pmu->elpg_refcnt++;
-	if (pmu->elpg_refcnt <= 0)
-		goto exit_unlock;
-
-	/* something is not right if we end up in following code path */
-	if (unlikely(pmu->elpg_refcnt > 1)) {
-		nvgpu_warn(g,
-			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
-			__func__, pmu->elpg_refcnt);
-		WARN_ON(1);
-	}
-
-	/* do NOT enable elpg until golden ctx is created,
-	   which is related with the ctx that ELPG save and restore. */
-	if (unlikely(!gr->ctx_vars.golden_image_initialized))
-		goto exit_unlock;
-
-	/* return if ELPG is already on or on_pending or off_on_pending */
-	if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
-		goto exit_unlock;
-
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
-		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
-
-	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
-		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
-		pg_engine_id++) {
-
-		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-			ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
-			continue;
-
-		if (BIT(pg_engine_id) & pg_engine_id_list)
-			ret = gk20a_pmu_enable_elpg_locked(g, pg_engine_id);
-	}
-
-exit_unlock:
-	nvgpu_mutex_release(&pmu->elpg_mutex);
-	gk20a_dbg_fn("done");
-	return ret;
-}
-
-int gk20a_pmu_disable_elpg(struct gk20a *g)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 seq;
-	int ret = 0;
-	u32 pg_engine_id;
-	u32 pg_engine_id_list = 0;
-	u32 *ptr = NULL;
-
-	gk20a_dbg_fn("");
-
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
-		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
-
-	if (!g->support_pmu)
-		return ret;
-
-	nvgpu_mutex_acquire(&pmu->elpg_mutex);
-
-	pmu->elpg_refcnt--;
-	if (pmu->elpg_refcnt > 0) {
-		nvgpu_warn(g,
-			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
-			__func__, pmu->elpg_refcnt);
-		WARN_ON(1);
-		ret = 0;
-		goto exit_unlock;
-	}
-
-	/* cancel off_on_pending and return */
-	if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
-		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-		ret = 0;
-		goto exit_reschedule;
-	}
-	/* wait if on_pending */
-	else if (pmu->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {
-
-		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
-			&pmu->elpg_stat, PMU_ELPG_STAT_ON);
-
-		if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
-			nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
-				pmu->elpg_stat);
-			pmu_dump_elpg_stats(pmu);
-			pmu_dump_falcon_stats(pmu);
-			ret = -EBUSY;
-			goto exit_unlock;
-		}
-	}
-	/* return if ELPG is already off */
-	else if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
-		ret = 0;
-		goto exit_reschedule;
-	}
-
-	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
-		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
-		pg_engine_id++) {
-
-		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-			ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
-			continue;
-
-		if (BIT(pg_engine_id) & pg_engine_id_list) {
-			memset(&cmd, 0, sizeof(struct pmu_cmd));
-			cmd.hdr.unit_id = PMU_UNIT_PG;
-			cmd.hdr.size = PMU_CMD_HDR_SIZE +
-				sizeof(struct pmu_pg_cmd_elpg_cmd);
-			cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
-			cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
-			cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
-
-			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
-				pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
-			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
-				pmu->mscg_transition_state =
-					PMU_ELPG_STAT_OFF_PENDING;
-
-			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
-				ptr = &pmu->elpg_stat;
-			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
-				ptr = &pmu->mscg_transition_state;
-
-			gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_DISALLOW");
-			gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
-				PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
-				pmu, &seq, ~0);
-
-			pmu_wait_message_cond(pmu,
-				gk20a_get_gr_idle_timeout(g),
-				ptr, PMU_ELPG_STAT_OFF);
-			if (*ptr != PMU_ELPG_STAT_OFF) {
-				nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
-				pmu_dump_elpg_stats(pmu);
-				pmu_dump_falcon_stats(pmu);
-				ret = -EBUSY;
-				goto exit_unlock;
-			}
-		}
-	}
-
-exit_reschedule:
-exit_unlock:
-	nvgpu_mutex_release(&pmu->elpg_mutex);
-	gk20a_dbg_fn("done");
-	return ret;
-}
-
 int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
@@ -1947,173 +1443,3 @@ void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	pg_stat_data->avg_entry_latency_us = stats.pg_avg_entry_time_us;
 	pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us;
 }
-
-int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
-		struct pmu_pg_stats_data *pg_stat_data)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	u32 pg_engine_id_list = 0;
-
-	if (!pmu->initialized) {
-		pg_stat_data->ingating_time = 0;
-		pg_stat_data->ungating_time = 0;
-		pg_stat_data->gating_cnt = 0;
-		return 0;
-	}
-
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
-		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
-
-	if (BIT(pg_engine_id) & pg_engine_id_list)
-		g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
-			pg_stat_data);
-
-	return 0;
-}
-
-/* Send an Adaptive Power (AP) related command to PMU */
-int gk20a_pmu_ap_send_command(struct gk20a *g,
-		union pmu_ap_cmd *p_ap_cmd, bool b_block)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	/* FIXME: where is the PG structure defined?? */
-	u32 status = 0;
-	struct pmu_cmd cmd;
-	u32 seq;
-	pmu_callback p_callback = NULL;
-
-	memset(&cmd, 0, sizeof(struct pmu_cmd));
-
-	/* Copy common members */
-	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(union pmu_ap_cmd);
-
-	cmd.cmd.pg.ap_cmd.cmn.cmd_type = PMU_PG_CMD_ID_AP;
-	cmd.cmd.pg.ap_cmd.cmn.cmd_id = p_ap_cmd->cmn.cmd_id;
-
-	/* Copy other members of command */
-	switch (p_ap_cmd->cmn.cmd_id) {
-	case PMU_AP_CMD_ID_INIT:
-		gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_INIT");
-		cmd.cmd.pg.ap_cmd.init.pg_sampling_period_us =
-			p_ap_cmd->init.pg_sampling_period_us;
-		break;
-
-	case PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL:
-		gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
-		cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
-			p_ap_cmd->init_and_enable_ctrl.ctrl_id;
-		memcpy(
-			(void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
-			(void *)&(p_ap_cmd->init_and_enable_ctrl.params),
-			sizeof(struct pmu_ap_ctrl_init_params));
-
-		p_callback = ap_callback_init_and_enable_ctrl;
-		break;
-
-	case PMU_AP_CMD_ID_ENABLE_CTRL:
-		gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_ENABLE_CTRL");
-		cmd.cmd.pg.ap_cmd.enable_ctrl.ctrl_id =
-			p_ap_cmd->enable_ctrl.ctrl_id;
-		break;
-
-	case PMU_AP_CMD_ID_DISABLE_CTRL:
-		gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_DISABLE_CTRL");
-		cmd.cmd.pg.ap_cmd.disable_ctrl.ctrl_id =
-			p_ap_cmd->disable_ctrl.ctrl_id;
-		break;
-
-	case PMU_AP_CMD_ID_KICK_CTRL:
-		gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_KICK_CTRL");
-		cmd.cmd.pg.ap_cmd.kick_ctrl.ctrl_id =
-			p_ap_cmd->kick_ctrl.ctrl_id;
-		cmd.cmd.pg.ap_cmd.kick_ctrl.skip_count =
-			p_ap_cmd->kick_ctrl.skip_count;
-		break;
-
-	default:
-		gk20a_dbg_pmu("%s: Invalid Adaptive Power command %d\n",
-			__func__, p_ap_cmd->cmn.cmd_id);
-		return 0x2f;
-	}
-
-	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-			p_callback, pmu, &seq, ~0);
-
-	if (status) {
-		gk20a_dbg_pmu(
-			"%s: Unable to submit Adaptive Power Command %d\n",
-			__func__, p_ap_cmd->cmn.cmd_id);
-		goto err_return;
-	}
-
-	/* TODO: Implement blocking calls (b_block) */
-
-err_return:
-	return status;
-}
-
-static void ap_callback_init_and_enable_ctrl(
-	struct gk20a *g, struct pmu_msg *msg,
-	void *param, u32 seq_desc, u32 status)
-{
-	/* Define p_ap (i.e pointer to pmu_ap structure) */
-	WARN_ON(!msg);
-
-	if (!status) {
-		switch (msg->msg.pg.ap_msg.cmn.msg_id) {
-		case PMU_AP_MSG_ID_INIT_ACK:
-			gk20a_dbg_pmu("reply PMU_AP_CMD_ID_INIT");
-			break;
-
-		default:
-			gk20a_dbg_pmu(
-				"%s: Invalid Adaptive Power Message: %x\n",
-				__func__, msg->msg.pg.ap_msg.cmn.msg_id);
-			break;
-		}
-	}
-}
-
-int gk20a_aelpg_init(struct gk20a *g)
-{
-	int status = 0;
-
-	/* Remove reliance on app_ctrl field. */
-	union pmu_ap_cmd ap_cmd;
-
-	/* TODO: Check for elpg being ready? */
-	ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
-	ap_cmd.init.pg_sampling_period_us = g->pmu.aelpg_param[0];
-
-	status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
-	return status;
-}
-
-int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
-{
-	int status = 0;
-	union pmu_ap_cmd ap_cmd;
-
-	/* TODO: Probably check if ELPG is ready? */
-	ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
-	ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
-	ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
-		g->pmu.aelpg_param[1];
-	ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
-		g->pmu.aelpg_param[2];
-	ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
-		g->pmu.aelpg_param[3];
-	ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
-		g->pmu.aelpg_param[4];
-
-	switch (ctrl_id) {
-	case PMU_AP_CTRL_ID_GRAPHICS:
-		break;
-	default:
-		break;
-	}
-
-	status = gk20a_pmu_ap_send_command(g, &ap_cmd, true);
-	return status;
-}
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index b5038bd4..55d6f72c 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -35,51 +35,9 @@ struct nvgpu_firmware;
 #define FUSE_GCPLEX_CONFIG_FUSE_0	0x2C8
 #endif
 
-#define PMU_PGENG_GR_BUFFER_IDX_INIT	(0)
-#define PMU_PGENG_GR_BUFFER_IDX_ZBC	(1)
-#define PMU_PGENG_GR_BUFFER_IDX_FECS	(2)
-
-#define PMU_PG_IDLE_THRESHOLD_SIM		1000
-#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM	4000000
-/* TBD: QT or else ? */
-#define PMU_PG_IDLE_THRESHOLD			15000
-#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD	1000000
-
-#define PMU_PG_LPWR_FEATURE_RPPG 0x0
-#define PMU_PG_LPWR_FEATURE_MSCG 0x1
-
-/* state transition :
-   OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
-   ON => OFF is always synchronized */
-#define PMU_ELPG_STAT_OFF		0   /* elpg is off */
-#define PMU_ELPG_STAT_ON		1   /* elpg is on */
-#define PMU_ELPG_STAT_ON_PENDING	2   /* elpg is off, ALLOW cmd has been sent, wait for ack */
-#define PMU_ELPG_STAT_OFF_PENDING	3   /* elpg is on, DISALLOW cmd has been sent, wait for ack */
-#define PMU_ELPG_STAT_OFF_ON_PENDING	4   /* elpg is off, caller has requested on, but ALLOW
-					       cmd hasn't been sent due to ENABLE_ALLOW delay */
-
-#define PG_REQUEST_TYPE_GLOBAL 0x0
-#define PG_REQUEST_TYPE_PSTATE 0x1
-
-#define PMU_MSCG_DISABLED 0
-#define PMU_MSCG_ENABLED 1
-
-/* Default Sampling Period of AELPG */
-#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US	(1000000)
-
-/* Default values of APCTRL parameters */
-#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US	(100)
-#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US	(10000)
-#define APCTRL_POWER_BREAKEVEN_DEFAULT_US	(2000)
-#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT	(200)
-
 bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu);
 void gk20a_pmu_isr(struct gk20a *g);
 
-int gk20a_pmu_enable_elpg(struct gk20a *g);
-int gk20a_pmu_disable_elpg(struct gk20a *g);
-int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);
-
 u32 gk20a_pmu_pg_engines_list(struct gk20a *g);
 u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
 
@@ -87,6 +45,8 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries);
 
 int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable);
 
+void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id);
+
 int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
 int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
 
@@ -109,12 +69,10 @@ void pmu_copy_from_dmem(struct nvgpu_pmu *pmu,
 		u32 src, u8 *dst, u32 size, u8 port);
 int pmu_reset(struct nvgpu_pmu *pmu);
 int pmu_bootstrap(struct nvgpu_pmu *pmu);
+
+void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
 void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
 
-int gk20a_pmu_ap_send_command(struct gk20a *g,
-		union pmu_ap_cmd *p_ap_cmd, bool b_block);
-int gk20a_aelpg_init(struct gk20a *g);
-int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
 void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable);
 int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 		u32 *var, u32 val);