author	Srirangan <smadhavan@nvidia.com>	2018-08-14 05:29:27 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-21 18:44:28 -0400
commit	e988951ccab1031022ac354bbe8f53e1dc849b7a (patch)
tree	7fe8d7fa8b46f501c2e1a873b84873a5173478d5
parent	652da8116966af2a8438a9a9f135a11b4e5c6c7b (diff)
gpu: nvgpu: common: pmu: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the errors caused by single-statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I497fbdb07bb2ec5a404046f06db3c713b3859e8e
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1799525
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu.c	57
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_fw.c	69
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_ipc.c	132
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c	39
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_pg.c	116
5 files changed, 266 insertions, 147 deletions
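Every hunk below applies the same mechanical change that MISRA C:2012 Rule 15.6 requires: the body of each if / else if / else must be a compound statement, even when it holds only one statement. The following stand-alone sketch shows the before/after shape of the fix; it is illustrative only and not taken from the nvgpu sources (pmu_feature_enabled() and enable_feature() are hypothetical helpers).

#include <stdio.h>

/* Hypothetical helpers, for illustration only. */
static int pmu_feature_enabled(void)
{
	return 1;
}

static int enable_feature(void)
{
	return 0;
}

int main(void)
{
	int err = 0;

	/*
	 * Non-compliant form (what the patch removes): a single-statement
	 * if body with no braces.
	 *
	 *	if (pmu_feature_enabled())
	 *		err = enable_feature();
	 */

	/* Compliant form (what the patch introduces): braces on every branch. */
	if (pmu_feature_enabled()) {
		err = enable_feature();
	} else {
		err = -1;
	}

	printf("err = %d\n", err);
	return err;
}

Besides satisfying the rule, the braces keep a later edit from accidentally adding a second statement outside the intended branch, which is the usual rationale for enforcing 15.6.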
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index c71928c3..d72629b5 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -45,13 +45,15 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 		/* bring PMU falcon/engine out of reset */
 		g->ops.pmu.reset_engine(g, true);
 
-		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
+		if (g->ops.clock_gating.slcg_pmu_load_gating_prod) {
 			g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
 				g->slcg_enabled);
+		}
 
-		if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
+		if (g->ops.clock_gating.blcg_pmu_load_gating_prod) {
 			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
 				g->blcg_enabled);
+		}
 
 		if (nvgpu_flcn_mem_scrub_wait(pmu->flcn)) {
 			/* keep PMU falcon/engine in reset
@@ -84,12 +86,14 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 		}
 	} else {
 		err = pmu_enable_hw(pmu, true);
-		if (err)
+		if (err) {
 			goto exit;
+		}
 
 		err = nvgpu_flcn_wait_idle(pmu->flcn);
-		if (err)
+		if (err) {
 			goto exit;
+		}
 
 		pmu_enable_irq(pmu, true);
 	}
@@ -107,12 +111,14 @@ int nvgpu_pmu_reset(struct gk20a *g)
 	nvgpu_log_fn(g, " %s ", g->name);
 
 	err = nvgpu_flcn_wait_idle(pmu->flcn);
-	if (err)
+	if (err) {
 		goto exit;
+	}
 
 	err = pmu_enable(pmu, false);
-	if (err)
+	if (err) {
 		goto exit;
+	}
 
 	err = pmu_enable(pmu, true);
 
@@ -136,8 +142,9 @@ static int nvgpu_init_task_pg_init(struct gk20a *g)
 
 	err = nvgpu_thread_create(&pmu->pg_init.state_task, g,
 		nvgpu_pg_init_task, thread_name);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "failed to start nvgpu_pg_init thread");
+	}
 
 	return err;
 }
@@ -159,8 +166,9 @@ void nvgpu_kill_task_pg_init(struct gk20a *g)
 		/* wait to confirm thread stopped */
 		nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
 		do {
-			if (!nvgpu_thread_is_running(&pmu->pg_init.state_task))
+			if (!nvgpu_thread_is_running(&pmu->pg_init.state_task)) {
 				break;
+			}
 			nvgpu_udelay(2);
 		} while (!nvgpu_timeout_expired_msg(&timeout,
 			"timeout - waiting PMU state machine thread stop"));
@@ -199,8 +207,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 
 	/* TBD: sysmon subtask */
 
-	if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON))
+	if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON)) {
 		pmu->perfmon_sampling_enabled = true;
+	}
 
 	pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size();
 	pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt *
@@ -246,8 +255,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 		err = g->ops.pmu.alloc_super_surface(g,
 				&pmu->super_surface_buf,
 				sizeof(struct nv_pmu_super_surface));
-		if (err)
+		if (err) {
 			goto err_free_seq_buf;
+		}
 	}
 
 	err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
@@ -263,8 +273,9 @@ skip_init:
 	nvgpu_log_fn(g, "done");
 	return 0;
 err_free_super_surface:
-	if (g->ops.pmu.alloc_super_surface)
+	if (g->ops.pmu.alloc_super_surface) {
 		nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
+	}
 err_free_seq_buf:
 	nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
 err_free_seq:
@@ -283,20 +294,24 @@ int nvgpu_init_pmu_support(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	if (pmu->initialized)
+	if (pmu->initialized) {
 		return 0;
+	}
 
 	err = pmu_enable_hw(pmu, true);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	if (g->support_pmu) {
 		err = nvgpu_init_pmu_setup_sw(g);
-		if (err)
+		if (err) {
 			return err;
+		}
 		err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 		nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false);
 	}
@@ -402,8 +417,9 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
 
 	if (g->elpg_enabled) {
 		/* Init reg with prod values*/
-		if (g->ops.pmu.pmu_setup_elpg)
+		if (g->ops.pmu.pmu_setup_elpg) {
 			g->ops.pmu.pmu_setup_elpg(g);
+		}
 		nvgpu_pmu_enable_elpg(g);
 	}
 
@@ -459,8 +475,9 @@ static int nvgpu_pg_init_task(void *arg)
 		switch (pmu_state) {
 		case PMU_STATE_INIT_RECEIVED:
 			nvgpu_pmu_dbg(g, "pmu starting");
-			if (g->can_elpg)
+			if (g->can_elpg) {
 				nvgpu_pmu_init_powergating(g);
+			}
 			break;
 		case PMU_STATE_ELPG_BOOTED:
 			nvgpu_pmu_dbg(g, "elpg booted");
@@ -499,16 +516,18 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	if (!g->support_pmu)
+	if (!g->support_pmu) {
 		return 0;
+	}
 
 	nvgpu_kill_task_pg_init(g);
 
 	nvgpu_pmu_get_pg_stats(g,
 		PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
 
-	if (nvgpu_pmu_disable_elpg(g))
+	if (nvgpu_pmu_disable_elpg(g)) {
 		nvgpu_err(g, "failed to set disable elpg");
+	}
 	pmu->initialized = false;
 
 	/* update the s/w ELPG residency counters */
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index 8a071e32..87fd2f2a 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -870,14 +870,15 @@ static void get_pmu_init_msg_pmu_queue_params_v4(
 	u8 i;
 	u8 tmp_id = id;
 
-	if (tmp_id == PMU_COMMAND_QUEUE_HPQ)
+	if (tmp_id == PMU_COMMAND_QUEUE_HPQ) {
 		tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3;
-	else if (tmp_id == PMU_COMMAND_QUEUE_LPQ)
+	} else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) {
 		tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3;
-	else if (tmp_id == PMU_MESSAGE_QUEUE)
+	} else if (tmp_id == PMU_MESSAGE_QUEUE) {
 		tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3;
-	else
+	} else {
 		return;
+	}
 
 	queue->index = init->queue_index[tmp_id];
 	queue->size = init->queue_size[tmp_id];
@@ -898,14 +899,15 @@ static void get_pmu_init_msg_pmu_queue_params_v5(
 	u8 i;
 	u8 tmp_id = id;
 
-	if (tmp_id == PMU_COMMAND_QUEUE_HPQ)
+	if (tmp_id == PMU_COMMAND_QUEUE_HPQ) {
 		tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3;
-	else if (tmp_id == PMU_COMMAND_QUEUE_LPQ)
+	} else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) {
 		tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3;
-	else if (tmp_id == PMU_MESSAGE_QUEUE)
+	} else if (tmp_id == PMU_MESSAGE_QUEUE) {
 		tmp_id = PMU_QUEUE_MSG_IDX_FOR_V5;
-	else
+	} else {
 		return;
+	}
 
 	queue->index = init->queue_index[tmp_id];
 	queue->size = init->queue_size[tmp_id];
@@ -927,14 +929,15 @@ static void get_pmu_init_msg_pmu_queue_params_v3(
 	u8 i;
 	u8 tmp_id = id;
 
-	if (tmp_id == PMU_COMMAND_QUEUE_HPQ)
+	if (tmp_id == PMU_COMMAND_QUEUE_HPQ) {
 		tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3;
-	else if (tmp_id == PMU_COMMAND_QUEUE_LPQ)
+	} else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) {
 		tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3;
-	else if (tmp_id == PMU_MESSAGE_QUEUE)
+	} else if (tmp_id == PMU_MESSAGE_QUEUE) {
 		tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3;
-	else
+	} else {
 		return;
+	}
 	queue->index = init->queue_index[tmp_id];
 	queue->size = init->queue_size[tmp_id];
 	if (tmp_id != 0) {
@@ -1623,8 +1626,9 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
 
 	nvgpu_log_fn(g, " ");
 
-	if (nvgpu_alloc_initialized(&pmu->dmem))
+	if (nvgpu_alloc_initialized(&pmu->dmem)) {
 		nvgpu_alloc_destroy(&pmu->dmem);
+	}
 
 	nvgpu_list_for_each_entry_safe(pboardobjgrp, pboardobjgrp_tmp,
 		&g->boardobjgrp_head, boardobjgrp, node) {
@@ -1636,20 +1640,25 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
 		pboardobj->destruct(pboardobj);
 	}
 
-	if (pmu->fw)
+	if (pmu->fw) {
 		nvgpu_release_firmware(g, pmu->fw);
+	}
 
-	if (g->acr.pmu_fw)
+	if (g->acr.pmu_fw) {
 		nvgpu_release_firmware(g, g->acr.pmu_fw);
+	}
 
-	if (g->acr.pmu_desc)
+	if (g->acr.pmu_desc) {
 		nvgpu_release_firmware(g, g->acr.pmu_desc);
+	}
 
-	if (g->acr.acr_fw)
+	if (g->acr.acr_fw) {
 		nvgpu_release_firmware(g, g->acr.acr_fw);
+	}
 
-	if (g->acr.hsbl_fw)
+	if (g->acr.hsbl_fw) {
 		nvgpu_release_firmware(g, g->acr.hsbl_fw);
+	}
 
 	nvgpu_dma_unmap_free(vm, &g->acr.acr_ucode);
 	nvgpu_dma_unmap_free(vm, &g->acr.hsbl_ucode);
@@ -1673,30 +1682,36 @@ int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu)
 	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_mutex_init(&pmu->elpg_mutex);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = nvgpu_mutex_init(&pmu->pg_mutex);
-	if (err)
+	if (err) {
 		goto fail_elpg;
+	}
 
 	err = nvgpu_mutex_init(&pmu->isr_mutex);
-	if (err)
+	if (err) {
 		goto fail_pg;
+	}
 
 	err = nvgpu_mutex_init(&pmu->pmu_copy_lock);
-	if (err)
+	if (err) {
 		goto fail_isr;
+	}
 
 	err = nvgpu_mutex_init(&pmu->pmu_seq_lock);
-	if (err)
+	if (err) {
 		goto fail_pmu_copy;
+	}
 
 	pmu->remove_support = nvgpu_remove_pmu_support;
 
 	err = nvgpu_init_pmu_fw_ver_ops(pmu);
-	if (err)
+	if (err) {
 		goto fail_pmu_seq;
+	}
 
 	goto exit;
 
@@ -1723,8 +1738,9 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	if (pmu->fw)
+	if (pmu->fw) {
 		return nvgpu_init_pmu_fw_support(pmu);
+	}
 
 	pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0);
 	if (!pmu->fw) {
@@ -1740,8 +1756,9 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g)
 
 	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
 		&pmu->ucode);
-	if (err)
+	if (err) {
 		goto err_release_fw;
+	}
 
 	nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image,
 		pmu->desc->app_start_offset + pmu->desc->app_size);
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 37abb34c..39be07cc 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -154,33 +154,41 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	struct nvgpu_falcon_queue *queue;
 	u32 in_size, out_size;
 
-	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id))
+	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) {
 		goto invalid_cmd;
+	}
 
 	queue = &pmu->queue[queue_id];
-	if (cmd->hdr.size < PMU_CMD_HDR_SIZE)
+	if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
 		goto invalid_cmd;
+	}
 
-	if (cmd->hdr.size > (queue->size >> 1))
+	if (cmd->hdr.size > (queue->size >> 1)) {
 		goto invalid_cmd;
+	}
 
-	if (msg != NULL && msg->hdr.size < PMU_MSG_HDR_SIZE)
+	if (msg != NULL && msg->hdr.size < PMU_MSG_HDR_SIZE) {
 		goto invalid_cmd;
+	}
 
-	if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id))
+	if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id)) {
 		goto invalid_cmd;
+	}
 
-	if (payload == NULL)
+	if (payload == NULL) {
 		return true;
+	}
 
 	if (payload->in.buf == NULL && payload->out.buf == NULL &&
-		payload->rpc.prpc == NULL)
+		payload->rpc.prpc == NULL) {
 		goto invalid_cmd;
+	}
 
 	if ((payload->in.buf != NULL && payload->in.size == 0) ||
 		(payload->out.buf != NULL && payload->out.size == 0) ||
-		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0))
+		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) {
 		goto invalid_cmd;
+	}
 
 	in_size = PMU_CMD_HDR_SIZE;
 	if (payload->in.buf) {
@@ -194,13 +202,15 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 		out_size += g->ops.pmu_ver.get_pmu_allocation_struct_size(pmu);
 	}
 
-	if (in_size > cmd->hdr.size || out_size > cmd->hdr.size)
+	if (in_size > cmd->hdr.size || out_size > cmd->hdr.size) {
 		goto invalid_cmd;
+	}
 
 
 	if ((payload->in.offset != 0 && payload->in.buf == NULL) ||
-		(payload->out.offset != 0 && payload->out.buf == NULL))
+		(payload->out.offset != 0 && payload->out.buf == NULL)) {
 		goto invalid_cmd;
+	}
 
 	return true;
 
@@ -233,16 +243,18 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 
 	do {
 		err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size);
-		if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout))
+		if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) {
 			nvgpu_usleep_range(1000, 2000);
-		else
+		} else {
 			break;
+		}
 	} while (1);
 
-	if (err)
+	if (err) {
 		nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
-	else
+	} else {
 		nvgpu_log_fn(g, "done");
+	}
 
 	return err;
 }
@@ -281,10 +293,11 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd,
 		dmem_alloc_offset);
 
 clean_up:
-	if (err)
+	if (err) {
 		nvgpu_log_fn(g, "fail");
-	else
+	} else {
 		nvgpu_log_fn(g, "done");
+	}
 
 	return err;
 }
@@ -299,25 +312,28 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 
 	nvgpu_log_fn(g, " ");
 
-	if (payload)
+	if (payload) {
 		seq->out_payload = payload->out.buf;
+	}
 
 	if (payload && payload->in.offset != 0) {
 		pv->set_pmu_allocation_ptr(pmu, &in,
 			((u8 *)&cmd->cmd + payload->in.offset));
 
-		if (payload->in.buf != payload->out.buf)
+		if (payload->in.buf != payload->out.buf) {
 			pv->pmu_allocation_set_dmem_size(pmu, in,
 				(u16)payload->in.size);
-		else
+		} else {
 			pv->pmu_allocation_set_dmem_size(pmu, in,
 				(u16)max(payload->in.size, payload->out.size));
+		}
 
 		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
 			nvgpu_alloc(&pmu->dmem,
 				pv->pmu_allocation_get_dmem_size(pmu, in));
-		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
+		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) {
 			goto clean_up;
+		}
 
 		if (payload->in.fb_size != 0x0) {
 			seq->in_mem = nvgpu_kzalloc(g,
@@ -361,8 +377,9 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 				pv->pmu_allocation_get_dmem_size(pmu,
 					out));
 		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
-			out)))
+			out))) {
 			goto clean_up;
+		}
 
 		if (payload->out.fb_size != 0x0) {
 			seq->out_mem = nvgpu_kzalloc(g,
@@ -396,14 +413,17 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 clean_up:
 	if (err) {
 		nvgpu_log_fn(g, "fail");
-		if (in)
+		if (in) {
 			nvgpu_free(&pmu->dmem,
 				pv->pmu_allocation_get_dmem_offset(pmu, in));
-		if (out)
+		}
+		if (out) {
 			nvgpu_free(&pmu->dmem,
 				pv->pmu_allocation_get_dmem_offset(pmu, out));
-	} else
+		}
+	} else {
 		nvgpu_log_fn(g, "done");
+	}
 
 	return err;
 }
@@ -420,23 +440,26 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 	nvgpu_log_fn(g, " ");
 
 	if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
-		if (!cmd)
+		if (!cmd) {
 			nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
-		else if (!seq_desc)
+		} else if (!seq_desc) {
 			nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
-		else
+		} else {
 			nvgpu_warn(g, "%s(): PMU is not ready", __func__);
+		}
 
 		WARN_ON(1);
 		return -EINVAL;
 	}
 
-	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id))
+	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id)) {
 		return -EINVAL;
+	}
 
 	err = pmu_seq_acquire(pmu, &seq);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	cmd->hdr.seq_id = seq->id;
 
@@ -452,19 +475,22 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 	*seq_desc = seq->desc;
 
-	if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID)
+	if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) {
 		err = pmu_cmd_payload_extract_rpc(g, cmd, payload, seq);
-	else
+	} else {
 		err = pmu_cmd_payload_extract(g, cmd, payload, seq);
+	}
 
-	if (err)
+	if (err) {
 		goto clean_up;
+	}
 
 	seq->state = PMU_SEQ_STATE_USED;
 
 	err = pmu_write_cmd(pmu, cmd, queue_id, timeout);
-	if (err)
+	if (err) {
 		seq->state = PMU_SEQ_STATE_PENDING;
+	}
 
 	nvgpu_log_fn(g, "done");
 
@@ -516,18 +542,21 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 				pv->pmu_allocation_get_dmem_size(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)), 0);
 		}
-	} else
+	} else {
 		seq->callback = NULL;
+	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_in_a_ptr(seq)) != 0)
+		pv->get_pmu_seq_in_a_ptr(seq)) != 0) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)));
+	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_out_a_ptr(seq)) != 0)
+		pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)));
+	}
 
 	if (seq->out_mem != NULL) {
 		memset(pv->pmu_allocation_get_fb_addr(pmu,
@@ -536,10 +565,11 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)));
 
 		nvgpu_pmu_surface_free(g, seq->out_mem);
-		if (seq->out_mem != seq->in_mem)
+		if (seq->out_mem != seq->in_mem) {
 			nvgpu_kfree(g, seq->out_mem);
-		else
+		} else {
 			seq->out_mem = NULL;
+		}
 	}
 
 	if (seq->in_mem != NULL) {
@@ -553,8 +583,9 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 		seq->in_mem = NULL;
 	}
 
-	if (seq->callback)
+	if (seq->callback) {
 		seq->callback(g, msg, seq->cb_params, seq->desc, ret);
+	}
 
 	pmu_seq_release(pmu, seq);
 
@@ -667,11 +698,13 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
 
 	if (unlikely(!pmu->pmu_ready)) {
 		nvgpu_pmu_process_init_msg(pmu, &msg);
-		if (g->ops.pmu.init_wpr_region != NULL)
+		if (g->ops.pmu.init_wpr_region != NULL) {
 			g->ops.pmu.init_wpr_region(g);
+		}
 
-		if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+		if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 			g->ops.pmu.pmu_init_perfmon(pmu);
+		}
 
 		return 0;
 	}
@@ -687,10 +720,11 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
 
 		msg.hdr.ctrl_flags &= ~PMU_CMD_FLAGS_PMU_MASK;
 
-		if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT)
+		if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) {
 			pmu_handle_event(pmu, &msg);
-		else
+		} else {
 			pmu_response_handle(pmu, &msg);
+		}
 	}
 
 	return 0;
@@ -706,11 +740,13 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
 
 	do {
-		if (*(u8 *)var == val)
+		if (*(u8 *)var == val) {
 			return 0;
+		}
 
-		if (gk20a_pmu_is_interrupted(pmu))
+		if (gk20a_pmu_is_interrupted(pmu)) {
 			gk20a_pmu_isr(g);
+		}
 
 		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
@@ -816,8 +852,9 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 
 exit:
 	/* free allocated memory */
-	if (rpc_payload->is_mem_free_set)
+	if (rpc_payload->is_mem_free_set) {
 		nvgpu_kfree(g, rpc_payload);
+	}
 }
 
 int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
@@ -914,8 +951,9 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 
 exit:
 	if (status) {
-		if (rpc_payload)
+		if (rpc_payload) {
 			nvgpu_kfree(g, rpc_payload);
+		}
 	}
 
 	return status;
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
index 964b1488..73893f2c 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
@@ -65,8 +65,9 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 	struct pmu_payload payload;
 	u32 seq;
 
-	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 		return 0;
+	}
 
 	nvgpu_log_fn(g, " ");
 
@@ -74,9 +75,10 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 
 	gk20a_pmu_init_perfmon_counter(g);
 
-	if (!pmu->sample_buffer)
+	if (!pmu->sample_buffer) {
 		pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
 						2 * sizeof(u16));
+	}
 	if (!pmu->sample_buffer) {
 		nvgpu_err(g, "failed to allocate perfmon sample buffer");
 		return -ENOMEM;
@@ -134,8 +136,9 @@ int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu)
 	struct pmu_payload payload;
 	u32 seq;
 
-	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 		return 0;
+	}
 
 	/* PERFMON Start */
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -183,8 +186,9 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu)
 	struct pmu_cmd cmd;
 	u32 seq;
 
-	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 		return 0;
+	}
 
 	/* PERFMON Stop */
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -250,8 +254,9 @@ void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
 
 void nvgpu_pmu_reset_load_counters(struct gk20a *g)
 {
-	if (!g->power_on || gk20a_busy(g))
+	if (!g->power_on || gk20a_busy(g)) {
 		return;
+	}
 
 	gk20a_pmu_reset_idle_counter(g, 2);
 	gk20a_pmu_reset_idle_counter(g, 1);
@@ -288,8 +293,9 @@ int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
 	}
 
 	/* restart sampling */
-	if (pmu->perfmon_sampling_enabled)
+	if (pmu->perfmon_sampling_enabled) {
 		return g->ops.pmu.pmu_perfmon_start_sampling(&(g->pmu));
+	}
 
 	return 0;
 }
@@ -301,8 +307,9 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu)
 	struct nv_pmu_rpc_struct_perfmon_init rpc;
 	int status = 0;
 
-	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 		return 0;
+	}
 
 	nvgpu_log_fn(g, " ");
 
@@ -348,8 +355,9 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu)
 	struct nv_pmu_rpc_struct_perfmon_start rpc;
 	int status = 0;
 
-	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 		return 0;
+	}
 
 	nvgpu_log_fn(g, " ");
 
@@ -365,8 +373,9 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu)
 
 	nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_START\n");
 	PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, START, &rpc, 0);
-	if (status)
+	if (status) {
 		nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
+	}
 
 	return status;
 }
@@ -377,8 +386,9 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu)
 	struct nv_pmu_rpc_struct_perfmon_stop rpc;
 	int status = 0;
 
-	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 		return 0;
+	}
 
 	nvgpu_log_fn(g, " ");
 
@@ -386,8 +396,9 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu)
 	/* PERFMON Stop */
 	nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_STOP\n");
 	PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, STOP, &rpc, 0);
-	if (status)
+	if (status) {
 		nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
+	}
 
 	return status;
 }
@@ -398,8 +409,9 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu)
 	struct nv_pmu_rpc_struct_perfmon_query rpc;
 	int status = 0;
 
-	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON))
+	if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
 		return 0;
+	}
 
 	nvgpu_log_fn(g, " ");
 	pmu->perfmon_query = 0;
@@ -407,8 +419,9 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu)
 	/* PERFMON QUERY */
 	nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_QUERY\n");
 	PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, QUERY, &rpc, 0);
-	if (status)
+	if (status) {
 		nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
+	}
 
 	pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
 		&pmu->perfmon_query, 1);
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index c8559fdb..4978708c 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -72,19 +72,21 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 	case PMU_PG_ELPG_MSG_ALLOW_ACK:
 		nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d",
 			elpg_msg->engine_id);
-		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 			pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
-		else
+		} else {
 			pmu->elpg_stat = PMU_ELPG_STAT_ON;
+		}
 		break;
 	case PMU_PG_ELPG_MSG_DISALLOW_ACK:
 		nvgpu_pmu_dbg(g, "DISALLOW is ack from PMU, eng - %d",
 			elpg_msg->engine_id);
 
-		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 			pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
-		else
+		} else {
 			pmu->elpg_stat = PMU_ELPG_STAT_OFF;
+		}
 
 		if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
 			if (g->ops.pmu.pmu_pg_engines_feature_list &&
@@ -97,9 +99,10 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
 				/* make status visible */
 				nvgpu_smp_mb();
-			} else
+			} else {
 				nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
 					true);
+			}
 		}
 		break;
 	default:
@@ -118,21 +121,25 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
 			NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
-			if (g->ops.pmu.pmu_lpwr_enable_pg)
+			if (g->ops.pmu.pmu_lpwr_enable_pg) {
 				status = g->ops.pmu.pmu_lpwr_enable_pg(g,
 					true);
-		} else if (g->support_pmu && g->can_elpg)
+			}
+		} else if (g->support_pmu && g->can_elpg) {
 			status = nvgpu_pmu_enable_elpg(g);
+		}
 	} else if (enable_pg == false) {
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
 			NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
-			if (g->ops.pmu.pmu_lpwr_disable_pg)
+			if (g->ops.pmu.pmu_lpwr_disable_pg) {
 				status = g->ops.pmu.pmu_lpwr_disable_pg(g,
 					true);
-		} else if (g->support_pmu && g->can_elpg)
+			}
+		} else if (g->support_pmu && g->can_elpg) {
 			status = nvgpu_pmu_disable_elpg(g);
+		}
 	}
 
 	return status;
@@ -157,10 +164,11 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
 	/* no need to wait ack for ELPG enable but set
 	 * pending to sync with follow up ELPG disable
 	 */
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
-	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 		pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
+	}
 
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
 	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
@@ -183,14 +191,16 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	if (!g->support_pmu)
+	if (!g->support_pmu) {
 		return ret;
+	}
 
 	nvgpu_mutex_acquire(&pmu->elpg_mutex);
 
 	pmu->elpg_refcnt++;
-	if (pmu->elpg_refcnt <= 0)
+	if (pmu->elpg_refcnt <= 0) {
 		goto exit_unlock;
+	}
 
 	/* something is not right if we end up in following code path */
 	if (unlikely(pmu->elpg_refcnt > 1)) {
@@ -203,26 +213,31 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 	/* do NOT enable elpg until golden ctx is created,
 	 * which is related with the ctx that ELPG save and restore.
 	 */
-	if (unlikely(!gr->ctx_vars.golden_image_initialized))
+	if (unlikely(!gr->ctx_vars.golden_image_initialized)) {
 		goto exit_unlock;
+	}
 
 	/* return if ELPG is already on or on_pending or off_on_pending */
-	if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
+	if (pmu->elpg_stat != PMU_ELPG_STAT_OFF) {
 		goto exit_unlock;
+	}
 
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
+	if (g->ops.pmu.pmu_pg_supported_engines_list) {
 		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+	}
 
 	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
 		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
 		pg_engine_id++) {
 
 		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-			pmu->mscg_stat == PMU_MSCG_DISABLED)
+			pmu->mscg_stat == PMU_MSCG_DISABLED) {
 			continue;
+		}
 
-		if (BIT(pg_engine_id) & pg_engine_id_list)
+		if (BIT(pg_engine_id) & pg_engine_id_list) {
 			ret = pmu_enable_elpg_locked(g, pg_engine_id);
+		}
 	}
 
 exit_unlock:
@@ -243,11 +258,13 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
+	if (g->ops.pmu.pmu_pg_supported_engines_list) {
 		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+	}
 
-	if (!g->support_pmu)
+	if (!g->support_pmu) {
 		return ret;
+	}
 
 	nvgpu_mutex_acquire(&pmu->elpg_mutex);
 
@@ -293,8 +310,9 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 		pg_engine_id++) {
 
 		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-			pmu->mscg_stat == PMU_MSCG_DISABLED)
+			pmu->mscg_stat == PMU_MSCG_DISABLED) {
 			continue;
+		}
 
 		if (BIT(pg_engine_id) & pg_engine_id_list) {
 			memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -305,16 +323,17 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 			cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
 			cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
 
-			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 				pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
-			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+			} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 				pmu->mscg_transition_state =
 					PMU_ELPG_STAT_OFF_PENDING;
-
-			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+			}
+			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 				ptr = &pmu->elpg_stat;
-			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+			} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 				ptr = &pmu->mscg_transition_state;
+			}
 
 			nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
 			nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
@@ -377,8 +396,9 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 
 	gk20a_pmu_pg_idle_counter_config(g, pg_engine_id);
 
-	if (g->ops.pmu.pmu_pg_init_param)
+	if (g->ops.pmu.pmu_pg_init_param) {
 		g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);
+	}
 
 	/* init ELPG */
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -391,8 +411,9 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "PMU_PG_ELPG_CMD_INIT cmd failed\n");
+	}
 
 	/* alloc dmem for powergating state log */
 	pmu->stat_dmem_offset[pg_engine_id] = 0;
@@ -407,17 +428,19 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 		pmu_handle_pg_stat_msg, pmu, &seq, ~0);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "PMU_PG_STAT_CMD_ALLOC_DMEM cmd failed\n");
+	}
 
 	/* disallow ELPG initially
 	 * PMU ucode requires a disallow cmd before allow cmd
 	 */
 	/* set for wait_event PMU_ELPG_STAT_OFF */
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 		pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
+	}
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
@@ -428,11 +451,13 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW cmd failed\n");
+	}
 
-	if (g->ops.pmu.pmu_pg_set_sub_feature_mask)
+	if (g->ops.pmu.pmu_pg_set_sub_feature_mask) {
 		g->ops.pmu.pmu_pg_set_sub_feature_mask(g, pg_engine_id);
+	}
 
 	return 0;
 }
@@ -445,8 +470,9 @@ int nvgpu_pmu_init_powergating(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
+	if (g->ops.pmu.pmu_pg_supported_engines_list) {
 		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+	}
 
 	gk20a_gr_wait_initialized(g);
 
@@ -455,15 +481,17 @@ int nvgpu_pmu_init_powergating(struct gk20a *g)
 		pg_engine_id++) {
 
 		if (BIT(pg_engine_id) & pg_engine_id_list) {
-			if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
+			if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED) {
 				nvgpu_pmu_state_change(g,
 					PMU_STATE_ELPG_BOOTING, false);
+			}
 			pmu_pg_init_send(g, pg_engine_id);
 		}
 	}
 
-	if (g->ops.pmu.pmu_pg_param_post_init)
+	if (g->ops.pmu.pmu_pg_param_post_init) {
 		g->ops.pmu.pmu_pg_param_post_init(g);
+	}
 
 	return 0;
 }
@@ -487,9 +515,9 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 
 	pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
 	if ((!pmu->buf_loaded) &&
-		(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
+		(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF)) {
 		nvgpu_err(g, "failed to load PGENG buffer");
-	else {
+	} else {
 		nvgpu_pmu_state_change(g, pmu->pmu_state, true);
 	}
 }
@@ -530,8 +558,9 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
 	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 		pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "cmd LOAD PMU_PGENG_GR_BUFFER_IDX_FECS failed\n");
+	}
 
 	return err;
 }
@@ -570,8 +599,9 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
 	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 		pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "CMD LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC failed\n");
+	}
 }
 
 /* stats */
@@ -588,12 +618,14 @@ int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
 		return 0;
 	}
 
-	if (g->ops.pmu.pmu_pg_supported_engines_list)
+	if (g->ops.pmu.pmu_pg_supported_engines_list) {
 		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+	}
 
-	if (BIT(pg_engine_id) & pg_engine_id_list)
+	if (BIT(pg_engine_id) & pg_engine_id_list) {
 		g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
 			pg_stat_data);
+	}
 
 	return 0;
 }