summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorSunny He <suhe@nvidia.com>2017-08-01 18:03:26 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-08-21 16:06:04 -0400
commitb50b379c192714d0d08c3f2d33e90c95cf795253 (patch)
treebd7786d1fec51f168a9393fcb16a8fe56ad25044 /drivers
parent192f1039e11893b9216819837eee871612225849 (diff)
gpu: nvgpu: Move non-fp pmu members from gpu_ops
Move non-function pointer members out of the pmu and pmu_ver substructs of gpu_ops. Ideally gpu_ops will have only function pointers, better matching its intended purpose and improving readability. - g.ops.pmu_ver.cmd_id_zbc_table_update has been changed to g.pmu_ver_cmd_id_zbc_table_update - g.ops.pmu.lspmuwprinitdone has been changed to g.pmu_lsf_pmu_wpr_init_done - g.ops.pmu.lsfloadedfalconid has been changed to g.pmu_lsf_loaded_falcon_id Boolean flags have been implemented using the enabled.h API - g.ops.pmu_ver.is_pmu_zbc_save_supported moved to common flag NVGPU_PMU_ZBC_SAVE - g.ops.pmu.fecsbootstrapdone moved to common flag NVGPU_PMU_FECS_BOOTSTRAP_DONE Jira NVGPU-74 Change-Id: I08fb20f8f382277f2c579f06d561914c000ea6e0 Signed-off-by: Sunny He <suhe@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1530981 Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com> Reviewed-by: Automatic_Commit_Validation_User Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu.c7
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_fw.c29
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h10
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c2
-rw-r--r--drivers/gpu/nvgpu/gm20b/gr_gm20b.c6
-rw-r--r--drivers/gpu/nvgpu/gm20b/pmu_gm20b.c18
-rw-r--r--drivers/gpu/nvgpu/gp106/pmu_gp106.c20
-rw-r--r--drivers/gpu/nvgpu/gp10b/pmu_gp10b.c20
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/enabled.h6
9 files changed, 60 insertions, 58 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index 294034a7..58108722 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -15,6 +15,7 @@
15#include <nvgpu/dma.h> 15#include <nvgpu/dma.h>
16#include <nvgpu/log.h> 16#include <nvgpu/log.h>
17#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 17#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
18#include <nvgpu/enabled.h>
18 19
19#include "gk20a/gk20a.h" 20#include "gk20a/gk20a.h"
20 21
@@ -356,7 +357,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
356 pmu->initialized = true; 357 pmu->initialized = true;
357 nvgpu_pmu_state_change(g, PMU_STATE_STARTED, true); 358 nvgpu_pmu_state_change(g, PMU_STATE_STARTED, true);
358 359
359 if (g->ops.pmu_ver.is_pmu_zbc_save_supported) { 360 if (nvgpu_is_enabled(g, NVGPU_PMU_ZBC_SAVE)) {
360 /* Save zbc table after PMU is initialized. */ 361 /* Save zbc table after PMU is initialized. */
361 pmu->zbc_ready = true; 362 pmu->zbc_ready = true;
362 gk20a_pmu_save_zbc(g, 0xf); 363 gk20a_pmu_save_zbc(g, 0xf);
@@ -507,8 +508,8 @@ int nvgpu_pmu_destroy(struct gk20a *g)
507 pmu->pmu_ready = false; 508 pmu->pmu_ready = false;
508 pmu->perfmon_ready = false; 509 pmu->perfmon_ready = false;
509 pmu->zbc_ready = false; 510 pmu->zbc_ready = false;
510 g->ops.pmu.lspmuwprinitdone = false; 511 g->pmu_lsf_pmu_wpr_init_done = false;
511 g->ops.pmu.fecsbootstrapdone = false; 512 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
512 513
513 nvgpu_log_fn(g, "done"); 514 nvgpu_log_fn(g, "done");
514 return 0; 515 return 0;
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index f6229a3a..03c60449 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -16,6 +16,7 @@
16#include <nvgpu/log.h> 16#include <nvgpu/log.h>
17#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 17#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
18#include <nvgpu/firmware.h> 18#include <nvgpu/firmware.h>
19#include <nvgpu/enabled.h>
19 20
20#include "gk20a/gk20a.h" 21#include "gk20a/gk20a.h"
21 22
@@ -1463,8 +1464,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
1463 g->ops.pmu_ver.set_perfmon_cntr_group_id = 1464 g->ops.pmu_ver.set_perfmon_cntr_group_id =
1464 set_perfmon_cntr_group_id_v2; 1465 set_perfmon_cntr_group_id_v2;
1465 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; 1466 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
1466 g->ops.pmu_ver.cmd_id_zbc_table_update = 16; 1467 g->pmu_ver_cmd_id_zbc_table_update = 16;
1467 g->ops.pmu_ver.is_pmu_zbc_save_supported = true; 1468 __nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
1468 g->ops.pmu_ver.get_pmu_cmdline_args_size = 1469 g->ops.pmu_ver.get_pmu_cmdline_args_size =
1469 pmu_cmdline_size_v4; 1470 pmu_cmdline_size_v4;
1470 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = 1471 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1565,8 +1566,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
1565 g->ops.pmu_ver.set_perfmon_cntr_group_id = 1566 g->ops.pmu_ver.set_perfmon_cntr_group_id =
1566 set_perfmon_cntr_group_id_v2; 1567 set_perfmon_cntr_group_id_v2;
1567 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; 1568 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
1568 g->ops.pmu_ver.cmd_id_zbc_table_update = 16; 1569 g->pmu_ver_cmd_id_zbc_table_update = 16;
1569 g->ops.pmu_ver.is_pmu_zbc_save_supported = false; 1570 __nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, false);
1570 g->ops.pmu_ver.get_pmu_cmdline_args_size = 1571 g->ops.pmu_ver.get_pmu_cmdline_args_size =
1571 pmu_cmdline_size_v6; 1572 pmu_cmdline_size_v6;
1572 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = 1573 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1673,8 +1674,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
1673 g->ops.pmu_ver.set_perfmon_cntr_group_id = 1674 g->ops.pmu_ver.set_perfmon_cntr_group_id =
1674 set_perfmon_cntr_group_id_v2; 1675 set_perfmon_cntr_group_id_v2;
1675 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; 1676 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
1676 g->ops.pmu_ver.cmd_id_zbc_table_update = 16; 1677 g->pmu_ver_cmd_id_zbc_table_update = 16;
1677 g->ops.pmu_ver.is_pmu_zbc_save_supported = true; 1678 __nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
1678 g->ops.pmu_ver.get_pmu_cmdline_args_size = 1679 g->ops.pmu_ver.get_pmu_cmdline_args_size =
1679 pmu_cmdline_size_v5; 1680 pmu_cmdline_size_v5;
1680 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = 1681 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1792,8 +1793,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
1792 g->ops.pmu_ver.set_perfmon_cntr_group_id = 1793 g->ops.pmu_ver.set_perfmon_cntr_group_id =
1793 set_perfmon_cntr_group_id_v2; 1794 set_perfmon_cntr_group_id_v2;
1794 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; 1795 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
1795 g->ops.pmu_ver.cmd_id_zbc_table_update = 16; 1796 g->pmu_ver_cmd_id_zbc_table_update = 16;
1796 g->ops.pmu_ver.is_pmu_zbc_save_supported = true; 1797 __nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
1797 g->ops.pmu_ver.get_pmu_cmdline_args_size = 1798 g->ops.pmu_ver.get_pmu_cmdline_args_size =
1798 pmu_cmdline_size_v3; 1799 pmu_cmdline_size_v3;
1799 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = 1800 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1895,8 +1896,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
1895 g->ops.pmu_ver.set_perfmon_cntr_group_id = 1896 g->ops.pmu_ver.set_perfmon_cntr_group_id =
1896 set_perfmon_cntr_group_id_v2; 1897 set_perfmon_cntr_group_id_v2;
1897 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; 1898 g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
1898 g->ops.pmu_ver.cmd_id_zbc_table_update = 16; 1899 g->pmu_ver_cmd_id_zbc_table_update = 16;
1899 g->ops.pmu_ver.is_pmu_zbc_save_supported = true; 1900 __nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
1900 g->ops.pmu_ver.get_pmu_cmdline_args_size = 1901 g->ops.pmu_ver.get_pmu_cmdline_args_size =
1901 pmu_cmdline_size_v2; 1902 pmu_cmdline_size_v2;
1902 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = 1903 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1991,8 +1992,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
1991 pg_cmd_eng_buf_load_set_dma_offset_v0; 1992 pg_cmd_eng_buf_load_set_dma_offset_v0;
1992 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = 1993 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx =
1993 pg_cmd_eng_buf_load_set_dma_idx_v0; 1994 pg_cmd_eng_buf_load_set_dma_idx_v0;
1994 g->ops.pmu_ver.cmd_id_zbc_table_update = 16; 1995 g->pmu_ver_cmd_id_zbc_table_update = 16;
1995 g->ops.pmu_ver.is_pmu_zbc_save_supported = true; 1996 __nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
1996 g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; 1997 g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0;
1997 g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; 1998 g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0;
1998 g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; 1999 g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0;
@@ -2093,8 +2094,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
2093 pg_cmd_eng_buf_load_set_dma_offset_v0; 2094 pg_cmd_eng_buf_load_set_dma_offset_v0;
2094 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = 2095 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx =
2095 pg_cmd_eng_buf_load_set_dma_idx_v0; 2096 pg_cmd_eng_buf_load_set_dma_idx_v0;
2096 g->ops.pmu_ver.cmd_id_zbc_table_update = 14; 2097 g->pmu_ver_cmd_id_zbc_table_update = 14;
2097 g->ops.pmu_ver.is_pmu_zbc_save_supported = true; 2098 __nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
2098 g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; 2099 g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0;
2099 g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; 2100 g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0;
2100 g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; 2101 g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 47fd3aef..19ea76cb 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -656,9 +656,6 @@ struct gpu_ops {
656 u8 value); 656 u8 value);
657 void (*pg_cmd_eng_buf_load_set_dma_idx)(struct pmu_pg_cmd *pg, 657 void (*pg_cmd_eng_buf_load_set_dma_idx)(struct pmu_pg_cmd *pg,
658 u8 value); 658 u8 value);
659 /*used for change of enum zbc update cmd id from ver 0 to ver1*/
660 u32 cmd_id_zbc_table_update;
661 bool is_pmu_zbc_save_supported;
662 } pmu_ver; 659 } pmu_ver;
663 struct { 660 struct {
664 int (*get_netlist_name)(struct gk20a *g, int index, char *name); 661 int (*get_netlist_name)(struct gk20a *g, int index, char *name);
@@ -822,9 +819,6 @@ struct gpu_ops {
822 void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid); 819 void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
823 void (*handle_ext_irq)(struct gk20a *g, u32 intr); 820 void (*handle_ext_irq)(struct gk20a *g, u32 intr);
824 void (*set_irqmask)(struct gk20a *g); 821 void (*set_irqmask)(struct gk20a *g);
825 u32 lspmuwprinitdone;
826 u32 lsfloadedfalconid;
827 bool fecsbootstrapdone;
828 } pmu; 822 } pmu;
829 struct { 823 struct {
830 int (*init_debugfs)(struct gk20a *g); 824 int (*init_debugfs)(struct gk20a *g);
@@ -1197,6 +1191,10 @@ struct gk20a {
1197 1191
1198 struct gpu_ops ops; 1192 struct gpu_ops ops;
1199 u32 mc_intr_mask_restore[4]; 1193 u32 mc_intr_mask_restore[4];
1194 /*used for change of enum zbc update cmd id from ver 0 to ver1*/
1195 u32 pmu_ver_cmd_id_zbc_table_update;
1196 u32 pmu_lsf_pmu_wpr_init_done;
1197 u32 pmu_lsf_loaded_falcon_id;
1200 1198
1201 int irqs_enabled; 1199 int irqs_enabled;
1202 int irq_stall; /* can be same as irq_nonstall in case of PCI */ 1200 int irq_stall; /* can be same as irq_nonstall in case of PCI */
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 7cf8c475..629a22ef 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -598,7 +598,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
598 memset(&cmd, 0, sizeof(struct pmu_cmd)); 598 memset(&cmd, 0, sizeof(struct pmu_cmd));
599 cmd.hdr.unit_id = PMU_UNIT_PG; 599 cmd.hdr.unit_id = PMU_UNIT_PG;
600 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd); 600 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd);
601 cmd.cmd.zbc.cmd_type = g->ops.pmu_ver.cmd_id_zbc_table_update; 601 cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update;
602 cmd.cmd.zbc.entry_mask = ZBC_MASK(entries); 602 cmd.cmd.zbc.entry_mask = ZBC_MASK(entries);
603 603
604 pmu->zbc_save_done = 0; 604 pmu->zbc_save_done = 0;
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 56ebc8ca..5fcc3f7b 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -755,8 +755,8 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
755 } 755 }
756 756
757 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; 757 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
758 g->ops.pmu.lsfloadedfalconid = 0; 758 g->pmu_lsf_loaded_falcon_id = 0;
759 if (g->ops.pmu.fecsbootstrapdone) { 759 if (nvgpu_is_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE)) {
760 /* this must be recovery so bootstrap fecs and gpccs */ 760 /* this must be recovery so bootstrap fecs and gpccs */
761 if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) { 761 if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
762 gr_gm20b_load_gpccs_with_bootloader(g); 762 gr_gm20b_load_gpccs_with_bootloader(g);
@@ -776,7 +776,7 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
776 776
777 } else { 777 } else {
778 /* cold boot or rg exit */ 778 /* cold boot or rg exit */
779 g->ops.pmu.fecsbootstrapdone = true; 779 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, true);
780 if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) { 780 if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
781 gr_gm20b_load_gpccs_with_bootloader(g); 781 gr_gm20b_load_gpccs_with_bootloader(g);
782 } else { 782 } else {
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index b85e72a0..a5940fcf 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -132,7 +132,7 @@ static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
132 gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION"); 132 gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
133 133
134 if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) 134 if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS)
135 g->ops.pmu.lspmuwprinitdone = 1; 135 g->pmu_lsf_pmu_wpr_init_done = 1;
136 gk20a_dbg_fn("done"); 136 gk20a_dbg_fn("done");
137} 137}
138 138
@@ -171,7 +171,7 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
171 gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON"); 171 gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
172 172
173 gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid); 173 gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid);
174 g->ops.pmu.lsfloadedfalconid = msg->msg.acr.acrmsg.falconid; 174 g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid;
175 gk20a_dbg_fn("done"); 175 gk20a_dbg_fn("done");
176} 176}
177 177
@@ -205,8 +205,8 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
205 205
206 gk20a_dbg_fn(""); 206 gk20a_dbg_fn("");
207 207
208 gm20b_dbg_pmu("wprinit status = %x\n", g->ops.pmu.lspmuwprinitdone); 208 gm20b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
209 if (g->ops.pmu.lspmuwprinitdone) { 209 if (g->pmu_lsf_pmu_wpr_init_done) {
210 /* send message to load FECS falcon */ 210 /* send message to load FECS falcon */
211 memset(&cmd, 0, sizeof(struct pmu_cmd)); 211 memset(&cmd, 0, sizeof(struct pmu_cmd));
212 cmd.hdr.unit_id = PMU_UNIT_ACR; 212 cmd.hdr.unit_id = PMU_UNIT_ACR;
@@ -236,12 +236,12 @@ static int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
236 if (!(falconidmask == (1 << LSF_FALCON_ID_FECS))) 236 if (!(falconidmask == (1 << LSF_FALCON_ID_FECS)))
237 return -EINVAL; 237 return -EINVAL;
238 /* check whether pmu is ready to bootstrap lsf if not wait for it */ 238 /* check whether pmu is ready to bootstrap lsf if not wait for it */
239 if (!g->ops.pmu.lspmuwprinitdone) { 239 if (!g->pmu_lsf_pmu_wpr_init_done) {
240 pmu_wait_message_cond(&g->pmu, 240 pmu_wait_message_cond(&g->pmu,
241 gk20a_get_gr_idle_timeout(g), 241 gk20a_get_gr_idle_timeout(g),
242 &g->ops.pmu.lspmuwprinitdone, 1); 242 &g->pmu_lsf_pmu_wpr_init_done, 1);
243 /* check again if it still not ready indicate an error */ 243 /* check again if it still not ready indicate an error */
244 if (!g->ops.pmu.lspmuwprinitdone) { 244 if (!g->pmu_lsf_pmu_wpr_init_done) {
245 nvgpu_err(g, "PMU not ready to load LSF"); 245 nvgpu_err(g, "PMU not ready to load LSF");
246 return -ETIMEDOUT; 246 return -ETIMEDOUT;
247 } 247 }
@@ -299,8 +299,8 @@ void gm20b_init_pmu_ops(struct gk20a *g)
299 gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v; 299 gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
300 gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire; 300 gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
301 gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release; 301 gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
302 gops->pmu.lspmuwprinitdone = 0; 302 g->pmu_lsf_pmu_wpr_init_done = 0;
303 gops->pmu.fecsbootstrapdone = false; 303 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
304 gops->pmu.write_dmatrfbase = gm20b_write_dmatrfbase; 304 gops->pmu.write_dmatrfbase = gm20b_write_dmatrfbase;
305 gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics; 305 gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics;
306 gops->pmu.pmu_pg_init_param = NULL; 306 gops->pmu.pmu_pg_init_param = NULL;
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index a9fb794d..3b75b488 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -233,8 +233,8 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
233 233
234 gk20a_dbg_fn(""); 234 gk20a_dbg_fn("");
235 235
236 gp106_dbg_pmu("wprinit status = %x\n", g->ops.pmu.lspmuwprinitdone); 236 gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
237 if (g->ops.pmu.lspmuwprinitdone) { 237 if (g->pmu_lsf_pmu_wpr_init_done) {
238 /* send message to load FECS falcon */ 238 /* send message to load FECS falcon */
239 memset(&cmd, 0, sizeof(struct pmu_cmd)); 239 memset(&cmd, 0, sizeof(struct pmu_cmd));
240 cmd.hdr.unit_id = PMU_UNIT_ACR; 240 cmd.hdr.unit_id = PMU_UNIT_ACR;
@@ -268,14 +268,14 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
268 if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) | 268 if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
269 (1 << LSF_FALCON_ID_GPCCS))) 269 (1 << LSF_FALCON_ID_GPCCS)))
270 return -EINVAL; 270 return -EINVAL;
271 g->ops.pmu.lsfloadedfalconid = 0; 271 g->pmu_lsf_loaded_falcon_id = 0;
272 /* check whether pmu is ready to bootstrap lsf if not wait for it */ 272 /* check whether pmu is ready to bootstrap lsf if not wait for it */
273 if (!g->ops.pmu.lspmuwprinitdone) { 273 if (!g->pmu_lsf_pmu_wpr_init_done) {
274 pmu_wait_message_cond(&g->pmu, 274 pmu_wait_message_cond(&g->pmu,
275 gk20a_get_gr_idle_timeout(g), 275 gk20a_get_gr_idle_timeout(g),
276 &g->ops.pmu.lspmuwprinitdone, 1); 276 &g->pmu_lsf_pmu_wpr_init_done, 1);
277 /* check again if it still not ready indicate an error */ 277 /* check again if it still not ready indicate an error */
278 if (!g->ops.pmu.lspmuwprinitdone) { 278 if (!g->pmu_lsf_pmu_wpr_init_done) {
279 nvgpu_err(g, "PMU not ready to load LSF"); 279 nvgpu_err(g, "PMU not ready to load LSF");
280 return -ETIMEDOUT; 280 return -ETIMEDOUT;
281 } 281 }
@@ -284,8 +284,8 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
284 gp106_pmu_load_multiple_falcons(g, falconidmask, flags); 284 gp106_pmu_load_multiple_falcons(g, falconidmask, flags);
285 pmu_wait_message_cond(&g->pmu, 285 pmu_wait_message_cond(&g->pmu,
286 gk20a_get_gr_idle_timeout(g), 286 gk20a_get_gr_idle_timeout(g),
287 &g->ops.pmu.lsfloadedfalconid, falconidmask); 287 &g->pmu_lsf_loaded_falcon_id, falconidmask);
288 if (g->ops.pmu.lsfloadedfalconid != falconidmask) 288 if (g->pmu_lsf_loaded_falcon_id != falconidmask)
289 return -ETIMEDOUT; 289 return -ETIMEDOUT;
290 return 0; 290 return 0;
291} 291}
@@ -318,8 +318,8 @@ void gp106_init_pmu_ops(struct gk20a *g)
318 gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v; 318 gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
319 gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire; 319 gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
320 gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release; 320 gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
321 gops->pmu.lspmuwprinitdone = 0; 321 g->pmu_lsf_pmu_wpr_init_done = 0;
322 gops->pmu.fecsbootstrapdone = false; 322 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
323 gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase; 323 gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
324 gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics; 324 gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
325 gops->pmu.pmu_pg_init_param = gp106_pg_param_init; 325 gops->pmu.pmu_pg_init_param = gp106_pg_param_init;
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index da8044cd..f45490db 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -148,8 +148,8 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
148 148
149 gk20a_dbg_fn(""); 149 gk20a_dbg_fn("");
150 150
151 gp10b_dbg_pmu("wprinit status = %x\n", g->ops.pmu.lspmuwprinitdone); 151 gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
152 if (g->ops.pmu.lspmuwprinitdone) { 152 if (g->pmu_lsf_pmu_wpr_init_done) {
153 /* send message to load FECS falcon */ 153 /* send message to load FECS falcon */
154 memset(&cmd, 0, sizeof(struct pmu_cmd)); 154 memset(&cmd, 0, sizeof(struct pmu_cmd));
155 cmd.hdr.unit_id = PMU_UNIT_ACR; 155 cmd.hdr.unit_id = PMU_UNIT_ACR;
@@ -185,14 +185,14 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
185 if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) | 185 if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
186 (1 << LSF_FALCON_ID_GPCCS))) 186 (1 << LSF_FALCON_ID_GPCCS)))
187 return -EINVAL; 187 return -EINVAL;
188 g->ops.pmu.lsfloadedfalconid = 0; 188 g->pmu_lsf_loaded_falcon_id = 0;
189 /* check whether pmu is ready to bootstrap lsf if not wait for it */ 189 /* check whether pmu is ready to bootstrap lsf if not wait for it */
190 if (!g->ops.pmu.lspmuwprinitdone) { 190 if (!g->pmu_lsf_pmu_wpr_init_done) {
191 pmu_wait_message_cond(&g->pmu, 191 pmu_wait_message_cond(&g->pmu,
192 gk20a_get_gr_idle_timeout(g), 192 gk20a_get_gr_idle_timeout(g),
193 &g->ops.pmu.lspmuwprinitdone, 1); 193 &g->pmu_lsf_pmu_wpr_init_done, 1);
194 /* check again if it still not ready indicate an error */ 194 /* check again if it still not ready indicate an error */
195 if (!g->ops.pmu.lspmuwprinitdone) { 195 if (!g->pmu_lsf_pmu_wpr_init_done) {
196 nvgpu_err(g, "PMU not ready to load LSF"); 196 nvgpu_err(g, "PMU not ready to load LSF");
197 return -ETIMEDOUT; 197 return -ETIMEDOUT;
198 } 198 }
@@ -201,8 +201,8 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
201 gp10b_pmu_load_multiple_falcons(g, falconidmask, flags); 201 gp10b_pmu_load_multiple_falcons(g, falconidmask, flags);
202 pmu_wait_message_cond(&g->pmu, 202 pmu_wait_message_cond(&g->pmu,
203 gk20a_get_gr_idle_timeout(g), 203 gk20a_get_gr_idle_timeout(g),
204 &g->ops.pmu.lsfloadedfalconid, falconidmask); 204 &g->pmu_lsf_loaded_falcon_id, falconidmask);
205 if (g->ops.pmu.lsfloadedfalconid != falconidmask) 205 if (g->pmu_lsf_loaded_falcon_id != falconidmask)
206 return -ETIMEDOUT; 206 return -ETIMEDOUT;
207 return 0; 207 return 0;
208} 208}
@@ -418,8 +418,8 @@ void gp10b_init_pmu_ops(struct gk20a *g)
418 gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v; 418 gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
419 gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire; 419 gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
420 gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release; 420 gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
421 gops->pmu.lspmuwprinitdone = false; 421 g->pmu_lsf_pmu_wpr_init_done = false;
422 gops->pmu.fecsbootstrapdone = false; 422 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
423 gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase; 423 gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
424 gops->pmu.pmu_elpg_statistics = gp10b_pmu_elpg_statistics; 424 gops->pmu.pmu_elpg_statistics = gp10b_pmu_elpg_statistics;
425 gops->pmu.pmu_pg_init_param = gp10b_pg_gr_init; 425 gops->pmu.pmu_pg_init_param = gp10b_pg_gr_init;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/enabled.h b/drivers/gpu/nvgpu/include/nvgpu/enabled.h
index fd29a9eb..5557f31f 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/enabled.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/enabled.h
@@ -48,8 +48,10 @@ struct gk20a;
48 * PMU flags. 48 * PMU flags.
49 */ 49 */
50/* perfmon enabled or disabled for PMU */ 50/* perfmon enabled or disabled for PMU */
51#define NVGPU_PMU_PERFMON 48 51#define NVGPU_PMU_PERFMON 48
52#define NVGPU_PMU_PSTATE 49 52#define NVGPU_PMU_PSTATE 49
53#define NVGPU_PMU_ZBC_SAVE 50
54#define NVGPU_PMU_FECS_BOOTSTRAP_DONE 51
53 55
54/* 56/*
55 * Must be greater than the largest bit offset in the above list. 57 * Must be greater than the largest bit offset in the above list.