author    Sunny He <suhe@nvidia.com>  2017-08-01 20:10:42 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-08-21 16:06:07 -0400
commit    5f010177de985c901c33c914efe70a8498a5974f (patch)
tree      1b1a2ac1ab71608a0754a7eb64222f5d198e793c
parent    b50b379c192714d0d08c3f2d33e90c95cf795253 (diff)
gpu: nvgpu: Reorg pmu HAL initialization
Reorganize HAL initialization to remove inheritance and construct
the gpu_ops struct at compile time. This patch only covers the pmu
sub-module of the gpu_ops struct.

Perform HAL function assignments in hal_gxxxx.c through the
population of a chip-specific copy of gpu_ops.

Jira NVGPU-74

Change-Id: I8839ac99e87153637005e23b3013237f57275c54
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530982
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c   45
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.h    4
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c   54
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.h   15
-rw-r--r--  drivers/gpu/nvgpu/gm20b/hal_gm20b.c   67
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.c   48
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.h    4
-rw-r--r--  drivers/gpu/nvgpu/gp106/acr_gp106.c   25
-rw-r--r--  drivers/gpu/nvgpu/gp106/acr_gp106.h    9
-rw-r--r--  drivers/gpu/nvgpu/gp106/hal_gp106.c   49
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.c   64
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.h   10
-rw-r--r--  drivers/gpu/nvgpu/gp10b/hal_gp10b.c   63
-rw-r--r--  drivers/gpu/nvgpu/gp10b/pmu_gp10b.c   57
-rw-r--r--  drivers/gpu/nvgpu/gp10b/pmu_gp10b.h   10
15 files changed, 258 insertions(+), 266 deletions(-)
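Editor's note: the pattern at the heart of this commit is easiest to see distilled. Previously each chip built its pmu HAL at probe time by calling an init function, often the parent chip's init first and then overriding individual pointers; that is the "inheritance" the commit message removes. Now each chip declares its gpu_ops table as a compile-time initializer, and hal_gxxxx.c copies it wholesale, patching only members that depend on runtime state such as priv security. A compressed sketch of the new pattern follows, using simplified stand-in types and names rather than the real nvgpu definitions:

    /* Stand-in types; the real struct gpu_ops in nvgpu is far larger. */
    struct pmu_ops {
            int (*setup_hw_and_bootstrap)(void);
            int (*is_pmu_supported)(void);
    };

    struct gpu_ops {
            struct pmu_ops pmu;
    };

    static int chip_setup_hw(void) { return 0; }
    static int chip_setup_hw_secure(void) { return 0; }
    static int chip_is_pmu_supported(void) { return 1; }

    /* The whole table is now a compile-time constant... */
    static const struct gpu_ops chip_ops = {
            .pmu = {
                    .setup_hw_and_bootstrap = chip_setup_hw,
                    .is_pmu_supported = chip_is_pmu_supported,
            },
    };

    /* ...and the chip's init_hal copies it, overriding only members
     * that depend on runtime state (mirroring the NVGPU_SEC_PRIVSECURITY
     * branches added to hal_gm20b.c and hal_gp10b.c below). */
    void chip_init_hal(struct gpu_ops *gops, int priv_security)
    {
            *gops = chip_ops;
            if (priv_security)
                    gops->pmu.setup_hw_and_bootstrap = chip_setup_hw_secure;
    }

The compile-time table makes each chip's complete HAL visible in one place and removes the ordering-sensitive chain of runtime init calls.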
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 629a22ef..11de11de 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -459,7 +459,7 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
 		pwr_pmu_msgq_tail_val_f(*tail));
 }
 
-static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
+int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err = 0;
@@ -493,7 +493,7 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 
 }
 
-static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
+void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
 {
 	gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
 }
@@ -521,7 +521,7 @@ int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
 	return 0;
 }
 
-static bool gk20a_is_pmu_supported(struct gk20a *g)
+bool gk20a_is_pmu_supported(struct gk20a *g)
 {
 	return true;
 }
@@ -539,45 +539,6 @@ u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 	return 0;
 }
 
-void gk20a_init_pmu_ops(struct gpu_ops *gops)
-{
-	gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
-	gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
-	gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
-	gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
-	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-	gops->pmu.pmu_setup_elpg = NULL;
-	gops->pmu.init_wpr_region = NULL;
-	gops->pmu.load_lsfalcon_ucode = NULL;
-	gops->pmu.write_dmatrfbase = gk20a_write_dmatrfbase;
-	gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics;
-	gops->pmu.pmu_pg_init_param = NULL;
-	gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-	gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-	gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-	gops->pmu.pmu_lpwr_enable_pg = NULL;
-	gops->pmu.pmu_lpwr_disable_pg = NULL;
-	gops->pmu.pmu_pg_param_post_init = NULL;
-	gops->pmu.dump_secure_fuses = NULL;
-	gops->pmu.is_lazy_bootstrap = NULL;
-	gops->pmu.is_priv_load = NULL;
-	gops->pmu.get_wpr = NULL;
-	gops->pmu.alloc_blob_space = NULL;
-	gops->pmu.pmu_populate_loader_cfg = NULL;
-	gops->pmu.flcn_populate_bl_dmem_desc = NULL;
-	gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-	gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}
-
 static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
 	void *param, u32 handle, u32 status)
 {
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index 997a88d2..f4e8c601 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -54,7 +54,9 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set);
 u32 gk20a_pmu_read_idle_counter(struct gk20a *g, u32 counter_id);
 void gk20a_pmu_reset_idle_counter(struct gk20a *g, u32 counter_id);
 
-void gk20a_init_pmu_ops(struct gpu_ops *gops);
+int gk20a_init_pmu_setup_hw1(struct gk20a *g);
+void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr);
+bool gk20a_is_pmu_supported(struct gk20a *g);
 
 void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
 		u32 dst, u8 *src, u32 size, u8 port);
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 4fa1b313..2e904fdf 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -51,11 +51,6 @@ typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
 static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
 static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
 static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
-static int gm20b_bootstrap_hs_flcn(struct gk20a *g);
-static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout);
-static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
-static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
-		void *desc, u32 bl_sz);
 static int lsfm_discover_ucode_images(struct gk20a *g,
 		struct ls_flcn_mgr *plsfm);
 static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
@@ -68,15 +63,6 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm);
 static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 	struct nvgpu_mem *nonwpr);
 static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm);
-static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
-	void *lsfm, u32 *p_bl_gen_desc_size);
-static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
-	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
-static int gm20b_alloc_blob_space(struct gk20a *g,
-	size_t size, struct nvgpu_mem *mem);
-static bool gm20b_is_priv_load(u32 falcon_id);
-static bool gm20b_is_lazy_bootstrap(u32 falcon_id);
-static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
 
 /*Globals*/
 static get_ucode_details pmu_acr_supp_ucode_list[] = {
@@ -97,7 +83,7 @@ static void start_gm20b_pmu(struct gk20a *g)
 		pwr_falcon_cpuctl_startcpu_f(1));
 }
 
-static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
+void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 {
 	struct mc_carveout_info mem_inf;
 
@@ -108,29 +94,11 @@ static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 	inf->size = mem_inf.size;
 }
 
-static bool gm20b_is_pmu_supported(struct gk20a *g)
+bool gm20b_is_pmu_supported(struct gk20a *g)
 {
 	return true;
 }
 
-void gm20b_init_secure_pmu(struct gpu_ops *gops)
-{
-	gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
-	gops->pmu.prepare_ucode = prepare_ucode_blob;
-	gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
-	gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
-	gops->pmu.is_priv_load = gm20b_is_priv_load;
-	gops->pmu.get_wpr = gm20b_wpr_info;
-	gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
-	gops->pmu.pmu_populate_loader_cfg = gm20b_pmu_populate_loader_cfg;
-	gops->pmu.flcn_populate_bl_dmem_desc = gm20b_flcn_populate_bl_dmem_desc;
-	gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
-	gops->pmu.falcon_clear_halt_interrupt_status =
-			clear_halt_interrupt_status;
-	gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
-}
-/* TODO - check if any free blob res needed*/
-
 static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 {
 	struct nvgpu_firmware *pmu_fw, *pmu_desc, *pmu_sig;
@@ -334,7 +302,7 @@ rel_sig:
 	return err;
 }
 
-static bool gm20b_is_lazy_bootstrap(u32 falcon_id)
+bool gm20b_is_lazy_bootstrap(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -352,7 +320,7 @@ static bool gm20b_is_lazy_bootstrap(u32 falcon_id)
 	return enable_status;
 }
 
-static bool gm20b_is_priv_load(u32 falcon_id)
+bool gm20b_is_priv_load(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -370,7 +338,7 @@ static bool gm20b_is_priv_load(u32 falcon_id)
 	return enable_status;
 }
 
-static int gm20b_alloc_blob_space(struct gk20a *g,
+int gm20b_alloc_blob_space(struct gk20a *g,
 	size_t size, struct nvgpu_mem *mem)
 {
 	int err;
@@ -554,7 +522,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 }
 
 
-static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
+int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size)
 {
 	struct wpr_carveout_info wpr_inf;
@@ -626,7 +594,7 @@ static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	return 0;
 }
 
-static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
+int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid)
 {
 	struct wpr_carveout_info wpr_inf;
@@ -1066,7 +1034,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm)
 
 /*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
  * start and end are addresses of ucode blob in non-WPR region*/
-static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
+int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = mm->pmu.vm;
@@ -1291,7 +1259,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 	return err;
 }
 
-static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
+int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 		void *desc, u32 bl_sz)
 {
 
@@ -1461,7 +1429,7 @@ err_done:
 * @param[in] timeout_ms Timeout in msec for PMU to halt
 * @return '0' if PMU halts
 */
-static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
+int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	u32 data = 0;
@@ -1490,7 +1458,7 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 * @param[in] timeout_ms Timeout in msec for halt to clear
 * @return '0' if PMU halt irq status is clear
 */
-static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
+int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int status = 0;
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
index 84478611..6568d62f 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
@@ -24,8 +24,21 @@
 #define GM20B_FECS_UCODE_SIG "fecs_sig.bin"
 #define T18x_GPCCS_UCODE_SIG "gpccs_sig.bin"
 
-void gm20b_init_secure_pmu(struct gpu_ops *gops);
+bool gm20b_is_pmu_supported(struct gk20a *g);
 int prepare_ucode_blob(struct gk20a *g);
+int gm20b_bootstrap_hs_flcn(struct gk20a *g);
+bool gm20b_is_lazy_bootstrap(u32 falcon_id);
+bool gm20b_is_priv_load(u32 falcon_id);
+void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
+int gm20b_alloc_blob_space(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
+int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
+	void *lsfm, u32 *p_bl_gen_desc_size);
+int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
+	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
+int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms);
+int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
+int gm20b_init_pmu_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz);
+
 int gm20b_pmu_setup_sw(struct gk20a *g);
 int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt);
 int gm20b_init_nspmu_setup_hw1(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 073c377e..38e8934b 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -26,6 +26,7 @@
 #include "gk20a/flcn_gk20a.h"
 #include "gk20a/priv_ring_gk20a.h"
 #include "gk20a/regops_gk20a.h"
+#include "gk20a/pmu_gk20a.h"
 
 #include "ltc_gm20b.h"
 #include "gr_gm20b.h"
@@ -42,6 +43,7 @@
 #include "therm_gm20b.h"
 #include "bus_gm20b.h"
 #include "hal_gm20b.h"
+#include "acr_gm20b.h"
 
 #include <nvgpu/debug.h>
 #include <nvgpu/bug.h>
@@ -53,6 +55,8 @@
 #include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_top_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 
 #define PRIV_SECURITY_DISABLE 0x01
 
@@ -313,6 +317,31 @@ static const struct gpu_ops gm20b_ops = {
 		.init_therm_setup_hw = gm20b_init_therm_setup_hw,
 		.elcg_init_idle_filters = gk20a_elcg_init_idle_filters,
 	},
+	.pmu = {
+		.pmu_setup_elpg = gm20b_pmu_setup_elpg,
+		.pmu_get_queue_head = pwr_pmu_queue_head_r,
+		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_queue_head = gk20a_pmu_queue_head,
+		.pmu_queue_tail = gk20a_pmu_queue_tail,
+		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+		.pmu_mutex_release = gk20a_pmu_mutex_release,
+		.write_dmatrfbase = gm20b_write_dmatrfbase,
+		.pmu_elpg_statistics = gk20a_pmu_elpg_statistics,
+		.pmu_pg_init_param = NULL,
+		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
+		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
+		.pmu_is_lpwr_feature_supported = NULL,
+		.pmu_lpwr_enable_pg = NULL,
+		.pmu_lpwr_disable_pg = NULL,
+		.pmu_pg_param_post_init = NULL,
+		.dump_secure_fuses = pmu_dump_security_fuses_gm20b,
+		.reset_engine = gk20a_pmu_engine_reset,
+		.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
+	},
 	.clk = {
 		.init_clk_support = gm20b_init_clk_support,
 		.suspend_clk_support = gm20b_suspend_clk_support,
@@ -422,6 +451,7 @@ int gm20b_init_hal(struct gk20a *g)
 	gops->gr_ctx = gm20b_ops.gr_ctx;
 	gops->mm = gm20b_ops.mm;
 	gops->therm = gm20b_ops.therm;
+	gops->pmu = gm20b_ops.pmu;
 	/*
 	 * clk must be assigned member by member
 	 * since some clk ops are assigned during probe prior to HAL init
@@ -483,9 +513,44 @@ int gm20b_init_hal(struct gk20a *g)
 		}
 	}
 #endif
+
+	/* priv security dependent ops */
+	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+		/* Add in ops from gm20b acr */
+		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
+		gops->pmu.prepare_ucode = prepare_ucode_blob;
+		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
+		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
+		gops->pmu.is_priv_load = gm20b_is_priv_load;
+		gops->pmu.get_wpr = gm20b_wpr_info;
+		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
+		gops->pmu.pmu_populate_loader_cfg =
+			gm20b_pmu_populate_loader_cfg;
+		gops->pmu.flcn_populate_bl_dmem_desc =
+			gm20b_flcn_populate_bl_dmem_desc;
+		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
+		gops->pmu.falcon_clear_halt_interrupt_status =
+			clear_halt_interrupt_status;
+		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
+
+		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+		gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
+	} else {
+		/* Inherit from gk20a */
+		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
+		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
+		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
+		gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
+
+		gops->pmu.load_lsfalcon_ucode = NULL;
+		gops->pmu.init_wpr_region = NULL;
+	}
+
+	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
+
 	gm20b_init_gr(g);
-	gm20b_init_pmu_ops(g);
 
 	gm20b_init_uncompressed_kind_map();
 	gm20b_init_kind_attr();
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index a5940fcf..99241a53 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -102,7 +102,7 @@ static struct pg_init_sequence_list _pginitseq_gm20b[] = {
 	{ 0x0010e040, 0x00000000},
 };
 
-static int gm20b_pmu_setup_elpg(struct gk20a *g)
+int gm20b_pmu_setup_elpg(struct gk20a *g)
 {
 	int ret = 0;
 	u32 reg_writes;
@@ -226,7 +226,7 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 	return;
 }
 
-static int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
+int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 {
 	u32 err = 0;
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
@@ -261,7 +261,7 @@ void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr)
 }
 
 /*Dump Security related fuses*/
-static void pmu_dump_security_fuses_gm20b(struct gk20a *g)
+void pmu_dump_security_fuses_gm20b(struct gk20a *g)
 {
 	u32 val;
 
@@ -272,45 +272,3 @@ static void pmu_dump_security_fuses_gm20b(struct gk20a *g)
 	nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &val);
 	nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0: 0x%x", val);
 }
-
-void gm20b_init_pmu_ops(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		gm20b_init_secure_pmu(gops);
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
-	} else {
-		gk20a_init_pmu_ops(gops);
-		gops->pmu.pmu_setup_hw_and_bootstrap =
-			gm20b_init_nspmu_setup_hw1;
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-	}
-	gops->pmu.pmu_setup_elpg = gm20b_pmu_setup_elpg;
-	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-	g->pmu_lsf_pmu_wpr_init_done = 0;
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	gops->pmu.write_dmatrfbase = gm20b_write_dmatrfbase;
-	gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics;
-	gops->pmu.pmu_pg_init_param = NULL;
-	gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-	gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-	gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-	gops->pmu.pmu_lpwr_enable_pg = NULL;
-	gops->pmu.pmu_lpwr_disable_pg = NULL;
-	gops->pmu.pmu_pg_param_post_init = NULL;
-	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b;
-	gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-	gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
index 424fab35..ed3a8700 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
@@ -18,7 +18,9 @@
 
 struct gk20a;
 
-void gm20b_init_pmu_ops(struct gk20a *g);
+int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
+int gm20b_pmu_setup_elpg(struct gk20a *g);
+void pmu_dump_security_fuses_gm20b(struct gk20a *g);
 void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags);
 int gm20b_pmu_init_acr(struct gk20a *g);
 void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr);
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 929036a3..bd47f467 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -56,9 +56,6 @@ typedef int (*get_ucode_details)(struct gk20a *g,
 /*Externs*/
 
 /*Forwards*/
-static int gp106_bootstrap_hs_flcn(struct gk20a *g);
-
-static int gp106_prepare_ucode_blob(struct gk20a *g);
 
 /*Globals*/
 static get_ucode_details pmu_acr_supp_ucode_list[] = {
@@ -67,7 +64,7 @@ static get_ucode_details pmu_acr_supp_ucode_list[] = {
 	gpccs_ucode_details,
 };
 
-static void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
+void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 {
 	inf->nonwpr_base = g->mm.vidmem.bootstrap_base;
 	inf->wpr_base = inf->nonwpr_base + GP106_DGPU_WPR_OFFSET;
@@ -80,7 +77,7 @@ static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
 	dma_addr->hi |= u64_hi32(value);
 }
 
-static int gp106_alloc_blob_space(struct gk20a *g,
+int gp106_alloc_blob_space(struct gk20a *g,
 	size_t size, struct nvgpu_mem *mem)
 {
 	struct wpr_carveout_info wpr_inf;
@@ -105,20 +102,6 @@ static int gp106_alloc_blob_space(struct gk20a *g,
 		NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size, mem,
 		wpr_inf.nonwpr_base);
 }
-
-void gp106_init_secure_pmu(struct gpu_ops *gops)
-{
-	gops->pmu.prepare_ucode = gp106_prepare_ucode_blob;
-	gops->pmu.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn;
-	gops->pmu.get_wpr = gp106_wpr_info;
-	gops->pmu.alloc_blob_space = gp106_alloc_blob_space;
-	gops->pmu.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg;
-	gops->pmu.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc;
-	gops->pmu.falcon_wait_for_halt = sec2_wait_for_halt;
-	gops->pmu.falcon_clear_halt_interrupt_status =
-		sec2_clear_halt_interrupt_status;
-	gops->pmu.init_falcon_setup_hw = init_sec2_setup_hw1;
-}
 /* TODO - check if any free blob res needed*/
 
 int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
@@ -373,7 +356,7 @@ rel_sig:
 	return err;
 }
 
-static int gp106_prepare_ucode_blob(struct gk20a *g)
+int gp106_prepare_ucode_blob(struct gk20a *g)
 {
 
 	int err;
@@ -1040,7 +1023,7 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
 
 /*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
  * start and end are addresses of ucode blob in non-WPR region*/
-static int gp106_bootstrap_hs_flcn(struct gk20a *g)
+int gp106_bootstrap_hs_flcn(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = mm->pmu.vm;
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.h b/drivers/gpu/nvgpu/gp106/acr_gp106.h
index fe8fbdb1..85448a81 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.h
@@ -19,7 +19,14 @@
 #define GP104_FECS_UCODE_SIG "gp104/fecs_sig.bin"
 #define GP104_GPCCS_UCODE_SIG "gp104/gpccs_sig.bin"
 
-void gp106_init_secure_pmu(struct gpu_ops *gops);
+
+int gp106_bootstrap_hs_flcn(struct gk20a *g);
+int gp106_prepare_ucode_blob(struct gk20a *g);
+int gp106_alloc_blob_space(struct gk20a *g,
+	size_t size, struct nvgpu_mem *mem);
+
+void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
+
 void lsfm_free_ucode_img_res(struct gk20a *g,
 	struct flcn_ucode_img_v1 *p_img);
 void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index 6a50be34..21d5fee3 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -26,6 +26,7 @@
 #include "gk20a/regops_gk20a.h"
 #include "gk20a/mc_gk20a.h"
 #include "gk20a/fb_gk20a.h"
+#include "gk20a/pmu_gk20a.h"
 
 #include "gp10b/ltc_gp10b.h"
 #include "gp10b/gr_gp10b.h"
@@ -38,6 +39,7 @@
 #include "gp10b/priv_ring_gp10b.h"
 #include "gp10b/fifo_gp10b.h"
 #include "gp10b/fb_gp10b.h"
+#include "gp10b/pmu_gp10b.h"
 
 #include "gp106/fifo_gp106.h"
 #include "gp106/regops_gp106.h"
@@ -48,7 +50,10 @@
 #include "gm20b/mm_gm20b.h"
 #include "gm20b/pmu_gm20b.h"
 #include "gm20b/fb_gm20b.h"
+#include "gm20b/acr_gm20b.h"
 
+#include "gp106/acr_gp106.h"
+#include "gp106/sec2_gp106.h"
 #include "gp106/clk_gp106.h"
 #include "gp106/clk_arb_gp106.h"
 #include "gp106/mclk_gp106.h"
@@ -77,6 +82,7 @@
 #include <nvgpu/hw/gp106/hw_ram_gp106.h>
 #include <nvgpu/hw/gp106/hw_top_gp106.h>
 #include <nvgpu/hw/gp106/hw_pram_gp106.h>
+#include <nvgpu/hw/gp106/hw_pwr_gp106.h>
 
 
 static int gp106_get_litter_value(struct gk20a *g, int value)
@@ -398,6 +404,45 @@ static const struct gpu_ops gp106_ops = {
 		.get_internal_sensor_limits = gp106_get_internal_sensor_limits,
 		.configure_therm_alert = gp106_configure_therm_alert,
 	},
+	.pmu = {
+		.init_wpr_region = gm20b_pmu_init_acr,
+		.load_lsfalcon_ucode = gp106_load_falcon_ucode,
+		.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
+		.is_priv_load = gp106_is_priv_load,
+		.prepare_ucode = gp106_prepare_ucode_blob,
+		.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn,
+		.get_wpr = gp106_wpr_info,
+		.alloc_blob_space = gp106_alloc_blob_space,
+		.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
+		.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
+		.falcon_wait_for_halt = sec2_wait_for_halt,
+		.falcon_clear_halt_interrupt_status =
+			sec2_clear_halt_interrupt_status,
+		.init_falcon_setup_hw = init_sec2_setup_hw1,
+		.pmu_queue_tail = gk20a_pmu_queue_tail,
+		.pmu_get_queue_head = pwr_pmu_queue_head_r,
+		.pmu_mutex_release = gk20a_pmu_mutex_release,
+		.is_pmu_supported = gp106_is_pmu_supported,
+		.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list,
+		.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
+		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+		.pmu_is_lpwr_feature_supported =
+			gp106_pmu_is_lpwr_feature_supported,
+		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+		.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list,
+		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_queue_head = gk20a_pmu_queue_head,
+		.pmu_pg_param_post_init = nvgpu_lpwr_post_init,
+		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_pg_init_param = gp106_pg_param_init,
+		.reset_engine = gp106_pmu_engine_reset,
+		.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg,
+		.write_dmatrfbase = gp10b_write_dmatrfbase,
+		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
+		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+		.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg,
+	},
 	.clk = {
 		.init_clk_support = gp106_init_clk_support,
 		.get_crystal_clk_hz = gp106_crystal_clk_hz,
@@ -532,6 +577,7 @@ int gp106_init_hal(struct gk20a *g)
 	gops->mm = gp106_ops.mm;
 	gops->pramin = gp106_ops.pramin;
 	gops->therm = gp106_ops.therm;
+	gops->pmu = gp106_ops.pmu;
 	/*
 	 * clk must be assigned member by member
 	 * since some clk ops are assigned during probe prior to HAL init
@@ -568,10 +614,11 @@ int gp106_init_hal(struct gk20a *g)
 	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
 	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
 	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
+	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
 
+	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->bootstrap_owner = LSF_FALCON_ID_SEC2;
 	gp106_init_gr(g);
-	gp106_init_pmu_ops(g);
 
 	gp10b_init_uncompressed_kind_map();
 	gp10b_init_kind_attr();
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 3b75b488..998993c9 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -32,7 +32,7 @@
 #include <nvgpu/hw/gp106/hw_mc_gp106.h>
 #include <nvgpu/hw/gp106/hw_pwr_gp106.h>
 
-static bool gp106_is_pmu_supported(struct gk20a *g)
+bool gp106_is_pmu_supported(struct gk20a *g)
 {
 	return true;
 }
@@ -69,7 +69,7 @@ int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
 	return 0;
 }
 
-static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
+u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
 		return PMU_PG_FEATURE_GR_RPPG_ENABLED;
@@ -80,7 +80,7 @@ static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 	return 0;
 }
 
-static u32 gp106_pmu_pg_engines_list(struct gk20a *g)
+u32 gp106_pmu_pg_engines_list(struct gk20a *g)
 {
 	return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
 		BIT(PMU_PG_ELPG_ENGINE_ID_MS);
@@ -100,7 +100,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
 		msg->msg.pg.msg_type);
 }
 
-static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
@@ -168,7 +168,7 @@ void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us;
 }
 
-static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
+bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
 {
 	bool is_feature_supported = false;
 
@@ -188,7 +188,7 @@ static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
 	return is_feature_supported;
 }
 
-static bool gp106_is_lazy_bootstrap(u32 falcon_id)
+bool gp106_is_lazy_bootstrap(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -206,7 +206,7 @@ static bool gp106_is_lazy_bootstrap(u32 falcon_id)
 	return enable_status;
 }
 
-static bool gp106_is_priv_load(u32 falcon_id)
+bool gp106_is_priv_load(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -258,7 +258,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
 	gk20a_dbg_fn("done");
 }
 
-static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
+int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 {
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
 
@@ -289,51 +289,3 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 		return -ETIMEDOUT;
 	return 0;
 }
-
-void gp106_init_pmu_ops(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-	gk20a_dbg_fn("");
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		gp106_init_secure_pmu(gops);
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gp106_load_falcon_ucode;
-		gops->pmu.is_lazy_bootstrap = gp106_is_lazy_bootstrap;
-		gops->pmu.is_priv_load = gp106_is_priv_load;
-	} else {
-		gk20a_init_pmu_ops(gops);
-		gops->pmu.pmu_setup_hw_and_bootstrap =
-			gm20b_init_nspmu_setup_hw1;
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-	}
-	gops->pmu.pmu_setup_elpg = NULL;
-	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-	g->pmu_lsf_pmu_wpr_init_done = 0;
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
-	gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
-	gops->pmu.pmu_pg_init_param = gp106_pg_param_init;
-	gops->pmu.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list;
-	gops->pmu.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list;
-	gops->pmu.pmu_is_lpwr_feature_supported =
-		gp106_pmu_is_lpwr_feature_supported;
-	gops->pmu.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg;
-	gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg;
-	gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init;
-	gops->pmu.dump_secure_fuses = NULL;
-	gops->pmu.is_pmu_supported = gp106_is_pmu_supported;
-	gops->pmu.reset_engine = gp106_pmu_engine_reset;
-	gops->pmu.is_engine_in_reset = gp106_pmu_is_engine_in_reset;
-
-	gk20a_dbg_fn("done");
-}
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
index 68a00bb5..1b59b2c4 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
@@ -19,7 +19,15 @@
 
 struct gk20a;
 
-void gp106_init_pmu_ops(struct gk20a *g);
+bool gp106_is_pmu_supported(struct gk20a *g);
+u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
+u32 gp106_pmu_pg_engines_list(struct gk20a *g);
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id);
+bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id);
+bool gp106_is_lazy_bootstrap(u32 falcon_id);
+bool gp106_is_priv_load(u32 falcon_id);
+int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
+
 void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	struct pmu_pg_stats_data *pg_stat_data);
 bool gp106_pmu_is_engine_in_reset(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index a37295bb..40ef35d5 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -26,6 +26,7 @@
 #include "gk20a/regops_gk20a.h"
 #include "gk20a/mc_gk20a.h"
 #include "gk20a/fb_gk20a.h"
+#include "gk20a/pmu_gk20a.h"
 
 #include "gp10b/gr_gp10b.h"
 #include "gp10b/fecs_trace_gp10b.h"
@@ -46,6 +47,7 @@
 #include "gm20b/ltc_gm20b.h"
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
+#include "gm20b/acr_gm20b.h"
 #include "gm20b/pmu_gm20b.h"
 #include "gm20b/clk_gm20b.h"
 #include "gm20b/fb_gm20b.h"
@@ -65,6 +67,7 @@
 #include <nvgpu/hw/gp10b/hw_ram_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_top_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_pram_gp10b.h>
+#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
 
 static int gp10b_get_litter_value(struct gk20a *g, int value)
 {
@@ -353,6 +356,27 @@ static const struct gpu_ops gp10b_ops = {
 		.init_therm_setup_hw = gp10b_init_therm_setup_hw,
 		.elcg_init_idle_filters = gp10b_elcg_init_idle_filters,
 	},
+	.pmu = {
+		.pmu_setup_elpg = gp10b_pmu_setup_elpg,
+		.pmu_get_queue_head = pwr_pmu_queue_head_r,
+		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_queue_head = gk20a_pmu_queue_head,
+		.pmu_queue_tail = gk20a_pmu_queue_tail,
+		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+		.pmu_mutex_release = gk20a_pmu_mutex_release,
+		.write_dmatrfbase = gp10b_write_dmatrfbase,
+		.pmu_elpg_statistics = gp10b_pmu_elpg_statistics,
+		.pmu_pg_init_param = gp10b_pg_gr_init,
+		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
+		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
+		.dump_secure_fuses = pmu_dump_security_fuses_gp10b,
+		.reset_engine = gk20a_pmu_engine_reset,
+		.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
+	},
 	.regops = {
 		.get_global_whitelist_ranges =
 			gp10b_get_global_whitelist_ranges,
@@ -455,6 +479,7 @@ int gp10b_init_hal(struct gk20a *g)
 	gops->mm = gp10b_ops.mm;
 	gops->pramin = gp10b_ops.pramin;
 	gops->therm = gp10b_ops.therm;
+	gops->pmu = gp10b_ops.pmu;
 	gops->regops = gp10b_ops.regops;
 	gops->mc = gp10b_ops.mc;
 	gops->debug = gp10b_ops.debug;
@@ -513,9 +538,45 @@ int gp10b_init_hal(struct gk20a *g)
 		}
 	}
 #endif
 
+	/* priv security dependent ops */
+	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+		/* Add in ops from gm20b acr */
+		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
+		gops->pmu.prepare_ucode = prepare_ucode_blob,
+		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
+		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
+		gops->pmu.is_priv_load = gm20b_is_priv_load,
+		gops->pmu.get_wpr = gm20b_wpr_info,
+		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
+		gops->pmu.pmu_populate_loader_cfg =
+			gm20b_pmu_populate_loader_cfg,
+		gops->pmu.flcn_populate_bl_dmem_desc =
+			gm20b_flcn_populate_bl_dmem_desc,
+		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
+		gops->pmu.falcon_clear_halt_interrupt_status =
+			clear_halt_interrupt_status,
+		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
+
+		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
+		gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
+		gops->pmu.is_priv_load = gp10b_is_priv_load;
+	} else {
+		/* Inherit from gk20a */
+		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
+		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
+		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
+		gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
+
+		gops->pmu.load_lsfalcon_ucode = NULL;
+		gops->pmu.init_wpr_region = NULL;
+		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
+	}
+
+	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
 	gp10b_init_gr(g);
-	gp10b_init_pmu_ops(g);
 
 	gp10b_init_uncompressed_kind_map();
 	gp10b_init_kind_attr();
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index f45490db..81568122 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -252,7 +252,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
 	return 0;
 }
 
-static void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 		struct pmu_pg_stats_data *pg_stat_data)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
@@ -269,7 +269,7 @@ static void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus;
 }
 
-static int gp10b_pmu_setup_elpg(struct gk20a *g)
+int gp10b_pmu_setup_elpg(struct gk20a *g)
 {
 	int ret = 0;
 	u32 reg_writes;
@@ -299,7 +299,7 @@ void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr)
 		0x0);
 }
 
-static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
+int gp10b_init_pmu_setup_hw1(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err;
@@ -337,7 +337,7 @@ static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
 
 }
 
-static bool gp10b_is_lazy_bootstrap(u32 falcon_id)
+bool gp10b_is_lazy_bootstrap(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -355,7 +355,7 @@ static bool gp10b_is_lazy_bootstrap(u32 falcon_id)
 	return enable_status;
 }
 
-static bool gp10b_is_priv_load(u32 falcon_id)
+bool gp10b_is_priv_load(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -374,7 +374,7 @@ static bool gp10b_is_priv_load(u32 falcon_id)
 }
 
 /*Dump Security related fuses*/
-static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
+void pmu_dump_security_fuses_gp10b(struct gk20a *g)
 {
 	u32 val;
 
@@ -386,50 +386,7 @@ static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
 	nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0: 0x%x", val);
 }
 
-static bool gp10b_is_pmu_supported(struct gk20a *g)
+bool gp10b_is_pmu_supported(struct gk20a *g)
 {
 	return true;
 }
-
-void gp10b_init_pmu_ops(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-	gops->pmu.is_pmu_supported = gp10b_is_pmu_supported;
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		gm20b_init_secure_pmu(gops);
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
-		gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
-		gops->pmu.is_priv_load = gp10b_is_priv_load;
-	} else {
-		gk20a_init_pmu_ops(gops);
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
-	}
-	gops->pmu.pmu_setup_elpg = gp10b_pmu_setup_elpg;
-	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-	g->pmu_lsf_pmu_wpr_init_done = false;
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
-	gops->pmu.pmu_elpg_statistics = gp10b_pmu_elpg_statistics;
-	gops->pmu.pmu_pg_init_param = gp10b_pg_gr_init;
-	gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-	gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-	gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-	gops->pmu.pmu_lpwr_enable_pg = NULL;
-	gops->pmu.pmu_lpwr_disable_pg = NULL;
-	gops->pmu.pmu_pg_param_post_init = NULL;
-	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gp10b;
-	gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-	gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h
index 5ba7bb9b..071740f4 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h
@@ -18,7 +18,15 @@
 
 struct gk20a;
 
-void gp10b_init_pmu_ops(struct gk20a *g);
+
+bool gp10b_is_lazy_bootstrap(u32 falcon_id);
+bool gp10b_is_priv_load(u32 falcon_id);
+bool gp10b_is_pmu_supported(struct gk20a *g);
+int gp10b_init_pmu_setup_hw1(struct gk20a *g);
+void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+		struct pmu_pg_stats_data *pg_stat_data);
+int gp10b_pmu_setup_elpg(struct gk20a *g);
+void pmu_dump_security_fuses_gp10b(struct gk20a *g);
 int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
 int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
 void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr);