path: root/drivers/gpu/nvgpu/gm20b
author    Sunny He <suhe@nvidia.com>	2017-08-01 20:10:42 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>	2017-08-21 16:06:07 -0400
commit    5f010177de985c901c33c914efe70a8498a5974f (patch)
tree      1b1a2ac1ab71608a0754a7eb64222f5d198e793c /drivers/gpu/nvgpu/gm20b
parent    b50b379c192714d0d08c3f2d33e90c95cf795253 (diff)
gpu: nvgpu: Reorg pmu HAL initialization
Reorganize HAL initialization to remove inheritance and construct
the gpu_ops struct at compile time. This patch only covers the pmu
sub-module of the gpu_ops struct.

Perform HAL function assignments in hal_gxxxx.c through the
population of a chip-specific copy of gpu_ops.

Jira NVGPU-74

Change-Id: I8839ac99e87153637005e23b3013237f57275c54
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530982
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
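The shape of the change, as a minimal standalone sketch (hypothetical, heavily simplified types and op names, not the real nvgpu structs): the chip-specific gpu_ops table becomes a const object built with designated initializers at compile time, the chip's init_hal() copies its sub-structs into g->ops, and only the ops that depend on runtime state (here, priv security) are patched afterwards.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins, just enough to illustrate the pattern. */
struct gk20a;

struct pmu_ops {
	bool (*is_pmu_supported)(struct gk20a *g);
	int (*prepare_ucode)(struct gk20a *g);
};

struct gpu_ops {
	struct pmu_ops pmu;
};

struct gk20a {
	struct gpu_ops ops;
	bool priv_security;
};

static bool gm20b_is_pmu_supported(struct gk20a *g) { (void)g; return true; }
static int prepare_ucode_blob(struct gk20a *g) { (void)g; return 0; }
static int prepare_ns_ucode_blob(struct gk20a *g) { (void)g; return 0; }

/* After the reorg: the chip table is built at compile time... */
static const struct gpu_ops gm20b_ops = {
	.pmu = {
		.is_pmu_supported = gm20b_is_pmu_supported,
		.prepare_ucode = prepare_ucode_blob,
	},
};

static int gm20b_init_hal(struct gk20a *g)
{
	/* ...copied sub-struct by sub-struct into the live ops... */
	g->ops.pmu = gm20b_ops.pmu;

	/* ...and only runtime-dependent entries are patched here. */
	if (!g->priv_security)
		g->ops.pmu.prepare_ucode = prepare_ns_ucode_blob;
	return 0;
}

int main(void)
{
	struct gk20a g = { .priv_security = true };

	gm20b_init_hal(&g);
	printf("PMU supported: %d\n", g.ops.pmu.is_pmu_supported(&g));
	return 0;
}

The diff below applies this pattern to the pmu sub-struct only: gm20b_init_secure_pmu() and gm20b_init_pmu_ops() are removed, their assignments move into the static gm20b_ops initializer and the priv-security branch of gm20b_init_hal(), and the gm20b ACR/PMU functions lose their static qualifiers so hal_gm20b.c can reference them.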
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c  | 54
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.h  | 15
-rw-r--r--  drivers/gpu/nvgpu/gm20b/hal_gm20b.c  | 67
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.c  | 48
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.h  |  4
5 files changed, 97 insertions(+), 91 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 4fa1b313..2e904fdf 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -51,11 +51,6 @@ typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
 static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
 static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
 static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
-static int gm20b_bootstrap_hs_flcn(struct gk20a *g);
-static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout);
-static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
-static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
-	void *desc, u32 bl_sz);
 static int lsfm_discover_ucode_images(struct gk20a *g,
 	struct ls_flcn_mgr *plsfm);
 static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
@@ -68,15 +63,6 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm);
 static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 	struct nvgpu_mem *nonwpr);
 static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm);
-static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
-	void *lsfm, u32 *p_bl_gen_desc_size);
-static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
-	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
-static int gm20b_alloc_blob_space(struct gk20a *g,
-	size_t size, struct nvgpu_mem *mem);
-static bool gm20b_is_priv_load(u32 falcon_id);
-static bool gm20b_is_lazy_bootstrap(u32 falcon_id);
-static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
 
 /*Globals*/
 static get_ucode_details pmu_acr_supp_ucode_list[] = {
@@ -97,7 +83,7 @@ static void start_gm20b_pmu(struct gk20a *g)
 		pwr_falcon_cpuctl_startcpu_f(1));
 }
 
-static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
+void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 {
 	struct mc_carveout_info mem_inf;
 
@@ -108,29 +94,11 @@ static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 	inf->size = mem_inf.size;
 }
 
-static bool gm20b_is_pmu_supported(struct gk20a *g)
+bool gm20b_is_pmu_supported(struct gk20a *g)
 {
 	return true;
 }
 
-void gm20b_init_secure_pmu(struct gpu_ops *gops)
-{
-	gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
-	gops->pmu.prepare_ucode = prepare_ucode_blob;
-	gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
-	gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
-	gops->pmu.is_priv_load = gm20b_is_priv_load;
-	gops->pmu.get_wpr = gm20b_wpr_info;
-	gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
-	gops->pmu.pmu_populate_loader_cfg = gm20b_pmu_populate_loader_cfg;
-	gops->pmu.flcn_populate_bl_dmem_desc = gm20b_flcn_populate_bl_dmem_desc;
-	gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
-	gops->pmu.falcon_clear_halt_interrupt_status =
-			clear_halt_interrupt_status;
-	gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
-}
-/* TODO - check if any free blob res needed*/
-
 static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 {
 	struct nvgpu_firmware *pmu_fw, *pmu_desc, *pmu_sig;
@@ -334,7 +302,7 @@ rel_sig:
 	return err;
 }
 
-static bool gm20b_is_lazy_bootstrap(u32 falcon_id)
+bool gm20b_is_lazy_bootstrap(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -352,7 +320,7 @@ static bool gm20b_is_lazy_bootstrap(u32 falcon_id)
 	return enable_status;
 }
 
-static bool gm20b_is_priv_load(u32 falcon_id)
+bool gm20b_is_priv_load(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -370,7 +338,7 @@ static bool gm20b_is_priv_load(u32 falcon_id)
 	return enable_status;
 }
 
-static int gm20b_alloc_blob_space(struct gk20a *g,
+int gm20b_alloc_blob_space(struct gk20a *g,
 	size_t size, struct nvgpu_mem *mem)
 {
 	int err;
@@ -554,7 +522,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 }
 
 
-static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
+int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size)
 {
 	struct wpr_carveout_info wpr_inf;
@@ -626,7 +594,7 @@ static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	return 0;
 }
 
-static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
+int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid)
 {
 	struct wpr_carveout_info wpr_inf;
@@ -1066,7 +1034,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm)
 
 /*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
  * start and end are addresses of ucode blob in non-WPR region*/
-static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
+int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = mm->pmu.vm;
@@ -1291,7 +1259,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 	return err;
 }
 
-static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
+int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	void *desc, u32 bl_sz)
 {
 
@@ -1461,7 +1429,7 @@ err_done:
 * @param[in] timeout_ms Timeout in msec for PMU to halt
 * @return '0' if PMU halts
 */
-static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
+int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	u32 data = 0;
@@ -1490,7 +1458,7 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 * @param[in] timeout_ms Timeout in msec for halt to clear
 * @return '0' if PMU halt irq status is clear
 */
-static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
+int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int status = 0;
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
index 84478611..6568d62f 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
@@ -24,8 +24,21 @@
 #define GM20B_FECS_UCODE_SIG "fecs_sig.bin"
 #define T18x_GPCCS_UCODE_SIG "gpccs_sig.bin"
 
-void gm20b_init_secure_pmu(struct gpu_ops *gops);
+bool gm20b_is_pmu_supported(struct gk20a *g);
 int prepare_ucode_blob(struct gk20a *g);
+int gm20b_bootstrap_hs_flcn(struct gk20a *g);
+bool gm20b_is_lazy_bootstrap(u32 falcon_id);
+bool gm20b_is_priv_load(u32 falcon_id);
+void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
+int gm20b_alloc_blob_space(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
+int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
+	void *lsfm, u32 *p_bl_gen_desc_size);
+int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
+	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
+int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms);
+int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
+int gm20b_init_pmu_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz);
+
 int gm20b_pmu_setup_sw(struct gk20a *g);
 int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt);
 int gm20b_init_nspmu_setup_hw1(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 073c377e..38e8934b 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -26,6 +26,7 @@
 #include "gk20a/flcn_gk20a.h"
 #include "gk20a/priv_ring_gk20a.h"
 #include "gk20a/regops_gk20a.h"
+#include "gk20a/pmu_gk20a.h"
 
 #include "ltc_gm20b.h"
 #include "gr_gm20b.h"
@@ -42,6 +43,7 @@
 #include "therm_gm20b.h"
 #include "bus_gm20b.h"
 #include "hal_gm20b.h"
+#include "acr_gm20b.h"
 
 #include <nvgpu/debug.h>
 #include <nvgpu/bug.h>
@@ -53,6 +55,8 @@
 #include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_top_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 
 #define PRIV_SECURITY_DISABLE 0x01
 
@@ -313,6 +317,31 @@ static const struct gpu_ops gm20b_ops = {
 		.init_therm_setup_hw = gm20b_init_therm_setup_hw,
 		.elcg_init_idle_filters = gk20a_elcg_init_idle_filters,
 	},
+	.pmu = {
+		.pmu_setup_elpg = gm20b_pmu_setup_elpg,
+		.pmu_get_queue_head = pwr_pmu_queue_head_r,
+		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_queue_head = gk20a_pmu_queue_head,
+		.pmu_queue_tail = gk20a_pmu_queue_tail,
+		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+		.pmu_mutex_release = gk20a_pmu_mutex_release,
+		.write_dmatrfbase = gm20b_write_dmatrfbase,
+		.pmu_elpg_statistics = gk20a_pmu_elpg_statistics,
+		.pmu_pg_init_param = NULL,
+		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
+		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
+		.pmu_is_lpwr_feature_supported = NULL,
+		.pmu_lpwr_enable_pg = NULL,
+		.pmu_lpwr_disable_pg = NULL,
+		.pmu_pg_param_post_init = NULL,
+		.dump_secure_fuses = pmu_dump_security_fuses_gm20b,
+		.reset_engine = gk20a_pmu_engine_reset,
+		.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
+	},
 	.clk = {
 		.init_clk_support = gm20b_init_clk_support,
 		.suspend_clk_support = gm20b_suspend_clk_support,
@@ -422,6 +451,7 @@ int gm20b_init_hal(struct gk20a *g)
 	gops->gr_ctx = gm20b_ops.gr_ctx;
 	gops->mm = gm20b_ops.mm;
 	gops->therm = gm20b_ops.therm;
+	gops->pmu = gm20b_ops.pmu;
 	/*
 	 * clk must be assigned member by member
 	 * since some clk ops are assigned during probe prior to HAL init
@@ -483,9 +513,44 @@ int gm20b_init_hal(struct gk20a *g)
 		}
 	}
 #endif
+
+	/* priv security dependent ops */
+	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+		/* Add in ops from gm20b acr */
+		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
+		gops->pmu.prepare_ucode = prepare_ucode_blob;
+		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
+		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
+		gops->pmu.is_priv_load = gm20b_is_priv_load;
+		gops->pmu.get_wpr = gm20b_wpr_info;
+		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
+		gops->pmu.pmu_populate_loader_cfg =
+			gm20b_pmu_populate_loader_cfg;
+		gops->pmu.flcn_populate_bl_dmem_desc =
+			gm20b_flcn_populate_bl_dmem_desc;
+		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
+		gops->pmu.falcon_clear_halt_interrupt_status =
+			clear_halt_interrupt_status;
+		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
+
+		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+		gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
+	} else {
+		/* Inherit from gk20a */
+		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
+		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
+		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
+		gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
+
+		gops->pmu.load_lsfalcon_ucode = NULL;
+		gops->pmu.init_wpr_region = NULL;
+	}
+
+	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
+
 	gm20b_init_gr(g);
-	gm20b_init_pmu_ops(g);
 
 	gm20b_init_uncompressed_kind_map();
 	gm20b_init_kind_attr();
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index a5940fcf..99241a53 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -102,7 +102,7 @@ static struct pg_init_sequence_list _pginitseq_gm20b[] = {
 		{ 0x0010e040, 0x00000000},
 };
 
-static int gm20b_pmu_setup_elpg(struct gk20a *g)
+int gm20b_pmu_setup_elpg(struct gk20a *g)
 {
 	int ret = 0;
 	u32 reg_writes;
@@ -226,7 +226,7 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 	return;
 }
 
-static int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
+int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 {
 	u32 err = 0;
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
@@ -261,7 +261,7 @@ void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr)
 }
 
 /*Dump Security related fuses*/
-static void pmu_dump_security_fuses_gm20b(struct gk20a *g)
+void pmu_dump_security_fuses_gm20b(struct gk20a *g)
 {
 	u32 val;
 
@@ -272,45 +272,3 @@ static void pmu_dump_security_fuses_gm20b(struct gk20a *g)
 	nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &val);
 	nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0: 0x%x", val);
 }
-
-void gm20b_init_pmu_ops(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		gm20b_init_secure_pmu(gops);
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
-	} else {
-		gk20a_init_pmu_ops(gops);
-		gops->pmu.pmu_setup_hw_and_bootstrap =
-			gm20b_init_nspmu_setup_hw1;
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-	}
-	gops->pmu.pmu_setup_elpg = gm20b_pmu_setup_elpg;
-	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-	g->pmu_lsf_pmu_wpr_init_done = 0;
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	gops->pmu.write_dmatrfbase = gm20b_write_dmatrfbase;
-	gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics;
-	gops->pmu.pmu_pg_init_param = NULL;
-	gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-	gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-	gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-	gops->pmu.pmu_lpwr_enable_pg = NULL;
-	gops->pmu.pmu_lpwr_disable_pg = NULL;
-	gops->pmu.pmu_pg_param_post_init = NULL;
-	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b;
-	gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-	gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
index 424fab35..ed3a8700 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
@@ -18,7 +18,9 @@
 
 struct gk20a;
 
-void gm20b_init_pmu_ops(struct gk20a *g);
+int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
+int gm20b_pmu_setup_elpg(struct gk20a *g);
+void pmu_dump_security_fuses_gm20b(struct gk20a *g);
 void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags);
 int gm20b_pmu_init_acr(struct gk20a *g);
 void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr);