Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
 drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 132 ++++++++++++++++++------------------
 1 file changed, 66 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index cffe7199..615b6b46 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -42,8 +42,8 @@
 #include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 
 /*Defines*/
-#define gm20b_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gm20b_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
 
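For readers skimming the diff: the whole commit is this macro change plus mechanical call-site updates. A minimal sketch of a caller after the change (the function name and message below are illustrative, not from this file):

	/* Illustrative only: shows the new calling convention. Every call
	 * site now passes the struct gk20a pointer so nvgpu_log() can use
	 * per-device log state instead of the global gk20a_dbg() path.
	 */
	static void example_caller(struct gk20a *g)
	{
		gm20b_dbg_pmu(g, "falcon id %d\n", 0);
	}
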
@@ -101,16 +101,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct lsf_ucode_desc *lsf_desc;
 	int err;
-	gm20b_dbg_pmu("requesting PMU ucode in GM20B\n");
+	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n");
 	pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
 	if (!pmu_fw) {
 		nvgpu_err(g, "failed to load pmu ucode!!");
 		return -ENOENT;
 	}
 	g->acr.pmu_fw = pmu_fw;
-	gm20b_dbg_pmu("Loaded PMU ucode in for blob preparation");
+	gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
 
-	gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n");
+	gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
 	pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
 	if (!pmu_desc) {
 		nvgpu_err(g, "failed to load pmu ucode desc!!");
@@ -129,7 +129,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 
 	err = nvgpu_init_pmu_fw_support(pmu);
 	if (err) {
-		gm20b_dbg_pmu("failed to set function pointers\n");
+		gm20b_dbg_pmu(g, "failed to set function pointers\n");
 		goto release_sig;
 	}
 
@@ -148,7 +148,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu("requesting PMU ucode in GM20B exit\n");
+	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
 	nvgpu_release_firmware(g, pmu_sig);
 	return 0;
 release_sig:
@@ -221,7 +221,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu("fecs fw loaded\n");
+	gm20b_dbg_pmu(g, "fecs fw loaded\n");
 	nvgpu_release_firmware(g, fecs_sig);
 	return 0;
 free_lsf_desc:
@@ -292,7 +292,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu("gpccs fw loaded\n");
+	gm20b_dbg_pmu(g, "gpccs fw loaded\n");
 	nvgpu_release_firmware(g, gpccs_sig);
 	return 0;
 free_lsf_desc:
@@ -361,24 +361,24 @@ int prepare_ucode_blob(struct gk20a *g)
 		non WPR blob of ucodes*/
 		err = nvgpu_init_pmu_fw_support(pmu);
 		if (err) {
-			gm20b_dbg_pmu("failed to set function pointers\n");
+			gm20b_dbg_pmu(g, "failed to set function pointers\n");
 			return err;
 		}
 		return 0;
 	}
 	plsfm = &lsfm_l;
 	memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
-	gm20b_dbg_pmu("fetching GMMU regs\n");
+	gm20b_dbg_pmu(g, "fetching GMMU regs\n");
 	g->ops.fb.vpr_info_fetch(g);
 	gr_gk20a_init_ctxsw_ucode(g);
 
 	g->ops.pmu.get_wpr(g, &wpr_inf);
-	gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base);
-	gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size);
+	gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
+	gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size);
 
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
-	gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
+	gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err)
 		goto free_sgt;
 
@@ -394,13 +394,13 @@ int prepare_ucode_blob(struct gk20a *g)
 		if (err)
 			goto free_sgt;
 
-		gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
+		gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
 		lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
 	} else {
-		gm20b_dbg_pmu("LSFM is managing no falcons.\n");
+		gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n");
 	}
-	gm20b_dbg_pmu("prepare ucode blob return 0\n");
+	gm20b_dbg_pmu(g, "prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
 free_sgt:
 	return err;
@@ -444,13 +444,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 
 			plsfm->managed_flcn_cnt++;
 		} else {
-			gm20b_dbg_pmu("id not managed %d\n",
+			gm20b_dbg_pmu(g, "id not managed %d\n",
 				ucode_img.lsf_desc->falcon_id);
 		}
 
 		/*Free any ucode image resources if not managing this falcon*/
 		if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
-			gm20b_dbg_pmu("pmu is not LSFM managed\n");
+			gm20b_dbg_pmu(g, "pmu is not LSFM managed\n");
 			lsfm_free_ucode_img_res(g, &ucode_img);
 		}
 
@@ -481,7 +481,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 							== 0)
 				plsfm->managed_flcn_cnt++;
 			} else {
-				gm20b_dbg_pmu("not managed %d\n",
+				gm20b_dbg_pmu(g, "not managed %d\n",
 					ucode_img.lsf_desc->falcon_id);
 				lsfm_free_nonpmu_ucode_img_res(g,
 					&ucode_img);
@@ -489,7 +489,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 			}
 		} else {
 			/* Consumed all available falcon objects */
-			gm20b_dbg_pmu("Done checking for ucodes %d\n", i);
+			gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i);
 			break;
 		}
 	}
@@ -526,26 +526,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
-	gm20b_dbg_pmu("pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
+	gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
 	/*From linux*/
 	addr_code = u64_lo32((addr_base +
 				desc->app_start_offset +
 				desc->app_resident_code_offset) >> 8);
-	gm20b_dbg_pmu("app start %d app res code off %d\n",
+	gm20b_dbg_pmu(g, "app start %d app res code off %d\n",
 		desc->app_start_offset, desc->app_resident_code_offset);
 	addr_data = u64_lo32((addr_base +
 				desc->app_start_offset +
 				desc->app_resident_data_offset) >> 8);
-	gm20b_dbg_pmu("app res data offset%d\n",
+	gm20b_dbg_pmu(g, "app res data offset%d\n",
 		desc->app_resident_data_offset);
-	gm20b_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset);
+	gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
 
 	addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
 			gk20a_readl(g, pwr_falcon_hwcfg_r())))
 			<< GK20A_PMU_DMEM_BLKSIZE2);
 	addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
 
-	gm20b_dbg_pmu("addr_args %x\n", addr_args);
+	gm20b_dbg_pmu(g, "addr_args %x\n", addr_args);
 
 	/* Populate the loader_config state*/
 	ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -599,7 +599,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 
-	gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
+	gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
 		p_lsfm->wpr_header.falcon_id);
 	addr_code = u64_lo32((addr_base +
 				desc->app_start_offset +
@@ -608,7 +608,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 				desc->app_start_offset +
 				desc->app_resident_data_offset) >> 8);
 
-	gm20b_dbg_pmu("gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
+	gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
 		(u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
 		p_lsfm->wpr_header.falcon_id);
 
@@ -631,7 +631,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 
 	struct nvgpu_pmu *pmu = &g->pmu;
 	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
-		gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n");
+		gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
 		g->ops.pmu.flcn_populate_bl_dmem_desc(g,
 				pnode, &pnode->bl_gen_desc_size,
 				pnode->wpr_header.falcon_id);
@@ -639,7 +639,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	}
 
 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
-		gm20b_dbg_pmu("pmu write flcn bl gen desc\n");
+		gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
 		if (pnode->wpr_header.falcon_id == pmu->falcon_id)
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
@@ -672,46 +672,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 		nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
 				&pnode->wpr_header, sizeof(pnode->wpr_header));
 
-		gm20b_dbg_pmu("wpr header");
-		gm20b_dbg_pmu("falconid :%d",
+		gm20b_dbg_pmu(g, "wpr header");
+		gm20b_dbg_pmu(g, "falconid :%d",
 				pnode->wpr_header.falcon_id);
-		gm20b_dbg_pmu("lsb_offset :%x",
+		gm20b_dbg_pmu(g, "lsb_offset :%x",
 				pnode->wpr_header.lsb_offset);
-		gm20b_dbg_pmu("bootstrap_owner :%d",
+		gm20b_dbg_pmu(g, "bootstrap_owner :%d",
 				pnode->wpr_header.bootstrap_owner);
-		gm20b_dbg_pmu("lazy_bootstrap :%d",
+		gm20b_dbg_pmu(g, "lazy_bootstrap :%d",
 				pnode->wpr_header.lazy_bootstrap);
-		gm20b_dbg_pmu("status :%d",
+		gm20b_dbg_pmu(g, "status :%d",
 				pnode->wpr_header.status);
 
 		/*Flush LSB header to memory*/
 		nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
 				&pnode->lsb_header, sizeof(pnode->lsb_header));
 
-		gm20b_dbg_pmu("lsb header");
-		gm20b_dbg_pmu("ucode_off :%x",
+		gm20b_dbg_pmu(g, "lsb header");
+		gm20b_dbg_pmu(g, "ucode_off :%x",
 				pnode->lsb_header.ucode_off);
-		gm20b_dbg_pmu("ucode_size :%x",
+		gm20b_dbg_pmu(g, "ucode_size :%x",
 				pnode->lsb_header.ucode_size);
-		gm20b_dbg_pmu("data_size :%x",
+		gm20b_dbg_pmu(g, "data_size :%x",
 				pnode->lsb_header.data_size);
-		gm20b_dbg_pmu("bl_code_size :%x",
+		gm20b_dbg_pmu(g, "bl_code_size :%x",
 				pnode->lsb_header.bl_code_size);
-		gm20b_dbg_pmu("bl_imem_off :%x",
+		gm20b_dbg_pmu(g, "bl_imem_off :%x",
 				pnode->lsb_header.bl_imem_off);
-		gm20b_dbg_pmu("bl_data_off :%x",
+		gm20b_dbg_pmu(g, "bl_data_off :%x",
 				pnode->lsb_header.bl_data_off);
-		gm20b_dbg_pmu("bl_data_size :%x",
+		gm20b_dbg_pmu(g, "bl_data_size :%x",
 				pnode->lsb_header.bl_data_size);
-		gm20b_dbg_pmu("app_code_off :%x",
+		gm20b_dbg_pmu(g, "app_code_off :%x",
 				pnode->lsb_header.app_code_off);
-		gm20b_dbg_pmu("app_code_size :%x",
+		gm20b_dbg_pmu(g, "app_code_size :%x",
 				pnode->lsb_header.app_code_size);
-		gm20b_dbg_pmu("app_data_off :%x",
+		gm20b_dbg_pmu(g, "app_data_off :%x",
 				pnode->lsb_header.app_data_off);
-		gm20b_dbg_pmu("app_data_size :%x",
+		gm20b_dbg_pmu(g, "app_data_size :%x",
 				pnode->lsb_header.app_data_size);
-		gm20b_dbg_pmu("flags :%x",
+		gm20b_dbg_pmu(g, "flags :%x",
 				pnode->lsb_header.flags);
 
 		/*If this falcon has a boot loader and related args,
@@ -1028,7 +1028,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
 	size = acr->ucode_blob.size;
 
-	gm20b_dbg_pmu("");
+	gm20b_dbg_pmu(g, " ");
 
 	if (!acr_fw) {
 		/*First time init case*/
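Aside, hedged: in this and the later hunks the empty format string "" becomes " " rather than staying empty. The assumed rationale is that a zero-length printf-style format would trip GCC's -Wformat-zero-length once the message is routed through nvgpu_log(), so a single space is used as the conventional workaround:

	gm20b_dbg_pmu(g, " ");	/* " " not "": avoids -Wformat-zero-length (assumed rationale) */
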
@@ -1141,14 +1141,14 @@ int acr_ucode_patch_sig(struct gk20a *g,
 	unsigned int *p_patch_ind)
 {
 	unsigned int i, *p_sig;
-	gm20b_dbg_pmu("");
+	gm20b_dbg_pmu(g, " ");
 
 	if (!pmu_is_debug_mode_en(g)) {
 		p_sig = p_prod_sig;
-		gm20b_dbg_pmu("PRODUCTION MODE\n");
+		gm20b_dbg_pmu(g, "PRODUCTION MODE\n");
 	} else {
 		p_sig = p_dbg_sig;
-		gm20b_dbg_pmu("DEBUG MODE\n");
+		gm20b_dbg_pmu(g, "DEBUG MODE\n");
 	}
 
 	/* Patching logic:*/
@@ -1171,7 +1171,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
 	struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
 	u32 dst;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	gk20a_writel(g, pwr_falcon_itfen_r(),
 		gk20a_readl(g, pwr_falcon_itfen_r()) |
 		pwr_falcon_itfen_ctxen_enable_f());
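The function-entry traces in this and the following hunks follow the same pattern as the debug macro: a sketch of the before/after pair, with the rationale in comments (the comments are editorial, not from the commit):

	gk20a_dbg_fn("");	/* old: global debug helper, no device context */
	nvgpu_log_fn(g, " ");	/* new: trace is tied to this gk20a instance */
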
@@ -1193,7 +1193,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
 		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
 		pmu_bl_gm10x_desc->bl_start_tag);
 
-	gm20b_dbg_pmu("Before starting falcon with BL\n");
+	gm20b_dbg_pmu(g, "Before starting falcon with BL\n");
 
 	virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
 
@@ -1207,7 +1207,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	nvgpu_flcn_reset(pmu->flcn);
@@ -1279,7 +1279,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	nvgpu_flcn_reset(pmu->flcn);
@@ -1324,7 +1324,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 	struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw;
 	struct hsflcn_bl_desc *pmu_bl_gm10x_desc;
 	u32 *pmu_bl_gm10x = NULL;
-	gm20b_dbg_pmu("");
+	gm20b_dbg_pmu(g, " ");
 
 	if (!hsbl_fw) {
 		hsbl_fw = nvgpu_request_firmware(g,
@@ -1343,7 +1343,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size,
 				256);
 		acr->hsbl_ucode.size = bl_sz;
-		gm20b_dbg_pmu("Executing Generic Bootloader\n");
+		gm20b_dbg_pmu(g, "Executing Generic Bootloader\n");
 
 		/*TODO in code verify that enable PMU is done,
 			scrubbing etc is done*/
@@ -1366,7 +1366,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		}
 
 		nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
-		gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n");
+		gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n");
 	}
 	/*
 	 * Disable interrupts to avoid kernel hitting breakpoint due
@@ -1377,9 +1377,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 			gk20a_get_gr_idle_timeout(g)))
 		goto err_unmap_bl;
 
-	gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g,
+	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
 
 	g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size);
 
@@ -1396,10 +1396,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		else
 			goto err_unmap_bl;
 	}
-	gm20b_dbg_pmu("after waiting for halt, err %x\n", err);
-	gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g,
+	gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
+	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
 	start_gm20b_pmu(g);
 	return 0;
 err_unmap_bl:
@@ -1430,7 +1430,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 	}
 
 	g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
-	gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities);
+	gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
 	data = gk20a_readl(g, pwr_falcon_mailbox0_r());
 	if (data) {
 		nvgpu_err(g, "ACR boot failed, err %x", data);