path: root/drivers/gpu/nvgpu/gm20b
author     Terje Bergstrom <tbergstrom@nvidia.com>    2018-04-18 22:39:46 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2018-05-09 21:26:04 -0400
commit     dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree       806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/gm20b
parent     7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). gk20a_dbg* macros are intentionally
left there because of use from other repositories.

Because the new functions do not work without a pointer to struct gk20a,
and piping it just for logging is excessive, some log messages are
deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
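The shape of the conversion, condensed from the macro definitions and call sites changed in the hunks below (illustrative excerpt assembled by the editor, not part of the original commit message):

    /* Before: debug macros and helpers carried no device context */
    #define gm20b_dbg_pmu(fmt, arg...) \
            gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

            gk20a_dbg_fn("");
            gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base);

    /* After: every call site passes the struct gk20a pointer explicitly */
    #define gm20b_dbg_pmu(g, fmt, arg...) \
            nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

            nvgpu_log_fn(g, " ");
            gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);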
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c    | 132
-rw-r--r--  drivers/gpu/nvgpu/gm20b/bus_gm20b.c    |   4
-rw-r--r--  drivers/gpu/nvgpu/gm20b/clk_gm20b.c    |  41
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fb_gm20b.c     |   2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fifo_gm20b.c   |   2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c     |  48
-rw-r--r--  drivers/gpu/nvgpu/gm20b/ltc_gm20b.c    |  16
-rw-r--r--  drivers/gpu/nvgpu/gm20b/mm_gm20b.c     |   8
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.c    |  40
-rw-r--r--  drivers/gpu/nvgpu/gm20b/therm_gm20b.c  |   4
10 files changed, 149 insertions, 148 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index cffe7199..615b6b46 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -42,8 +42,8 @@
 #include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 
 /*Defines*/
-#define gm20b_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gm20b_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
 
@@ -101,16 +101,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct lsf_ucode_desc *lsf_desc;
 	int err;
-	gm20b_dbg_pmu("requesting PMU ucode in GM20B\n");
+	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n");
 	pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
 	if (!pmu_fw) {
 		nvgpu_err(g, "failed to load pmu ucode!!");
 		return -ENOENT;
 	}
 	g->acr.pmu_fw = pmu_fw;
-	gm20b_dbg_pmu("Loaded PMU ucode in for blob preparation");
+	gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
 
-	gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n");
+	gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
 	pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
 	if (!pmu_desc) {
 		nvgpu_err(g, "failed to load pmu ucode desc!!");
@@ -129,7 +129,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 
 	err = nvgpu_init_pmu_fw_support(pmu);
 	if (err) {
-		gm20b_dbg_pmu("failed to set function pointers\n");
+		gm20b_dbg_pmu(g, "failed to set function pointers\n");
 		goto release_sig;
 	}
 
@@ -148,7 +148,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu("requesting PMU ucode in GM20B exit\n");
+	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
 	nvgpu_release_firmware(g, pmu_sig);
 	return 0;
 release_sig:
@@ -221,7 +221,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu("fecs fw loaded\n");
+	gm20b_dbg_pmu(g, "fecs fw loaded\n");
 	nvgpu_release_firmware(g, fecs_sig);
 	return 0;
 free_lsf_desc:
@@ -292,7 +292,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu("gpccs fw loaded\n");
+	gm20b_dbg_pmu(g, "gpccs fw loaded\n");
 	nvgpu_release_firmware(g, gpccs_sig);
 	return 0;
 free_lsf_desc:
@@ -361,24 +361,24 @@ int prepare_ucode_blob(struct gk20a *g)
 	   non WPR blob of ucodes*/
 		err = nvgpu_init_pmu_fw_support(pmu);
 		if (err) {
-			gm20b_dbg_pmu("failed to set function pointers\n");
+			gm20b_dbg_pmu(g, "failed to set function pointers\n");
 			return err;
 		}
 		return 0;
 	}
 	plsfm = &lsfm_l;
 	memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
-	gm20b_dbg_pmu("fetching GMMU regs\n");
+	gm20b_dbg_pmu(g, "fetching GMMU regs\n");
 	g->ops.fb.vpr_info_fetch(g);
 	gr_gk20a_init_ctxsw_ucode(g);
 
 	g->ops.pmu.get_wpr(g, &wpr_inf);
-	gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base);
-	gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size);
+	gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
+	gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size);
 
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
-	gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
+	gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err)
 		goto free_sgt;
 
@@ -394,13 +394,13 @@ int prepare_ucode_blob(struct gk20a *g)
 		if (err)
 			goto free_sgt;
 
-		gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
+		gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
 		lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
 	} else {
-		gm20b_dbg_pmu("LSFM is managing no falcons.\n");
+		gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n");
 	}
-	gm20b_dbg_pmu("prepare ucode blob return 0\n");
+	gm20b_dbg_pmu(g, "prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
 free_sgt:
 	return err;
@@ -444,13 +444,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 
 			plsfm->managed_flcn_cnt++;
 		} else {
-			gm20b_dbg_pmu("id not managed %d\n",
+			gm20b_dbg_pmu(g, "id not managed %d\n",
 				ucode_img.lsf_desc->falcon_id);
 		}
 
 		/*Free any ucode image resources if not managing this falcon*/
 		if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
-			gm20b_dbg_pmu("pmu is not LSFM managed\n");
+			gm20b_dbg_pmu(g, "pmu is not LSFM managed\n");
 			lsfm_free_ucode_img_res(g, &ucode_img);
 		}
 
@@ -481,7 +481,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 					== 0)
 				plsfm->managed_flcn_cnt++;
 		} else {
-			gm20b_dbg_pmu("not managed %d\n",
+			gm20b_dbg_pmu(g, "not managed %d\n",
 				ucode_img.lsf_desc->falcon_id);
 			lsfm_free_nonpmu_ucode_img_res(g,
 				&ucode_img);
@@ -489,7 +489,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 		}
 	} else {
 		/* Consumed all available falcon objects */
-		gm20b_dbg_pmu("Done checking for ucodes %d\n", i);
+		gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i);
 		break;
 	}
 	}
@@ -526,26 +526,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
-	gm20b_dbg_pmu("pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
+	gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
 	/*From linux*/
 	addr_code = u64_lo32((addr_base +
 			desc->app_start_offset +
 			desc->app_resident_code_offset) >> 8);
-	gm20b_dbg_pmu("app start %d app res code off %d\n",
+	gm20b_dbg_pmu(g, "app start %d app res code off %d\n",
 		desc->app_start_offset, desc->app_resident_code_offset);
 	addr_data = u64_lo32((addr_base +
 			desc->app_start_offset +
 			desc->app_resident_data_offset) >> 8);
-	gm20b_dbg_pmu("app res data offset%d\n",
+	gm20b_dbg_pmu(g, "app res data offset%d\n",
 		desc->app_resident_data_offset);
-	gm20b_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset);
+	gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
 
 	addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
 		gk20a_readl(g, pwr_falcon_hwcfg_r())))
 		<< GK20A_PMU_DMEM_BLKSIZE2);
 	addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
 
-	gm20b_dbg_pmu("addr_args %x\n", addr_args);
+	gm20b_dbg_pmu(g, "addr_args %x\n", addr_args);
 
 	/* Populate the loader_config state*/
 	ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -599,7 +599,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 
-	gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
+	gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
 		p_lsfm->wpr_header.falcon_id);
 	addr_code = u64_lo32((addr_base +
 			desc->app_start_offset +
@@ -608,7 +608,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 			desc->app_start_offset +
 			desc->app_resident_data_offset) >> 8);
 
-	gm20b_dbg_pmu("gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
+	gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
 		(u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
 		p_lsfm->wpr_header.falcon_id);
 
@@ -631,7 +631,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 
 	struct nvgpu_pmu *pmu = &g->pmu;
 	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
-		gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n");
+		gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
 		g->ops.pmu.flcn_populate_bl_dmem_desc(g,
 			pnode, &pnode->bl_gen_desc_size,
 			pnode->wpr_header.falcon_id);
@@ -639,7 +639,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	}
 
 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
-		gm20b_dbg_pmu("pmu write flcn bl gen desc\n");
+		gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
 		if (pnode->wpr_header.falcon_id == pmu->falcon_id)
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
@@ -672,46 +672,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 		nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
 			&pnode->wpr_header, sizeof(pnode->wpr_header));
 
-		gm20b_dbg_pmu("wpr header");
-		gm20b_dbg_pmu("falconid :%d",
+		gm20b_dbg_pmu(g, "wpr header");
+		gm20b_dbg_pmu(g, "falconid :%d",
 			pnode->wpr_header.falcon_id);
-		gm20b_dbg_pmu("lsb_offset :%x",
+		gm20b_dbg_pmu(g, "lsb_offset :%x",
 			pnode->wpr_header.lsb_offset);
-		gm20b_dbg_pmu("bootstrap_owner :%d",
+		gm20b_dbg_pmu(g, "bootstrap_owner :%d",
 			pnode->wpr_header.bootstrap_owner);
-		gm20b_dbg_pmu("lazy_bootstrap :%d",
+		gm20b_dbg_pmu(g, "lazy_bootstrap :%d",
 			pnode->wpr_header.lazy_bootstrap);
-		gm20b_dbg_pmu("status :%d",
+		gm20b_dbg_pmu(g, "status :%d",
 			pnode->wpr_header.status);
 
 		/*Flush LSB header to memory*/
 		nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
 			&pnode->lsb_header, sizeof(pnode->lsb_header));
 
-		gm20b_dbg_pmu("lsb header");
-		gm20b_dbg_pmu("ucode_off :%x",
+		gm20b_dbg_pmu(g, "lsb header");
+		gm20b_dbg_pmu(g, "ucode_off :%x",
 			pnode->lsb_header.ucode_off);
-		gm20b_dbg_pmu("ucode_size :%x",
+		gm20b_dbg_pmu(g, "ucode_size :%x",
 			pnode->lsb_header.ucode_size);
-		gm20b_dbg_pmu("data_size :%x",
+		gm20b_dbg_pmu(g, "data_size :%x",
 			pnode->lsb_header.data_size);
-		gm20b_dbg_pmu("bl_code_size :%x",
+		gm20b_dbg_pmu(g, "bl_code_size :%x",
 			pnode->lsb_header.bl_code_size);
-		gm20b_dbg_pmu("bl_imem_off :%x",
+		gm20b_dbg_pmu(g, "bl_imem_off :%x",
 			pnode->lsb_header.bl_imem_off);
-		gm20b_dbg_pmu("bl_data_off :%x",
+		gm20b_dbg_pmu(g, "bl_data_off :%x",
 			pnode->lsb_header.bl_data_off);
-		gm20b_dbg_pmu("bl_data_size :%x",
+		gm20b_dbg_pmu(g, "bl_data_size :%x",
 			pnode->lsb_header.bl_data_size);
-		gm20b_dbg_pmu("app_code_off :%x",
+		gm20b_dbg_pmu(g, "app_code_off :%x",
 			pnode->lsb_header.app_code_off);
-		gm20b_dbg_pmu("app_code_size :%x",
+		gm20b_dbg_pmu(g, "app_code_size :%x",
 			pnode->lsb_header.app_code_size);
-		gm20b_dbg_pmu("app_data_off :%x",
+		gm20b_dbg_pmu(g, "app_data_off :%x",
 			pnode->lsb_header.app_data_off);
-		gm20b_dbg_pmu("app_data_size :%x",
+		gm20b_dbg_pmu(g, "app_data_size :%x",
 			pnode->lsb_header.app_data_size);
-		gm20b_dbg_pmu("flags :%x",
+		gm20b_dbg_pmu(g, "flags :%x",
 			pnode->lsb_header.flags);
 
 		/*If this falcon has a boot loader and related args,
@@ -1028,7 +1028,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
 	size = acr->ucode_blob.size;
 
-	gm20b_dbg_pmu("");
+	gm20b_dbg_pmu(g, " ");
 
 	if (!acr_fw) {
 		/*First time init case*/
@@ -1141,14 +1141,14 @@ int acr_ucode_patch_sig(struct gk20a *g,
 		unsigned int *p_patch_ind)
 {
 	unsigned int i, *p_sig;
-	gm20b_dbg_pmu("");
+	gm20b_dbg_pmu(g, " ");
 
 	if (!pmu_is_debug_mode_en(g)) {
 		p_sig = p_prod_sig;
-		gm20b_dbg_pmu("PRODUCTION MODE\n");
+		gm20b_dbg_pmu(g, "PRODUCTION MODE\n");
 	} else {
 		p_sig = p_dbg_sig;
-		gm20b_dbg_pmu("DEBUG MODE\n");
+		gm20b_dbg_pmu(g, "DEBUG MODE\n");
 	}
 
 	/* Patching logic:*/
@@ -1171,7 +1171,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
 	struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
 	u32 dst;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	gk20a_writel(g, pwr_falcon_itfen_r(),
 		gk20a_readl(g, pwr_falcon_itfen_r()) |
 		pwr_falcon_itfen_ctxen_enable_f());
@@ -1193,7 +1193,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
 		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
 		pmu_bl_gm10x_desc->bl_start_tag);
 
-	gm20b_dbg_pmu("Before starting falcon with BL\n");
+	gm20b_dbg_pmu(g, "Before starting falcon with BL\n");
 
 	virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
 
@@ -1207,7 +1207,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	nvgpu_flcn_reset(pmu->flcn);
@@ -1279,7 +1279,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	nvgpu_flcn_reset(pmu->flcn);
@@ -1324,7 +1324,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 	struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw;
 	struct hsflcn_bl_desc *pmu_bl_gm10x_desc;
 	u32 *pmu_bl_gm10x = NULL;
-	gm20b_dbg_pmu("");
+	gm20b_dbg_pmu(g, " ");
 
 	if (!hsbl_fw) {
 		hsbl_fw = nvgpu_request_firmware(g,
@@ -1343,7 +1343,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size,
 			256);
 		acr->hsbl_ucode.size = bl_sz;
-		gm20b_dbg_pmu("Executing Generic Bootloader\n");
+		gm20b_dbg_pmu(g, "Executing Generic Bootloader\n");
 
 		/*TODO in code verify that enable PMU is done,
 			scrubbing etc is done*/
@@ -1366,7 +1366,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		}
 
 		nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
-		gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n");
+		gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n");
 	}
 	/*
 	 * Disable interrupts to avoid kernel hitting breakpoint due
@@ -1377,9 +1377,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 			gk20a_get_gr_idle_timeout(g)))
 		goto err_unmap_bl;
 
-	gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g,
+	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
 
 	g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size);
 
@@ -1396,10 +1396,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		else
 			goto err_unmap_bl;
 	}
-	gm20b_dbg_pmu("after waiting for halt, err %x\n", err);
-	gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g,
+	gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
+	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
 	start_gm20b_pmu(g);
 	return 0;
 err_unmap_bl:
@@ -1430,7 +1430,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 	}
 
 	g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
-	gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities);
+	gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
 	data = gk20a_readl(g, pwr_falcon_mailbox0_r());
 	if (data) {
 		nvgpu_err(g, "ACR boot failed, err %x", data);
diff --git a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
index cdd70d5b..ca2a40bf 100644
--- a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B MMU
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
 	u64 iova = nvgpu_inst_block_addr(g, bar1_inst);
 	u32 ptr_v = (u32)(iova >> bus_bar1_block_ptr_shift_v());
 
-	gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v);
+	nvgpu_log_info(g, "bar1 inst block ptr: 0x%08x", ptr_v);
 
 	gk20a_writel(g, bus_bar1_block_r(),
 		nvgpu_aperture_mask(g, bar1_inst,
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index fa751ecc..fb89752a 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B Clocks
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,8 +36,8 @@
 #include <nvgpu/hw/gm20b/hw_therm_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
 
-#define gk20a_dbg_clk(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
+#define gk20a_dbg_clk(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_clk, fmt, ##arg)
 
 #define DFS_DET_RANGE	6	/* -2^6 ... 2^6-1 */
 #define SDM_DIN_RANGE	12	/* -2^12 ... 2^12-1 */
@@ -138,6 +138,7 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
 static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 	struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
 {
+	struct gk20a *g = clk->g;
 	u32 min_vco_f, max_vco_f;
 	u32 best_M, best_N;
 	u32 low_PL, high_PL, best_PL;
@@ -149,7 +150,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 
 	BUG_ON(target_freq == NULL);
 
-	gk20a_dbg_fn("request target freq %d MHz", *target_freq);
+	nvgpu_log_fn(g, "request target freq %d MHz", *target_freq);
 
 	ref_clk_f = pll->clk_in;
 	target_clk_f = *target_freq;
@@ -172,7 +173,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 	low_PL = min(low_PL, pll_params->max_PL);
 	low_PL = max(low_PL, pll_params->min_PL);
 
-	gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
+	nvgpu_log_info(g, "low_PL %d(div%d), high_PL %d(div%d)",
 		low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL));
 
 	for (pl = low_PL; pl <= high_PL; pl++) {
@@ -217,7 +218,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 				goto found_match;
 			}
 
-			gk20a_dbg_info("delta %d @ M %d, N %d, PL %d",
+			nvgpu_log_info(g, "delta %d @ M %d, N %d, PL %d",
 				delta, m, n, pl);
 		}
 	}
@@ -229,7 +230,7 @@ found_match:
 	BUG_ON(best_delta == ~0U);
 
 	if (best_fit && best_delta != 0)
-		gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll",
+		gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll",
 			target_clk_f);
 
 	pll->M = best_M;
@@ -241,10 +242,10 @@ found_match:
 
 	*target_freq = pll->freq;
 
-	gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
+	gk20a_dbg_clk(g, "actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
 		*target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL));
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -810,7 +811,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 	if (gpll->mode == GPC_PLL_MODE_DVFS) {
 		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
 		nvgpu_udelay(gpc_pll_params.na_lock_delay);
-		gk20a_dbg_clk("NA config_pll under bypass: %u (%u) kHz %d mV",
+		gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV",
 			gpll->freq, gpll->freq / 2,
 			(trim_sys_gpcpll_cfg3_dfs_testout_v(
 				gk20a_readl(g, trim_sys_gpcpll_cfg3_r()))
@@ -843,7 +844,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 	return -EBUSY;
 
 pll_locked:
-	gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x",
+	gk20a_dbg_clk(g, "locked config_pll under bypass r=0x%x v=0x%x",
 		trim_sys_gpcpll_cfg_r(), cfg);
 
 	/* set SYNC_MODE for glitchless switch out of bypass */
@@ -878,7 +879,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	bool can_slide, pldiv_only;
 	struct pll gpll;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!nvgpu_platform_is_silicon(g))
 		return 0;
@@ -1028,7 +1029,7 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
 	gpll->N = nsafe;
 	clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs);
 
-	gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
+	gk20a_dbg_clk(g, "safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
 		gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL),
 		gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff);
 }
@@ -1103,7 +1104,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal);
 	clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff);
 
-	gk20a_dbg_clk("config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
+	gk20a_dbg_clk(g, "config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
 		gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL,
 		nvgpu_pl_to_div(gpll_new->PL),
 		max(gpll_new->dvfs.mv, gpll_old->dvfs.mv),
@@ -1168,14 +1169,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
 	unsigned long safe_rate;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_mutex_init(&clk->clk_mutex);
 	if (err)
 		return err;
 
 	if (clk->sw_ready) {
-		gk20a_dbg_fn("skip init");
+		nvgpu_log_fn(g, "skip init");
 		return 0;
 	}
 
@@ -1229,7 +1230,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
 
 	clk->sw_ready = true;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	nvgpu_info(g,
 		"GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)",
 		clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "",
@@ -1321,7 +1322,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
 {
 	u32 data;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */
 	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
@@ -1394,7 +1395,7 @@ static int set_pll_freq(struct gk20a *g, int allow_slide)
 	struct clk_gk20a *clk = &g->clk;
 	int err = 0;
 
-	gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz",
+	nvgpu_log_fn(g, "last freq: %dMHz, target freq %dMHz",
 		clk->gpc_pll_last.freq, clk->gpc_pll.freq);
 
 	/* If programming with dynamic sliding failed, re-try under bypass */
@@ -1427,7 +1428,7 @@ int gm20b_init_clk_support(struct gk20a *g)
 	struct clk_gk20a *clk = &g->clk;
 	u32 err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&clk->clk_mutex);
 	clk->clk_hw_on = true;
diff --git a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
index 5bc6d452..b2a815fb 100644
--- a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
@@ -38,7 +38,7 @@
 
 void fb_gm20b_init_fs_state(struct gk20a *g)
 {
-	gk20a_dbg_info("initialize gm20b fb");
+	nvgpu_log_info(g, "initialize gm20b fb");
 
 	gk20a_writel(g, fb_fbhub_num_active_ltcs_r(),
 		g->ltc_count);
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 35a7a9e1..b73abeda 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -47,7 +47,7 @@ void channel_gm20b_bind(struct channel_gk20a *c)
 	u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block)
 		>> ram_in_base_shift_v();
 
-	gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
+	nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x",
 		c->chid, inst_ptr);
 
 
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 1c966c22..331c3af9 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -47,7 +47,7 @@ void gr_gm20b_init_gpc_mmu(struct gk20a *g)
 {
 	u32 temp;
 
-	gk20a_dbg_info("initialize gpc mmu");
+	nvgpu_log_info(g, "initialize gpc mmu");
 
 	if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		/* Bypass MMU check for non-secure boot. For
@@ -168,7 +168,7 @@ void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
 
 	data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
 
-	gk20a_dbg_info("bundle cb token limit : %d, state limit : %d",
+	nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d",
 		g->gr.bundle_cb_token_limit, data);
 
 	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -193,7 +193,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
 	u32 num_pes_per_gpc = nvgpu_get_litter_value(g,
 		GPU_LIT_NUM_PES_PER_GPC);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -280,20 +280,20 @@ void gr_gm20b_set_rd_coalesce(struct gk20a *g, u32 data)
 {
 	u32 val;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r());
 	val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(),
 		gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data));
 	gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
 	u32 class_num, u32 offset, u32 data)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (class_num == MAXWELL_COMPUTE_B) {
 		switch (offset << 2) {
@@ -341,7 +341,7 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	/* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
 		return; */
 
@@ -390,7 +390,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (cb_size > gr->attrib_cb_size)
 		cb_size = gr->attrib_cb_size;
@@ -665,7 +665,7 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
 {
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_init_fs_state(g);
 	if (err)
@@ -762,7 +762,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 		gr_fecs_falcon_hwcfg_r();
 	u8 falcon_id_mask = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
@@ -829,7 +829,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 	gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff);
 	gk20a_writel(g, gr_fecs_cpuctl_alias_r(),
 		gr_fecs_cpuctl_startcpu_f(1));
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -858,7 +858,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
 {
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
 	if (err)
@@ -867,7 +867,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
 	if (class == MAXWELL_COMPUTE_B)
 		gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -881,7 +881,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
 	u32 cta_preempt_option =
 		ctxsw_prog_main_image_preemption_options_control_cta_enabled_f();
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -889,13 +889,13 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	gr_ctx = &tsg->gr_ctx;
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
-		gk20a_dbg_info("CTA: %x", cta_preempt_option);
+		nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
 		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_preemption_options_o(),
 			cta_preempt_option);
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
@@ -1044,7 +1044,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 	struct nvgpu_mem *mem;
 	u32 v;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(c->g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -1066,7 +1066,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 
 	nvgpu_mem_end(c->g, mem);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(c->g, "done");
 
 	return 0;
 }
@@ -1220,19 +1220,19 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
 
 	/* Only for debug purpose */
 	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
-		gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].valid_warps[0]);
-		gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].valid_warps[1]);
 
-		gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].trapped_warps[0]);
-		gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].trapped_warps[1]);
 
-		gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].paused_warps[0]);
-		gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].paused_warps[1]);
 	}
 }
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index dcb65372..66cd49e7 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -61,7 +61,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (max_comptag_lines == 0U)
 		return 0;
@@ -87,9 +87,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 	if (max_comptag_lines > hw_max_comptag_lines)
 		max_comptag_lines = hw_max_comptag_lines;
 
-	gk20a_dbg_info("compbit backing store size : %d",
+	nvgpu_log_info(g, "compbit backing store size : %d",
 		compbit_backing_size);
-	gk20a_dbg_info("max comptag lines : %d",
+	nvgpu_log_info(g, "max comptag lines : %d",
 		max_comptag_lines);
 
 	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
@@ -121,7 +121,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
 	const u32 max_lines = 16384U;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
 
@@ -134,7 +134,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 
 	nvgpu_mutex_acquire(&g->mm.l2_op_lock);
 
-	gk20a_dbg_info("clearing CBC lines %u..%u", min, iter_max);
+	nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
 
 	if (op == gk20a_cbc_op_clear) {
 		gk20a_writel(
@@ -205,11 +205,11 @@ void gm20b_ltc_init_fs_state(struct gk20a *g)
 {
 	u32 reg;
 
-	gk20a_dbg_info("initialize gm20b l2");
+	nvgpu_log_info(g, "initialize gm20b l2");
 
 	g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
 	g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
-	gk20a_dbg_info("%d ltcs out of %d", g->ltc_count, g->max_ltc_count);
+	nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count);
 
 	gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(),
 		g->ltc_count);
@@ -459,7 +459,7 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
 		compbit_base_post_divide);
 
-	gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
+	nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
 		"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
 		(u32)(compbit_store_iova >> 32),
 		(u32)(compbit_store_iova & 0xffffffff),
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 46cd1fc6..deca6686 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B MMU
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,9 +36,9 @@ void gm20b_mm_set_big_page_size(struct gk20a *g,
 {
 	u32 val;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
-	gk20a_dbg_info("big page size %d\n", size);
+	nvgpu_log_info(g, "big page size %d\n", size);
 	val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());
 	val &= ~ram_in_big_page_size_m();
 
@@ -48,7 +48,7 @@ void gm20b_mm_set_big_page_size(struct gk20a *g,
 		val |= ram_in_big_page_size_128kb_f();
 
 	nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val);
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 u32 gm20b_mm_get_big_page_sizes(void)
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 1c5fdce0..aa992c37 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B PMU
  *
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
 *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -37,8 +37,8 @@
 #include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
 
-#define gm20b_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gm20b_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 
 /* PROD settings for ELPG sequencing registers*/
@@ -108,7 +108,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
 	u32 reg_writes;
 	u32 index;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (g->elpg_enabled) {
 		reg_writes = ((sizeof(_pginitseq_gm20b) /
@@ -120,20 +120,20 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
 		}
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return ret;
 }
 
 static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
 	void *param, u32 handle, u32 status)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
-	gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
+	gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
 
 	if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS)
 		g->pmu_lsf_pmu_wpr_init_done = 1;
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 
@@ -143,7 +143,7 @@ int gm20b_pmu_init_acr(struct gk20a *g)
 	struct pmu_cmd cmd;
 	u32 seq;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* init ACR */
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -153,11 +153,11 @@ int gm20b_pmu_init_acr(struct gk20a *g)
 	cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION;
 	cmd.cmd.acr.init_wpr.regionid = 0x01;
 	cmd.cmd.acr.init_wpr.wproffset = 0x00;
-	gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION");
+	gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION");
 	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 		pmu_handle_acr_init_wpr_msg, pmu, &seq, ~0);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -165,14 +165,14 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
 	void *param, u32 handle, u32 status)
 {
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 
-	gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
+	gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
 
-	gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid);
+	gm20b_dbg_pmu(g, "response code = %x\n", msg->msg.acr.acrmsg.falconid);
 	g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid;
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
@@ -182,7 +182,7 @@ static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
 	u32 reg;
 	struct nvgpu_timeout timeout;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
 
 	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
@@ -203,9 +203,9 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 	struct pmu_cmd cmd;
 	u32 seq;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
-	gm20b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
+	gm20b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
 	if (g->pmu_lsf_pmu_wpr_init_done) {
 		/* send message to load FECS falcon */
 		memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -216,13 +216,13 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 			PMU_ACR_CMD_ID_BOOTSTRAP_FALCON;
 		cmd.cmd.acr.bootstrap_falcon.flags = flags;
 		cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id;
-		gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n",
+		gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n",
 			falcon_id);
 		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return;
 }
 
diff --git a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
index ce4d4fab..dfe977ff 100644
--- a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B THERMAL
  *
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ int gm20b_init_therm_setup_hw(struct gk20a *g)
 {
 	u32 v;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* program NV_THERM registers */
 	gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |