Diffstat (limited to 'drivers/gpu/nvgpu/gp106')
-rw-r--r--  drivers/gpu/nvgpu/gp106/acr_gp106.c    98
-rw-r--r--  drivers/gpu/nvgpu/gp106/bios_gp106.c   20
-rw-r--r--  drivers/gpu/nvgpu/gp106/clk_gp106.c     7
-rw-r--r--  drivers/gpu/nvgpu/gp106/fb_gp106.c      4
-rw-r--r--  drivers/gpu/nvgpu/gp106/gr_gp106.c     10
-rw-r--r--  drivers/gpu/nvgpu/gp106/hal_gp106.c     4
-rw-r--r--  drivers/gpu/nvgpu/gp106/mclk_gp106.c   12
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.c    18
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.h     6
-rw-r--r--  drivers/gpu/nvgpu/gp106/sec2_gp106.c   12
-rw-r--r--  drivers/gpu/nvgpu/gp106/therm_gp106.c   6
-rw-r--r--  drivers/gpu/nvgpu/gp106/xve_gp106.c    60
-rw-r--r--  drivers/gpu/nvgpu/gp106/xve_gp106.h    10
13 files changed, 132 insertions, 135 deletions
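
The change is mechanical across all thirteen files: the per-chip debug macros gain a struct gk20a *g parameter and forward to the nvgpu_log() family (nvgpu_log(), nvgpu_log_info(), nvgpu_log_fn()) instead of the older global gk20a_dbg*() helpers, so each message is tied to the GPU instance that emitted it. A minimal sketch of the pattern, taken from the gp106_dbg_pmu definitions in this diff; the call site shown is illustrative, not a specific line of the change:

/*
 * Old form (removed by this change): no device pointer, logging goes
 * through the global gk20a_dbg() helper.
 *
 *	#define gp106_dbg_pmu(fmt, arg...) \
 *		gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
 *
 * New form: the macro takes the per-device context g and forwards to
 * nvgpu_log(), which scopes the message to that device.
 */
#define gp106_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

/* Every call site gains the extra argument, e.g.: */
gp106_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);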
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 5ab8cfcc..61b443e0 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -43,8 +43,8 @@
 #include <nvgpu/hw/gp106/hw_pwr_gp106.h>
 
 /*Defines*/
-#define gp106_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gp106_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 typedef int (*get_ucode_details)(struct gk20a *g,
 		struct flcn_ucode_img_v1 *udata);
@@ -113,7 +113,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	struct lsf_ucode_desc_v1 *lsf_desc;
 	int err;
 
-	gp106_dbg_pmu("requesting PMU ucode in gp106\n");
+	gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n");
 	pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE,
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 	if (!pmu_fw) {
@@ -121,9 +121,9 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 		return -ENOENT;
 	}
 	g->acr.pmu_fw = pmu_fw;
-	gp106_dbg_pmu("Loaded PMU ucode in for blob preparation");
+	gp106_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
 
-	gp106_dbg_pmu("requesting PMU ucode desc in GM20B\n");
+	gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
 	pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC,
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 	if (!pmu_desc) {
@@ -164,7 +164,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
-	gp106_dbg_pmu("requesting PMU ucode in GM20B exit\n");
+	gp106_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
 
 	nvgpu_release_firmware(g, pmu_sig);
 	return 0;
@@ -262,7 +262,7 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
-	gp106_dbg_pmu("fecs fw loaded\n");
+	gp106_dbg_pmu(g, "fecs fw loaded\n");
 	nvgpu_release_firmware(g, fecs_sig);
 	return 0;
 free_lsf_desc:
@@ -358,7 +358,7 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
-	gp106_dbg_pmu("gpccs fw loaded\n");
+	gp106_dbg_pmu(g, "gpccs fw loaded\n");
 	nvgpu_release_firmware(g, gpccs_sig);
 	return 0;
 free_lsf_desc:
@@ -381,7 +381,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	   non WPR blob of ucodes*/
 	err = nvgpu_init_pmu_fw_support(pmu);
 	if (err) {
-		gp106_dbg_pmu("failed to set function pointers\n");
+		gp106_dbg_pmu(g, "failed to set function pointers\n");
 		return err;
 	}
 	return 0;
@@ -391,12 +391,12 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	gr_gk20a_init_ctxsw_ucode(g);
 
 	g->ops.pmu.get_wpr(g, &wpr_inf);
-	gp106_dbg_pmu("wpr carveout base:%llx\n", (wpr_inf.wpr_base));
-	gp106_dbg_pmu("wpr carveout size :%x\n", (u32)wpr_inf.size);
+	gp106_dbg_pmu(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
+	gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size);
 
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
-	gp106_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
+	gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err)
 		goto exit_err;
 
@@ -412,14 +412,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 		if (err)
 			goto exit_err;
 
-		gp106_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
+		gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
 
 		lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
 	} else {
-		gp106_dbg_pmu("LSFM is managing no falcons.\n");
+		gp106_dbg_pmu(g, "LSFM is managing no falcons.\n");
 	}
-	gp106_dbg_pmu("prepare ucode blob return 0\n");
+	gp106_dbg_pmu(g, "prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
 
 exit_err:
@@ -465,14 +465,14 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 
 			plsfm->managed_flcn_cnt++;
 		} else {
-			gp106_dbg_pmu("id not managed %d\n",
+			gp106_dbg_pmu(g, "id not managed %d\n",
 				ucode_img.lsf_desc->falcon_id);
 		}
 	}
 
 	/*Free any ucode image resources if not managing this falcon*/
 	if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
-		gp106_dbg_pmu("pmu is not LSFM managed\n");
+		gp106_dbg_pmu(g, "pmu is not LSFM managed\n");
 		lsfm_free_ucode_img_res(g, &ucode_img);
 	}
 
@@ -503,7 +503,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 				== 0)
 				plsfm->managed_flcn_cnt++;
 			} else {
-				gp106_dbg_pmu("not managed %d\n",
+				gp106_dbg_pmu(g, "not managed %d\n",
 					ucode_img.lsf_desc->falcon_id);
 				lsfm_free_nonpmu_ucode_img_res(g,
 					&ucode_img);
@@ -511,7 +511,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 			}
 		} else {
 			/* Consumed all available falcon objects */
-			gp106_dbg_pmu("Done checking for ucodes %d\n", i);
+			gp106_dbg_pmu(g, "Done checking for ucodes %d\n", i);
 			break;
 		}
 	}
@@ -549,19 +549,19 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += (wpr_inf.wpr_base);
 
-	gp106_dbg_pmu("pmu loader cfg addrbase 0x%llx\n", addr_base);
+	gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base);
 	/*From linux*/
 	addr_code = addr_base +
 			desc->app_start_offset +
 			desc->app_resident_code_offset;
-	gp106_dbg_pmu("app start %d app res code off %d\n",
+	gp106_dbg_pmu(g, "app start %d app res code off %d\n",
 		desc->app_start_offset, desc->app_resident_code_offset);
 	addr_data = addr_base +
 			desc->app_start_offset +
 			desc->app_resident_data_offset;
-	gp106_dbg_pmu("app res data offset%d\n",
+	gp106_dbg_pmu(g, "app res data offset%d\n",
 		desc->app_resident_data_offset);
-	gp106_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset);
+	gp106_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
 
 	addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
 		gk20a_readl(g, pwr_falcon_hwcfg_r())))
@@ -569,7 +569,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
 
 	addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
 
-	gp106_dbg_pmu("addr_args %x\n", addr_args);
+	gp106_dbg_pmu(g, "addr_args %x\n", addr_args);
 
 	/* Populate the LOADER_CONFIG state */
 	memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1));
@@ -621,8 +621,8 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 
-	gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id);
-	gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base);
+	gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id);
+	gp106_dbg_pmu(g, "gen loader cfg addrbase %llx ", addr_base);
 	addr_code = addr_base +
 			desc->app_start_offset +
 			desc->app_resident_code_offset;
@@ -630,7 +630,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 			desc->app_start_offset +
 			desc->app_resident_data_offset;
 
-	gp106_dbg_pmu("gen cfg addrcode %llx data %llx load offset %x",
+	gp106_dbg_pmu(g, "gen cfg addrcode %llx data %llx load offset %x",
 		addr_code, addr_data, desc->bootloader_start_offset);
 
 	/* Populate the LOADER_CONFIG state */
@@ -653,7 +653,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 
 	struct nvgpu_pmu *pmu = &g->pmu;
 	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
-		gp106_dbg_pmu("non pmu. write flcn bl gen desc\n");
+		gp106_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
 		g->ops.pmu.flcn_populate_bl_dmem_desc(g,
 			pnode, &pnode->bl_gen_desc_size,
 			pnode->wpr_header.falcon_id);
@@ -661,7 +661,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	}
 
 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
-		gp106_dbg_pmu("pmu write flcn bl gen desc\n");
+		gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
 		if (pnode->wpr_header.falcon_id == pmu->falcon_id)
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
@@ -694,46 +694,46 @@ void lsfm_init_wpr_contents(struct gk20a *g,
 	nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
 			&pnode->wpr_header, sizeof(pnode->wpr_header));
 
-	gp106_dbg_pmu("wpr header");
-	gp106_dbg_pmu("falconid :%d",
+	gp106_dbg_pmu(g, "wpr header");
+	gp106_dbg_pmu(g, "falconid :%d",
 			pnode->wpr_header.falcon_id);
-	gp106_dbg_pmu("lsb_offset :%x",
+	gp106_dbg_pmu(g, "lsb_offset :%x",
 			pnode->wpr_header.lsb_offset);
-	gp106_dbg_pmu("bootstrap_owner :%d",
+	gp106_dbg_pmu(g, "bootstrap_owner :%d",
 			pnode->wpr_header.bootstrap_owner);
-	gp106_dbg_pmu("lazy_bootstrap :%d",
+	gp106_dbg_pmu(g, "lazy_bootstrap :%d",
 			pnode->wpr_header.lazy_bootstrap);
-	gp106_dbg_pmu("status :%d",
+	gp106_dbg_pmu(g, "status :%d",
 			pnode->wpr_header.status);
 
 	/*Flush LSB header to memory*/
 	nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
 			&pnode->lsb_header, sizeof(pnode->lsb_header));
 
-	gp106_dbg_pmu("lsb header");
-	gp106_dbg_pmu("ucode_off :%x",
+	gp106_dbg_pmu(g, "lsb header");
+	gp106_dbg_pmu(g, "ucode_off :%x",
 			pnode->lsb_header.ucode_off);
-	gp106_dbg_pmu("ucode_size :%x",
+	gp106_dbg_pmu(g, "ucode_size :%x",
 			pnode->lsb_header.ucode_size);
-	gp106_dbg_pmu("data_size :%x",
+	gp106_dbg_pmu(g, "data_size :%x",
 			pnode->lsb_header.data_size);
-	gp106_dbg_pmu("bl_code_size :%x",
+	gp106_dbg_pmu(g, "bl_code_size :%x",
 			pnode->lsb_header.bl_code_size);
-	gp106_dbg_pmu("bl_imem_off :%x",
+	gp106_dbg_pmu(g, "bl_imem_off :%x",
 			pnode->lsb_header.bl_imem_off);
-	gp106_dbg_pmu("bl_data_off :%x",
+	gp106_dbg_pmu(g, "bl_data_off :%x",
 			pnode->lsb_header.bl_data_off);
-	gp106_dbg_pmu("bl_data_size :%x",
+	gp106_dbg_pmu(g, "bl_data_size :%x",
 			pnode->lsb_header.bl_data_size);
-	gp106_dbg_pmu("app_code_off :%x",
+	gp106_dbg_pmu(g, "app_code_off :%x",
 			pnode->lsb_header.app_code_off);
-	gp106_dbg_pmu("app_code_size :%x",
+	gp106_dbg_pmu(g, "app_code_size :%x",
 			pnode->lsb_header.app_code_size);
-	gp106_dbg_pmu("app_data_off :%x",
+	gp106_dbg_pmu(g, "app_data_off :%x",
 			pnode->lsb_header.app_data_off);
-	gp106_dbg_pmu("app_data_size :%x",
+	gp106_dbg_pmu(g, "app_data_size :%x",
 			pnode->lsb_header.app_data_size);
-	gp106_dbg_pmu("flags :%x",
+	gp106_dbg_pmu(g, "flags :%x",
 			pnode->lsb_header.flags);
 
 	/*If this falcon has a boot loader and related args,
@@ -1049,7 +1049,7 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g)
 	u32 *acr_ucode_data_t210_load;
 	struct wpr_carveout_info wpr_inf;
 
-	gp106_dbg_pmu("");
+	gp106_dbg_pmu(g, " ");
 
 	if (!acr_fw) {
 		/*First time init case*/
diff --git a/drivers/gpu/nvgpu/gp106/bios_gp106.c b/drivers/gpu/nvgpu/gp106/bios_gp106.c
index 8511d3c2..3363aeba 100644
--- a/drivers/gpu/nvgpu/gp106/bios_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/bios_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -56,13 +56,13 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port)
 	u32 *src_u32 = (u32 *)src;
 	u32 blk;
 
-	gk20a_dbg_info("upload %d bytes to %x", size, dst);
+	nvgpu_log_info(g, "upload %d bytes to %x", size, dst);
 
 	words = DIV_ROUND_UP(size, 4);
 
 	blk = dst >> 8;
 
-	gk20a_dbg_info("upload %d words to %x blk %d",
+	nvgpu_log_info(g, "upload %d words to %x blk %d",
 			words, dst, blk);
 	gk20a_writel(g, pwr_falcon_dmemc_r(port),
 		pwr_falcon_dmemc_offs_f(dst >> 2) |
@@ -79,7 +79,7 @@ static int gp106_bios_devinit(struct gk20a *g)
 	int devinit_completed;
 	struct nvgpu_timeout timeout;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_flcn_reset(g->pmu.flcn)) {
 		err = -ETIMEDOUT;
@@ -128,7 +128,7 @@ static int gp106_bios_devinit(struct gk20a *g)
 			gk20a_get_gr_idle_timeout(g));
 
 out:
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return err;
 }
 
@@ -146,7 +146,7 @@ static int gp106_bios_preos(struct gk20a *g)
 {
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_flcn_reset(g->pmu.flcn)) {
 		err = -ETIMEDOUT;
@@ -177,7 +177,7 @@ static int gp106_bios_preos(struct gk20a *g)
 			gk20a_get_gr_idle_timeout(g));
 
 out:
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return err;
 }
 
@@ -186,12 +186,12 @@ int gp106_bios_init(struct gk20a *g)
 	unsigned int i;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (g->bios_is_init)
 		return 0;
 
-	gk20a_dbg_info("reading bios from EEPROM");
+	nvgpu_log_info(g, "reading bios from EEPROM");
 	g->bios.size = BIOS_SIZE;
 	g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE);
 	if (!g->bios.data)
@@ -218,7 +218,7 @@ int gp106_bios_init(struct gk20a *g)
 		goto free_firmware;
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	err = gp106_bios_devinit(g);
 	if (err) {
diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c
index 9a94a7b9..d19baac5 100644
--- a/drivers/gpu/nvgpu/gp106/clk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c
@@ -36,9 +36,6 @@
 
 #include <nvgpu/hw/gp106/hw_trim_gp106.h>
 
-#define gk20a_dbg_clk(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
-
 #ifdef CONFIG_DEBUG_FS
 static int clk_gp106_debugfs_init(struct gk20a *g);
 #endif
@@ -82,7 +79,7 @@ int gp106_init_clk_support(struct gk20a *g)
 	struct clk_gk20a *clk = &g->clk;
 	u32 err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_mutex_init(&clk->clk_mutex);
 	if (err)
@@ -374,7 +371,7 @@ static int clk_gp106_debugfs_init(struct gk20a *g)
 	d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root,
 		g, &gpc_cfc_fops);
 
-	gk20a_dbg(gpu_dbg_info, "g=%p", g);
+	nvgpu_log(g, gpu_dbg_info, "g=%p", g);
 
 	for (i = 0; i < g->clk.namemap_num; i++) {
 		if (g->clk.clk_namemap[i].is_enable) {
diff --git a/drivers/gpu/nvgpu/gp106/fb_gp106.c b/drivers/gpu/nvgpu/gp106/fb_gp106.c
index 34e9ee30..2bf97f61 100644
--- a/drivers/gpu/nvgpu/gp106/fb_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/fb_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,7 +39,7 @@ void gp106_fb_reset(struct gk20a *g)
 	do {
 		u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
 		if (fb_niso_scrub_status_flag_v(w)) {
-			gk20a_dbg_fn("done");
+			nvgpu_log_fn(g, "done");
 			break;
 		}
 		nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c
index 1bd24b45..2e5f29ee 100644
--- a/drivers/gpu/nvgpu/gp106/gr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c
@@ -58,7 +58,7 @@ bool gr_gp106_is_valid_class(struct gk20a *g, u32 class_num)
 	default:
 		break;
 	}
-	gk20a_dbg_info("class=0x%x valid=%d", class_num, valid);
+	nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
 	return valid;
 }
 
@@ -75,7 +75,7 @@ static void gr_gp106_set_go_idle_timeout(struct gk20a *g, u32 data)
 int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr,
 			u32 class_num, u32 offset, u32 data)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (class_num == PASCAL_COMPUTE_B) {
 		switch (offset << 2) {
@@ -177,9 +177,9 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 			g->gr.max_tpc_count;
 	attrib_cb_size = ALIGN(attrib_cb_size, 128);
 
-	gk20a_dbg_info("gfxp context spill_size=%d", spill_size);
-	gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size);
-	gk20a_dbg_info("gfxp context attrib_cb_size=%d",
+	nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
+	nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
+	nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
 		attrib_cb_size);
 
 	err = gr_gp10b_alloc_buffer(vm,
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index 82cc36aa..6d3154e3 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -765,7 +765,7 @@ int gp106_init_hal(struct gk20a *g)
 {
 	struct gpu_ops *gops = &g->ops;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	gops->bios = gp106_ops.bios;
 	gops->ltc = gp106_ops.ltc;
@@ -828,7 +828,7 @@ int gp106_init_hal(struct gk20a *g)
 
 	g->name = "gp10x";
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/gp106/mclk_gp106.c b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
index 44f0b1d9..bfb66e6e 100644
--- a/drivers/gpu/nvgpu/gp106/mclk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -2998,7 +2998,7 @@ static void mclk_seq_pmucmdhandler(struct gk20a *g, struct pmu_msg *_msg,
 	struct nv_pmu_seq_msg_run_script *seq_msg;
 	u32 msg_status = 0;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	if (status != 0) {
 		nvgpu_err(g, "mclk seq_script cmd aborted");
@@ -3041,7 +3041,7 @@ static int mclk_get_memclk_table(struct gk20a *g)
 	u8 *mem_entry_ptr = NULL;
 	int index;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	mem_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.perf_token,
@@ -3213,7 +3213,7 @@ int gp106_mclk_init(struct gk20a *g)
 	u32 index;
 	struct memory_config *m;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	mclk = &g->clk_pmu.clk_mclk;
 
@@ -3316,7 +3316,7 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
 #endif
 	u32 speed;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	memset(&payload, 0, sizeof(struct pmu_payload));
 
@@ -3508,7 +3508,7 @@ static int mclk_debugfs_init(struct gk20a *g)
 	struct dentry *gpu_root = l->debugfs;
 	struct dentry *d;
 
-	gk20a_dbg(gpu_dbg_info, "g=%p", g);
+	nvgpu_log(g, gpu_dbg_info, "g=%p", g);
 
 	d = debugfs_create_file(
 		"mclk_speed_set",
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index d4041905..2a52dd4e 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,14 +98,14 @@ u32 gp106_pmu_pg_engines_list(struct gk20a *g)
 static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
 			void *param, u32 handle, u32 status)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (status != 0) {
 		nvgpu_err(g, "PG PARAM cmd aborted");
 		return;
 	}
 
-	gp106_dbg_pmu("PG PARAM is acknowledged from PMU %x",
+	gp106_dbg_pmu(g, "PG PARAM is acknowledged from PMU %x",
 		msg->msg.pg.msg_type);
 }
 
@@ -135,7 +135,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
 		cmd.cmd.pg.gr_init_param.featuremask =
 			NVGPU_PMU_GR_FEATURE_MASK_RPPG;
 
-		gp106_dbg_pmu("cmd post GR PMU_PG_CMD_ID_PG_PARAM");
+		gp106_dbg_pmu(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM");
 		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			pmu_handle_param_msg, pmu, &seq, ~0);
 	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
@@ -152,7 +152,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
 			NVGPU_PMU_MS_FEATURE_MASK_RPPG |
 			NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;
 
-		gp106_dbg_pmu("cmd post MS PMU_PG_CMD_ID_PG_PARAM");
+		gp106_dbg_pmu(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM");
 		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			pmu_handle_param_msg, pmu, &seq, ~0);
 	}
@@ -240,9 +240,9 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
 	struct pmu_cmd cmd;
 	u32 seq;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
-	gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
+	gp106_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
 	if (g->pmu_lsf_pmu_wpr_init_done) {
 		/* send message to load FECS falcon */
 		memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -258,13 +258,13 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
 		cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0;
 		cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0;
 
-		gp106_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
+		gp106_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
 			falconidmask);
 		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
index bd640869..361f6e8b 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,8 +23,8 @@
 #ifndef __PMU_GP106_H_
 #define __PMU_GP106_H_
 
-#define gp106_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gp106_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 struct gk20a;
 
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index 08c7f84a..1c959022 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,8 +31,8 @@
 #include <nvgpu/hw/gp106/hw_psec_gp106.h>
 
 /*Defines*/
-#define gm20b_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gm20b_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
 {
@@ -56,7 +56,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
 	}
 
 	g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r());
-	gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities);
+	gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
 	data = gk20a_readl(g, psec_falcon_mailbox0_r());
 	if (data) {
 		nvgpu_err(g, "ACR boot failed, err %x", data);
@@ -87,7 +87,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
 	u32 data = 0;
 	u32 dst;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* SEC2 Config */
 	gk20a_writel(g, psec_falcon_itfen_r(),
@@ -123,7 +123,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
 		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
 		pmu_bl_gm10x_desc->bl_start_tag);
 
-	gm20b_dbg_pmu("Before starting falcon with BL\n");
+	gm20b_dbg_pmu(g, "Before starting falcon with BL\n");
 
 	gk20a_writel(g, psec_falcon_mailbox0_r(), 0xDEADA5A5);
 
diff --git a/drivers/gpu/nvgpu/gp106/therm_gp106.c b/drivers/gpu/nvgpu/gp106/therm_gp106.c
index 64d602cf..b3862abe 100644
--- a/drivers/gpu/nvgpu/gp106/therm_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/therm_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -97,7 +97,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
 	u32 active_engine_id = 0;
 	struct fifo_gk20a *f = &g->fifo;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
 		active_engine_id = f->active_engines_list[engine_id];
@@ -124,7 +124,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
 	idle_filter &= ~therm_hubmmu_idle_filter_value_m();
 	gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.c b/drivers/gpu/nvgpu/gp106/xve_gp106.c
index 9becd0f2..e77ea5c1 100644
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/xve_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -204,19 +204,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 	int attempts = 10, err_status = 0;
 
 	g->ops.xve.get_speed(g, &current_link_speed);
-	xv_sc_dbg(PRE_CHANGE, "Executing PCIe link change.");
-	xv_sc_dbg(PRE_CHANGE, " Current speed: %s",
+	xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");
+	xv_sc_dbg(g, PRE_CHANGE, " Current speed: %s",
 		xve_speed_to_str(current_link_speed));
-	xv_sc_dbg(PRE_CHANGE, " Next speed: %s",
+	xv_sc_dbg(g, PRE_CHANGE, " Next speed: %s",
 		xve_speed_to_str(next_link_speed));
-	xv_sc_dbg(PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x",
+	xv_sc_dbg(g, PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x",
 		gk20a_readl(g, xp_pl_link_config_r(0)));
 
-	xv_sc_dbg(DISABLE_ASPM, "Disabling ASPM...");
+	xv_sc_dbg(g, DISABLE_ASPM, "Disabling ASPM...");
 	disable_aspm_gp106(g);
-	xv_sc_dbg(DISABLE_ASPM, " Done!");
+	xv_sc_dbg(g, DISABLE_ASPM, " Done!");
 
-	xv_sc_dbg(DL_SAFE_MODE, "Putting DL in safe mode...");
+	xv_sc_dbg(g, DL_SAFE_MODE, "Putting DL in safe mode...");
 	saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0));
 
 	/*
@@ -225,12 +225,12 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 	dl_mgr = saved_dl_mgr;
 	dl_mgr |= xp_dl_mgr_safe_timing_f(1);
 	gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
-	xv_sc_dbg(DL_SAFE_MODE, " Done!");
+	xv_sc_dbg(g, DL_SAFE_MODE, " Done!");
 
 	nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
 			NVGPU_TIMER_CPU_TIMER);
 
-	xv_sc_dbg(CHECK_LINK, "Checking for link idle...");
+	xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
 	do {
 		pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
 		if ((xp_pl_link_config_ltssm_status_f(pl_link_config) ==
@@ -245,9 +245,9 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 			goto done;
 	}
 
-	xv_sc_dbg(CHECK_LINK, " Done");
+	xv_sc_dbg(g, CHECK_LINK, " Done");
 
-	xv_sc_dbg(LINK_SETTINGS, "Preparing next link settings");
+	xv_sc_dbg(g, LINK_SETTINGS, "Preparing next link settings");
 	pl_link_config &= ~xp_pl_link_config_max_link_rate_m();
 	switch (next_link_speed) {
 	case GPU_XVE_SPEED_2P5:
@@ -297,10 +297,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 	else
 		BUG();
 
-	xv_sc_dbg(LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config);
-	xv_sc_dbg(LINK_SETTINGS, " Done");
+	xv_sc_dbg(g, LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config);
+	xv_sc_dbg(g, LINK_SETTINGS, " Done");
 
-	xv_sc_dbg(EXEC_CHANGE, "Running link speed change...");
+	xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");
 
 	nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
 			NVGPU_TIMER_CPU_TIMER);
@@ -316,7 +316,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 			goto done;
 	}
 
-	xv_sc_dbg(EXEC_CHANGE, " Wrote PL_LINK_CONFIG.");
+	xv_sc_dbg(g, EXEC_CHANGE, " Wrote PL_LINK_CONFIG.");
 
 	pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
 
@@ -326,7 +326,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 		xp_pl_link_config_ltssm_directive_f(
 			xp_pl_link_config_ltssm_directive_change_speed_v()));
 
-	xv_sc_dbg(EXEC_CHANGE, " Executing change (0x%08x)!",
+	xv_sc_dbg(g, EXEC_CHANGE, " Executing change (0x%08x)!",
 		pl_link_config);
 	gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
 
@@ -348,11 +348,11 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err_status = -ETIMEDOUT;
-		xv_sc_dbg(EXEC_CHANGE, " timeout; pl_link_config = 0x%x",
+		xv_sc_dbg(g, EXEC_CHANGE, " timeout; pl_link_config = 0x%x",
 			pl_link_config);
 	}
 
-	xv_sc_dbg(EXEC_CHANGE, " Change done... Checking status");
+	xv_sc_dbg(g, EXEC_CHANGE, " Change done... Checking status");
 
 	if (pl_link_config == 0xffffffff) {
 		WARN(1, "GPU fell of PCI bus!?");
@@ -366,19 +366,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 
 		link_control_status =
 			g->ops.xve.xve_readl(g, xve_link_control_status_r());
-		xv_sc_dbg(EXEC_CHANGE, " target %d vs current %d",
+		xv_sc_dbg(g, EXEC_CHANGE, " target %d vs current %d",
 			link_speed_setting,
 			xve_link_control_status_link_speed_v(link_control_status));
 
 		if (err_status == -ETIMEDOUT) {
-			xv_sc_dbg(EXEC_CHANGE, " Oops timed out?");
+			xv_sc_dbg(g, EXEC_CHANGE, " Oops timed out?");
 			break;
 		}
 	} while (attempts-- > 0 &&
 		link_speed_setting !=
 		xve_link_control_status_link_speed_v(link_control_status));
 
-	xv_sc_dbg(EXEC_VERIF, "Verifying speed change...");
+	xv_sc_dbg(g, EXEC_VERIF, "Verifying speed change...");
 
 	/*
 	 * Check that the new link speed is actually active. If we failed to
@@ -390,10 +390,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 	if (link_speed_setting != new_link_speed) {
 		u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0));
 
-		xv_sc_dbg(EXEC_VERIF, " Current and target speeds mismatch!");
-		xv_sc_dbg(EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x",
+		xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds mismatch!");
+		xv_sc_dbg(g, EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x",
 			g->ops.xve.xve_readl(g, xve_link_control_status_r()));
-		xv_sc_dbg(EXEC_VERIF, " Link speed is %s - should be %s",
+		xv_sc_dbg(g, EXEC_VERIF, " Link speed is %s - should be %s",
 			xve_speed_to_str(new_link_speed),
 			xve_speed_to_str(link_speed_setting));
 
@@ -417,19 +417,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 		gk20a_writel(g, xp_pl_link_config_r(0), link_config);
 		err_status = -ENODEV;
 	} else {
-		xv_sc_dbg(EXEC_VERIF, " Current and target speeds match!");
+		xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds match!");
 		err_status = 0;
 	}
 
 done:
 	/* Restore safe timings. */
-	xv_sc_dbg(CLEANUP, "Restoring saved DL settings...");
+	xv_sc_dbg(g, CLEANUP, "Restoring saved DL settings...");
 	gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr);
-	xv_sc_dbg(CLEANUP, " Done");
+	xv_sc_dbg(g, CLEANUP, " Done");
 
-	xv_sc_dbg(CLEANUP, "Re-enabling ASPM settings...");
+	xv_sc_dbg(g, CLEANUP, "Re-enabling ASPM settings...");
 	enable_aspm_gp106(g);
-	xv_sc_dbg(CLEANUP, " Done");
+	xv_sc_dbg(g, CLEANUP, " Done");
 
 	return err_status;
 }
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.h b/drivers/gpu/nvgpu/gp106/xve_gp106.h
index d48b0991..e0be35ac 100644
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/xve_gp106.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -49,11 +49,11 @@ enum xv_speed_change_steps {
 	CLEANUP
 };
 
-#define xv_dbg(fmt, args...) \
-	gk20a_dbg(gpu_dbg_xv, fmt, ##args)
+#define xv_dbg(g, fmt, args...) \
+	nvgpu_log(g, gpu_dbg_xv, fmt, ##args)
 
-#define xv_sc_dbg(step, fmt, args...) \
-	xv_dbg("[%d] %15s | " fmt, step, __stringify(step), ##args)
+#define xv_sc_dbg(g, step, fmt, args...) \
+	xv_dbg(g, "[%d] %15s | " fmt, step, __stringify(step), ##args)
 
 void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val);
 u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg);