diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2018-04-18 22:39:46 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-05-09 21:26:04 -0400 |
commit | dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch) | |
tree | 806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/gp106/acr_gp106.c | |
parent | 7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff) |
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). gk20a_dbg* macros are
intentionally left there because of use from other repositories.
Because the new functions do not work without a pointer to struct
gk20a, and piping it through just for logging would be excessive,
some log messages are deleted.
Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/acr_gp106.c')
-rw-r--r-- | drivers/gpu/nvgpu/gp106/acr_gp106.c | 98 |
1 file changed, 49 insertions, 49 deletions
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c index 5ab8cfcc..61b443e0 100644 --- a/drivers/gpu/nvgpu/gp106/acr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c | |||
@@ -43,8 +43,8 @@ | |||
43 | #include <nvgpu/hw/gp106/hw_pwr_gp106.h> | 43 | #include <nvgpu/hw/gp106/hw_pwr_gp106.h> |
44 | 44 | ||
45 | /*Defines*/ | 45 | /*Defines*/ |
46 | #define gp106_dbg_pmu(fmt, arg...) \ | 46 | #define gp106_dbg_pmu(g, fmt, arg...) \ |
47 | gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) | 47 | nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) |
48 | 48 | ||
49 | typedef int (*get_ucode_details)(struct gk20a *g, | 49 | typedef int (*get_ucode_details)(struct gk20a *g, |
50 | struct flcn_ucode_img_v1 *udata); | 50 | struct flcn_ucode_img_v1 *udata); |
@@ -113,7 +113,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) | |||
113 | struct lsf_ucode_desc_v1 *lsf_desc; | 113 | struct lsf_ucode_desc_v1 *lsf_desc; |
114 | int err; | 114 | int err; |
115 | 115 | ||
116 | gp106_dbg_pmu("requesting PMU ucode in gp106\n"); | 116 | gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n"); |
117 | pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, | 117 | pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, |
118 | NVGPU_REQUEST_FIRMWARE_NO_SOC); | 118 | NVGPU_REQUEST_FIRMWARE_NO_SOC); |
119 | if (!pmu_fw) { | 119 | if (!pmu_fw) { |
@@ -121,9 +121,9 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) | |||
121 | return -ENOENT; | 121 | return -ENOENT; |
122 | } | 122 | } |
123 | g->acr.pmu_fw = pmu_fw; | 123 | g->acr.pmu_fw = pmu_fw; |
124 | gp106_dbg_pmu("Loaded PMU ucode in for blob preparation"); | 124 | gp106_dbg_pmu(g, "Loaded PMU ucode in for blob preparation"); |
125 | 125 | ||
126 | gp106_dbg_pmu("requesting PMU ucode desc in GM20B\n"); | 126 | gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n"); |
127 | pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, | 127 | pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, |
128 | NVGPU_REQUEST_FIRMWARE_NO_SOC); | 128 | NVGPU_REQUEST_FIRMWARE_NO_SOC); |
129 | if (!pmu_desc) { | 129 | if (!pmu_desc) { |
@@ -164,7 +164,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) | |||
164 | p_img->fw_ver = NULL; | 164 | p_img->fw_ver = NULL; |
165 | p_img->header = NULL; | 165 | p_img->header = NULL; |
166 | p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; | 166 | p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; |
167 | gp106_dbg_pmu("requesting PMU ucode in GM20B exit\n"); | 167 | gp106_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n"); |
168 | 168 | ||
169 | nvgpu_release_firmware(g, pmu_sig); | 169 | nvgpu_release_firmware(g, pmu_sig); |
170 | return 0; | 170 | return 0; |
@@ -262,7 +262,7 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) | |||
262 | p_img->fw_ver = NULL; | 262 | p_img->fw_ver = NULL; |
263 | p_img->header = NULL; | 263 | p_img->header = NULL; |
264 | p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; | 264 | p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; |
265 | gp106_dbg_pmu("fecs fw loaded\n"); | 265 | gp106_dbg_pmu(g, "fecs fw loaded\n"); |
266 | nvgpu_release_firmware(g, fecs_sig); | 266 | nvgpu_release_firmware(g, fecs_sig); |
267 | return 0; | 267 | return 0; |
268 | free_lsf_desc: | 268 | free_lsf_desc: |
@@ -358,7 +358,7 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) | |||
358 | p_img->fw_ver = NULL; | 358 | p_img->fw_ver = NULL; |
359 | p_img->header = NULL; | 359 | p_img->header = NULL; |
360 | p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; | 360 | p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; |
361 | gp106_dbg_pmu("gpccs fw loaded\n"); | 361 | gp106_dbg_pmu(g, "gpccs fw loaded\n"); |
362 | nvgpu_release_firmware(g, gpccs_sig); | 362 | nvgpu_release_firmware(g, gpccs_sig); |
363 | return 0; | 363 | return 0; |
364 | free_lsf_desc: | 364 | free_lsf_desc: |
@@ -381,7 +381,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g) | |||
381 | non WPR blob of ucodes*/ | 381 | non WPR blob of ucodes*/ |
382 | err = nvgpu_init_pmu_fw_support(pmu); | 382 | err = nvgpu_init_pmu_fw_support(pmu); |
383 | if (err) { | 383 | if (err) { |
384 | gp106_dbg_pmu("failed to set function pointers\n"); | 384 | gp106_dbg_pmu(g, "failed to set function pointers\n"); |
385 | return err; | 385 | return err; |
386 | } | 386 | } |
387 | return 0; | 387 | return 0; |
@@ -391,12 +391,12 @@ int gp106_prepare_ucode_blob(struct gk20a *g) | |||
391 | gr_gk20a_init_ctxsw_ucode(g); | 391 | gr_gk20a_init_ctxsw_ucode(g); |
392 | 392 | ||
393 | g->ops.pmu.get_wpr(g, &wpr_inf); | 393 | g->ops.pmu.get_wpr(g, &wpr_inf); |
394 | gp106_dbg_pmu("wpr carveout base:%llx\n", (wpr_inf.wpr_base)); | 394 | gp106_dbg_pmu(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base)); |
395 | gp106_dbg_pmu("wpr carveout size :%x\n", (u32)wpr_inf.size); | 395 | gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size); |
396 | 396 | ||
397 | /* Discover all managed falcons*/ | 397 | /* Discover all managed falcons*/ |
398 | err = lsfm_discover_ucode_images(g, plsfm); | 398 | err = lsfm_discover_ucode_images(g, plsfm); |
399 | gp106_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); | 399 | gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); |
400 | if (err) | 400 | if (err) |
401 | goto exit_err; | 401 | goto exit_err; |
402 | 402 | ||
@@ -412,14 +412,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g) | |||
412 | if (err) | 412 | if (err) |
413 | goto exit_err; | 413 | goto exit_err; |
414 | 414 | ||
415 | gp106_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", | 415 | gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n", |
416 | plsfm->managed_flcn_cnt, plsfm->wpr_size); | 416 | plsfm->managed_flcn_cnt, plsfm->wpr_size); |
417 | 417 | ||
418 | lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); | 418 | lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); |
419 | } else { | 419 | } else { |
420 | gp106_dbg_pmu("LSFM is managing no falcons.\n"); | 420 | gp106_dbg_pmu(g, "LSFM is managing no falcons.\n"); |
421 | } | 421 | } |
422 | gp106_dbg_pmu("prepare ucode blob return 0\n"); | 422 | gp106_dbg_pmu(g, "prepare ucode blob return 0\n"); |
423 | free_acr_resources(g, plsfm); | 423 | free_acr_resources(g, plsfm); |
424 | 424 | ||
425 | exit_err: | 425 | exit_err: |
@@ -465,14 +465,14 @@ int lsfm_discover_ucode_images(struct gk20a *g, | |||
465 | 465 | ||
466 | plsfm->managed_flcn_cnt++; | 466 | plsfm->managed_flcn_cnt++; |
467 | } else { | 467 | } else { |
468 | gp106_dbg_pmu("id not managed %d\n", | 468 | gp106_dbg_pmu(g, "id not managed %d\n", |
469 | ucode_img.lsf_desc->falcon_id); | 469 | ucode_img.lsf_desc->falcon_id); |
470 | } | 470 | } |
471 | } | 471 | } |
472 | 472 | ||
473 | /*Free any ucode image resources if not managing this falcon*/ | 473 | /*Free any ucode image resources if not managing this falcon*/ |
474 | if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { | 474 | if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { |
475 | gp106_dbg_pmu("pmu is not LSFM managed\n"); | 475 | gp106_dbg_pmu(g, "pmu is not LSFM managed\n"); |
476 | lsfm_free_ucode_img_res(g, &ucode_img); | 476 | lsfm_free_ucode_img_res(g, &ucode_img); |
477 | } | 477 | } |
478 | 478 | ||
@@ -503,7 +503,7 @@ int lsfm_discover_ucode_images(struct gk20a *g, | |||
503 | == 0) | 503 | == 0) |
504 | plsfm->managed_flcn_cnt++; | 504 | plsfm->managed_flcn_cnt++; |
505 | } else { | 505 | } else { |
506 | gp106_dbg_pmu("not managed %d\n", | 506 | gp106_dbg_pmu(g, "not managed %d\n", |
507 | ucode_img.lsf_desc->falcon_id); | 507 | ucode_img.lsf_desc->falcon_id); |
508 | lsfm_free_nonpmu_ucode_img_res(g, | 508 | lsfm_free_nonpmu_ucode_img_res(g, |
509 | &ucode_img); | 509 | &ucode_img); |
@@ -511,7 +511,7 @@ int lsfm_discover_ucode_images(struct gk20a *g, | |||
511 | } | 511 | } |
512 | } else { | 512 | } else { |
513 | /* Consumed all available falcon objects */ | 513 | /* Consumed all available falcon objects */ |
514 | gp106_dbg_pmu("Done checking for ucodes %d\n", i); | 514 | gp106_dbg_pmu(g, "Done checking for ucodes %d\n", i); |
515 | break; | 515 | break; |
516 | } | 516 | } |
517 | } | 517 | } |
@@ -549,19 +549,19 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g, | |||
549 | g->ops.pmu.get_wpr(g, &wpr_inf); | 549 | g->ops.pmu.get_wpr(g, &wpr_inf); |
550 | addr_base += (wpr_inf.wpr_base); | 550 | addr_base += (wpr_inf.wpr_base); |
551 | 551 | ||
552 | gp106_dbg_pmu("pmu loader cfg addrbase 0x%llx\n", addr_base); | 552 | gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base); |
553 | /*From linux*/ | 553 | /*From linux*/ |
554 | addr_code = addr_base + | 554 | addr_code = addr_base + |
555 | desc->app_start_offset + | 555 | desc->app_start_offset + |
556 | desc->app_resident_code_offset; | 556 | desc->app_resident_code_offset; |
557 | gp106_dbg_pmu("app start %d app res code off %d\n", | 557 | gp106_dbg_pmu(g, "app start %d app res code off %d\n", |
558 | desc->app_start_offset, desc->app_resident_code_offset); | 558 | desc->app_start_offset, desc->app_resident_code_offset); |
559 | addr_data = addr_base + | 559 | addr_data = addr_base + |
560 | desc->app_start_offset + | 560 | desc->app_start_offset + |
561 | desc->app_resident_data_offset; | 561 | desc->app_resident_data_offset; |
562 | gp106_dbg_pmu("app res data offset%d\n", | 562 | gp106_dbg_pmu(g, "app res data offset%d\n", |
563 | desc->app_resident_data_offset); | 563 | desc->app_resident_data_offset); |
564 | gp106_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); | 564 | gp106_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset); |
565 | 565 | ||
566 | addr_args = ((pwr_falcon_hwcfg_dmem_size_v( | 566 | addr_args = ((pwr_falcon_hwcfg_dmem_size_v( |
567 | gk20a_readl(g, pwr_falcon_hwcfg_r()))) | 567 | gk20a_readl(g, pwr_falcon_hwcfg_r()))) |
@@ -569,7 +569,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g, | |||
569 | 569 | ||
570 | addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); | 570 | addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); |
571 | 571 | ||
572 | gp106_dbg_pmu("addr_args %x\n", addr_args); | 572 | gp106_dbg_pmu(g, "addr_args %x\n", addr_args); |
573 | 573 | ||
574 | /* Populate the LOADER_CONFIG state */ | 574 | /* Populate the LOADER_CONFIG state */ |
575 | memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1)); | 575 | memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1)); |
@@ -621,8 +621,8 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g, | |||
621 | g->ops.pmu.get_wpr(g, &wpr_inf); | 621 | g->ops.pmu.get_wpr(g, &wpr_inf); |
622 | addr_base += wpr_inf.wpr_base; | 622 | addr_base += wpr_inf.wpr_base; |
623 | 623 | ||
624 | gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id); | 624 | gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id); |
625 | gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base); | 625 | gp106_dbg_pmu(g, "gen loader cfg addrbase %llx ", addr_base); |
626 | addr_code = addr_base + | 626 | addr_code = addr_base + |
627 | desc->app_start_offset + | 627 | desc->app_start_offset + |
628 | desc->app_resident_code_offset; | 628 | desc->app_resident_code_offset; |
@@ -630,7 +630,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g, | |||
630 | desc->app_start_offset + | 630 | desc->app_start_offset + |
631 | desc->app_resident_data_offset; | 631 | desc->app_resident_data_offset; |
632 | 632 | ||
633 | gp106_dbg_pmu("gen cfg addrcode %llx data %llx load offset %x", | 633 | gp106_dbg_pmu(g, "gen cfg addrcode %llx data %llx load offset %x", |
634 | addr_code, addr_data, desc->bootloader_start_offset); | 634 | addr_code, addr_data, desc->bootloader_start_offset); |
635 | 635 | ||
636 | /* Populate the LOADER_CONFIG state */ | 636 | /* Populate the LOADER_CONFIG state */ |
@@ -653,7 +653,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, | |||
653 | 653 | ||
654 | struct nvgpu_pmu *pmu = &g->pmu; | 654 | struct nvgpu_pmu *pmu = &g->pmu; |
655 | if (pnode->wpr_header.falcon_id != pmu->falcon_id) { | 655 | if (pnode->wpr_header.falcon_id != pmu->falcon_id) { |
656 | gp106_dbg_pmu("non pmu. write flcn bl gen desc\n"); | 656 | gp106_dbg_pmu(g, "non pmu. write flcn bl gen desc\n"); |
657 | g->ops.pmu.flcn_populate_bl_dmem_desc(g, | 657 | g->ops.pmu.flcn_populate_bl_dmem_desc(g, |
658 | pnode, &pnode->bl_gen_desc_size, | 658 | pnode, &pnode->bl_gen_desc_size, |
659 | pnode->wpr_header.falcon_id); | 659 | pnode->wpr_header.falcon_id); |
@@ -661,7 +661,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, | |||
661 | } | 661 | } |
662 | 662 | ||
663 | if (pmu->pmu_mode & PMU_LSFM_MANAGED) { | 663 | if (pmu->pmu_mode & PMU_LSFM_MANAGED) { |
664 | gp106_dbg_pmu("pmu write flcn bl gen desc\n"); | 664 | gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n"); |
665 | if (pnode->wpr_header.falcon_id == pmu->falcon_id) | 665 | if (pnode->wpr_header.falcon_id == pmu->falcon_id) |
666 | return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, | 666 | return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, |
667 | &pnode->bl_gen_desc_size); | 667 | &pnode->bl_gen_desc_size); |
@@ -694,46 +694,46 @@ void lsfm_init_wpr_contents(struct gk20a *g, | |||
694 | nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), | 694 | nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), |
695 | &pnode->wpr_header, sizeof(pnode->wpr_header)); | 695 | &pnode->wpr_header, sizeof(pnode->wpr_header)); |
696 | 696 | ||
697 | gp106_dbg_pmu("wpr header"); | 697 | gp106_dbg_pmu(g, "wpr header"); |
698 | gp106_dbg_pmu("falconid :%d", | 698 | gp106_dbg_pmu(g, "falconid :%d", |
699 | pnode->wpr_header.falcon_id); | 699 | pnode->wpr_header.falcon_id); |
700 | gp106_dbg_pmu("lsb_offset :%x", | 700 | gp106_dbg_pmu(g, "lsb_offset :%x", |
701 | pnode->wpr_header.lsb_offset); | 701 | pnode->wpr_header.lsb_offset); |
702 | gp106_dbg_pmu("bootstrap_owner :%d", | 702 | gp106_dbg_pmu(g, "bootstrap_owner :%d", |
703 | pnode->wpr_header.bootstrap_owner); | 703 | pnode->wpr_header.bootstrap_owner); |
704 | gp106_dbg_pmu("lazy_bootstrap :%d", | 704 | gp106_dbg_pmu(g, "lazy_bootstrap :%d", |
705 | pnode->wpr_header.lazy_bootstrap); | 705 | pnode->wpr_header.lazy_bootstrap); |
706 | gp106_dbg_pmu("status :%d", | 706 | gp106_dbg_pmu(g, "status :%d", |
707 | pnode->wpr_header.status); | 707 | pnode->wpr_header.status); |
708 | 708 | ||
709 | /*Flush LSB header to memory*/ | 709 | /*Flush LSB header to memory*/ |
710 | nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, | 710 | nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, |
711 | &pnode->lsb_header, sizeof(pnode->lsb_header)); | 711 | &pnode->lsb_header, sizeof(pnode->lsb_header)); |
712 | 712 | ||
713 | gp106_dbg_pmu("lsb header"); | 713 | gp106_dbg_pmu(g, "lsb header"); |
714 | gp106_dbg_pmu("ucode_off :%x", | 714 | gp106_dbg_pmu(g, "ucode_off :%x", |
715 | pnode->lsb_header.ucode_off); | 715 | pnode->lsb_header.ucode_off); |
716 | gp106_dbg_pmu("ucode_size :%x", | 716 | gp106_dbg_pmu(g, "ucode_size :%x", |
717 | pnode->lsb_header.ucode_size); | 717 | pnode->lsb_header.ucode_size); |
718 | gp106_dbg_pmu("data_size :%x", | 718 | gp106_dbg_pmu(g, "data_size :%x", |
719 | pnode->lsb_header.data_size); | 719 | pnode->lsb_header.data_size); |
720 | gp106_dbg_pmu("bl_code_size :%x", | 720 | gp106_dbg_pmu(g, "bl_code_size :%x", |
721 | pnode->lsb_header.bl_code_size); | 721 | pnode->lsb_header.bl_code_size); |
722 | gp106_dbg_pmu("bl_imem_off :%x", | 722 | gp106_dbg_pmu(g, "bl_imem_off :%x", |
723 | pnode->lsb_header.bl_imem_off); | 723 | pnode->lsb_header.bl_imem_off); |
724 | gp106_dbg_pmu("bl_data_off :%x", | 724 | gp106_dbg_pmu(g, "bl_data_off :%x", |
725 | pnode->lsb_header.bl_data_off); | 725 | pnode->lsb_header.bl_data_off); |
726 | gp106_dbg_pmu("bl_data_size :%x", | 726 | gp106_dbg_pmu(g, "bl_data_size :%x", |
727 | pnode->lsb_header.bl_data_size); | 727 | pnode->lsb_header.bl_data_size); |
728 | gp106_dbg_pmu("app_code_off :%x", | 728 | gp106_dbg_pmu(g, "app_code_off :%x", |
729 | pnode->lsb_header.app_code_off); | 729 | pnode->lsb_header.app_code_off); |
730 | gp106_dbg_pmu("app_code_size :%x", | 730 | gp106_dbg_pmu(g, "app_code_size :%x", |
731 | pnode->lsb_header.app_code_size); | 731 | pnode->lsb_header.app_code_size); |
732 | gp106_dbg_pmu("app_data_off :%x", | 732 | gp106_dbg_pmu(g, "app_data_off :%x", |
733 | pnode->lsb_header.app_data_off); | 733 | pnode->lsb_header.app_data_off); |
734 | gp106_dbg_pmu("app_data_size :%x", | 734 | gp106_dbg_pmu(g, "app_data_size :%x", |
735 | pnode->lsb_header.app_data_size); | 735 | pnode->lsb_header.app_data_size); |
736 | gp106_dbg_pmu("flags :%x", | 736 | gp106_dbg_pmu(g, "flags :%x", |
737 | pnode->lsb_header.flags); | 737 | pnode->lsb_header.flags); |
738 | 738 | ||
739 | /*If this falcon has a boot loader and related args, | 739 | /*If this falcon has a boot loader and related args, |
@@ -1049,7 +1049,7 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g) | |||
1049 | u32 *acr_ucode_data_t210_load; | 1049 | u32 *acr_ucode_data_t210_load; |
1050 | struct wpr_carveout_info wpr_inf; | 1050 | struct wpr_carveout_info wpr_inf; |
1051 | 1051 | ||
1052 | gp106_dbg_pmu(""); | 1052 | gp106_dbg_pmu(g, " "); |
1053 | 1053 | ||
1054 | if (!acr_fw) { | 1054 | if (!acr_fw) { |
1055 | /*First time init case*/ | 1055 | /*First time init case*/ |