Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--   drivers/gpu/nvgpu/gm20b/acr_gm20b.c   129
1 file changed, 62 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 916e7a66..9725ebe7 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -37,16 +37,11 @@
 #include <nvgpu/utils.h>
 
 #include "gk20a/gk20a.h"
-#include "gk20a/pmu_gk20a.h"
 #include "mm_gm20b.h"
 #include "acr_gm20b.h"
 
 #include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 
-/*Defines*/
-#define gm20b_dbg_pmu(g, fmt, arg...) \
-	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
-
 typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
 
 /*Externs*/
@@ -80,7 +75,7 @@ static void start_gm20b_pmu(struct gk20a *g)
 {
 	/*disable irqs for hs falcon booting as we will poll for halt*/
 	nvgpu_mutex_acquire(&g->pmu.isr_mutex);
-	pmu_enable_irq(&g->pmu, true);
+	g->ops.pmu.pmu_enable_irq(&g->pmu, true);
 	g->pmu.isr_enabled = true;
 	nvgpu_mutex_release(&g->pmu.isr_mutex);
 	gk20a_writel(g, pwr_falcon_cpuctl_alias_r(),
@@ -103,16 +98,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct lsf_ucode_desc *lsf_desc;
 	int err;
-	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n");
+	nvgpu_pmu_dbg(g, "requesting PMU ucode in GM20B\n");
 	pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
 	if (!pmu_fw) {
 		nvgpu_err(g, "failed to load pmu ucode!!");
 		return -ENOENT;
 	}
 	g->acr.pmu_fw = pmu_fw;
-	gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
+	nvgpu_pmu_dbg(g, "Loaded PMU ucode in for blob preparation");
 
-	gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
+	nvgpu_pmu_dbg(g, "requesting PMU ucode desc in GM20B\n");
 	pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
 	if (!pmu_desc) {
 		nvgpu_err(g, "failed to load pmu ucode desc!!");
@@ -131,7 +126,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 
 	err = nvgpu_init_pmu_fw_support(pmu);
 	if (err) {
-		gm20b_dbg_pmu(g, "failed to set function pointers\n");
+		nvgpu_pmu_dbg(g, "failed to set function pointers\n");
 		goto release_sig;
 	}
 
@@ -150,7 +145,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
+	nvgpu_pmu_dbg(g, "requesting PMU ucode in GM20B exit\n");
 	nvgpu_release_firmware(g, pmu_sig);
 	return 0;
 release_sig:
@@ -223,7 +218,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu(g, "fecs fw loaded\n");
+	nvgpu_pmu_dbg(g, "fecs fw loaded\n");
 	nvgpu_release_firmware(g, fecs_sig);
 	return 0;
 free_lsf_desc:
@@ -295,7 +290,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu(g, "gpccs fw loaded\n");
+	nvgpu_pmu_dbg(g, "gpccs fw loaded\n");
 	nvgpu_release_firmware(g, gpccs_sig);
 	return 0;
 free_lsf_desc:
@@ -364,24 +359,24 @@ int prepare_ucode_blob(struct gk20a *g)
 		non WPR blob of ucodes*/
 		err = nvgpu_init_pmu_fw_support(pmu);
 		if (err) {
-			gm20b_dbg_pmu(g, "failed to set function pointers\n");
+			nvgpu_pmu_dbg(g, "failed to set function pointers\n");
 			return err;
 		}
 		return 0;
 	}
 	plsfm = &lsfm_l;
 	memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
-	gm20b_dbg_pmu(g, "fetching GMMU regs\n");
+	nvgpu_pmu_dbg(g, "fetching GMMU regs\n");
 	g->ops.fb.vpr_info_fetch(g);
 	gr_gk20a_init_ctxsw_ucode(g);
 
 	g->ops.pmu.get_wpr(g, &wpr_inf);
-	gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
-	gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size);
+	nvgpu_pmu_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
+	nvgpu_pmu_dbg(g, "wpr carveout size :%llx\n", wpr_inf.size);
 
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
-	gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
+	nvgpu_pmu_dbg(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err) {
 		goto free_sgt;
 	}
@@ -400,13 +395,13 @@ int prepare_ucode_blob(struct gk20a *g)
 			goto free_sgt;
 		}
 
-		gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
+		nvgpu_pmu_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
 		lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
 	} else {
-		gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n");
+		nvgpu_pmu_dbg(g, "LSFM is managing no falcons.\n");
 	}
-	gm20b_dbg_pmu(g, "prepare ucode blob return 0\n");
+	nvgpu_pmu_dbg(g, "prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
 free_sgt:
 	return err;
@@ -452,13 +447,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 
 		plsfm->managed_flcn_cnt++;
 	} else {
-		gm20b_dbg_pmu(g, "id not managed %d\n",
+		nvgpu_pmu_dbg(g, "id not managed %d\n",
 			ucode_img.lsf_desc->falcon_id);
 	}
 
 	/*Free any ucode image resources if not managing this falcon*/
 	if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
-		gm20b_dbg_pmu(g, "pmu is not LSFM managed\n");
+		nvgpu_pmu_dbg(g, "pmu is not LSFM managed\n");
 		lsfm_free_ucode_img_res(g, &ucode_img);
 	}
 
@@ -490,7 +485,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 					plsfm->managed_flcn_cnt++;
 				}
 			} else {
-				gm20b_dbg_pmu(g, "not managed %d\n",
+				nvgpu_pmu_dbg(g, "not managed %d\n",
 					ucode_img.lsf_desc->falcon_id);
 				lsfm_free_nonpmu_ucode_img_res(g,
 					&ucode_img);
@@ -498,7 +493,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 			}
 		} else {
 			/* Consumed all available falcon objects */
-			gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i);
+			nvgpu_pmu_dbg(g, "Done checking for ucodes %d\n", i);
 			break;
 		}
 	}
@@ -539,26 +534,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
-	gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
+	nvgpu_pmu_dbg(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
 	/*From linux*/
 	addr_code = u64_lo32((addr_base +
 			desc->app_start_offset +
 			desc->app_resident_code_offset) >> 8);
-	gm20b_dbg_pmu(g, "app start %d app res code off %d\n",
+	nvgpu_pmu_dbg(g, "app start %d app res code off %d\n",
 		desc->app_start_offset, desc->app_resident_code_offset);
 	addr_data = u64_lo32((addr_base +
 			desc->app_start_offset +
 			desc->app_resident_data_offset) >> 8);
-	gm20b_dbg_pmu(g, "app res data offset%d\n",
+	nvgpu_pmu_dbg(g, "app res data offset%d\n",
 		desc->app_resident_data_offset);
-	gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
+	nvgpu_pmu_dbg(g, "bl start off %d\n", desc->bootloader_start_offset);
 
 	addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
 		gk20a_readl(g, pwr_falcon_hwcfg_r())))
 		<< GK20A_PMU_DMEM_BLKSIZE2);
 	addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
 
-	gm20b_dbg_pmu(g, "addr_args %x\n", addr_args);
+	nvgpu_pmu_dbg(g, "addr_args %x\n", addr_args);
 
 	/* Populate the loader_config state*/
 	ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -616,7 +611,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 
-	gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
+	nvgpu_pmu_dbg(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
 		p_lsfm->wpr_header.falcon_id);
 	addr_code = u64_lo32((addr_base +
 			desc->app_start_offset +
@@ -625,7 +620,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 			desc->app_start_offset +
 			desc->app_resident_data_offset) >> 8);
 
-	gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
+	nvgpu_pmu_dbg(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
 		(u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
 		p_lsfm->wpr_header.falcon_id);
 
@@ -648,7 +643,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 
 	struct nvgpu_pmu *pmu = &g->pmu;
 	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
-		gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
+		nvgpu_pmu_dbg(g, "non pmu. write flcn bl gen desc\n");
 		g->ops.pmu.flcn_populate_bl_dmem_desc(g,
 			pnode, &pnode->bl_gen_desc_size,
 			pnode->wpr_header.falcon_id);
@@ -656,7 +651,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	}
 
 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
-		gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
+		nvgpu_pmu_dbg(g, "pmu write flcn bl gen desc\n");
 		if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
@@ -690,46 +685,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 		nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
 			&pnode->wpr_header, sizeof(pnode->wpr_header));
 
-		gm20b_dbg_pmu(g, "wpr header");
-		gm20b_dbg_pmu(g, "falconid :%d",
+		nvgpu_pmu_dbg(g, "wpr header");
+		nvgpu_pmu_dbg(g, "falconid :%d",
 			pnode->wpr_header.falcon_id);
-		gm20b_dbg_pmu(g, "lsb_offset :%x",
+		nvgpu_pmu_dbg(g, "lsb_offset :%x",
 			pnode->wpr_header.lsb_offset);
-		gm20b_dbg_pmu(g, "bootstrap_owner :%d",
+		nvgpu_pmu_dbg(g, "bootstrap_owner :%d",
 			pnode->wpr_header.bootstrap_owner);
-		gm20b_dbg_pmu(g, "lazy_bootstrap :%d",
+		nvgpu_pmu_dbg(g, "lazy_bootstrap :%d",
 			pnode->wpr_header.lazy_bootstrap);
-		gm20b_dbg_pmu(g, "status :%d",
+		nvgpu_pmu_dbg(g, "status :%d",
 			pnode->wpr_header.status);
 
 		/*Flush LSB header to memory*/
 		nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
 			&pnode->lsb_header, sizeof(pnode->lsb_header));
 
-		gm20b_dbg_pmu(g, "lsb header");
-		gm20b_dbg_pmu(g, "ucode_off :%x",
+		nvgpu_pmu_dbg(g, "lsb header");
+		nvgpu_pmu_dbg(g, "ucode_off :%x",
 			pnode->lsb_header.ucode_off);
-		gm20b_dbg_pmu(g, "ucode_size :%x",
+		nvgpu_pmu_dbg(g, "ucode_size :%x",
 			pnode->lsb_header.ucode_size);
-		gm20b_dbg_pmu(g, "data_size :%x",
+		nvgpu_pmu_dbg(g, "data_size :%x",
 			pnode->lsb_header.data_size);
-		gm20b_dbg_pmu(g, "bl_code_size :%x",
+		nvgpu_pmu_dbg(g, "bl_code_size :%x",
 			pnode->lsb_header.bl_code_size);
-		gm20b_dbg_pmu(g, "bl_imem_off :%x",
+		nvgpu_pmu_dbg(g, "bl_imem_off :%x",
 			pnode->lsb_header.bl_imem_off);
-		gm20b_dbg_pmu(g, "bl_data_off :%x",
+		nvgpu_pmu_dbg(g, "bl_data_off :%x",
 			pnode->lsb_header.bl_data_off);
-		gm20b_dbg_pmu(g, "bl_data_size :%x",
+		nvgpu_pmu_dbg(g, "bl_data_size :%x",
 			pnode->lsb_header.bl_data_size);
-		gm20b_dbg_pmu(g, "app_code_off :%x",
+		nvgpu_pmu_dbg(g, "app_code_off :%x",
 			pnode->lsb_header.app_code_off);
-		gm20b_dbg_pmu(g, "app_code_size :%x",
+		nvgpu_pmu_dbg(g, "app_code_size :%x",
 			pnode->lsb_header.app_code_size);
-		gm20b_dbg_pmu(g, "app_data_off :%x",
+		nvgpu_pmu_dbg(g, "app_data_off :%x",
 			pnode->lsb_header.app_data_off);
-		gm20b_dbg_pmu(g, "app_data_size :%x",
+		nvgpu_pmu_dbg(g, "app_data_size :%x",
 			pnode->lsb_header.app_data_size);
-		gm20b_dbg_pmu(g, "flags :%x",
+		nvgpu_pmu_dbg(g, "flags :%x",
 			pnode->lsb_header.flags);
 
 		/*If this falcon has a boot loader and related args,
@@ -1049,7 +1044,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
 	size = acr->ucode_blob.size;
 
-	gm20b_dbg_pmu(g, " ");
+	nvgpu_pmu_dbg(g, " ");
 
 	if (!acr_fw) {
 		/*First time init case*/
@@ -1163,14 +1158,14 @@ int acr_ucode_patch_sig(struct gk20a *g,
 		unsigned int *p_patch_ind)
 {
 	unsigned int i, *p_sig;
-	gm20b_dbg_pmu(g, " ");
+	nvgpu_pmu_dbg(g, " ");
 
 	if (!pmu_is_debug_mode_en(g)) {
 		p_sig = p_prod_sig;
-		gm20b_dbg_pmu(g, "PRODUCTION MODE\n");
+		nvgpu_pmu_dbg(g, "PRODUCTION MODE\n");
 	} else {
 		p_sig = p_dbg_sig;
-		gm20b_dbg_pmu(g, "DEBUG MODE\n");
+		nvgpu_pmu_dbg(g, "DEBUG MODE\n");
 	}
 
 	/* Patching logic:*/
@@ -1303,7 +1298,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 
 	/*disable irqs for hs falcon booting as we will poll for halt*/
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	pmu_enable_irq(pmu, false);
+	g->ops.pmu.pmu_enable_irq(pmu, false);
 	pmu->isr_enabled = false;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 	/*Clearing mailbox register used to reflect capabilities*/
@@ -1335,7 +1330,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 	struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw;
 	struct hsflcn_bl_desc *pmu_bl_gm10x_desc;
 	u32 *pmu_bl_gm10x = NULL;
-	gm20b_dbg_pmu(g, " ");
+	nvgpu_pmu_dbg(g, " ");
 
 	if (!hsbl_fw) {
 		hsbl_fw = nvgpu_request_firmware(g,
@@ -1354,7 +1349,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size,
 			256);
 		acr->hsbl_ucode.size = bl_sz;
-		gm20b_dbg_pmu(g, "Executing Generic Bootloader\n");
+		nvgpu_pmu_dbg(g, "Executing Generic Bootloader\n");
 
 		/*TODO in code verify that enable PMU is done,
 			scrubbing etc is done*/
@@ -1377,7 +1372,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		}
 
 		nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
-		gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n");
+		nvgpu_pmu_dbg(g, "Copied bl ucode to bl_cpuva\n");
 	}
 	/*
 	 * Disable interrupts to avoid kernel hitting breakpoint due
@@ -1389,9 +1384,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		goto err_unmap_bl;
 	}
 
-	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
+	nvgpu_pmu_dbg(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	nvgpu_pmu_dbg(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
 
 	g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size);
 
@@ -1409,10 +1404,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 			goto err_unmap_bl;
 		}
 	}
-	gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
-	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
+	nvgpu_pmu_dbg(g, "after waiting for halt, err %x\n", err);
+	nvgpu_pmu_dbg(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	nvgpu_pmu_dbg(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
 	start_gm20b_pmu(g);
 	return 0;
 err_unmap_bl:
@@ -1443,7 +1438,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 	}
 
 	g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
-	gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
+	nvgpu_pmu_dbg(g, "ACR capabilities %x\n", g->acr.capabilities);
 	data = gk20a_readl(g, pwr_falcon_mailbox0_r());
 	if (data) {
 		nvgpu_err(g, "ACR boot failed, err %x", data);
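The patch reduces to two mechanical substitutions: every call to the file-local gm20b_dbg_pmu() wrapper becomes a call to the common nvgpu_pmu_dbg() helper, and the direct pmu_enable_irq() calls are routed through the g->ops.pmu.pmu_enable_irq HAL pointer instead. The standalone C sketch below only illustrates those two patterns; the stub types and the printf-based logger are stand-ins and not the real nvgpu definitions, which live in the common PMU headers and per-chip HAL init code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real nvgpu types; just enough to show the pattern. */
struct nvgpu_pmu {
	bool irq_enabled;
};

struct gk20a {
	struct nvgpu_pmu pmu;
	struct {
		struct {
			void (*pmu_enable_irq)(struct nvgpu_pmu *pmu, bool enable);
		} pmu;
	} ops;
};

/* Stand-in for the shared helper that replaces gm20b_dbg_pmu(); the real
 * macro forwards to nvgpu_log(g, gpu_dbg_pmu, ...). */
#define nvgpu_pmu_dbg(g, fmt, ...) \
	printf("[pmu dbg] " fmt "\n", ##__VA_ARGS__)

/* Chip-level implementation that HAL init would bind into the ops table. */
static void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
{
	pmu->irq_enabled = enable;
}

int main(void)
{
	struct gk20a g = { .ops.pmu.pmu_enable_irq = gk20a_pmu_enable_irq };

	/* Callers no longer invoke pmu_enable_irq() directly; they go
	 * through the ops table, as the diff does in start_gm20b_pmu()
	 * and gm20b_init_pmu_setup_hw1(). */
	g.ops.pmu.pmu_enable_irq(&g.pmu, true);
	nvgpu_pmu_dbg(&g, "irq enabled: %d", g.pmu.irq_enabled);
	return 0;
}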