From dd739fcb039d51606e9a5454ec0aab17bcb01965 Mon Sep 17 00:00:00 2001 From: Terje Bergstrom Date: Wed, 18 Apr 2018 19:39:46 -0700 Subject: gpu: nvgpu: Remove gk20a_dbg* functions Switch all logging to nvgpu_log*(). gk20a_dbg* macros are intentionally left there because of use from other repositories. Because the new functions do not work without a pointer to struct gk20a, and piping it just for logging is excessive, some log messages are deleted. Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e Signed-off-by: Terje Bergstrom Reviewed-on: https://git-master.nvidia.com/r/1704148 Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 132 +++++++++++++++++----------------- drivers/gpu/nvgpu/gm20b/bus_gm20b.c | 4 +- drivers/gpu/nvgpu/gm20b/clk_gm20b.c | 41 +++++------ drivers/gpu/nvgpu/gm20b/fb_gm20b.c | 2 +- drivers/gpu/nvgpu/gm20b/fifo_gm20b.c | 2 +- drivers/gpu/nvgpu/gm20b/gr_gm20b.c | 48 ++++++------- drivers/gpu/nvgpu/gm20b/ltc_gm20b.c | 16 ++--- drivers/gpu/nvgpu/gm20b/mm_gm20b.c | 8 +-- drivers/gpu/nvgpu/gm20b/pmu_gm20b.c | 40 +++++------ drivers/gpu/nvgpu/gm20b/therm_gm20b.c | 4 +- 10 files changed, 149 insertions(+), 148 deletions(-) (limited to 'drivers/gpu/nvgpu/gm20b') diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c index cffe7199..615b6b46 100644 --- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c @@ -42,8 +42,8 @@ #include /*Defines*/ -#define gm20b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gm20b_dbg_pmu(g, fmt, arg...) \ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata); @@ -101,16 +101,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) struct nvgpu_pmu *pmu = &g->pmu; struct lsf_ucode_desc *lsf_desc; int err; - gm20b_dbg_pmu("requesting PMU ucode in GM20B\n"); + gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n"); pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0); if (!pmu_fw) { nvgpu_err(g, "failed to load pmu ucode!!"); return -ENOENT; } g->acr.pmu_fw = pmu_fw; - gm20b_dbg_pmu("Loaded PMU ucode in for blob preparation"); + gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation"); - gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n"); + gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n"); pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0); if (!pmu_desc) { nvgpu_err(g, "failed to load pmu ucode desc!!"); @@ -129,7 +129,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) err = nvgpu_init_pmu_fw_support(pmu); if (err) { - gm20b_dbg_pmu("failed to set function pointers\n"); + gm20b_dbg_pmu(g, "failed to set function pointers\n"); goto release_sig; } @@ -148,7 +148,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; - gm20b_dbg_pmu("requesting PMU ucode in GM20B exit\n"); + gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n"); nvgpu_release_firmware(g, pmu_sig); return 0; release_sig: @@ -221,7 +221,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; - gm20b_dbg_pmu("fecs fw loaded\n"); + gm20b_dbg_pmu(g, "fecs fw loaded\n"); nvgpu_release_firmware(g, fecs_sig); return 0; free_lsf_desc: @@ -292,7 +292,7 @@ static int 
gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; - gm20b_dbg_pmu("gpccs fw loaded\n"); + gm20b_dbg_pmu(g, "gpccs fw loaded\n"); nvgpu_release_firmware(g, gpccs_sig); return 0; free_lsf_desc: @@ -361,24 +361,24 @@ int prepare_ucode_blob(struct gk20a *g) non WPR blob of ucodes*/ err = nvgpu_init_pmu_fw_support(pmu); if (err) { - gm20b_dbg_pmu("failed to set function pointers\n"); + gm20b_dbg_pmu(g, "failed to set function pointers\n"); return err; } return 0; } plsfm = &lsfm_l; memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr)); - gm20b_dbg_pmu("fetching GMMU regs\n"); + gm20b_dbg_pmu(g, "fetching GMMU regs\n"); g->ops.fb.vpr_info_fetch(g); gr_gk20a_init_ctxsw_ucode(g); g->ops.pmu.get_wpr(g, &wpr_inf); - gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base); - gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size); + gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base); + gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size); /* Discover all managed falcons*/ err = lsfm_discover_ucode_images(g, plsfm); - gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); + gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); if (err) goto free_sgt; @@ -394,13 +394,13 @@ int prepare_ucode_blob(struct gk20a *g) if (err) goto free_sgt; - gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", + gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n", plsfm->managed_flcn_cnt, plsfm->wpr_size); lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); } else { - gm20b_dbg_pmu("LSFM is managing no falcons.\n"); + gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n"); } - gm20b_dbg_pmu("prepare ucode blob return 0\n"); + gm20b_dbg_pmu(g, "prepare ucode blob return 0\n"); free_acr_resources(g, plsfm); free_sgt: return err; @@ -444,13 +444,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g, plsfm->managed_flcn_cnt++; } else { - gm20b_dbg_pmu("id not managed %d\n", + gm20b_dbg_pmu(g, "id not managed %d\n", ucode_img.lsf_desc->falcon_id); } /*Free any ucode image resources if not managing this falcon*/ if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { - gm20b_dbg_pmu("pmu is not LSFM managed\n"); + gm20b_dbg_pmu(g, "pmu is not LSFM managed\n"); lsfm_free_ucode_img_res(g, &ucode_img); } @@ -481,7 +481,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g, == 0) plsfm->managed_flcn_cnt++; } else { - gm20b_dbg_pmu("not managed %d\n", + gm20b_dbg_pmu(g, "not managed %d\n", ucode_img.lsf_desc->falcon_id); lsfm_free_nonpmu_ucode_img_res(g, &ucode_img); @@ -489,7 +489,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g, } } else { /* Consumed all available falcon objects */ - gm20b_dbg_pmu("Done checking for ucodes %d\n", i); + gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i); break; } } @@ -526,26 +526,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g, addr_base = p_lsfm->lsb_header.ucode_off; g->ops.pmu.get_wpr(g, &wpr_inf); addr_base += wpr_inf.wpr_base; - gm20b_dbg_pmu("pmu loader cfg u32 addrbase %x\n", (u32)addr_base); + gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base); /*From linux*/ addr_code = u64_lo32((addr_base + desc->app_start_offset + desc->app_resident_code_offset) >> 8); - gm20b_dbg_pmu("app start %d app res code off %d\n", + gm20b_dbg_pmu(g, "app start %d app res code off %d\n", desc->app_start_offset, desc->app_resident_code_offset); addr_data = u64_lo32((addr_base + desc->app_start_offset 
+ desc->app_resident_data_offset) >> 8); - gm20b_dbg_pmu("app res data offset%d\n", + gm20b_dbg_pmu(g, "app res data offset%d\n", desc->app_resident_data_offset); - gm20b_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); + gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset); addr_args = ((pwr_falcon_hwcfg_dmem_size_v( gk20a_readl(g, pwr_falcon_hwcfg_r()))) << GK20A_PMU_DMEM_BLKSIZE2); addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); - gm20b_dbg_pmu("addr_args %x\n", addr_args); + gm20b_dbg_pmu(g, "addr_args %x\n", addr_args); /* Populate the loader_config state*/ ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE; @@ -599,7 +599,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g, g->ops.pmu.get_wpr(g, &wpr_inf); addr_base += wpr_inf.wpr_base; - gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, + gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, p_lsfm->wpr_header.falcon_id); addr_code = u64_lo32((addr_base + desc->app_start_offset + @@ -608,7 +608,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g, desc->app_start_offset + desc->app_resident_data_offset) >> 8); - gm20b_dbg_pmu("gen cfg %x u32 addrcode %x & data %x load offset %xID\n", + gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n", (u32)addr_code, (u32)addr_data, desc->bootloader_start_offset, p_lsfm->wpr_header.falcon_id); @@ -631,7 +631,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, struct nvgpu_pmu *pmu = &g->pmu; if (pnode->wpr_header.falcon_id != pmu->falcon_id) { - gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n"); + gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n"); g->ops.pmu.flcn_populate_bl_dmem_desc(g, pnode, &pnode->bl_gen_desc_size, pnode->wpr_header.falcon_id); @@ -639,7 +639,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, } if (pmu->pmu_mode & PMU_LSFM_MANAGED) { - gm20b_dbg_pmu("pmu write flcn bl gen desc\n"); + gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n"); if (pnode->wpr_header.falcon_id == pmu->falcon_id) return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, &pnode->bl_gen_desc_size); @@ -672,46 +672,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm, nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), &pnode->wpr_header, sizeof(pnode->wpr_header)); - gm20b_dbg_pmu("wpr header"); - gm20b_dbg_pmu("falconid :%d", + gm20b_dbg_pmu(g, "wpr header"); + gm20b_dbg_pmu(g, "falconid :%d", pnode->wpr_header.falcon_id); - gm20b_dbg_pmu("lsb_offset :%x", + gm20b_dbg_pmu(g, "lsb_offset :%x", pnode->wpr_header.lsb_offset); - gm20b_dbg_pmu("bootstrap_owner :%d", + gm20b_dbg_pmu(g, "bootstrap_owner :%d", pnode->wpr_header.bootstrap_owner); - gm20b_dbg_pmu("lazy_bootstrap :%d", + gm20b_dbg_pmu(g, "lazy_bootstrap :%d", pnode->wpr_header.lazy_bootstrap); - gm20b_dbg_pmu("status :%d", + gm20b_dbg_pmu(g, "status :%d", pnode->wpr_header.status); /*Flush LSB header to memory*/ nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, &pnode->lsb_header, sizeof(pnode->lsb_header)); - gm20b_dbg_pmu("lsb header"); - gm20b_dbg_pmu("ucode_off :%x", + gm20b_dbg_pmu(g, "lsb header"); + gm20b_dbg_pmu(g, "ucode_off :%x", pnode->lsb_header.ucode_off); - gm20b_dbg_pmu("ucode_size :%x", + gm20b_dbg_pmu(g, "ucode_size :%x", pnode->lsb_header.ucode_size); - gm20b_dbg_pmu("data_size :%x", + gm20b_dbg_pmu(g, "data_size :%x", pnode->lsb_header.data_size); - gm20b_dbg_pmu("bl_code_size :%x", + gm20b_dbg_pmu(g, "bl_code_size :%x", pnode->lsb_header.bl_code_size); - 
gm20b_dbg_pmu("bl_imem_off :%x", + gm20b_dbg_pmu(g, "bl_imem_off :%x", pnode->lsb_header.bl_imem_off); - gm20b_dbg_pmu("bl_data_off :%x", + gm20b_dbg_pmu(g, "bl_data_off :%x", pnode->lsb_header.bl_data_off); - gm20b_dbg_pmu("bl_data_size :%x", + gm20b_dbg_pmu(g, "bl_data_size :%x", pnode->lsb_header.bl_data_size); - gm20b_dbg_pmu("app_code_off :%x", + gm20b_dbg_pmu(g, "app_code_off :%x", pnode->lsb_header.app_code_off); - gm20b_dbg_pmu("app_code_size :%x", + gm20b_dbg_pmu(g, "app_code_size :%x", pnode->lsb_header.app_code_size); - gm20b_dbg_pmu("app_data_off :%x", + gm20b_dbg_pmu(g, "app_data_off :%x", pnode->lsb_header.app_data_off); - gm20b_dbg_pmu("app_data_size :%x", + gm20b_dbg_pmu(g, "app_data_size :%x", pnode->lsb_header.app_data_size); - gm20b_dbg_pmu("flags :%x", + gm20b_dbg_pmu(g, "flags :%x", pnode->lsb_header.flags); /*If this falcon has a boot loader and related args, @@ -1028,7 +1028,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g) start = nvgpu_mem_get_addr(g, &acr->ucode_blob); size = acr->ucode_blob.size; - gm20b_dbg_pmu(""); + gm20b_dbg_pmu(g, " "); if (!acr_fw) { /*First time init case*/ @@ -1141,14 +1141,14 @@ int acr_ucode_patch_sig(struct gk20a *g, unsigned int *p_patch_ind) { unsigned int i, *p_sig; - gm20b_dbg_pmu(""); + gm20b_dbg_pmu(g, " "); if (!pmu_is_debug_mode_en(g)) { p_sig = p_prod_sig; - gm20b_dbg_pmu("PRODUCTION MODE\n"); + gm20b_dbg_pmu(g, "PRODUCTION MODE\n"); } else { p_sig = p_dbg_sig; - gm20b_dbg_pmu("DEBUG MODE\n"); + gm20b_dbg_pmu(g, "DEBUG MODE\n"); } /* Patching logic:*/ @@ -1171,7 +1171,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; u32 dst; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | pwr_falcon_itfen_ctxen_enable_f()); @@ -1193,7 +1193,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, pmu_bl_gm10x_desc->bl_start_tag); - gm20b_dbg_pmu("Before starting falcon with BL\n"); + gm20b_dbg_pmu(g, "Before starting falcon with BL\n"); virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; @@ -1207,7 +1207,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g) struct nvgpu_pmu *pmu = &g->pmu; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -1279,7 +1279,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g, struct nvgpu_pmu *pmu = &g->pmu; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -1324,7 +1324,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw; struct hsflcn_bl_desc *pmu_bl_gm10x_desc; u32 *pmu_bl_gm10x = NULL; - gm20b_dbg_pmu(""); + gm20b_dbg_pmu(g, " "); if (!hsbl_fw) { hsbl_fw = nvgpu_request_firmware(g, @@ -1343,7 +1343,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size, 256); acr->hsbl_ucode.size = bl_sz; - gm20b_dbg_pmu("Executing Generic Bootloader\n"); + gm20b_dbg_pmu(g, "Executing Generic Bootloader\n"); /*TODO in code verify that enable PMU is done, scrubbing etc is done*/ @@ -1366,7 +1366,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) } nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz); - gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n"); + gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n"); } /* * Disable interrupts to avoid 
kernel hitting breakpoint due @@ -1377,9 +1377,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) gk20a_get_gr_idle_timeout(g))) goto err_unmap_bl; - gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, + gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g, pwr_falcon_mmu_phys_sec_r())); - gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); + gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size); @@ -1396,10 +1396,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) else goto err_unmap_bl; } - gm20b_dbg_pmu("after waiting for halt, err %x\n", err); - gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, + gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err); + gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g, pwr_falcon_mmu_phys_sec_r())); - gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); + gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); start_gm20b_pmu(g); return 0; err_unmap_bl: @@ -1430,7 +1430,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms) } g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r()); - gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); + gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities); data = gk20a_readl(g, pwr_falcon_mailbox0_r()); if (data) { nvgpu_err(g, "ACR boot failed, err %x", data); diff --git a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c index cdd70d5b..ca2a40bf 100644 --- a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B MMU * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -40,7 +40,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst) u64 iova = nvgpu_inst_block_addr(g, bar1_inst); u32 ptr_v = (u32)(iova >> bus_bar1_block_ptr_shift_v()); - gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v); + nvgpu_log_info(g, "bar1 inst block ptr: 0x%08x", ptr_v); gk20a_writel(g, bus_bar1_block_r(), nvgpu_aperture_mask(g, bar1_inst, diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c index fa751ecc..fb89752a 100644 --- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B Clocks * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,8 +36,8 @@ #include #include -#define gk20a_dbg_clk(fmt, arg...) \ - gk20a_dbg(gpu_dbg_clk, fmt, ##arg) +#define gk20a_dbg_clk(g, fmt, arg...) \ + nvgpu_log(g, gpu_dbg_clk, fmt, ##arg) #define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */ #define SDM_DIN_RANGE 12 /* -2^12 ... 
2^12-1 */ @@ -138,6 +138,7 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl) static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, struct pll_parms *pll_params, u32 *target_freq, bool best_fit) { + struct gk20a *g = clk->g; u32 min_vco_f, max_vco_f; u32 best_M, best_N; u32 low_PL, high_PL, best_PL; @@ -149,7 +150,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, BUG_ON(target_freq == NULL); - gk20a_dbg_fn("request target freq %d MHz", *target_freq); + nvgpu_log_fn(g, "request target freq %d MHz", *target_freq); ref_clk_f = pll->clk_in; target_clk_f = *target_freq; @@ -172,7 +173,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, low_PL = min(low_PL, pll_params->max_PL); low_PL = max(low_PL, pll_params->min_PL); - gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)", + nvgpu_log_info(g, "low_PL %d(div%d), high_PL %d(div%d)", low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL)); for (pl = low_PL; pl <= high_PL; pl++) { @@ -217,7 +218,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, goto found_match; } - gk20a_dbg_info("delta %d @ M %d, N %d, PL %d", + nvgpu_log_info(g, "delta %d @ M %d, N %d, PL %d", delta, m, n, pl); } } @@ -229,7 +230,7 @@ found_match: BUG_ON(best_delta == ~0U); if (best_fit && best_delta != 0) - gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll", + gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll", target_clk_f); pll->M = best_M; @@ -241,10 +242,10 @@ found_match: *target_freq = pll->freq; - gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)", + gk20a_dbg_clk(g, "actual target freq %d kHz, M %d, N %d, PL %d(div%d)", *target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -810,7 +811,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll) if (gpll->mode == GPC_PLL_MODE_DVFS) { gk20a_readl(g, trim_sys_gpcpll_cfg_r()); nvgpu_udelay(gpc_pll_params.na_lock_delay); - gk20a_dbg_clk("NA config_pll under bypass: %u (%u) kHz %d mV", + gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV", gpll->freq, gpll->freq / 2, (trim_sys_gpcpll_cfg3_dfs_testout_v( gk20a_readl(g, trim_sys_gpcpll_cfg3_r())) @@ -843,7 +844,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll) return -EBUSY; pll_locked: - gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x", + gk20a_dbg_clk(g, "locked config_pll under bypass r=0x%x v=0x%x", trim_sys_gpcpll_cfg_r(), cfg); /* set SYNC_MODE for glitchless switch out of bypass */ @@ -878,7 +879,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new, bool can_slide, pldiv_only; struct pll gpll; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!nvgpu_platform_is_silicon(g)) return 0; @@ -1028,7 +1029,7 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll) gpll->N = nsafe; clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs); - gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", + gk20a_dbg_clk(g, "safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL), gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff); } @@ -1103,7 +1104,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new, clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal); clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff); - 
gk20a_dbg_clk("config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", + gk20a_dbg_clk(g, "config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL, nvgpu_pl_to_div(gpll_new->PL), max(gpll_new->dvfs.mv, gpll_old->dvfs.mv), @@ -1168,14 +1169,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g) unsigned long safe_rate; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_mutex_init(&clk->clk_mutex); if (err) return err; if (clk->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -1229,7 +1230,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g) clk->sw_ready = true; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); nvgpu_info(g, "GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)", clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "", @@ -1321,7 +1322,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g) { u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */ data = gk20a_readl(g, trim_sys_gpc2clk_out_r()); @@ -1394,7 +1395,7 @@ static int set_pll_freq(struct gk20a *g, int allow_slide) struct clk_gk20a *clk = &g->clk; int err = 0; - gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz", + nvgpu_log_fn(g, "last freq: %dMHz, target freq %dMHz", clk->gpc_pll_last.freq, clk->gpc_pll.freq); /* If programming with dynamic sliding failed, re-try under bypass */ @@ -1427,7 +1428,7 @@ int gm20b_init_clk_support(struct gk20a *g) struct clk_gk20a *clk = &g->clk; u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&clk->clk_mutex); clk->clk_hw_on = true; diff --git a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c index 5bc6d452..b2a815fb 100644 --- a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c @@ -38,7 +38,7 @@ void fb_gm20b_init_fs_state(struct gk20a *g) { - gk20a_dbg_info("initialize gm20b fb"); + nvgpu_log_info(g, "initialize gm20b fb"); gk20a_writel(g, fb_fbhub_num_active_ltcs_r(), g->ltc_count); diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c index 35a7a9e1..b73abeda 100644 --- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c @@ -47,7 +47,7 @@ void channel_gm20b_bind(struct channel_gk20a *c) u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block) >> ram_in_base_shift_v(); - gk20a_dbg_info("bind channel %d inst ptr 0x%08x", + nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x", c->chid, inst_ptr); diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c index 1c966c22..331c3af9 100644 --- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c @@ -47,7 +47,7 @@ void gr_gm20b_init_gpc_mmu(struct gk20a *g) { u32 temp; - gk20a_dbg_info("initialize gpc mmu"); + nvgpu_log_info(g, "initialize gpc mmu"); if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { /* Bypass MMU check for non-secure boot. 
For @@ -168,7 +168,7 @@ void gr_gm20b_commit_global_bundle_cb(struct gk20a *g, data = min_t(u32, data, g->gr.min_gpm_fifo_depth); - gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", + nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d", g->gr.bundle_cb_token_limit, data); gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(), @@ -193,7 +193,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g, u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -280,20 +280,20 @@ void gr_gm20b_set_rd_coalesce(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r()); val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(), gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data)); gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr, u32 class_num, u32 offset, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (class_num == MAXWELL_COMPUTE_B) { switch (offset << 2) { @@ -341,7 +341,7 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF) return; */ @@ -390,7 +390,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (cb_size > gr->attrib_cb_size) cb_size = gr->attrib_cb_size; @@ -665,7 +665,7 @@ int gr_gm20b_init_fs_state(struct gk20a *g) { int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_init_fs_state(g); if (err) @@ -762,7 +762,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g) gr_fecs_falcon_hwcfg_r(); u8 falcon_id_mask = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), @@ -829,7 +829,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g) gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff); gk20a_writel(g, gr_fecs_cpuctl_alias_r(), gr_fecs_cpuctl_startcpu_f(1)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -858,7 +858,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g, { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); if (err) @@ -867,7 +867,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g, if (class == MAXWELL_COMPUTE_B) gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -881,7 +881,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g, u32 cta_preempt_option = ctxsw_prog_main_image_preemption_options_control_cta_enabled_f(); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -889,13 +889,13 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g, gr_ctx = &tsg->gr_ctx; if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { - gk20a_dbg_info("CTA: %x", cta_preempt_option); + nvgpu_log_info(g, "CTA: %x", cta_preempt_option); nvgpu_mem_wr(g, mem, 
ctxsw_prog_main_image_preemption_options_o(), cta_preempt_option); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int gr_gm20b_dump_gr_status_regs(struct gk20a *g, @@ -1044,7 +1044,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c, struct nvgpu_mem *mem; u32 v; - gk20a_dbg_fn(""); + nvgpu_log_fn(c->g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1066,7 +1066,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c, nvgpu_mem_end(c->g, mem); - gk20a_dbg_fn("done"); + nvgpu_log_fn(c->g, "done"); return 0; } @@ -1220,19 +1220,19 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state) /* Only for debug purpose */ for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { - gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n", sm_id, w_state[sm_id].valid_warps[0]); - gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n", sm_id, w_state[sm_id].valid_warps[1]); - gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n", sm_id, w_state[sm_id].trapped_warps[0]); - gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n", sm_id, w_state[sm_id].trapped_warps[1]); - gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n", sm_id, w_state[sm_id].paused_warps[0]); - gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n", sm_id, w_state[sm_id].paused_warps[1]); } } diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c index dcb65372..66cd49e7 100644 --- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c @@ -61,7 +61,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (max_comptag_lines == 0U) return 0; @@ -87,9 +87,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) if (max_comptag_lines > hw_max_comptag_lines) max_comptag_lines = hw_max_comptag_lines; - gk20a_dbg_info("compbit backing store size : %d", + nvgpu_log_info(g, "compbit backing store size : %d", compbit_backing_size); - gk20a_dbg_info("max comptag lines : %d", + nvgpu_log_info(g, "max comptag lines : %d", max_comptag_lines); err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); @@ -121,7 +121,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); const u32 max_lines = 16384U; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); @@ -134,7 +134,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, nvgpu_mutex_acquire(&g->mm.l2_op_lock); - gk20a_dbg_info("clearing CBC lines %u..%u", min, iter_max); + nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max); if (op == gk20a_cbc_op_clear) { gk20a_writel( @@ -205,11 +205,11 @@ void gm20b_ltc_init_fs_state(struct gk20a *g) { u32 reg; - gk20a_dbg_info("initialize gm20b l2"); + nvgpu_log_info(g, "initialize gm20b l2"); g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); - gk20a_dbg_info("%d ltcs out of %d", g->ltc_count, g->max_ltc_count); + nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count); gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(), g->ltc_count); @@ -459,7 +459,7 @@ void 
gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr) gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), compbit_base_post_divide); - gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, + nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n", (u32)(compbit_store_iova >> 32), (u32)(compbit_store_iova & 0xffffffff), diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c index 46cd1fc6..deca6686 100644 --- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B MMU * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,9 +36,9 @@ void gm20b_mm_set_big_page_size(struct gk20a *g, { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gk20a_dbg_info("big page size %d\n", size); + nvgpu_log_info(g, "big page size %d\n", size); val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w()); val &= ~ram_in_big_page_size_m(); @@ -48,7 +48,7 @@ void gm20b_mm_set_big_page_size(struct gk20a *g, val |= ram_in_big_page_size_128kb_f(); nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } u32 gm20b_mm_get_big_page_sizes(void) diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c index 1c5fdce0..aa992c37 100644 --- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B PMU * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -37,8 +37,8 @@ #include #include -#define gm20b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gm20b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) /* PROD settings for ELPG sequencing registers*/ @@ -108,7 +108,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g) u32 reg_writes; u32 index; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->elpg_enabled) { reg_writes = ((sizeof(_pginitseq_gm20b) / @@ -120,20 +120,20 @@ int gm20b_pmu_setup_elpg(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION"); + gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION"); if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) g->pmu_lsf_pmu_wpr_init_done = 1; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } @@ -143,7 +143,7 @@ int gm20b_pmu_init_acr(struct gk20a *g) struct pmu_cmd cmd; u32 seq; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* init ACR */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -153,11 +153,11 @@ int gm20b_pmu_init_acr(struct gk20a *g) cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION; cmd.cmd.acr.init_wpr.regionid = 0x01; cmd.cmd.acr.init_wpr.wproffset = 0x00; - gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION"); + gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_acr_init_wpr_msg, pmu, &seq, ~0); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -165,14 +165,14 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON"); + gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON"); - gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid); + gm20b_dbg_pmu(g, "response code = %x\n", msg->msg.acr.acrmsg.falconid); g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms, @@ -182,7 +182,7 @@ static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms, u32 reg; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); @@ -203,9 +203,9 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags) struct pmu_cmd cmd; u32 seq; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gm20b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); + gm20b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); if (g->pmu_lsf_pmu_wpr_init_done) { /* send message to load FECS falcon */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -216,13 +216,13 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags) PMU_ACR_CMD_ID_BOOTSTRAP_FALCON; cmd.cmd.acr.bootstrap_falcon.flags = flags; cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id; - gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n", + gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n", falcon_id); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return; } diff --git a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c index ce4d4fab..dfe977ff 100644 --- 
a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B THERMAL
  *
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ int gm20b_init_therm_setup_hw(struct gk20a *g)
 {
 	u32 v;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* program NV_THERM registers */
 	gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
-- 
cgit v1.2.2
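
The change is mechanical throughout the patch: every gk20a_dbg*()-style call gains an explicit struct gk20a pointer so the logging core no longer depends on global state, and the per-unit wrapper macros (gm20b_dbg_pmu, gk20a_dbg_clk) are reworked to forward that pointer to nvgpu_log(). The stand-alone sketch below illustrates the before/after shape of those wrappers; the struct gk20a fields, the gpu_dbg_pmu value, and the nvgpu_log() body shown here are simplified assumptions for illustration only, not the actual nvgpu implementation.

#include <stdio.h>

/* Simplified stand-in for the real struct gk20a (assumption for this sketch). */
struct gk20a {
	const char *name;
	unsigned int log_mask;
};

/* Assumed debug-class bit; the real values live in the nvgpu headers. */
#define gpu_dbg_pmu 0x1u

/*
 * Assumed shape of the new core macro: unlike the old gk20a_dbg(), it needs
 * the device pointer so output can be filtered and tagged per GPU instance.
 */
#define nvgpu_log(g, mask, fmt, arg...) \
	do { \
		if ((g)->log_mask & (mask)) \
			printf("[%s] " fmt "\n", (g)->name, ##arg); \
	} while (0)

/* Per-unit wrapper after this patch: callers pass g explicitly. */
#define gm20b_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

int main(void)
{
	struct gk20a g = { .name = "gm20b", .log_mask = gpu_dbg_pmu };

	/* Mirrors the '+' call sites in the diff above. */
	gm20b_dbg_pmu(&g, "requesting PMU ucode in GM20B");
	gm20b_dbg_pmu(&g, "wpr carveout base:%llx", 0x80000000ULL);
	return 0;
}

Passing g explicitly is what makes per-device log filtering possible; it is also why, as the commit message notes, a few messages in call sites with no convenient struct gk20a in scope were simply dropped rather than having the pointer plumbed through just for logging.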