From dd739fcb039d51606e9a5454ec0aab17bcb01965 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Wed, 18 Apr 2018 19:39:46 -0700
Subject: gpu: nvgpu: Remove gk20a_dbg* functions

Switch all logging to nvgpu_log*(). gk20a_dbg* macros are
intentionally left there because of use from other repositories.

Because the new functions do not work without a pointer to struct
gk20a, and piping it just for logging is excessive, some log
messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gp106/acr_gp106.c   | 98 +++++++++++++++++------------------
 drivers/gpu/nvgpu/gp106/bios_gp106.c  | 20 +++----
 drivers/gpu/nvgpu/gp106/clk_gp106.c   |  7 +--
 drivers/gpu/nvgpu/gp106/fb_gp106.c    |  4 +-
 drivers/gpu/nvgpu/gp106/gr_gp106.c    | 10 ++--
 drivers/gpu/nvgpu/gp106/hal_gp106.c   |  4 +-
 drivers/gpu/nvgpu/gp106/mclk_gp106.c  | 12 ++---
 drivers/gpu/nvgpu/gp106/pmu_gp106.c   | 18 +++----
 drivers/gpu/nvgpu/gp106/pmu_gp106.h   |  6 +--
 drivers/gpu/nvgpu/gp106/sec2_gp106.c  | 12 ++---
 drivers/gpu/nvgpu/gp106/therm_gp106.c |  6 +--
 drivers/gpu/nvgpu/gp106/xve_gp106.c   | 60 ++++++++++-----------
 drivers/gpu/nvgpu/gp106/xve_gp106.h   | 10 ++--
 13 files changed, 132 insertions(+), 135 deletions(-)

(limited to 'drivers/gpu/nvgpu/gp106')

diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 5ab8cfcc..61b443e0 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -43,8 +43,8 @@
 #include

 /*Defines*/
-#define gp106_dbg_pmu(fmt, arg...) \
-        gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gp106_dbg_pmu(g, fmt, arg...) \
+        nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

 typedef int (*get_ucode_details)(struct gk20a *g,
                 struct flcn_ucode_img_v1 *udata);
@@ -113,7 +113,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
         struct lsf_ucode_desc_v1 *lsf_desc;
         int err;

-        gp106_dbg_pmu("requesting PMU ucode in gp106\n");
+        gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n");
         pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE,
                         NVGPU_REQUEST_FIRMWARE_NO_SOC);
         if (!pmu_fw) {
@@ -121,9 +121,9 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
                 return -ENOENT;
         }
         g->acr.pmu_fw = pmu_fw;
-        gp106_dbg_pmu("Loaded PMU ucode in for blob preparation");
+        gp106_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");

-        gp106_dbg_pmu("requesting PMU ucode desc in GM20B\n");
+        gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
         pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC,
                         NVGPU_REQUEST_FIRMWARE_NO_SOC);
         if (!pmu_desc) {
@@ -164,7 +164,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
         p_img->fw_ver = NULL;
         p_img->header = NULL;
         p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
-        gp106_dbg_pmu("requesting PMU ucode in GM20B exit\n");
+        gp106_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");

         nvgpu_release_firmware(g, pmu_sig);
         return 0;
@@ -262,7 +262,7 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
         p_img->fw_ver = NULL;
         p_img->header = NULL;
         p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
-        gp106_dbg_pmu("fecs fw loaded\n");
+        gp106_dbg_pmu(g, "fecs fw loaded\n");
         nvgpu_release_firmware(g, fecs_sig);
         return 0;
 free_lsf_desc:
@@ -358,7 +358,7 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
         p_img->fw_ver = NULL;
         p_img->header = NULL;
         p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
-        gp106_dbg_pmu("gpccs fw loaded\n");
+        gp106_dbg_pmu(g, "gpccs fw loaded\n");
         nvgpu_release_firmware(g, gpccs_sig);
         return 0;
 free_lsf_desc:
@@ -381,7 +381,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
            non WPR blob of ucodes*/
         err = nvgpu_init_pmu_fw_support(pmu);
         if (err) {
-                gp106_dbg_pmu("failed to set function pointers\n");
+                gp106_dbg_pmu(g, "failed to set function pointers\n");
                 return err;
         }
         return 0;
@@ -391,12 +391,12 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
         gr_gk20a_init_ctxsw_ucode(g);

         g->ops.pmu.get_wpr(g, &wpr_inf);
-        gp106_dbg_pmu("wpr carveout base:%llx\n", (wpr_inf.wpr_base));
-        gp106_dbg_pmu("wpr carveout size :%x\n", (u32)wpr_inf.size);
+        gp106_dbg_pmu(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
+        gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size);

         /* Discover all managed falcons*/
         err = lsfm_discover_ucode_images(g, plsfm);
-        gp106_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
+        gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
         if (err)
                 goto exit_err;
@@ -412,14 +412,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
                 if (err)
                         goto exit_err;

-                gp106_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
+                gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
                         plsfm->managed_flcn_cnt, plsfm->wpr_size);

                 lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
         } else {
-                gp106_dbg_pmu("LSFM is managing no falcons.\n");
+                gp106_dbg_pmu(g, "LSFM is managing no falcons.\n");
         }
-        gp106_dbg_pmu("prepare ucode blob return 0\n");
+        gp106_dbg_pmu(g, "prepare ucode blob return 0\n");
         free_acr_resources(g, plsfm);

 exit_err:
@@ -465,14 +465,14 @@ int lsfm_discover_ucode_images(struct gk20a *g,
                                 plsfm->managed_flcn_cnt++;
                         } else {
-                                gp106_dbg_pmu("id not managed %d\n",
+                                gp106_dbg_pmu(g, "id not managed %d\n",
                                         ucode_img.lsf_desc->falcon_id);
                         }
                 }

                 /*Free any ucode image resources if not managing this falcon*/
                 if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
-                        gp106_dbg_pmu("pmu is not LSFM managed\n");
+                        gp106_dbg_pmu(g, "pmu is not LSFM managed\n");
                         lsfm_free_ucode_img_res(g, &ucode_img);
                 }
@@ -503,7 +503,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
                                         == 0)
                                         plsfm->managed_flcn_cnt++;
                         } else {
-                                gp106_dbg_pmu("not managed %d\n",
+                                gp106_dbg_pmu(g, "not managed %d\n",
                                         ucode_img.lsf_desc->falcon_id);
                                 lsfm_free_nonpmu_ucode_img_res(g,
                                         &ucode_img);
                         }
@@ -511,7 +511,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
                 } else {
                         /* Consumed all available falcon objects */
-                        gp106_dbg_pmu("Done checking for ucodes %d\n", i);
+                        gp106_dbg_pmu(g, "Done checking for ucodes %d\n", i);
                         break;
                 }
         }
@@ -549,19 +549,19 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
         g->ops.pmu.get_wpr(g, &wpr_inf);
         addr_base += (wpr_inf.wpr_base);

-        gp106_dbg_pmu("pmu loader cfg addrbase 0x%llx\n", addr_base);
+        gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base);
         /*From linux*/
         addr_code = addr_base +
                         desc->app_start_offset +
                         desc->app_resident_code_offset;
-        gp106_dbg_pmu("app start %d app res code off %d\n",
+        gp106_dbg_pmu(g, "app start %d app res code off %d\n",
                 desc->app_start_offset, desc->app_resident_code_offset);
         addr_data = addr_base +
                         desc->app_start_offset +
                         desc->app_resident_data_offset;
-        gp106_dbg_pmu("app res data offset%d\n",
+        gp106_dbg_pmu(g, "app res data offset%d\n",
                 desc->app_resident_data_offset);
-        gp106_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset);
+        gp106_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
         addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
                 gk20a_readl(g, pwr_falcon_hwcfg_r())))
@@ -569,7 +569,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,

         addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);

-        gp106_dbg_pmu("addr_args %x\n", addr_args);
+        gp106_dbg_pmu(g, "addr_args %x\n", addr_args);

         /* Populate the LOADER_CONFIG state */
         memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1));
@@ -621,8 +621,8 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
         g->ops.pmu.get_wpr(g, &wpr_inf);
         addr_base += wpr_inf.wpr_base;

-        gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id);
-        gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base);
+        gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id);
+        gp106_dbg_pmu(g, "gen loader cfg addrbase %llx ", addr_base);
         addr_code = addr_base +
                         desc->app_start_offset +
                         desc->app_resident_code_offset;
@@ -630,7 +630,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
                         desc->app_start_offset +
                         desc->app_resident_data_offset;

-        gp106_dbg_pmu("gen cfg addrcode %llx data %llx load offset %x",
+        gp106_dbg_pmu(g, "gen cfg addrcode %llx data %llx load offset %x",
                 addr_code, addr_data, desc->bootloader_start_offset);

         /* Populate the LOADER_CONFIG state */
@@ -653,7 +653,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
         struct nvgpu_pmu *pmu = &g->pmu;

         if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
-                gp106_dbg_pmu("non pmu. write flcn bl gen desc\n");
+                gp106_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
                 g->ops.pmu.flcn_populate_bl_dmem_desc(g,
                         pnode, &pnode->bl_gen_desc_size,
                         pnode->wpr_header.falcon_id);
@@ -661,7 +661,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
         }

         if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
-                gp106_dbg_pmu("pmu write flcn bl gen desc\n");
+                gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
                 if (pnode->wpr_header.falcon_id == pmu->falcon_id)
                         return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
                                 &pnode->bl_gen_desc_size);
@@ -694,46 +694,46 @@ void lsfm_init_wpr_contents(struct gk20a *g,
                 nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
                                 &pnode->wpr_header, sizeof(pnode->wpr_header));

-                gp106_dbg_pmu("wpr header");
-                gp106_dbg_pmu("falconid :%d",
+                gp106_dbg_pmu(g, "wpr header");
+                gp106_dbg_pmu(g, "falconid :%d",
                         pnode->wpr_header.falcon_id);
-                gp106_dbg_pmu("lsb_offset :%x",
+                gp106_dbg_pmu(g, "lsb_offset :%x",
                         pnode->wpr_header.lsb_offset);
-                gp106_dbg_pmu("bootstrap_owner :%d",
+                gp106_dbg_pmu(g, "bootstrap_owner :%d",
                         pnode->wpr_header.bootstrap_owner);
-                gp106_dbg_pmu("lazy_bootstrap :%d",
+                gp106_dbg_pmu(g, "lazy_bootstrap :%d",
                         pnode->wpr_header.lazy_bootstrap);
-                gp106_dbg_pmu("status :%d",
+                gp106_dbg_pmu(g, "status :%d",
                         pnode->wpr_header.status);

                 /*Flush LSB header to memory*/
                 nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
                                 &pnode->lsb_header, sizeof(pnode->lsb_header));

-                gp106_dbg_pmu("lsb header");
-                gp106_dbg_pmu("ucode_off :%x",
+                gp106_dbg_pmu(g, "lsb header");
+                gp106_dbg_pmu(g, "ucode_off :%x",
                         pnode->lsb_header.ucode_off);
-                gp106_dbg_pmu("ucode_size :%x",
+                gp106_dbg_pmu(g, "ucode_size :%x",
                         pnode->lsb_header.ucode_size);
-                gp106_dbg_pmu("data_size :%x",
+                gp106_dbg_pmu(g, "data_size :%x",
                         pnode->lsb_header.data_size);
-                gp106_dbg_pmu("bl_code_size :%x",
+                gp106_dbg_pmu(g, "bl_code_size :%x",
                         pnode->lsb_header.bl_code_size);
-                gp106_dbg_pmu("bl_imem_off :%x",
+                gp106_dbg_pmu(g, "bl_imem_off :%x",
                         pnode->lsb_header.bl_imem_off);
-                gp106_dbg_pmu("bl_data_off :%x",
+                gp106_dbg_pmu(g, "bl_data_off :%x",
                         pnode->lsb_header.bl_data_off);
-                gp106_dbg_pmu("bl_data_size :%x",
gp106_dbg_pmu(g, "bl_data_size :%x", pnode->lsb_header.bl_data_size); - gp106_dbg_pmu("app_code_off :%x", + gp106_dbg_pmu(g, "app_code_off :%x", pnode->lsb_header.app_code_off); - gp106_dbg_pmu("app_code_size :%x", + gp106_dbg_pmu(g, "app_code_size :%x", pnode->lsb_header.app_code_size); - gp106_dbg_pmu("app_data_off :%x", + gp106_dbg_pmu(g, "app_data_off :%x", pnode->lsb_header.app_data_off); - gp106_dbg_pmu("app_data_size :%x", + gp106_dbg_pmu(g, "app_data_size :%x", pnode->lsb_header.app_data_size); - gp106_dbg_pmu("flags :%x", + gp106_dbg_pmu(g, "flags :%x", pnode->lsb_header.flags); /*If this falcon has a boot loader and related args, @@ -1049,7 +1049,7 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g) u32 *acr_ucode_data_t210_load; struct wpr_carveout_info wpr_inf; - gp106_dbg_pmu(""); + gp106_dbg_pmu(g, " "); if (!acr_fw) { /*First time init case*/ diff --git a/drivers/gpu/nvgpu/gp106/bios_gp106.c b/drivers/gpu/nvgpu/gp106/bios_gp106.c index 8511d3c2..3363aeba 100644 --- a/drivers/gpu/nvgpu/gp106/bios_gp106.c +++ b/drivers/gpu/nvgpu/gp106/bios_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -56,13 +56,13 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port) u32 *src_u32 = (u32 *)src; u32 blk; - gk20a_dbg_info("upload %d bytes to %x", size, dst); + nvgpu_log_info(g, "upload %d bytes to %x", size, dst); words = DIV_ROUND_UP(size, 4); blk = dst >> 8; - gk20a_dbg_info("upload %d words to %x blk %d", + nvgpu_log_info(g, "upload %d words to %x blk %d", words, dst, blk); gk20a_writel(g, pwr_falcon_dmemc_r(port), pwr_falcon_dmemc_offs_f(dst >> 2) | @@ -79,7 +79,7 @@ static int gp106_bios_devinit(struct gk20a *g) int devinit_completed; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_flcn_reset(g->pmu.flcn)) { err = -ETIMEDOUT; @@ -128,7 +128,7 @@ static int gp106_bios_devinit(struct gk20a *g) gk20a_get_gr_idle_timeout(g)); out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -146,7 +146,7 @@ static int gp106_bios_preos(struct gk20a *g) { int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_flcn_reset(g->pmu.flcn)) { err = -ETIMEDOUT; @@ -177,7 +177,7 @@ static int gp106_bios_preos(struct gk20a *g) gk20a_get_gr_idle_timeout(g)); out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -186,12 +186,12 @@ int gp106_bios_init(struct gk20a *g) unsigned int i; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->bios_is_init) return 0; - gk20a_dbg_info("reading bios from EEPROM"); + nvgpu_log_info(g, "reading bios from EEPROM"); g->bios.size = BIOS_SIZE; g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE); if (!g->bios.data) @@ -218,7 +218,7 @@ int gp106_bios_init(struct gk20a *g) goto free_firmware; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); err = gp106_bios_devinit(g); if (err) { diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c index 9a94a7b9..d19baac5 100644 --- a/drivers/gpu/nvgpu/gp106/clk_gp106.c +++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c @@ -36,9 +36,6 @@ #include -#define gk20a_dbg_clk(fmt, arg...) 
-        gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
-
 #ifdef CONFIG_DEBUG_FS
 static int clk_gp106_debugfs_init(struct gk20a *g);
 #endif
@@ -82,7 +79,7 @@ int gp106_init_clk_support(struct gk20a *g)
         struct clk_gk20a *clk = &g->clk;
         u32 err = 0;

-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

         err = nvgpu_mutex_init(&clk->clk_mutex);
         if (err)
@@ -374,7 +371,7 @@ static int clk_gp106_debugfs_init(struct gk20a *g)
         d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR,
                 clk_freq_ctlr_root, g, &gpc_cfc_fops);

-        gk20a_dbg(gpu_dbg_info, "g=%p", g);
+        nvgpu_log(g, gpu_dbg_info, "g=%p", g);

         for (i = 0; i < g->clk.namemap_num; i++) {
                 if (g->clk.clk_namemap[i].is_enable) {
diff --git a/drivers/gpu/nvgpu/gp106/fb_gp106.c b/drivers/gpu/nvgpu/gp106/fb_gp106.c
index 34e9ee30..2bf97f61 100644
--- a/drivers/gpu/nvgpu/gp106/fb_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/fb_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,7 +39,7 @@ void gp106_fb_reset(struct gk20a *g)
         do {
                 u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
                 if (fb_niso_scrub_status_flag_v(w)) {
-                        gk20a_dbg_fn("done");
+                        nvgpu_log_fn(g, "done");
                         break;
                 }
                 nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c
index 1bd24b45..2e5f29ee 100644
--- a/drivers/gpu/nvgpu/gp106/gr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c
@@ -58,7 +58,7 @@ bool gr_gp106_is_valid_class(struct gk20a *g, u32 class_num)
         default:
                 break;
         }
-        gk20a_dbg_info("class=0x%x valid=%d", class_num, valid);
+        nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
         return valid;
 }
@@ -75,7 +75,7 @@ static void gr_gp106_set_go_idle_timeout(struct gk20a *g, u32 data)
 int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr,
                               u32 class_num, u32 offset, u32 data)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

         if (class_num == PASCAL_COMPUTE_B) {
                 switch (offset << 2) {
@@ -177,9 +177,9 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
                           g->gr.max_tpc_count;
                 attrib_cb_size = ALIGN(attrib_cb_size, 128);

-                gk20a_dbg_info("gfxp context spill_size=%d", spill_size);
-                gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size);
-                gk20a_dbg_info("gfxp context attrib_cb_size=%d",
+                nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
+                nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
+                nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
                         attrib_cb_size);

                 err = gr_gp10b_alloc_buffer(vm,
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index 82cc36aa..6d3154e3 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -765,7 +765,7 @@ int gp106_init_hal(struct gk20a *g)
 {
         struct gpu_ops *gops = &g->ops;

-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

         gops->bios = gp106_ops.bios;
         gops->ltc = gp106_ops.ltc;
@@ -828,7 +828,7 @@ int gp106_init_hal(struct gk20a *g)

         g->name = "gp10x";

-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");

         return 0;
 }
diff --git a/drivers/gpu/nvgpu/gp106/mclk_gp106.c b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
index 44f0b1d9..bfb66e6e 100644
--- a/drivers/gpu/nvgpu/gp106/mclk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -2998,7 +2998,7 @@ static void mclk_seq_pmucmdhandler(struct gk20a *g, struct pmu_msg *_msg,
         struct nv_pmu_seq_msg_run_script *seq_msg;
         u32 msg_status = 0;

-        gk20a_dbg_info("");
+        nvgpu_log_info(g, " ");

         if (status != 0) {
                 nvgpu_err(g, "mclk seq_script cmd aborted");
@@ -3041,7 +3041,7 @@ static int mclk_get_memclk_table(struct gk20a *g)
         u8 *mem_entry_ptr = NULL;
         int index;

-        gk20a_dbg_info("");
+        nvgpu_log_info(g, " ");

         mem_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
                         g->bios.perf_token,
@@ -3213,7 +3213,7 @@ int gp106_mclk_init(struct gk20a *g)
         u32 index;
         struct memory_config *m;

-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

         mclk = &g->clk_pmu.clk_mclk;
@@ -3316,7 +3316,7 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
 #endif
         u32 speed;

-        gk20a_dbg_info("");
+        nvgpu_log_info(g, " ");

         memset(&payload, 0, sizeof(struct pmu_payload));
@@ -3508,7 +3508,7 @@ static int mclk_debugfs_init(struct gk20a *g)
         struct dentry *gpu_root = l->debugfs;
         struct dentry *d;

-        gk20a_dbg(gpu_dbg_info, "g=%p", g);
+        nvgpu_log(g, gpu_dbg_info, "g=%p", g);

         d = debugfs_create_file(
                 "mclk_speed_set",
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index d4041905..2a52dd4e 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,14 +98,14 @@ u32 gp106_pmu_pg_engines_list(struct gk20a *g)
 static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
                         void *param, u32 handle, u32 status)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

         if (status != 0) {
                 nvgpu_err(g, "PG PARAM cmd aborted");
                 return;
         }

-        gp106_dbg_pmu("PG PARAM is acknowledged from PMU %x",
+        gp106_dbg_pmu(g, "PG PARAM is acknowledged from PMU %x",
                 msg->msg.pg.msg_type);
 }
@@ -135,7 +135,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
                 cmd.cmd.pg.gr_init_param.featuremask =
                         NVGPU_PMU_GR_FEATURE_MASK_RPPG;

-                gp106_dbg_pmu("cmd post GR PMU_PG_CMD_ID_PG_PARAM");
+                gp106_dbg_pmu(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM");
                 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                         pmu_handle_param_msg, pmu, &seq, ~0);
         } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
@@ -152,7 +152,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
                         NVGPU_PMU_MS_FEATURE_MASK_RPPG |
                         NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;

-                gp106_dbg_pmu("cmd post MS PMU_PG_CMD_ID_PG_PARAM");
+                gp106_dbg_pmu(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM");
                 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                         pmu_handle_param_msg, pmu, &seq, ~0);
         }
@@ -240,9 +240,9 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
         struct pmu_cmd cmd;
         u32 seq;

-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

-        gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
+        gp106_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
         if (g->pmu_lsf_pmu_wpr_init_done) {
                 /* send message to load FECS falcon */
                 memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -258,13 +258,13 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
                 cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0;
                 cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0;

-                gp106_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
+                gp106_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
                         falconidmask);
                 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                                 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
         }

-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
 }

 int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
index bd640869..361f6e8b 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,8 +23,8 @@
 #ifndef __PMU_GP106_H_
 #define __PMU_GP106_H_

-#define gp106_dbg_pmu(fmt, arg...) \
-        gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gp106_dbg_pmu(g, fmt, arg...) \
+        nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

 struct gk20a;
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index 08c7f84a..1c959022 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,8 +31,8 @@
 #include

 /*Defines*/
-#define gm20b_dbg_pmu(fmt, arg...) \
-        gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gm20b_dbg_pmu(g, fmt, arg...) \
+        nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

 int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
 {
@@ -56,7 +56,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
         }

         g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r());
-        gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities);
+        gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
         data = gk20a_readl(g, psec_falcon_mailbox0_r());
         if (data) {
                 nvgpu_err(g, "ACR boot failed, err %x", data);
@@ -87,7 +87,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
         u32 data = 0;
         u32 dst;

-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

         /* SEC2 Config */
         gk20a_writel(g, psec_falcon_itfen_r(),
@@ -123,7 +123,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
                         (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
                         pmu_bl_gm10x_desc->bl_start_tag);

-        gm20b_dbg_pmu("Before starting falcon with BL\n");
+        gm20b_dbg_pmu(g, "Before starting falcon with BL\n");

         gk20a_writel(g, psec_falcon_mailbox0_r(), 0xDEADA5A5);
diff --git a/drivers/gpu/nvgpu/gp106/therm_gp106.c b/drivers/gpu/nvgpu/gp106/therm_gp106.c
index 64d602cf..b3862abe 100644
--- a/drivers/gpu/nvgpu/gp106/therm_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/therm_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -97,7 +97,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
         u32 active_engine_id = 0;
         struct fifo_gk20a *f = &g->fifo;

-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");

         for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
                 active_engine_id = f->active_engines_list[engine_id];
@@ -124,7 +124,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
         idle_filter &= ~therm_hubmmu_idle_filter_value_m();
         gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);

-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
         return 0;
 }
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.c b/drivers/gpu/nvgpu/gp106/xve_gp106.c
index 9becd0f2..e77ea5c1 100644
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/xve_gp106.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -204,19 +204,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
         int attempts = 10, err_status = 0;

         g->ops.xve.get_speed(g, &current_link_speed);

-        xv_sc_dbg(PRE_CHANGE, "Executing PCIe link change.");
-        xv_sc_dbg(PRE_CHANGE, "  Current speed: %s",
+        xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");
+        xv_sc_dbg(g, PRE_CHANGE, "  Current speed: %s",
                   xve_speed_to_str(current_link_speed));
-        xv_sc_dbg(PRE_CHANGE, "  Next speed: %s",
+        xv_sc_dbg(g, PRE_CHANGE, "  Next speed: %s",
                   xve_speed_to_str(next_link_speed));
-        xv_sc_dbg(PRE_CHANGE, "  PL_LINK_CONFIG: 0x%08x",
+        xv_sc_dbg(g, PRE_CHANGE, "  PL_LINK_CONFIG: 0x%08x",
                   gk20a_readl(g, xp_pl_link_config_r(0)));

-        xv_sc_dbg(DISABLE_ASPM, "Disabling ASPM...");
+        xv_sc_dbg(g, DISABLE_ASPM, "Disabling ASPM...");
         disable_aspm_gp106(g);
-        xv_sc_dbg(DISABLE_ASPM, "  Done!");
+        xv_sc_dbg(g, DISABLE_ASPM, "  Done!");

-        xv_sc_dbg(DL_SAFE_MODE, "Putting DL in safe mode...");
+        xv_sc_dbg(g, DL_SAFE_MODE, "Putting DL in safe mode...");
         saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0));

         /*
@@ -225,12 +225,12 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
         dl_mgr = saved_dl_mgr;
         dl_mgr |= xp_dl_mgr_safe_timing_f(1);
         gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
-        xv_sc_dbg(DL_SAFE_MODE, "  Done!");
+        xv_sc_dbg(g, DL_SAFE_MODE, "  Done!");

         nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
                            NVGPU_TIMER_CPU_TIMER);

-        xv_sc_dbg(CHECK_LINK, "Checking for link idle...");
+        xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
         do {
                 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
                 if ((xp_pl_link_config_ltssm_status_f(pl_link_config) ==
@@ -245,9 +245,9 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
                 goto done;
         }

-        xv_sc_dbg(CHECK_LINK, "  Done");
+        xv_sc_dbg(g, CHECK_LINK, "  Done");

-        xv_sc_dbg(LINK_SETTINGS, "Preparing next link settings");
+        xv_sc_dbg(g, LINK_SETTINGS, "Preparing next link settings");
         pl_link_config &= ~xp_pl_link_config_max_link_rate_m();
         switch (next_link_speed) {
         case GPU_XVE_SPEED_2P5:
@@ -297,10 +297,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
         else
                 BUG();

-        xv_sc_dbg(LINK_SETTINGS, "  pl_link_config = 0x%08x", pl_link_config);
-        xv_sc_dbg(LINK_SETTINGS, "  Done");
+        xv_sc_dbg(g, LINK_SETTINGS, "  pl_link_config = 0x%08x", pl_link_config);
+        xv_sc_dbg(g, LINK_SETTINGS, "  Done");

-        xv_sc_dbg(EXEC_CHANGE, "Running link speed change...");
+        xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");

         nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
                            NVGPU_TIMER_CPU_TIMER);
@@ -316,7 +316,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
                 goto done;
         }

-        xv_sc_dbg(EXEC_CHANGE, "  Wrote PL_LINK_CONFIG.");
+        xv_sc_dbg(g, EXEC_CHANGE, "  Wrote PL_LINK_CONFIG.");

         pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));

@@ -326,7 +326,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
                      xp_pl_link_config_ltssm_directive_f(
                              xp_pl_link_config_ltssm_directive_change_speed_v()));

-        xv_sc_dbg(EXEC_CHANGE, "  Executing change (0x%08x)!",
+        xv_sc_dbg(g, EXEC_CHANGE, "  Executing change (0x%08x)!",
                   pl_link_config);

         gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
@@ -348,11 +348,11 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)

         if (nvgpu_timeout_peek_expired(&timeout)) {
                 err_status = -ETIMEDOUT;
-                xv_sc_dbg(EXEC_CHANGE, "  timeout; pl_link_config = 0x%x",
+                xv_sc_dbg(g, EXEC_CHANGE, "  timeout; pl_link_config = 0x%x",
                           pl_link_config);
         }

-        xv_sc_dbg(EXEC_CHANGE, "  Change done... Checking status");
+        xv_sc_dbg(g, EXEC_CHANGE, "  Change done... Checking status");

         if (pl_link_config == 0xffffffff) {
                 WARN(1, "GPU fell of PCI bus!?");
@@ -366,19 +366,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
                 link_control_status =
                         g->ops.xve.xve_readl(g, xve_link_control_status_r());

-                xv_sc_dbg(EXEC_CHANGE, "  target %d vs current %d",
+                xv_sc_dbg(g, EXEC_CHANGE, "  target %d vs current %d",
                           link_speed_setting,
                           xve_link_control_status_link_speed_v(link_control_status));

                 if (err_status == -ETIMEDOUT) {
-                        xv_sc_dbg(EXEC_CHANGE, "  Oops timed out?");
+                        xv_sc_dbg(g, EXEC_CHANGE, "  Oops timed out?");
                         break;
                 }
         } while (attempts-- > 0 &&
                  link_speed_setting !=
                  xve_link_control_status_link_speed_v(link_control_status));

-        xv_sc_dbg(EXEC_VERIF, "Verifying speed change...");
+        xv_sc_dbg(g, EXEC_VERIF, "Verifying speed change...");

         /*
          * Check that the new link speed is actually active. If we failed to
@@ -390,10 +390,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
         if (link_speed_setting != new_link_speed) {
                 u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0));

-                xv_sc_dbg(EXEC_VERIF, "  Current and target speeds mismatch!");
-                xv_sc_dbg(EXEC_VERIF, "  LINK_CONTROL_STATUS: 0x%08x",
+                xv_sc_dbg(g, EXEC_VERIF, "  Current and target speeds mismatch!");
+                xv_sc_dbg(g, EXEC_VERIF, "  LINK_CONTROL_STATUS: 0x%08x",
                           g->ops.xve.xve_readl(g, xve_link_control_status_r()));
-                xv_sc_dbg(EXEC_VERIF, "  Link speed is %s - should be %s",
+                xv_sc_dbg(g, EXEC_VERIF, "  Link speed is %s - should be %s",
                           xve_speed_to_str(new_link_speed),
                           xve_speed_to_str(link_speed_setting));
@@ -417,19 +417,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
                 gk20a_writel(g, xp_pl_link_config_r(0), link_config);
                 err_status = -ENODEV;
         } else {
-                xv_sc_dbg(EXEC_VERIF, "  Current and target speeds match!");
+                xv_sc_dbg(g, EXEC_VERIF, "  Current and target speeds match!");
                 err_status = 0;
         }

 done:
         /* Restore safe timings. */
-        xv_sc_dbg(CLEANUP, "Restoring saved DL settings...");
+        xv_sc_dbg(g, CLEANUP, "Restoring saved DL settings...");
         gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr);
-        xv_sc_dbg(CLEANUP, "  Done");
+        xv_sc_dbg(g, CLEANUP, "  Done");

-        xv_sc_dbg(CLEANUP, "Re-enabling ASPM settings...");
+        xv_sc_dbg(g, CLEANUP, "Re-enabling ASPM settings...");
         enable_aspm_gp106(g);
-        xv_sc_dbg(CLEANUP, "  Done");
+        xv_sc_dbg(g, CLEANUP, "  Done");

         return err_status;
 }
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.h b/drivers/gpu/nvgpu/gp106/xve_gp106.h
index d48b0991..e0be35ac 100644
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/xve_gp106.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -49,11 +49,11 @@ enum xv_speed_change_steps {
         CLEANUP
 };

-#define xv_dbg(fmt, args...) \
-        gk20a_dbg(gpu_dbg_xv, fmt, ##args)
+#define xv_dbg(g, fmt, args...) \
+        nvgpu_log(g, gpu_dbg_xv, fmt, ##args)

-#define xv_sc_dbg(step, fmt, args...) \
-        xv_dbg("[%d] %15s | " fmt, step, __stringify(step), ##args)
+#define xv_sc_dbg(g, step, fmt, args...) \
+        xv_dbg(g, "[%d] %15s | " fmt, step, __stringify(step), ##args)

 void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val);
 u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg);
--
cgit v1.2.2
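
The conversion this patch applies is mechanical, and the same recipe works
for any remaining gk20a_dbg* user: give the per-chip debug macro a struct
gk20a parameter, forward it to nvgpu_log(), and thread g through every call
site. Below is a minimal sketch of that pattern. The gpxxx_dbg_foo() macro
and the example function are hypothetical stand-ins; only nvgpu_log(),
gpu_dbg_pmu and struct gk20a are real symbols from the patch above.

/* Old form: no device context, so the message cannot be associated
 * with a particular GPU instance.
 */
#define gpxxx_dbg_foo(fmt, arg...) \
        gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

/* New form: nvgpu_log() takes the struct gk20a pointer first, so the
 * print can be gated per GPU instance.
 */
#define gpxxx_dbg_foo(g, fmt, arg...) \
        nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

/* Every call site then passes g through explicitly
 * (hypothetical example, not part of the patch):
 */
static void gpxxx_show_falcon_count(struct gk20a *g, u32 falcon_cnt)
{
        gpxxx_dbg_foo(g, "managed falcon cnt %d\n", falcon_cnt);
}

Where a caller has no struct gk20a in scope and plumbing one through would
be churn for the sake of a debug print alone, the commit message notes that
the message is simply deleted rather than converted.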