From dd739fcb039d51606e9a5454ec0aab17bcb01965 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Wed, 18 Apr 2018 19:39:46 -0700
Subject: gpu: nvgpu: Remove gk20a_dbg* functions

Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are
intentionally left in place because they are still used from other
repositories.

Because the new functions do not work without a pointer to struct
gk20a, and piping one through just for logging would be excessive,
some log messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/vgpu/ce2_vgpu.c             |  2 +-
 drivers/gpu/nvgpu/vgpu/dbg_vgpu.c             |  6 +-
 drivers/gpu/nvgpu/vgpu/fifo_vgpu.c            | 70 ++++++++++++-----------
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c  | 14 ++---
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c  |  2 +-
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c              | 82 ++++++++++++++-------------
 drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c     |  2 +-
 drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c |  3 +-
 drivers/gpu/nvgpu/vgpu/ltc_vgpu.c             |  6 +-
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c              | 23 ++++----
 drivers/gpu/nvgpu/vgpu/tsg_vgpu.c             | 15 +++--
 drivers/gpu/nvgpu/vgpu/vgpu.c                 | 12 ++--
 12 files changed, 128 insertions(+), 109 deletions(-)

diff --git a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
index a552ad44..563c3a2b 100644
--- a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
@@ -30,7 +30,7 @@ int vgpu_ce2_nonstall_isr(struct gk20a *g,
 		struct tegra_vgpu_ce2_nonstall_intr_info *info)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	switch (info->type) {
 	case TEGRA_VGPU_CE2_NONSTALL_INTR_NONBLOCKPIPE:
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
index 092954ed..2bb3b205 100644
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
@@ -42,8 +42,9 @@ int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
 	size_t oob_size, ops_size;
 	void *handle = NULL;
 	int err = 0;
+	struct gk20a *g = dbg_s->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op));
 	handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
@@ -82,8 +83,9 @@ int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerga
 	struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate;
 	int err = 0;
 	u32 mode;
+	struct gk20a *g = dbg_s->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* Just return if requested mode is the same as the session's mode */
 	if (disable_powergate) {
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 3ea326b8..eb25cf3a 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -45,8 +45,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_info("bind channel %d", ch->chid);
+	nvgpu_log_info(g, "bind channel %d", ch->chid);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -60,8 +61,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
 
 void vgpu_channel_unbind(struct channel_gk20a *ch)
 {
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
 		struct tegra_vgpu_cmd_msg msg;
@@ -84,7 +86,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
 	msg.handle = vgpu_get_handle(g);
@@ -97,7 +99,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	}
 
 	ch->virt_ctx = p->handle;
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -107,7 +109,7 @@ void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
 	msg.handle = vgpu_get_handle(g);
@@ -122,8 +124,9 @@ void vgpu_channel_enable(struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -138,8 +141,9 @@ void vgpu_channel_disable(struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -155,8 +159,9 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -175,8 +180,9 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
 	struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
 	u32 i;
+	struct gk20a *g = f->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
 		nvgpu_err(f->g, "num_engines %d larger than max %d",
@@ -207,7 +213,7 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
 		f->active_engines_list[i] = engines->info[i].engine_id;
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -219,7 +225,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 	u32 i;
 	u64 runlist_size;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	f->max_runlists = g->ops.fifo.eng_runlist_base_size();
 	f->runlist_info = nvgpu_kzalloc(g,
@@ -256,12 +262,12 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up_runlist:
 	gk20a_fifo_delete_runlist(f);
-	gk20a_dbg_fn("fail");
+	nvgpu_log_fn(g, "fail");
 	return -ENOMEM;
 }
 
@@ -272,10 +278,10 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	unsigned int chid;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (f->sw_ready) {
-		gk20a_dbg_fn("skip init");
+		nvgpu_log_fn(g, "skip init");
 		return 0;
 	}
 
@@ -306,7 +312,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 		f->userd.gpu_va = 0;
 	}
 
-	gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
+	nvgpu_log(g, gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
 
 	f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
 	f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
@@ -350,11 +356,11 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	f->sw_ready = true;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up:
-	gk20a_dbg_fn("fail");
+	nvgpu_log_fn(g, "fail");
 	/* FIXME: unmap from bar1 */
 	nvgpu_dma_free(g, &f->userd);
 
@@ -374,7 +380,7 @@ clean_up:
 
 int vgpu_init_fifo_setup_hw(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* test write, read through bar1 @ userd region before
 	 * turning on the snooping */
@@ -385,7 +391,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
 		u32 bar1_vaddr = f->userd.gpu_va;
 		volatile u32 *cpu_vaddr = f->userd.cpu_va;
 
-		gk20a_dbg_info("test bar1 @ vaddr 0x%x",
+		nvgpu_log_info(g, "test bar1 @ vaddr 0x%x",
 			   bar1_vaddr);
 
 		v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -415,7 +421,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
 		gk20a_bar1_writel(g, bar1_vaddr, v);
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -424,7 +430,7 @@ int vgpu_init_fifo_support(struct gk20a *g)
 {
 	u32 err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = vgpu_init_fifo_setup_sw(g);
 	if (err)
@@ -444,7 +450,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
 			&msg.params.channel_config;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!nvgpu_atomic_read(&ch->bound))
 		return 0;
@@ -470,7 +476,7 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 			&msg.params.tsg_preempt;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
 	msg.handle = vgpu_get_handle(g);
@@ -533,7 +539,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	u16 *runlist_entry = NULL;
 	u32 count = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	runlist = &f->runlist_info[runlist_id];
 
@@ -558,7 +564,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 		runlist_entry = runlist->mem[0].cpu_va;
 		for_each_set_bit(cid, runlist->active_channels, f->num_channels) {
-			gk20a_dbg_info("add channel %d to runlist", cid);
+			nvgpu_log_info(g, "add channel %d to runlist", cid);
 			runlist_entry[0] = cid;
 			runlist_entry++;
 			count++;
 		}
@@ -581,7 +587,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 	struct fifo_gk20a *f = &g->fifo;
 	u32 ret = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	runlist = &f->runlist_info[runlist_id];
 
@@ -596,7 +602,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 
 int vgpu_fifo_wait_engine_idle(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return 0;
 }
@@ -611,7 +617,7 @@ int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 			&msg.params.tsg_interleave;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
 	msg.handle = vgpu_get_handle(g);
@@ -633,7 +639,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 			&msg.params.channel_config;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
@@ -716,7 +722,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!ch)
 		return 0;
@@ -750,7 +756,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 int vgpu_fifo_nonstall_isr(struct gk20a *g,
 		struct tegra_vgpu_fifo_nonstall_intr_info *info)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	switch (info->type) {
 	case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL:
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index ab35dc67..86184336 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -43,7 +43,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
 	if (err)
@@ -78,7 +78,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		}
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return err;
 
 fail:
@@ -132,11 +132,11 @@ int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 
 		attrib_cb_size = ALIGN(attrib_cb_size, 128);
 
-		gk20a_dbg_info("gfxp context preempt size=%d",
+		nvgpu_log_info(g, "gfxp context preempt size=%d",
 			g->gr.ctx_vars.preempt_image_size);
-		gk20a_dbg_info("gfxp context spill size=%d", spill_size);
-		gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
-		gk20a_dbg_info("gfxp context attrib cb size=%d",
+		nvgpu_log_info(g, "gfxp context spill size=%d", spill_size);
+		nvgpu_log_info(g, "gfxp context pagepool size=%d", pagepool_size);
+		nvgpu_log_info(g, "gfxp context attrib cb size=%d",
 			attrib_cb_size);
 
 		err = gr_gp10b_alloc_buffer(vm,
@@ -293,7 +293,7 @@ int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = vgpu_gr_init_ctx_state(g);
 	if (err)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index e615c486..b8c4d2de 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -78,7 +78,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	u8 prot;
 	struct nvgpu_sgl *sgl;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* FIXME: add support for sparse mappings */
 
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 2f1280ac..1e633d5f 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -43,7 +43,7 @@ void vgpu_gr_detect_sm_arch(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->params.sm_arch_sm_version =
 		priv->constants.sm_arch_sm_version;
@@ -58,8 +58,9 @@ int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
+	struct gk20a *g = c->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
 	msg.handle = vgpu_get_handle(c->g);
@@ -76,7 +77,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
 	msg.handle = vgpu_get_handle(g);
@@ -94,7 +95,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
 	msg.handle = vgpu_get_handle(g);
@@ -109,7 +110,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
 	struct gr_gk20a *gr = &g->gr;
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
 	g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
@@ -135,20 +136,20 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 	u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
 		gr_scc_pagepool_total_pages_byte_granularity_v();
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
 
-	gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
+	nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size);
 	gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
 
-	gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
+	nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
 	gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
 
-	gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
+	nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
 	gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
 
-	gk20a_dbg_info("priv access map size : %d",
+	nvgpu_log_info(g, "priv access map size : %d",
 		gr->ctx_vars.priv_access_map_size);
 	gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
 		gr->ctx_vars.priv_access_map_size;
@@ -170,7 +171,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	u32 i;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -249,8 +250,9 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 	u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
 	u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
 	u32 i;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (tsg->gr_ctx.global_ctx_buffer_mapped) {
 		/* server will unmap on channel close */
@@ -279,7 +281,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	struct gr_gk20a *gr = &g->gr;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gr->ctx_vars.buffer_size == 0)
 		return 0;
@@ -328,7 +330,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -359,8 +361,9 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 {
 	struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (patch_ctx->mem.gpu_va) {
 		/* server will free on channel close */
@@ -375,8 +378,9 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 {
 	struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx;
 	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* check if hwpm was ever initialized. If not, nothing to do */
 	if (pm_ctx->mem.gpu_va == 0)
@@ -394,7 +398,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 {
 	struct tsg_gk20a *tsg;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gr_ctx->mem.gpu_va) {
 		struct tegra_vgpu_cmd_msg msg;
@@ -477,7 +481,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	struct tsg_gk20a *tsg = NULL;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* an address space needs to have been bound at this point.*/
 	if (!gk20a_channel_as_bound(c)) {
@@ -577,7 +581,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	/* PM ctxt switch is off by default */
 	gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 out:
 	/* 1. gr_ctx, patch_ctx and global ctx buffer mapping
@@ -595,7 +599,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	u32 sm_per_tpc;
 	int err = -ENOMEM;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	gr->max_gpc_count = priv->constants.max_gpc_count;
 	gr->gpc_count = priv->constants.gpc_count;
@@ -658,7 +662,7 @@ int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 	struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
 	msg.handle = vgpu_get_handle(g);
@@ -677,7 +681,7 @@ int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 	struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
 	msg.handle = vgpu_get_handle(g);
@@ -712,7 +716,7 @@ u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.num_fbps;
 }
@@ -721,7 +725,7 @@ u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.fbp_en_mask;
 }
@@ -730,7 +734,7 @@ u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.ltc_per_fbp;
 }
@@ -739,7 +743,7 @@ u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.max_lts_per_ltc;
 }
@@ -749,7 +753,7 @@ u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	u32 i, max_fbps_count = priv->constants.num_fbps;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (g->gr.fbp_rop_l2_en_mask == NULL) {
 		g->gr.fbp_rop_l2_en_mask =
@@ -772,7 +776,7 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
 	msg.handle = vgpu_get_handle(g);
@@ -804,7 +808,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 			&msg.params.zbc_query_table;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
 	msg.handle = vgpu_get_handle(g);
@@ -840,7 +844,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
 static void vgpu_remove_gr_support(struct gr_gk20a *gr)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(gr->g, " ");
 
 	gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags);
 
@@ -865,10 +869,10 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	struct gr_gk20a *gr = &g->gr;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gr->sw_ready) {
-		gk20a_dbg_fn("skip init");
+		nvgpu_log_fn(g, "skip init");
 		return 0;
 	}
 
@@ -907,7 +911,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	gr->remove_support = vgpu_remove_gr_support;
 	gr->sw_ready = true;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up:
@@ -918,7 +922,7 @@ clean_up:
 
 int vgpu_init_gr_support(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return vgpu_gr_init_gr_setup_sw(g);
 }
@@ -928,7 +932,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
 
-	gk20a_dbg_fn("");
gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!ch) return 0; @@ -985,7 +989,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info) int vgpu_gr_nonstall_isr(struct gk20a *g, struct tegra_vgpu_gr_nonstall_intr_info *info) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); switch (info->type) { case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE: @@ -1006,7 +1010,7 @@ int vgpu_gr_set_sm_debug_mode(struct gk20a *g, struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE; msg.handle = vgpu_get_handle(g); @@ -1026,7 +1030,7 @@ int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g, struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE; msg.handle = vgpu_get_handle(g); @@ -1053,7 +1057,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g, struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(ch); if (!tsg) diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c index 933e8357..1bcd151a 100644 --- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c @@ -30,7 +30,7 @@ int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = vgpu_init_gpu_characteristics(g); if (err) { diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c index b249b5af..367c1299 100644 --- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c @@ -33,8 +33,9 @@ int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg, struct tegra_vgpu_tsg_bind_channel_ex_params *p = &msg.params.tsg_bind_channel_ex; int err; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_tsg_bind_channel(tsg, ch); if (err) diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c index d451a1f2..f68c8454 100644 --- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c @@ -31,7 +31,7 @@ int vgpu_determine_L2_size_bytes(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return priv->constants.l2_size; } @@ -42,7 +42,7 @@ int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) u32 max_comptag_lines = 0; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr->cacheline_size = priv->constants.cacheline_size; gr->comptags_per_cacheline = priv->constants.comptags_per_cacheline; @@ -65,7 +65,7 @@ void vgpu_ltc_init_fs_state(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ltc_count = priv->constants.ltc_count; } diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c index 3e75cee3..b8eaa1db 100644 --- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c @@ -40,10 +40,10 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g) { struct mm_gk20a *mm = &g->mm; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (mm->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -56,7 +56,7 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g) mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; mm->channel.kernel_size = 
 
-	gk20a_dbg_info("channel vm size: user %dMB kernel %dMB",
+	nvgpu_log_info(g, "channel vm size: user %dMB kernel %dMB",
 		(int)(mm->channel.user_size >> 20),
 		(int)(mm->channel.kernel_size >> 20));
 
@@ -69,7 +69,7 @@ int vgpu_init_mm_support(struct gk20a *g)
 {
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = vgpu_init_mm_setup_sw(g);
 	if (err)
@@ -95,7 +95,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
 	msg.handle = vgpu_get_handle(g);
@@ -183,8 +183,9 @@ int vgpu_vm_bind_channel(struct vm_gk20a *vm,
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	ch->vm = vm;
 	msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
@@ -220,7 +221,7 @@ static void vgpu_cache_maint(u64 handle, u8 op)
 
 int vgpu_mm_fb_flush(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
 	return 0;
@@ -229,7 +230,7 @@ int vgpu_mm_fb_flush(struct gk20a *g)
 
 void vgpu_mm_l2_invalidate(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
 }
@@ -238,7 +239,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
 	u8 op;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (invalidate)
 		op = TEGRA_VGPU_L2_MAINT_FLUSH_INV;
@@ -250,7 +251,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 
 void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_err(g, "call to RM server not supported");
 }
@@ -261,7 +262,7 @@ void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 	struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
 	msg.handle = vgpu_get_handle(g);
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index a6e493d0..7bb8f671 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -35,8 +35,9 @@ int vgpu_tsg_open(struct tsg_gk20a *tsg)
 	struct tegra_vgpu_tsg_open_rel_params *p =
 				&msg.params.tsg_open;
 	int err;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_OPEN;
 	msg.handle = vgpu_get_handle(tsg->g);
@@ -57,8 +58,9 @@ void vgpu_tsg_release(struct tsg_gk20a *tsg)
 	struct tegra_vgpu_tsg_open_rel_params *p =
 				&msg.params.tsg_release;
 	int err;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_RELEASE;
 	msg.handle = vgpu_get_handle(tsg->g);
@@ -91,8 +93,9 @@ int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
 				&msg.params.tsg_bind_unbind_channel;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gk20a_tsg_bind_channel(tsg, ch);
 	if (err)
@@ -120,8 +123,9 @@ int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
 	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
 				&msg.params.tsg_bind_unbind_channel;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gk20a_fifo_tsg_unbind_channel(ch);
 	if (err)
@@ -143,8 +147,9 @@ int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 	struct tegra_vgpu_tsg_timeslice_params *p =
 				&msg.params.tsg_timeslice;
 	int err;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE;
 	msg.handle = vgpu_get_handle(tsg->g);
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index 1e77cda9..17e80cd7 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -249,7 +249,7 @@ void vgpu_detect_chip(struct gk20a *g)
 	p->gpu_impl = priv->constants.impl;
 	p->gpu_rev = priv->constants.rev;
 
-	gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
+	nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n",
 			p->gpu_arch,
 			p->gpu_impl,
 			p->gpu_rev);
@@ -259,7 +259,7 @@ int vgpu_init_gpu_characteristics(struct gk20a *g)
 {
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gk20a_init_gpu_characteristics(g);
 	if (err)
@@ -279,7 +279,7 @@ int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 	struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
 	msg.handle = vgpu_get_handle(g);
@@ -304,7 +304,7 @@ int vgpu_get_timestamps_zipper(struct gk20a *g,
 	int err;
 	u32 i;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
 		nvgpu_err(g, "count %u overflow", count);
@@ -338,7 +338,7 @@ int vgpu_init_hal(struct gk20a *g)
 
 	switch (ver) {
 	case NVGPU_GPUID_GP10B:
-		gk20a_dbg_info("gp10b detected");
+		nvgpu_log_info(g, "gp10b detected");
 		err = vgpu_gp10b_init_hal(g);
 		break;
 	case NVGPU_GPUID_GV11B:
@@ -360,7 +360,7 @@ int vgpu_get_constants(struct gk20a *g)
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
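
The conversion the patch applies is mechanical: the nvgpu_log*() macros
take a struct gk20a pointer as their first argument, so callbacks that
only receive a channel, TSG, or FIFO object derive it from that object's
g field. A minimal sketch of the before/after shape — the function name
below is hypothetical, and the macro definitions themselves live in
nvgpu's logging header, outside this diff:

	/*
	 * Hypothetical callback illustrating the pattern; only a
	 * channel pointer is assumed, as in the vgpu callbacks above.
	 */
	static void example_vgpu_callback(struct channel_gk20a *ch)
	{
		struct gk20a *g = ch->g;	/* derive g from the object at hand */

		/* old: gk20a_dbg_fn(""); -- no gk20a pointer required */
		nvgpu_log_fn(g, " ");	/* new: g is required; the patch also
					 * replaces the empty message with a
					 * single space throughout */
	}

Where no such object is reachable — presumably helpers like
vgpu_cache_maint(), which receives only an IVC handle — the commit drops
the message entirely rather than piping a struct gk20a through, as the
commit description states.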