From dd739fcb039d51606e9a5454ec0aab17bcb01965 Mon Sep 17 00:00:00 2001 From: Terje Bergstrom Date: Wed, 18 Apr 2018 19:39:46 -0700 Subject: gpu: nvgpu: Remove gk20a_dbg* functions Switch all logging to nvgpu_log*(). gk20a_dbg* macros are intentionally left there because of use from other repositories. Because the new functions do not work without a pointer to struct gk20a, and piping it just for logging is excessive, some log messages are deleted. Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e Signed-off-by: Terje Bergstrom Reviewed-on: https://git-master.nvidia.com/r/1704148 Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/gv11b/acr_gv11b.c | 38 ++++----- drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c | 6 +- drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c | 4 +- drivers/gpu/nvgpu/gv11b/fb_gv11b.c | 2 +- drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 61 +++++++-------- drivers/gpu/nvgpu/gv11b/gr_gv11b.c | 132 ++++++++++++++++---------------- drivers/gpu/nvgpu/gv11b/ltc_gv11b.c | 4 +- drivers/gpu/nvgpu/gv11b/mm_gv11b.c | 2 +- drivers/gpu/nvgpu/gv11b/pmu_gv11b.c | 22 +++--- drivers/gpu/nvgpu/gv11b/therm_gv11b.c | 4 +- 10 files changed, 139 insertions(+), 136 deletions(-) (limited to 'drivers/gpu/nvgpu/gv11b') diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c index 7ca8c703..673cb7f2 100644 --- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,8 +43,8 @@ #include /*Defines*/ -#define gv11b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gv11b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value) { @@ -60,7 +60,7 @@ int gv11b_alloc_blob_space(struct gk20a *g, { int err; - gv11b_dbg_pmu("alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS"); + gv11b_dbg_pmu(g, "alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS"); err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS, size, mem); @@ -87,10 +87,10 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g) start = nvgpu_mem_get_addr(g, &acr->ucode_blob); size = acr->ucode_blob.size; - gv11b_dbg_pmu("acr ucode blob start %llx\n", start); - gv11b_dbg_pmu("acr ucode blob size %x\n", size); + gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start); + gv11b_dbg_pmu(g, "acr ucode blob size %x\n", size); - gv11b_dbg_pmu(""); + gv11b_dbg_pmu(g, " "); if (!acr_fw) { /*First time init case*/ @@ -110,17 +110,17 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g) acr->fw_hdr->hdr_offset); img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256); - gv11b_dbg_pmu("sig dbg offset %u\n", + gv11b_dbg_pmu(g, "sig dbg offset %u\n", acr->fw_hdr->sig_dbg_offset); - gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size); - gv11b_dbg_pmu("sig prod offset %u\n", + gv11b_dbg_pmu(g, "sig dbg size %u\n", acr->fw_hdr->sig_dbg_size); + gv11b_dbg_pmu(g, "sig prod offset %u\n", acr->fw_hdr->sig_prod_offset); - gv11b_dbg_pmu("sig prod size %u\n", + gv11b_dbg_pmu(g, "sig prod size %u\n", acr->fw_hdr->sig_prod_size); - gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc); - gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig); - gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset); - gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size); + gv11b_dbg_pmu(g, "patch loc %u\n", acr->fw_hdr->patch_loc); + gv11b_dbg_pmu(g, "patch sig %u\n", acr->fw_hdr->patch_sig); + gv11b_dbg_pmu(g, "header offset %u\n", acr->fw_hdr->hdr_offset); + gv11b_dbg_pmu(g, "header size %u\n", acr->fw_hdr->hdr_size); /* Lets patch the signatures first.. */ if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load, @@ -144,7 +144,7 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g) } for (index = 0; index < 9; index++) - gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n", + gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n", acr_ucode_header_t210_load[index]); acr_dmem = (u64 *) @@ -212,7 +212,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; u32 dst; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | @@ -237,7 +237,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, pmu_bl_gm10x_desc->bl_start_tag); - gv11b_dbg_pmu("Before starting falcon with BL\n"); + gv11b_dbg_pmu(g, "Before starting falcon with BL\n"); virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; @@ -281,7 +281,7 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g, struct nvgpu_pmu *pmu = &g->pmu; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c index bb7c37bd..b4e2cb79 100644 --- a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c @@ -1,7 +1,7 @@ /* * GV11B Cycle stats snapshots support * - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -148,7 +148,7 @@ int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch, perf_pmasys_mem_block_target_lfb_f())); - gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n"); return 0; @@ -186,7 +186,7 @@ void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); data->hw_snapshot = NULL; - gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n"); } int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c index db09016c..5dea7654 100644 --- a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c @@ -57,7 +57,7 @@ int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) u32 inst_pa_page; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); err = gk20a_busy(g); if (err) { nvgpu_err(g, "failed to poweron"); @@ -100,7 +100,7 @@ int gv11b_perfbuf_disable_locked(struct gk20a *g) { int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); err = gk20a_busy(g); if (err) { nvgpu_err(g, "failed to poweron"); diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c index 30a2bca2..8bbde5c3 100644 --- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c @@ -1427,7 +1427,7 @@ static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g, u32 reg_val; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&g->mm.tlb_lock); diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c index 11b393e5..932e7626 100644 --- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c @@ -60,7 +60,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) { - + struct gk20a *g = tsg->g; u32 runlist_entry_0 = ram_rl_entry_type_tsg_v(); if (tsg->timeslice_timeout) @@ -79,7 +79,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid); runlist[3] = 0; - gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n", + nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n", runlist[0], runlist[1], runlist[2], runlist[3]); } @@ -119,7 +119,7 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist) ram_rl_entry_chid_f(c->chid); runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi); - gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", + nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", runlist[0], runlist[1], runlist[2], runlist[3]); } @@ -139,7 +139,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c, struct nvgpu_mem *mem = &c->inst_block; u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); @@ -211,10 +211,11 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c, void gv11b_ring_channel_doorbell(struct channel_gk20a *c) { - struct fifo_gk20a *f = &c->g->fifo; + struct gk20a *g = c->g; + struct fifo_gk20a *f = &g->fifo; u32 
hw_chid = f->channel_base + c->chid; - gk20a_dbg_info("channel ring door bell %d\n", c->chid); + nvgpu_log_info(g, "channel ring door bell %d\n", c->chid); nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(), usermode_notify_channel_pending_id_f(hw_chid)); @@ -256,7 +257,7 @@ void channel_gv11b_unbind(struct channel_gk20a *ch) { struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) { gk20a_writel(g, ccsr_channel_inst_r(ch->chid), @@ -729,7 +730,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id, timeout_rc_type); if (func_ret != 0) { - gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id); + nvgpu_log_info(g, "preempt timeout pbdma %d", pbdma_id); ret |= func_ret; } } @@ -743,7 +744,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, timeout_rc_type); if (func_ret != 0) { - gk20a_dbg_info("preempt timeout engine %d", act_eng_id); + nvgpu_log_info(g, "preempt timeout engine %d", act_eng_id); ret |= func_ret; } } @@ -812,10 +813,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) u32 mutex_ret = 0; u32 runlist_id; - gk20a_dbg_fn("%d", tsgid); + nvgpu_log_fn(g, "%d", tsgid); runlist_id = f->tsg[tsgid].runlist_id; - gk20a_dbg_fn("runlist_id %d", runlist_id); + nvgpu_log_fn(g, "runlist_id %d", runlist_id); nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); @@ -839,7 +840,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask) u32 mutex_ret = 0; u32 runlist_id; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) { if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id)) @@ -910,11 +911,11 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, return -EINVAL; if (runlist_id >= g->fifo.max_runlists) { - gk20a_dbg_info("runlist_id = %d", runlist_id); + nvgpu_log_info(g, "runlist_id = %d", runlist_id); return -EINVAL; } - gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id); + nvgpu_log_fn(g, "preempt id = %d, runlist_id = %d", id, runlist_id); nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); @@ -1155,7 +1156,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g) unsigned int i; u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* enable pmc pfifo */ g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); @@ -1208,11 +1209,11 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g) gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF); intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); - gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); + nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i)); - gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall); + nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall); gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall); } @@ -1246,12 +1247,12 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g) /* clear and enable pfifo interrupt */ gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); mask = gv11b_fifo_intr_0_en_mask(g); - gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); + nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask); gk20a_writel(g, fifo_intr_en_0_r(), mask); - gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); + nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000"); 
gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -1350,7 +1351,7 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id, tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info); } - gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid); + nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid); /* * STATUS indicates whether the context request ack was eventually @@ -1391,14 +1392,14 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id, if (*info_status == fifo_intr_ctxsw_timeout_info_status_ack_received_v()) { - gk20a_dbg_info("ctxsw timeout info : ack received"); + nvgpu_log_info(g, "ctxsw timeout info : ack received"); /* no need to recover */ tsgid = FIFO_INVAL_TSG_ID; } else if (*info_status == fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) { - gk20a_dbg_info("ctxsw timeout info : dropped timeout"); + nvgpu_log_info(g, "ctxsw timeout info : dropped timeout"); /* no need to recover */ tsgid = FIFO_INVAL_TSG_ID; @@ -1429,7 +1430,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr) timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r()); timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val); - gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val); + nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val); for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { active_eng_id = g->fifo.active_engines_list[engine_id]; @@ -1469,7 +1470,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr) true, true, verbose, RC_TYPE_CTXSW_TIMEOUT); } else { - gk20a_dbg_info( + nvgpu_log_info(g, "fifo is waiting for ctx switch: " "for %d ms, %s=%d", ms, "tsg", tsgid); } @@ -1490,7 +1491,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, pbdma_intr_0, handled, error_notifier); if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { - gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d", + nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d", pbdma_id); gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); *handled |= pbdma_intr_0_clear_faulted_error_pending_f(); @@ -1498,7 +1499,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, } if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { - gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d", + nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d", pbdma_id); *handled |= pbdma_intr_0_eng_reset_pending_f(); rc_type = RC_TYPE_PBDMA_FAULT; @@ -1545,7 +1546,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g, return RC_TYPE_NO_RC; if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) { - gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", + nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", pbdma_id); nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ", pbdma_id, pbdma_intr_1); @@ -1753,7 +1754,7 @@ void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g, u64 gpu_va = gpu_va_base + nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); off = cmd->off + off; @@ -1792,7 +1793,7 @@ void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g, { u32 off = cmd->off; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* semaphore_a */ nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c index 52e442f3..536d9dcb 100644 --- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c +++ 
b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c @@ -96,7 +96,7 @@ bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num) default: break; } - gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); + nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid); return valid; } @@ -190,7 +190,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status); if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow); @@ -205,7 +205,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow); @@ -282,7 +282,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status); if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow); @@ -297,7 +297,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow); @@ -441,7 +441,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status); if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow); @@ -456,7 +456,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow); @@ -521,7 +521,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status); if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM L1 data! 
err_mask [%08x] is_overf [%d]", l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow); @@ -536,7 +536,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]", l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow); @@ -605,7 +605,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status); if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow); @@ -620,7 +620,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow); @@ -1129,14 +1129,14 @@ static void gr_gv11b_set_coalesce_buffer_size(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data) @@ -1144,7 +1144,7 @@ static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data) u32 val; bool flag; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r()); flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 
1 : 0; @@ -1190,7 +1190,7 @@ static void gr_gv11b_set_skedcheck(struct gk20a *g, u32 data) static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(), @@ -1224,7 +1224,7 @@ static void gr_gv11b_set_shader_cut_collector(struct gk20a *g, u32 data) int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr, u32 class_num, u32 offset, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (class_num == VOLTA_COMPUTE_A) { switch (offset << 2) { @@ -1315,7 +1315,7 @@ void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) u32 pd_ab_max_output; u32 alpha_cb_size = data * 4; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (alpha_cb_size > gr->alpha_cb_size) alpha_cb_size = gr->alpha_cb_size; @@ -1360,7 +1360,7 @@ void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_index, ppc_index, stride, val; u32 cb_size_steady = data * 4, cb_size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (cb_size_steady > gr->attrib_cb_size) cb_size_steady = gr->attrib_cb_size; @@ -1423,8 +1423,9 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size, struct nvgpu_mem *mem) { int err; + struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); if (err) @@ -1500,9 +1501,9 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g, g->gr.max_tpc_count; attrib_cb_size = ALIGN(attrib_cb_size, 128); - gk20a_dbg_info("gfxp context spill_size=%d", spill_size); - gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); - gk20a_dbg_info("gfxp context attrib_cb_size=%d", + nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size); + nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size); + nvgpu_log_info(g, "gfxp context attrib_cb_size=%d", attrib_cb_size); err = gr_gp10b_alloc_buffer(vm, @@ -1590,7 +1591,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1600,7 +1601,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { - gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); + nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_graphics_preemption_options_o(), gfxp_preempt_option); @@ -1608,7 +1609,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { - gk20a_dbg_info("CILP: %x", cilp_preempt_option); + nvgpu_log_info(g, "CILP: %x", cilp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cilp_preempt_option); @@ -1616,7 +1617,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { - gk20a_dbg_info("CTA: %x", cta_preempt_option); + nvgpu_log_info(g, "CTA: %x", cta_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cta_preempt_option); @@ -1647,7 +1648,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); - gk20a_dbg_info("attrib cb addr : 0x%016x", addr); + 
nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr); g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> @@ -1698,7 +1699,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, } out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g, struct gk20a_debug_output *o, @@ -1949,7 +1950,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms, u32 activity0, activity1, activity2, activity4; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); @@ -1974,7 +1975,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms, gr_activity_empty_or_preempted(activity4)); if (!gr_enabled || (!gr_busy && !ctxsw_active)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2191,7 +2192,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, NVGPU_PREEMPTION_MODE_COMPUTE_CILP); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d sm %d = 0x%08x", gpc, tpc, sm, global_esr); @@ -2210,13 +2211,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, if (warp_esr != 0 || (global_esr & global_mask) != 0) { *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: starting wait for LOCKED_DOWN on " "gpc %d tpc %d sm %d", gpc, tpc, sm); if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Broadcasting STOP_TRIGGER from " "gpc %d tpc %d sm %d", gpc, tpc, sm); @@ -2225,7 +2226,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); } else { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: STOP_TRIGGER from " "gpc %d tpc %d sm %d", gpc, tpc, sm); @@ -2238,12 +2239,12 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, gpc, tpc, sm); g->ops.gr.clear_sm_hww(g, gpc, tpc, sm, global_esr_copy); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: HWWs cleared for " "gpc %d tpc %d sm %d", gpc, tpc, sm); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); if (ret) { nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); @@ -2252,7 +2253,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset); if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: clearing SINGLE_STEP_MODE " "before resume for gpc %d tpc %d sm %d", gpc, tpc, sm); @@ -2262,13 +2263,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: resume for gpc %d tpc %d sm %d", gpc, tpc, sm); g->ops.gr.resume_single_sm(g, gpc, tpc, sm); *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, 
tpc %d sm %d", gpc, tpc, sm); } @@ -2388,7 +2389,7 @@ int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr) GPU_LIT_NUM_TPC_PER_GPC); u32 num_tpcs = num_gpcs * num_tpc_per_gpc; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!gr->map_tiles) return -1; @@ -2535,7 +2536,7 @@ void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, { u32 val, i, j; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) { val = @@ -2666,8 +2667,9 @@ int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va) u32 addr_hi; struct ctx_header_desc *ctx; int err; + struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gv11b_alloc_subctx_header(c); if (err) @@ -2704,7 +2706,7 @@ int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c) u32 pe_vaf; u32 pe_vsc_vpc; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); ds_debug = gk20a_readl(g, gr_ds_debug_r()); @@ -2814,7 +2816,7 @@ void gr_gv11b_load_tpc_mask(struct gk20a *g) } } - gk20a_dbg_info("pes_tpc_mask %u\n", pes_tpc_mask); + nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask); fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc); if (g->tpc_fs_mask_user && g->tpc_fs_mask_user != fuse_tpc_mask && @@ -2860,7 +2862,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g) u32 ver = g->params.gpu_arch + g->params.gpu_impl; u32 rev = g->params.gpu_rev; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), @@ -2928,14 +2930,14 @@ void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc, u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sm tpc esr sm sel reg val: 0x%x", reg_val); *esr_sm_sel = 0; if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val)) *esr_sm_sel = 1; if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val)) *esr_sm_sel |= 1 << 1; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "esr_sm_sel bitmask: 0x%x", *esr_sm_sel); } @@ -2954,7 +2956,7 @@ int gv11b_gr_sm_trigger_suspend(struct gk20a *g) gk20a_writel(g, gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "stop trigger enable: broadcast dbgr_control0: 0x%x ", dbgr_control0); @@ -3012,19 +3014,19 @@ void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state) /* Only for debug purpose */ for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { - gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n", sm_id, w_state[sm_id].valid_warps[0]); - gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n", sm_id, w_state[sm_id].valid_warps[1]); - gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n", sm_id, w_state[sm_id].trapped_warps[0]); - gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n", sm_id, w_state[sm_id].trapped_warps[1]); - gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n", sm_id, w_state[sm_id].paused_warps[0]); - 
gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n", sm_id, w_state[sm_id].paused_warps[1]); } } @@ -3257,7 +3259,7 @@ bool gv11b_gr_sm_debugger_attached(struct gk20a *g) */ debugger_mode = gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Debugger Mode: %d", debugger_mode); if (debugger_mode == gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v()) @@ -3576,7 +3578,7 @@ static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g, dbgr_status0, dbgr_control0, warps_valid, warps_paused, warps_trapped); else - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx " "PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n", dbgr_status0, dbgr_control0, warps_valid, @@ -3598,7 +3600,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g, gk20a_gr_tpc_offset(g, tpc) + gv11b_gr_sm_offset(g, sm); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: locking down SM%d", gpc, tpc, sm); nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), @@ -3642,7 +3644,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g, } if (locked_down || no_error_pending) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: locked down SM%d", gpc, tpc, sm); return 0; } @@ -3677,7 +3679,7 @@ int gv11b_gr_lock_down_sm(struct gk20a *g, u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) + gv11b_gr_sm_offset(g, sm); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); /* assert stop trigger */ @@ -3699,13 +3701,13 @@ void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, global_esr); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "Cleared HWW global esr, current reg val: 0x%x", gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset)); gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "Cleared HWW warp esr, current reg val: 0x%x", gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset)); @@ -4440,7 +4442,7 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr, { u32 gpc_addr; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* setup defaults */ *addr_type = CTXSW_ADDR_TYPE_SYS; @@ -4591,12 +4593,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, t = 0; *num_registers = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, &gpc_num, &tpc_num, &ppc_num, &be_num, &broadcast_flags); - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); if (err) return err; @@ -4690,7 +4692,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || (addr_type == CTXSW_ADDR_TYPE_ETPC)) && g->ops.gr.egpc_etpc_priv_addr_table) { - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); 
g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, broadcast_flags, priv_addr_table, &t); } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c index 9f6d176e..9f9ff337 100644 --- a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c @@ -56,7 +56,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g) u32 ltc_intr; u32 reg; - gk20a_dbg_info("initialize gv11b l2"); + nvgpu_log_info(g, "initialize gv11b l2"); g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | mc_enable_l2_enabled_f()); @@ -67,7 +67,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g) g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); - gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count); + nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count); /* Disable LTC interrupts */ reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c index b46ecb0a..f4084ad6 100644 --- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c @@ -54,7 +54,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block, { struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", + nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p", nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); g->ops.mm.init_pdb(g, inst_block, vm); diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c index c1b519d0..3f0e2f22 100644 --- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c @@ -37,8 +37,8 @@ #include -#define gv11b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gv11b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) #define ALIGN_4KB 12 @@ -121,7 +121,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g) u32 reg_writes; u32 index; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->elpg_enabled) { reg_writes = ((sizeof(_pginitseq_gv11b) / @@ -133,7 +133,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -187,7 +187,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu) u64 addr_code_hi, addr_data_hi; u32 i, blocks, addr_args; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | @@ -407,28 +407,28 @@ u32 gv11b_pmu_get_irqdest(struct gk20a *g) static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "Sub-feature mask update cmd aborted\n"); return; } - gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n", + gv11b_dbg_pmu(g, "sub-feature mask update is acknowledged from PMU %x\n", msg->msg.pg.msg_type); } static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "GR PARAM cmd aborted\n"); return; } - gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n", + gv11b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x\n", msg->msg.pg.msg_type); } @@ -450,7 +450,7 @@ int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) cmd.cmd.pg.gr_init_param_v1.featuremask = NVGPU_PMU_GR_FEATURE_MASK_ALL; - gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); + gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_param_msg, pmu, &seq, ~0); @@ -488,7 +488,7 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id) NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC | NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG; - gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); + gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0); } else diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c index 067c464b..961ab5c0 100644 --- a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c @@ -34,7 +34,7 @@ int gv11b_init_therm_setup_hw(struct gk20a *g) { u32 v; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* program NV_THERM registers */ gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | @@ -108,7 +108,7 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g) if (nvgpu_platform_is_simulation(g)) return 0; - gk20a_dbg_info("init clock/power gate reg"); + nvgpu_log_info(g, "init clock/power gate reg"); for (engine_id = 0; engine_id < f->num_engines; engine_id++) { active_engine_id = f->active_engines_list[engine_id]; -- cgit v1.2.2
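
Editor's note: the hunks above all apply the same mechanical conversion described in the commit message. Below is a minimal standalone C sketch of that pattern, not code from the nvgpu tree: struct gk20a, gpu_dbg_pmu and nvgpu_log() here are simplified stand-ins for the real driver definitions, kept only so the example builds on its own; only the calling convention mirrors the patch.

/*
 * Sketch of the logging conversion this commit performs.
 * Stand-in types and helpers; not the real nvgpu interfaces.
 */
#include <stdarg.h>
#include <stdio.h>

struct gk20a {
	unsigned long log_mask;	/* which gpu_dbg_* categories are enabled */
};

enum { gpu_dbg_pmu = 1U << 0, gpu_dbg_fn = 1U << 1 };

/* stand-in for nvgpu_log(): drops the message unless the category is enabled */
static void nvgpu_log(struct gk20a *g, unsigned long mask, const char *fmt, ...)
{
	va_list args;

	if (!(g->log_mask & mask))
		return;
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
	printf("\n");
}

/* Old style: no device pointer, relied on an implicit/global context. */
/* #define gv11b_dbg_pmu(fmt, arg...)    gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) */

/* New style, as in this patch: every caller must pass struct gk20a *g. */
#define gv11b_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

int main(void)
{
	struct gk20a g = { .log_mask = gpu_dbg_pmu };

	/* a converted call site now threads the device pointer through */
	gv11b_dbg_pmu(&g, "acr ucode blob size %x", 0x1000u);
	return 0;
}

The same shape applies to the function-trace calls in the diff: gk20a_dbg_fn("") becomes nvgpu_log_fn(g, " "), adding the device pointer and substituting a single-space format string for the empty one (presumably to keep the format non-empty). Where no struct gk20a pointer was reachable at a call site, the commit drops the message instead, as stated in the commit description.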