From dd739fcb039d51606e9a5454ec0aab17bcb01965 Mon Sep 17 00:00:00 2001 From: Terje Bergstrom Date: Wed, 18 Apr 2018 19:39:46 -0700 Subject: gpu: nvgpu: Remove gk20a_dbg* functions Switch all logging to nvgpu_log*(). gk20a_dbg* macros are intentionally left there because of use from other repositories. Because the new functions do not work without a pointer to struct gk20a, and piping it just for logging is excessive, some log messages are deleted. Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e Signed-off-by: Terje Bergstrom Reviewed-on: https://git-master.nvidia.com/r/1704148 Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/gp10b/ce_gp10b.c | 10 +-- drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c | 4 +- drivers/gpu/nvgpu/gp10b/fifo_gp10b.c | 25 +++--- drivers/gpu/nvgpu/gp10b/gr_gp10b.c | 123 +++++++++++++++-------------- drivers/gpu/nvgpu/gp10b/ltc_gp10b.c | 14 ++-- drivers/gpu/nvgpu/gp10b/mc_gp10b.c | 4 +- drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 16 ++-- drivers/gpu/nvgpu/gp10b/pmu_gp10b.c | 26 +++--- drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c | 14 ++-- drivers/gpu/nvgpu/gp10b/therm_gp10b.c | 8 +- 10 files changed, 123 insertions(+), 121 deletions(-) (limited to 'drivers/gpu/nvgpu/gp10b') diff --git a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c index 86a2b751..e2ad1bd3 100644 --- a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c @@ -1,7 +1,7 @@ /* * Pascal GPU series Copy Engine. * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -30,14 +30,14 @@ static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr) { - gk20a_dbg(gpu_dbg_intr, "ce blocking pipe interrupt\n"); + nvgpu_log(g, gpu_dbg_intr, "ce blocking pipe interrupt\n"); return ce_intr_status_blockpipe_pending_f(); } static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr) { - gk20a_dbg(gpu_dbg_intr, "ce launch error interrupt\n"); + nvgpu_log(g, gpu_dbg_intr, "ce launch error interrupt\n"); return ce_intr_status_launcherr_pending_f(); } @@ -47,7 +47,7 @@ void gp10b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base) u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); u32 clear_intr = 0; - gk20a_dbg(gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); + nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); /* clear blocking interrupts: they exibit broken behavior */ if (ce_intr & ce_intr_status_blockpipe_pending_f()) @@ -65,7 +65,7 @@ int gp10b_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base) int ops = 0; u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); - gk20a_dbg(gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); + nvgpu_log(g, gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) { gk20a_writel(g, ce_intr_status_r(inst_id), diff --git a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c index 511d565a..c477c77d 100644 --- a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B GPU FECS traces * - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,7 +43,7 @@ int gp10b_fecs_trace_flush(struct gk20a *g) }; int err; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); err = gr_gk20a_elpg_protected_call(g, gr_gk20a_submit_fecs_method_op(g, op, false)); diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c index 66f3012f..fd4ec34e 100644 --- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c @@ -43,7 +43,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g, { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = nvgpu_mem_rd32(g, mem, ram_in_page_dir_base_fault_replay_tex_w()); @@ -59,7 +59,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g, nvgpu_mem_wr32(g, mem, ram_in_page_dir_base_fault_replay_gcc_w(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int channel_gp10b_commit_userd(struct channel_gk20a *c) @@ -68,12 +68,12 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c) u32 addr_hi; struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); addr_hi = u64_hi32(c->userd_iova); - gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", + nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx", c->chid, (u64)c->userd_iova); nvgpu_mem_wr32(g, &c->inst_block, @@ -98,7 +98,7 @@ int channel_gp10b_setup_ramfc(struct channel_gk20a *c, struct gk20a *g = c->g; struct nvgpu_mem *mem = &c->inst_block; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); @@ -167,8 +167,9 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) { u32 new_syncpt = 0, old_syncpt; u32 v; + struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); v = nvgpu_mem_rd32(c->g, &c->inst_block, ram_fc_allowed_syncpoints_w()); @@ -185,7 +186,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) v = pbdma_allowed_syncpoints_0_valid_f(1); - gk20a_dbg_info("Channel %d, syncpt id %d\n", + nvgpu_log_info(g, "Channel %d, syncpt id %d\n", c->chid, new_syncpt); v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt); @@ -197,7 +198,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) /* enable channel */ gk20a_enable_channel_tsg(c->g, c); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -207,7 +208,7 @@ int gp10b_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, { int ret = ENGINE_INVAL_GK20A; - gk20a_dbg_info("engine type %d", engine_type); + nvgpu_log_info(g, "engine type %d", engine_type); if (engine_type == top_device_info_type_enum_graphics_v()) ret = ENGINE_GR_GK20A; else if (engine_type == top_device_info_type_enum_lce_v()) { @@ -229,13 +230,13 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry, *pri_base = (top_device_info_data_pri_base_v(table_entry) << top_device_info_data_pri_base_align_v()); - gk20a_dbg_info("device info: pri_base: %d", *pri_base); + nvgpu_log_info(g, "device info: pri_base: %d", *pri_base); } if (fault_id && (top_device_info_data_fault_id_v(table_entry) == top_device_info_data_fault_id_valid_v())) { *fault_id = g->ops.fifo.device_info_fault_id(table_entry); - gk20a_dbg_info("device info: fault_id: %d", *fault_id); + nvgpu_log_info(g, "device info: fault_id: %d", *fault_id); } } else nvgpu_err(g, "unknown device_info_data %d", @@ -293,7 +294,7 @@ void 
gp10b_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, u32 fault_info; u32 addr_lo, addr_hi; - gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); + nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id); memset(mmfault, 0, sizeof(*mmfault)); diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c index 0178abbf..bc982d30 100644 --- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c @@ -69,7 +69,7 @@ bool gr_gp10b_is_valid_class(struct gk20a *g, u32 class_num) default: break; } - gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); + nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid); return valid; } @@ -169,7 +169,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, 0); if (lrf_ecc_sed_status) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Single bit error detected in SM LRF!"); gr_gp10b_sm_lrf_ecc_overcount_war(1, @@ -181,7 +181,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, lrf_single_count_delta; } if (lrf_ecc_ded_status) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Double bit error detected in SM LRF!"); gr_gp10b_sm_lrf_ecc_overcount_war(0, @@ -208,7 +208,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) { u32 ecc_stats_reg_val; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Single bit error detected in SM SHM!"); ecc_stats_reg_val = @@ -230,7 +230,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) { u32 ecc_stats_reg_val; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Double bit error detected in SM SHM!"); ecc_stats_reg_val = @@ -260,14 +260,14 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 esr; u32 ecc_stats_reg_val; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); esr = gk20a_readl(g, gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Single bit error detected in TEX!"); /* Pipe 0 counters */ @@ -323,7 +323,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f()); } if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Double bit error detected in TEX!"); /* Pipe 0 counters */ @@ -403,7 +403,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g, u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -660,21 +660,21 @@ static void gr_gp10b_set_coalesce_buffer_size(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); - gk20a_dbg_fn("done"); + 
nvgpu_log_fn(g, "done"); } void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_bes_crop_debug3_r()); if ((data & 1)) { @@ -722,7 +722,7 @@ void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data) int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr, u32 class_num, u32 offset, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (class_num == PASCAL_COMPUTE_A) { switch (offset << 2) { @@ -800,7 +800,7 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (alpha_cb_size > gr->alpha_cb_size) alpha_cb_size = gr->alpha_cb_size; @@ -853,7 +853,7 @@ void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (cb_size_steady > gr->attrib_cb_size) cb_size_steady = gr->attrib_cb_size; @@ -923,7 +923,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) }; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_init_ctx_state(g); if (err) @@ -940,10 +940,10 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) } } - gk20a_dbg_info("preempt image size: %u", + nvgpu_log_info(g, "preempt image size: %u", g->gr.ctx_vars.preempt_image_size); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -952,8 +952,9 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size, struct nvgpu_mem *mem) { int err; + struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); if (err) @@ -1029,9 +1030,9 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, g->gr.max_tpc_count; attrib_cb_size = ALIGN(attrib_cb_size, 128); - gk20a_dbg_info("gfxp context spill_size=%d", spill_size); - gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); - gk20a_dbg_info("gfxp context attrib_cb_size=%d", + nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size); + nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size); + nvgpu_log_info(g, "gfxp context attrib_cb_size=%d", attrib_cb_size); err = gr_gp10b_alloc_buffer(vm, @@ -1112,7 +1113,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, u32 graphics_preempt_mode = 0; u32 compute_preempt_mode = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); if (err) @@ -1137,7 +1138,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, goto fail_free_gk20a_ctx; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; @@ -1215,7 +1216,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1224,21 +1225,21 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, gr_ctx = &tsg->gr_ctx; if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { - gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); + nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_graphics_preemption_options_o(), gfxp_preempt_option); } if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { - gk20a_dbg_info("CILP: %x", 
cilp_preempt_option); + nvgpu_log_info(g, "CILP: %x", cilp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cilp_preempt_option); } if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { - gk20a_dbg_info("CTA: %x", cta_preempt_option); + nvgpu_log_info(g, "CTA: %x", cta_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cta_preempt_option); @@ -1269,7 +1270,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); - gk20a_dbg_info("attrib cb addr : 0x%016x", addr); + nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr); g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> @@ -1315,7 +1316,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, } out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int gr_gp10b_dump_gr_status_regs(struct gk20a *g, @@ -1475,7 +1476,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms, u32 activity0, activity1, activity2, activity4; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); @@ -1500,7 +1501,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms, gr_activity_empty_or_preempted(activity4)); if (!gr_enabled || (!gr_busy && !ctxsw_active)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -1569,7 +1570,7 @@ void gr_gp10b_commit_global_bundle_cb(struct gk20a *g, data = min_t(u32, data, g->gr.min_gpm_fifo_depth); - gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", + nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d", g->gr.bundle_cb_token_limit, data); gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(), @@ -1626,7 +1627,7 @@ int gr_gp10b_init_fs_state(struct gk20a *g) { u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), @@ -1705,7 +1706,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a { int ret = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); ret = gk20a_disable_channel_tsg(g, fault_ch); if (ret) { @@ -1721,18 +1722,18 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a return ret; } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: tsgid: 0x%x", fault_ch->tsgid); if (gk20a_is_channel_marked_as_tsg(fault_ch)) { gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: preempted tsg"); } else { gk20a_fifo_issue_preempt(g, fault_ch->chid, false); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: preempted channel"); } @@ -1746,7 +1747,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct tsg_gk20a *tsg; struct nvgpu_gr_ctx *gr_ctx; - gk20a_dbg(gpu_dbg_fn | 
gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); tsg = tsg_gk20a_from_ch(fault_ch); if (!tsg) @@ -1755,7 +1756,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, gr_ctx = &tsg->gr_ctx; if (gr_ctx->cilp_preempt_pending) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP is already pending for chid %d", fault_ch->chid); return 0; @@ -1763,7 +1764,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, /* get ctx_id from the ucode image */ if (!gr_ctx->ctx_id_valid) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: looking up ctx id"); ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id); if (ret) { @@ -1773,7 +1774,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, gr_ctx->ctx_id_valid = true; } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: ctx id is 0x%x", gr_ctx->ctx_id); /* send ucode method to set ctxsw interrupt */ @@ -1795,10 +1796,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, return ret; } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: enabled ctxsw completion interrupt"); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: disabling channel %d", fault_ch->chid); @@ -1826,7 +1827,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g, struct tsg_gk20a *tsg; struct nvgpu_gr_ctx *gr_ctx; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); tsg = tsg_gk20a_from_ch(fault_ch); if (!tsg) @@ -1837,7 +1838,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g, /* The ucode is self-clearing, so all we need to do here is to clear cilp_preempt_pending. 
*/ if (!gr_ctx->cilp_preempt_pending) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP is already cleared for chid %d\n", fault_ch->chid); return 0; @@ -1878,7 +1879,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, NVGPU_PREEMPTION_MODE_COMPUTE_CILP); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", gpc, tpc, global_esr); if (cilp_enabled && sm_debugger_attached) { @@ -1900,19 +1901,19 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, if (warp_esr != 0 || (global_esr & global_mask) != 0) { *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n", gpc, tpc); if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n", gpc, tpc); g->ops.gr.suspend_all_sms(g, global_mask, false); gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); } else { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: STOP_TRIGGER from gpc %d tpc %d\n", gpc, tpc); g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true); @@ -1923,11 +1924,11 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, gpc, tpc, sm); g->ops.gr.clear_sm_hww(g, gpc, tpc, sm, global_esr_copy); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: HWWs cleared for gpc %d tpc %d\n", gpc, tpc); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); if (ret) { nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); @@ -1936,7 +1937,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", gpc, tpc); dbgr_control0 = set_field(dbgr_control0, @@ -1945,13 +1946,13 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: resume for gpc %d tpc %d\n", gpc, tpc); g->ops.gr.resume_single_sm(g, gpc, tpc, sm); *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); } *early_exit = true; @@ -1999,14 +2000,14 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g, int ret = 0; struct tsg_gk20a *tsg; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); /* * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR) * indicates that a CILP ctxsw save has finished */ if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | 
gpu_dbg_intr, "CILP: ctxsw save completed!\n"); /* now clear the interrupt */ @@ -2162,7 +2163,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_timeout timeout; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP preempt pending, waiting %lu msecs for preemption", gk20a_get_gr_idle_timeout(g)); @@ -2285,7 +2286,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, if (g->ops.gr.set_ctxsw_preemption_mode) { - gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " + nvgpu_log(g, gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " "graphics_preempt=%d compute_preempt=%d", ch->chid, ch->tsgid, diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c index 71764a7c..f74ca8f3 100644 --- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c @@ -41,7 +41,7 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g) u32 tmp; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r()); @@ -49,9 +49,9 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g) ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 * ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp); - gk20a_dbg(gpu_dbg_info, "L2 size: %d\n", ret); + nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -83,7 +83,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (max_comptag_lines == 0U) return 0; @@ -109,11 +109,11 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) /* must be a multiple of 64KB */ compbit_backing_size = roundup(compbit_backing_size, 64*1024); - gk20a_dbg_info("compbit backing store size : %d", + nvgpu_log_info(g, "compbit backing store size : %d", compbit_backing_size); - gk20a_dbg_info("max comptag lines : %d", + nvgpu_log_info(g, "max comptag lines : %d", max_comptag_lines); - gk20a_dbg_info("gobs_per_comptagline_per_slice: %d", + nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d", gobs_per_comptagline_per_slice); err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c index dde12854..5969e45d 100644 --- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c @@ -87,7 +87,7 @@ void mc_gp10b_isr_stall(struct gk20a *g) mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); - gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); + nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { active_engine_id = g->fifo.active_engines_list[engine_id_idx]; @@ -126,7 +126,7 @@ void mc_gp10b_isr_stall(struct gk20a *g) g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) g->ops.nvlink.isr(g); - gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); + nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); } diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c index 978b6f50..811697c3 100644 --- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B MMU * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -53,7 +53,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g) struct nvgpu_mem *inst_block = &mm->bar1.inst_block; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fb.set_mmu_page_size(g); @@ -73,7 +73,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g) err = gp10b_replayable_pagefault_buffer_init(g); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -87,7 +87,7 @@ int gp10b_init_bar2_vm(struct gk20a *g) /* BAR2 aperture size is 32MB */ mm->bar2.aperture_size = 32 << 20; - gk20a_dbg_info("bar2 vm size = 0x%x", mm->bar2.aperture_size); + nvgpu_log_info(g, "bar2 vm size = 0x%x", mm->bar2.aperture_size); mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K, mm->bar2.aperture_size - SZ_4K, @@ -115,12 +115,12 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g) struct nvgpu_mem *inst_block = &mm->bar2.inst_block; u64 inst_pa = nvgpu_inst_block_addr(g, inst_block); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fb.set_mmu_page_size(g); inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v()); - gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa); + nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", (u32)inst_pa); gk20a_writel(g, bus_bar2_block_r(), nvgpu_aperture_mask(g, inst_block, @@ -130,7 +130,7 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g) bus_bar2_block_mode_virtual_f() | bus_bar2_block_ptr_f(inst_pa)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -433,7 +433,7 @@ void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block, u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); u32 pdb_addr_hi = u64_hi32(pdb_addr); - gk20a_dbg_info("pde pa=0x%llx", pdb_addr); + nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr); nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), nvgpu_aperture_mask(g, vm->pdb.mem, diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c index c94d580a..ca111725 100644 --- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c @@ -37,8 +37,8 @@ #include #include -#define gp10b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gp10b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) /* PROD settings for ELPG sequencing registers*/ static struct pg_init_sequence_list _pginitseq_gp10b[] = { @@ -147,9 +147,9 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, struct pmu_cmd cmd; u32 seq; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); + gp10b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); if (g->pmu_lsf_pmu_wpr_init_done) { /* send message to load FECS falcon */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -164,13 +164,13 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, cmd.cmd.acr.boot_falcons.usevamask = 0; cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0; cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0; - gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", + gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", falconidmask); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return; } @@ -209,7 +209,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask) static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "GR PARAM cmd aborted"); @@ -217,7 +217,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, return; } - gp10b_dbg_pmu("GR PARAM is acknowledged from PMU %x \n", + gp10b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x \n", msg->msg.pg.msg_type); return; @@ -243,7 +243,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = g->ldiv_slowdown_factor; - gp10b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM "); + gp10b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM "); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_gr_param_msg, pmu, &seq, ~0); @@ -276,7 +276,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g) u32 reg_writes; u32 index; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->elpg_enabled) { reg_writes = ((sizeof(_pginitseq_gp10b) / @@ -288,7 +288,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -305,7 +305,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g) struct nvgpu_pmu *pmu = &g->pmu; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -333,7 +333,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g) if (err) return err; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c index 385bebbd..3f089545 100644 --- a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B RPFB * - * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -42,7 +42,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g) size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE * fifo_replay_fault_buffer_size_hw_entries_v(); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!g->mm.bar2_desc.gpu_va) { err = nvgpu_dma_alloc_map_sys(vm, rbfb_size, @@ -60,7 +60,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g) gk20a_writel(g, fifo_replay_fault_buffer_lo_r(), fifo_replay_fault_buffer_lo_base_f(addr_lo) | fifo_replay_fault_buffer_lo_enable_true_v()); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -75,14 +75,14 @@ u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g) { u32 get_idx = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r()); if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) nvgpu_err(g, "Error in replayable fault buffer"); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return get_idx; } @@ -90,13 +90,13 @@ u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g) { u32 put_idx = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r()); if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) nvgpu_err(g, "Error in UVM"); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return put_idx; } diff --git a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c index c69bd0bb..4f1de559 100644 --- a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B Therm * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -33,7 +33,7 @@ int gp10b_init_therm_setup_hw(struct gk20a *g) { u32 v; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* program NV_THERM registers */ gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | @@ -96,7 +96,7 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g) u32 active_engine_id = 0; struct fifo_gk20a *f = &g->fifo; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (engine_id = 0; engine_id < f->num_engines; engine_id++) { active_engine_id = f->active_engines_list[engine_id]; @@ -130,6 +130,6 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g) idle_filter &= ~therm_hubmmu_idle_filter_value_m(); gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } -- cgit v1.2.2
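
A minimal standalone sketch of the call-site conversion this commit applies throughout gp10b/ (old gk20a_dbg*() vs. new nvgpu_log*() taking a struct gk20a pointer). The struct and macro bodies below are simplified stand-ins for illustration only, not the real nvgpu definitions; only the argument shape at the call sites matches the patch.

```c
/*
 * Sketch of the gk20a_dbg* -> nvgpu_log* conversion.  Everything here is a
 * simplified stand-in (hypothetical bodies, printf instead of the kernel
 * logging backend); the real macros live in the nvgpu headers.
 */
#include <stdio.h>

struct gk20a { const char *name; };	/* stand-in for the real device struct */

enum { gpu_dbg_fn = 1 << 0, gpu_dbg_intr = 1 << 1, gpu_dbg_info = 1 << 2 };

/* Old style: no device pointer, so the message cannot identify the GPU. */
#define gk20a_dbg(mask, fmt, ...) \
	printf("[nvgpu] " fmt "\n", ##__VA_ARGS__)

/* New style: first argument is struct gk20a *, as nvgpu_log*() requires. */
#define nvgpu_log(g, mask, fmt, ...) \
	printf("[%s] " fmt "\n", (g)->name, ##__VA_ARGS__)
#define nvgpu_log_fn(g, fmt, ...) \
	nvgpu_log(g, gpu_dbg_fn, "%s " fmt, __func__, ##__VA_ARGS__)
#define nvgpu_log_info(g, fmt, ...) \
	nvgpu_log(g, gpu_dbg_info, fmt, ##__VA_ARGS__)

static void example_isr(struct gk20a *g, unsigned int ce_intr)
{
	/* Before: gk20a_dbg(gpu_dbg_intr, "ce isr %08x", ce_intr); */
	nvgpu_log(g, gpu_dbg_intr, "ce isr %08x", ce_intr);

	/* Before: gk20a_dbg_fn(""); note the converted calls pass " " */
	nvgpu_log_fn(g, " ");
}

int main(void)
{
	struct gk20a g = { .name = "gp10b" };

	example_isr(&g, 0x1u);
	return 0;
}
```

Where no struct gk20a pointer was available at a call site and threading one through would only serve logging, the commit drops the message instead, which is why the diff removes a few lines outright rather than converting them.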