From dd739fcb039d51606e9a5454ec0aab17bcb01965 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Wed, 18 Apr 2018 19:39:46 -0700
Subject: gpu: nvgpu: Remove gk20a_dbg* functions

Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are
intentionally left in place because they are still used from other
repositories.

Because the new functions do not work without a pointer to struct
gk20a, and piping it through just for logging is excessive, some log
messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/ce2_gk20a.c | 14 +-
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 46 ++--
 drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c | 6 +-
 drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 17 +-
 drivers/gpu/nvgpu/gk20a/fb_gk20a.c | 6 +-
 drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c | 36 +--
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 150 ++++++------
 drivers/gpu/nvgpu/gk20a/gk20a.c | 12 +-
 drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c | 82 ++++---
 drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c | 12 +-
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 364 ++++++++++++++---------------
 drivers/gpu/nvgpu/gk20a/hal.c | 2 +-
 drivers/gpu/nvgpu/gk20a/mc_gk20a.c | 8 +-
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 28 +--
 drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 52 +++--
 drivers/gpu/nvgpu/gk20a/pramin_gk20a.c | 4 +-
 drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c | 8 +-
 drivers/gpu/nvgpu/gk20a/regops_gk20a.c | 16 +-
 drivers/gpu/nvgpu/gk20a/therm_gk20a.c | 8 +-
 drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 18 +-
 20 files changed, 446 insertions(+), 443 deletions(-)

(limited to 'drivers/gpu/nvgpu/gk20a')

diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 0280bbbb..086d4e7b 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -50,21 +50,21 @@
 static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr)
 {
-	gk20a_dbg(gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n");
+	nvgpu_log(g, gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n");

 	return ce2_intr_status_nonblockpipe_pending_f();
 }

 static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr)
 {
-	gk20a_dbg(gpu_dbg_intr, "ce2 blocking pipe interrupt\n");
+	nvgpu_log(g, gpu_dbg_intr, "ce2 blocking pipe interrupt\n");

 	return ce2_intr_status_blockpipe_pending_f();
 }

 static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr)
 {
-	gk20a_dbg(gpu_dbg_intr, "ce2 launch error interrupt\n");
+	nvgpu_log(g, gpu_dbg_intr, "ce2 launch error interrupt\n");

 	return ce2_intr_status_launcherr_pending_f();
 }
@@ -74,7 +74,7 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 	u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());
 	u32 clear_intr = 0;

-	gk20a_dbg(gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);
+	nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);

 	/* clear blocking interrupts: they exibit broken behavior */
 	if (ce2_intr & ce2_intr_status_blockpipe_pending_f())
@@ -92,7 +92,7 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 	int ops = 0;
 	u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());

-	gk20a_dbg(gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr);
+	nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr);

 	if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) {
 		gk20a_writel(g, ce2_intr_status_r(),
@@ -340,7 +340,7 @@ int gk20a_init_ce_support(struct gk20a *g)
 		return 0;
 	}

-	gk20a_dbg(gpu_dbg_fn, "ce: init");
+	nvgpu_log(g, gpu_dbg_fn, "ce: init");

 	err
= nvgpu_mutex_init(&ce_app->app_mutex); if (err) @@ -355,7 +355,7 @@ int gk20a_init_ce_support(struct gk20a *g) ce_app->app_state = NVGPU_CE_ACTIVE; nvgpu_mutex_release(&ce_app->app_mutex); - gk20a_dbg(gpu_dbg_cde_ctx, "ce: init finished"); + nvgpu_log(g, gpu_dbg_cde_ctx, "ce: init finished"); return 0; } diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index e65ed278..21abdf9a 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c @@ -116,7 +116,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c) { struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.mm.init_inst_block(&c->inst_block, c->vm, c->vm->gmmu_page_sizes[gmmu_page_size_big]); @@ -208,7 +208,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch) void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt) { - gk20a_dbg_fn(""); + nvgpu_log_fn(ch->g, " "); if (gk20a_is_channel_marked_as_tsg(ch)) return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt); @@ -291,7 +291,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force) struct dbg_session_channel_data *ch_data, *tmp; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); WARN_ON(ch->g == NULL); @@ -351,7 +351,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force) /* if engine reset was deferred, perform it now */ nvgpu_mutex_acquire(&f->deferred_reset_mutex); if (g->fifo.deferred_reset_pending) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was" + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was" " deferred, running now"); /* if lock is already taken, a reset is taking place so no need to repeat */ @@ -365,7 +365,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force) if (!gk20a_channel_as_bound(ch)) goto unbind; - gk20a_dbg_info("freeing bound channel context, timeout=%ld", + nvgpu_log_info(g, "freeing bound channel context, timeout=%ld", timeout); #ifdef CONFIG_GK20A_CTXSW_TRACE @@ -626,7 +626,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, runlist_id = gk20a_fifo_get_gr_runlist_id(g); } - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ch = allocate_channel(f); if (ch == NULL) { @@ -765,7 +765,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, u32 free_count; u32 size = orig_size; - gk20a_dbg_fn("size %d", orig_size); + nvgpu_log_fn(c->g, "size %d", orig_size); if (!e) { nvgpu_err(c->g, @@ -779,7 +779,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, if (q->put + size > q->size) size = orig_size + (q->size - q->put); - gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d", + nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d", c->chid, q->get, q->put); free_count = (q->size - (q->put - q->get) - 1) % q->size; @@ -812,7 +812,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, nvgpu_smp_wmb(); e->valid = true; - gk20a_dbg_fn("done"); + nvgpu_log_fn(c->g, "done"); return 0; } @@ -1132,7 +1132,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, c->gpfifo.entry_num = gpfifo_size; c->gpfifo.get = c->gpfifo.put = 0; - gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d", + nvgpu_log_info(g, "channel %d : gpfifo_base 0x%016llx, size %d", c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num); g->ops.fifo.setup_userd(c); @@ -1184,7 +1184,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, g->ops.fifo.bind_channel(c); - 
gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up_priv_cmd: @@ -1400,7 +1400,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch) u64 pb_get; u64 new_pb_get; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* Get status and clear the timer */ nvgpu_raw_spinlock_acquire(&ch->timeout.lock); @@ -1480,7 +1480,7 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g) */ static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch) { - gk20a_dbg_fn(""); + nvgpu_log_fn(ch->g, " "); gk20a_channel_clean_up_jobs(ch, true); @@ -1499,7 +1499,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g) { int put; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* * Currently, the only work type is associated with a lock, which deals @@ -1596,7 +1596,7 @@ static int gk20a_channel_poll_worker(void *arg) struct nvgpu_timeout timeout; int get = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, watchdog_interval, NVGPU_TIMER_CPU_TIMER); @@ -1699,7 +1699,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch) { struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* * Warn if worker thread cannot run @@ -2142,12 +2142,12 @@ int gk20a_channel_suspend(struct gk20a *g) bool channels_in_use = false; u32 active_runlist_ids = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (chid = 0; chid < f->num_channels; chid++) { struct channel_gk20a *ch = &f->channel[chid]; if (gk20a_channel_get(ch)) { - gk20a_dbg_info("suspend channel %d", chid); + nvgpu_log_info(g, "suspend channel %d", chid); /* disable channel */ gk20a_disable_channel_tsg(g, ch); /* preempt the channel */ @@ -2175,7 +2175,7 @@ int gk20a_channel_suspend(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2186,11 +2186,11 @@ int gk20a_channel_resume(struct gk20a *g) bool channels_in_use = false; u32 active_runlist_ids = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (chid = 0; chid < f->num_channels; chid++) { if (gk20a_channel_get(&f->channel[chid])) { - gk20a_dbg_info("resume channel %d", chid); + nvgpu_log_info(g, "resume channel %d", chid); g->ops.fifo.bind_channel(&f->channel[chid]); channels_in_use = true; active_runlist_ids |= BIT(f->channel[chid].runlist_id); @@ -2201,7 +2201,7 @@ int gk20a_channel_resume(struct gk20a *g) if (channels_in_use) gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2210,7 +2210,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events) struct fifo_gk20a *f = &g->fifo; u32 chid; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* * Ensure that all pending writes are actually done before trying to diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c index 114386a2..0fc39bf4 100644 --- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A Cycle stats snapshots support (subsystem for gr_gk20a). * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -189,7 +189,7 @@ int css_hw_enable_snapshot(struct channel_gk20a *ch, perf_pmasys_mem_block_valid_true_f() | perf_pmasys_mem_block_target_lfb_f()); - gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n"); return 0; @@ -227,7 +227,7 @@ void css_hw_disable_snapshot(struct gr_gk20a *gr) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); data->hw_snapshot = NULL; - gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n"); } static void css_gr_free_shared_data(struct gr_gk20a *gr) diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index ce06e78b..97de7138 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c @@ -90,8 +90,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) { struct dbg_session_data *session_data; struct dbg_session_gk20a *dbg_s; + struct gk20a *g = ch->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); /* guard against the session list being modified */ nvgpu_mutex_acquire(&ch->dbg_s_lock); @@ -100,9 +101,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) dbg_session_data, dbg_s_entry) { dbg_s = session_data->dbg_s; if (dbg_s->dbg_events.events_enabled) { - gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d", + nvgpu_log(g, gpu_dbg_gpu_dbg, "posting event on session id %d", dbg_s->id); - gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", + nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending", dbg_s->dbg_events.num_pending_events); dbg_s->dbg_events.num_pending_events++; @@ -119,8 +120,9 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) struct dbg_session_data *session_data; struct dbg_session_gk20a *dbg_s; bool broadcast = false; + struct gk20a *g = ch->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); /* guard against the session list being modified */ nvgpu_mutex_acquire(&ch->dbg_s_lock); @@ -129,7 +131,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) dbg_session_data, dbg_s_entry) { dbg_s = session_data->dbg_s; if (dbg_s->broadcast_stop_trigger) { - gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, "stop trigger broadcast enabled"); broadcast = true; break; @@ -145,8 +147,9 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) { struct dbg_session_data *session_data; struct dbg_session_gk20a *dbg_s; + struct gk20a *g = ch->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); /* guard against the session list being modified */ nvgpu_mutex_acquire(&ch->dbg_s_lock); @@ -155,7 +158,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) dbg_session_data, dbg_s_entry) { dbg_s = session_data->dbg_s; if (dbg_s->broadcast_stop_trigger) { - gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, "stop trigger broadcast disabled"); dbg_s->broadcast_stop_trigger = false; } diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c 
b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c index e3052701..c4be3313 100644 --- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A memory interface * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,7 +36,7 @@ void fb_gk20a_reset(struct gk20a *g) { u32 val; - gk20a_dbg_info("reset gk20a fb"); + nvgpu_log_info(g, "reset gk20a fb"); g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | mc_enable_l2_enabled_f() | @@ -63,7 +63,7 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb) u32 addr_lo; u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* pagetables are considered sw states which are preserved after prepare_poweroff. When gk20a deinit releases those pagetables, diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c index 4fda0d2e..c9d7ea06 100644 --- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -137,7 +137,7 @@ static int gk20a_fecs_trace_get_write_index(struct gk20a *g) static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index) { - gk20a_dbg(gpu_dbg_ctxsw, "set read=%d", index); + nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index); return gr_gk20a_elpg_protected_call(g, (gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0)); } @@ -148,12 +148,12 @@ void gk20a_fecs_trace_hash_dump(struct gk20a *g) struct gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table"); + nvgpu_log(g, gpu_dbg_ctxsw, "dumping hash table"); nvgpu_mutex_acquire(&trace->hash_lock); hash_for_each(trace->pid_hash_table, bkt, ent, node) { - gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", + nvgpu_log(g, gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", ent, bkt, ent->context_ptr, ent->pid); } @@ -165,7 +165,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid struct gk20a_fecs_trace_hash_ent *he; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid); he = nvgpu_kzalloc(g, sizeof(*he)); @@ -190,7 +190,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr) struct gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "freeing hash entry context_ptr=%x", context_ptr); nvgpu_mutex_acquire(&trace->hash_lock); @@ -198,7 +198,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr) context_ptr) { if (ent->context_ptr == context_ptr) { hash_del(&ent->node); - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "freed hash entry=%p context_ptr=%x", ent, ent->context_ptr); nvgpu_kfree(g, ent); @@ -215,7 +215,7 @@ static void gk20a_fecs_trace_free_hash_table(struct gk20a *g) struct 
gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); nvgpu_mutex_acquire(&trace->hash_lock); hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) { @@ -235,7 +235,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr) nvgpu_mutex_acquire(&trace->hash_lock); hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) { if (ent->context_ptr == context_ptr) { - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "found context_ptr=%x -> pid=%d", ent->context_ptr, ent->pid); pid = ent->pid; @@ -265,7 +265,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record( trace, index); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "consuming record trace=%p read=%d record=%p", trace, index, r); if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) { @@ -284,7 +284,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr); new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)", r->context_ptr, cur_pid, r->new_context_ptr, new_pid); @@ -298,7 +298,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]); entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT; - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x timestamp=%llx context_id=%08x new_context_id=%08x", entry.tag, entry.timestamp, r->context_id, r->new_context_id); @@ -327,7 +327,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) continue; } - gk20a_dbg(gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld", + nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld", entry.tag, entry.context_id, entry.pid); if (!entry.context_id) @@ -368,7 +368,7 @@ int gk20a_fecs_trace_poll(struct gk20a *g) if (!cnt) goto done; - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "circular buffer: read=%d (mailbox=%d) write=%d cnt=%d", read, gk20a_fecs_trace_get_read_index(g), write, cnt); @@ -633,7 +633,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g, pid_t pid; u32 aperture; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "chid=%d context_ptr=%x inst_block=%llx", ch->chid, context_ptr, nvgpu_inst_block_addr(g, &ch->inst_block)); @@ -662,7 +662,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g, lo = u64_lo32(pa); hi = u64_hi32(pa); - gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi, + nvgpu_log(g, gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi, lo, GK20A_FECS_TRACE_NUM_RECORDS); nvgpu_mem_wr(g, mem, @@ -696,7 +696,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch) u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch); if (g->fecs_trace) { - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "ch=%p context_ptr=%x", ch, context_ptr); if (g->ops.fecs_trace.is_enabled(g)) { @@ -711,7 +711,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch) int gk20a_fecs_trace_reset(struct gk20a *g) { - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); if 
(!g->ops.fecs_trace.is_enabled(g)) return 0; diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 75d66968..cc63c3b8 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c @@ -94,7 +94,7 @@ u32 gk20a_fifo_get_engine_ids(struct gk20a *g, engine_id[instance_cnt] = active_engine_id; ++instance_cnt; } else { - gk20a_dbg_info("warning engine_id table sz is small %d", + nvgpu_log_info(g, "warning engine_id table sz is small %d", engine_id_sz); } } @@ -320,7 +320,7 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, { int ret = ENGINE_INVAL_GK20A; - gk20a_dbg_info("engine type %d", engine_type); + nvgpu_log_info(g, "engine type %d", engine_type); if (engine_type == top_device_info_type_enum_graphics_v()) ret = ENGINE_GR_GK20A; else if ((engine_type >= top_device_info_type_enum_copy0_v()) && @@ -354,7 +354,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) u32 gr_runlist_id = ~0; bool found_pbdma_for_runlist = false; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); f->num_engines = 0; @@ -367,7 +367,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) if (top_device_info_engine_v(table_entry)) { engine_id = top_device_info_engine_enum_v(table_entry); - gk20a_dbg_info("info: engine_id %d", + nvgpu_log_info(g, "info: engine_id %d", top_device_info_engine_enum_v(table_entry)); } @@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) if (top_device_info_runlist_v(table_entry)) { runlist_id = top_device_info_runlist_enum_v(table_entry); - gk20a_dbg_info("gr info: runlist_id %d", runlist_id); + nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id); runlist_bit = BIT(runlist_id); @@ -384,7 +384,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) pbdma_id++) { if (f->pbdma_map[pbdma_id] & runlist_bit) { - gk20a_dbg_info( + nvgpu_log_info(g, "gr info: pbdma_map[%d]=%d", pbdma_id, f->pbdma_map[pbdma_id]); @@ -402,13 +402,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) if (top_device_info_intr_v(table_entry)) { intr_id = top_device_info_intr_enum_v(table_entry); - gk20a_dbg_info("gr info: intr_id %d", intr_id); + nvgpu_log_info(g, "gr info: intr_id %d", intr_id); } if (top_device_info_reset_v(table_entry)) { reset_id = top_device_info_reset_enum_v(table_entry); - gk20a_dbg_info("gr info: reset_id %d", + nvgpu_log_info(g, "gr info: reset_id %d", reset_id); } } else if (entry == top_device_info_entry_engine_type_v()) { @@ -538,7 +538,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f) struct gk20a *g = f->g; unsigned int i = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_channel_worker_deinit(g); /* @@ -616,7 +616,7 @@ static void fifo_pbdma_exception_status(struct gk20a *g, get_exception_pbdma_info(g, eng_info); e = &eng_info->pbdma_exception_info; - gk20a_dbg_fn("pbdma_id %d, " + nvgpu_log_fn(g, "pbdma_id %d, " "id_type %s, id %d, chan_status %d, " "next_id_type %s, next_id %d, " "chsw_in_progress %d", @@ -657,7 +657,7 @@ static void fifo_engine_exception_status(struct gk20a *g, get_exception_engine_info(g, eng_info); e = &eng_info->engine_exception_info; - gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, " + nvgpu_log_fn(g, "engine_id %d, id_type %s, id %d, ctx_status %d, " "faulted %d, idle %d, ctxsw_in_progress %d, ", eng_info->engine_id, e->id_is_chid ? 
"chid" : "tsgid", e->id, e->ctx_status_v, @@ -745,7 +745,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) clean_up_runlist: gk20a_fifo_delete_runlist(f); - gk20a_dbg_fn("fail"); + nvgpu_log_fn(g, "fail"); return -ENOMEM; } @@ -784,7 +784,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) unsigned int i; u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* enable pmc pfifo */ g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); @@ -805,7 +805,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) timeout = gk20a_readl(g, fifo_fb_timeout_r()); timeout = set_field(timeout, fifo_fb_timeout_period_m(), fifo_fb_timeout_period_max_f()); - gk20a_dbg_info("fifo_fb_timeout reg val = 0x%08x", timeout); + nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout); gk20a_writel(g, fifo_fb_timeout_r(), timeout); /* write pbdma timeout value */ @@ -813,7 +813,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) timeout = gk20a_readl(g, pbdma_timeout_r(i)); timeout = set_field(timeout, pbdma_timeout_period_m(), pbdma_timeout_period_max_f()); - gk20a_dbg_info("pbdma_timeout reg val = 0x%08x", timeout); + nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", timeout); gk20a_writel(g, pbdma_timeout_r(i), timeout); } if (g->ops.fifo.apply_pb_timeout) @@ -837,10 +837,10 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f(); gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall); - gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); + nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); - gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, + nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, ~pbdma_intr_en_0_lbreq_enabled_f()); gk20a_writel(g, pbdma_intr_en_1_r(i), ~pbdma_intr_en_0_lbreq_enabled_f()); @@ -852,12 +852,12 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) /* clear and enable pfifo interrupt */ gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); mask = gk20a_fifo_intr_0_en_mask(g); - gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); + nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask); gk20a_writel(g, fifo_intr_en_0_r(), mask); - gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); + nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000"); gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -868,7 +868,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) unsigned int chid, i; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); f->g = g; @@ -945,7 +945,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) goto clean_up; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: @@ -972,10 +972,10 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) u64 userd_base; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (f->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -997,7 +997,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) nvgpu_err(g, "userd memory allocation failed"); goto clean_up; } - gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); + nvgpu_log(g, gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); userd_base = nvgpu_mem_get_addr(g, &f->userd); for (chid = 0; chid < f->num_channels; chid++) { @@ -1013,11 +1013,11 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) f->sw_ready = true; - 
gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: - gk20a_dbg_fn("fail"); + nvgpu_log_fn(g, "fail"); if (nvgpu_mem_is_valid(&f->userd)) { if (g->ops.mm.is_bar1_supported(g)) nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd); @@ -1032,7 +1032,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g) { u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r()); - gk20a_dbg(gpu_dbg_intr, "runlist event %08x", + nvgpu_log(g, gpu_dbg_intr, "runlist event %08x", runlist_event); gk20a_writel(g, fifo_intr_runlist_r(), runlist_event); @@ -1042,7 +1042,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) { struct fifo_gk20a *f = &g->fifo; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* test write, read through bar1 @ userd region before * turning on the snooping */ @@ -1053,7 +1053,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) u32 bar1_vaddr = f->userd.gpu_va; volatile u32 *cpu_vaddr = f->userd.cpu_va; - gk20a_dbg_info("test bar1 @ vaddr 0x%x", + nvgpu_log_info(g, "test bar1 @ vaddr 0x%x", bar1_vaddr); v = gk20a_bar1_readl(g, bar1_vaddr); @@ -1093,7 +1093,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) | fifo_bar1_base_valid_true_f()); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -1261,7 +1261,7 @@ void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, u32 fault_info; u32 addr_lo, addr_hi; - gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); + nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id); memset(mmfault, 0, sizeof(*mmfault)); @@ -1291,7 +1291,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) u32 engine_enum = ENGINE_INVAL_GK20A; struct fifo_engine_info_gk20a *engine_info; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!g) return; @@ -1489,7 +1489,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt) struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid]; struct channel_gk20a *ch; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fifo.disable_tsg(tsg); @@ -1556,7 +1556,7 @@ static bool gk20a_fifo_handle_mmu_fault( bool verbose = true; u32 grfifo_ctl; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->fifo.deferred_reset_pending = false; @@ -1693,7 +1693,7 @@ static bool gk20a_fifo_handle_mmu_fault( /* handled during channel free */ g->fifo.deferred_reset_pending = true; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm debugger attached," " deferring channel recovery to channel free"); } else { @@ -2196,6 +2196,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, struct channel_gk20a *ch; bool recover = false; bool progress = false; + struct gk20a *g = tsg->g; *verbose = false; *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; @@ -2221,7 +2222,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, * this resets timeout for channels that already completed their work */ if (progress) { - gk20a_dbg_info("progress on tsg=%d ch=%d", + nvgpu_log_info(g, "progress on tsg=%d ch=%d", tsg->tsgid, ch->chid); gk20a_channel_put(ch); *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; @@ -2239,7 +2240,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, * caused the problem, so set timeout error notifier for all channels. 
*/ if (recover) { - gk20a_dbg_info("timeout on tsg=%d ch=%d", + nvgpu_log_info(g, "timeout on tsg=%d ch=%d", tsg->tsgid, ch->chid); *ms = ch->timeout_accumulated_ms; gk20a_channel_put(ch); @@ -2311,7 +2312,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g) is_tsg, true, verbose, RC_TYPE_CTXSW_TIMEOUT); } else { - gk20a_dbg_info( + nvgpu_log_info(g, "fifo is waiting for ctx switch for %d ms, " "%s=%d", ms, is_tsg ? "tsg" : "ch", id); } @@ -2330,7 +2331,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) bool print_channel_reset_log = false; u32 handled = 0; - gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); + nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr); if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { /* pio mode is unused. this shouldn't happen, ever. */ @@ -2381,7 +2382,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) engine_id++) { u32 active_engine_id = g->fifo.active_engines_list[engine_id]; u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; - gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum, + nvgpu_log_fn(g, "enum:%d -> engine_id:%d", engine_enum, active_engine_id); fifo_pbdma_exception_status(g, &g->fifo.engine_info[active_engine_id]); @@ -2632,7 +2633,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr) for (i = 0; i < host_num_pbdma; i++) { if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { - gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); + nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i); clear_intr |= gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES); } @@ -2653,7 +2654,7 @@ void gk20a_fifo_isr(struct gk20a *g) * in a threaded interrupt context... */ nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); - gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); + nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); /* handle runlist update */ if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { @@ -2681,7 +2682,7 @@ int gk20a_fifo_nonstall_isr(struct gk20a *g) u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); u32 clear_intr = 0; - gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); + nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) clear_intr = fifo_intr_0_channel_intr_pending_f(); @@ -2769,7 +2770,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) int ret; unsigned int id_type; - gk20a_dbg_fn("%d", id); + nvgpu_log_fn(g, "%d", id); /* issue preempt */ gk20a_fifo_issue_preempt(g, id, is_tsg); @@ -2794,7 +2795,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) u32 mutex_ret = 0; u32 i; - gk20a_dbg_fn("%d", chid); + nvgpu_log_fn(g, "%d", chid); /* we have no idea which runlist we are using. lock all */ for (i = 0; i < g->fifo.max_runlists; i++) @@ -2821,7 +2822,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) u32 mutex_ret = 0; u32 i; - gk20a_dbg_fn("%d", tsgid); + nvgpu_log_fn(g, "%d", tsgid); /* we have no idea which runlist we are using. 
lock all */ for (i = 0; i < g->fifo.max_runlists; i++) @@ -2938,7 +2939,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, u32 mutex_ret; u32 err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); @@ -2988,12 +2989,12 @@ clean_up: nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); if (err) { - gk20a_dbg_fn("failed"); + nvgpu_log_fn(g, "failed"); if (gk20a_fifo_enable_engine_activity(g, eng_info)) nvgpu_err(g, "failed to enable gr engine activity"); } else { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } return err; } @@ -3129,8 +3130,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, bool skip_next = false; u32 tsgid, count = 0; u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); + struct gk20a *g = f->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* for each TSG, T, on this level, insert all higher-level channels and TSGs before inserting T. */ @@ -3156,9 +3158,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, return NULL; /* add TSG entry */ - gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid); + nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid); f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry); - gk20a_dbg_info("tsg runlist count %d runlist [0] %x [1] %x\n", + nvgpu_log_info(g, "tsg runlist count %d runlist [0] %x [1] %x\n", count, runlist_entry[0], runlist_entry[1]); runlist_entry += runlist_entry_words; count++; @@ -3177,10 +3179,10 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, return NULL; } - gk20a_dbg_info("add channel %d to runlist", + nvgpu_log_info(g, "add channel %d to runlist", ch->chid); f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); - gk20a_dbg_info( + nvgpu_log_info(g, "run list count %d runlist [0] %x [1] %x\n", count, runlist_entry[0], runlist_entry[1]); count++; @@ -3222,7 +3224,7 @@ int gk20a_fifo_set_runlist_interleave(struct gk20a *g, u32 runlist_id, u32 new_level) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->fifo.tsg[id].interleave_level = new_level; @@ -3313,7 +3315,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); - gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx", + nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx", runlist_id, (u64)runlist_iova); if (!runlist_iova) { @@ -3445,7 +3447,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid, u32 mutex_ret; u32 ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); runlist = &f->runlist_info[runlist_id]; @@ -3465,7 +3467,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid, int gk20a_fifo_suspend(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* stop bar1 snooping */ if (g->ops.mm.is_bar1_supported(g)) @@ -3476,7 +3478,7 @@ int gk20a_fifo_suspend(struct gk20a *g) gk20a_writel(g, fifo_intr_en_0_r(), 0); gk20a_writel(g, fifo_intr_en_1_r(), 0); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -3511,7 +3513,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) int ret = -ETIMEDOUT; u32 i, host_num_engines; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); @@ -3533,12 +3535,12 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) } while (!nvgpu_timeout_expired(&timeout)); if (ret) { - gk20a_dbg_info("cannot idle engine %u", i); + nvgpu_log_info(g, "cannot idle engine %u", i); break; } } - 
gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -3839,7 +3841,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a) { struct gk20a *g = ch_gk20a->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), @@ -3854,12 +3856,12 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c) u32 addr_hi; struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); addr_hi = u64_hi32(c->userd_iova); - gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", + nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx", c->chid, (u64)c->userd_iova); nvgpu_mem_wr32(g, &c->inst_block, @@ -3885,7 +3887,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c, struct gk20a *g = c->g; struct nvgpu_mem *mem = &c->inst_block; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); @@ -3946,7 +3948,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c) struct gk20a *g = c->g; struct nvgpu_mem *mem = &c->inst_block; - gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid); + nvgpu_log_info(g, "channel %d : set ramfc privileged_channel", c->chid); /* Enable HCE priv mode for phys mode transfer */ nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), @@ -3959,7 +3961,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c) struct nvgpu_mem *mem; u32 offset; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_mem_is_valid(&c->usermode_userd)) { mem = &c->usermode_userd; @@ -3987,16 +3989,16 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); if (err) return err; - gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx", + nvgpu_log_info(g, "channel %d inst block physical addr: 0x%16llx", ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -4086,7 +4088,7 @@ void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g, struct priv_cmd_entry *cmd, u32 off, u32 id, u32 thresh, u64 gpu_va) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); off = cmd->off + off; /* syncpoint_a */ @@ -4115,7 +4117,7 @@ void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g, { u32 off = cmd->off; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (wfi_cmd) { /* wfi */ nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index e8008937..e862f2e4 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c @@ -77,7 +77,7 @@ int gk20a_detect_chip(struct gk20a *g) gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev); - gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n", + nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n", g->params.gpu_arch, g->params.gpu_impl, g->params.gpu_rev); @@ -89,7 +89,7 @@ int gk20a_prepare_poweroff(struct gk20a *g) { int ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->ops.fifo.channel_suspend) { ret = g->ops.fifo.channel_suspend(g); @@ -126,7 +126,7 @@ int gk20a_finalize_poweron(struct gk20a *g) u32 nr_pages; #endif - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->power_on) return 0; @@ -434,7 +434,7 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount) struct gk20a *g = container_of(refcount, struct gk20a, refcount); - gk20a_dbg(gpu_dbg_shutdown, 
"Freeing GK20A struct!"); + nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!"); gk20a_ce_destroy(g); @@ -465,7 +465,7 @@ struct gk20a * __must_check gk20a_get(struct gk20a *g) */ success = nvgpu_ref_get_unless_zero(&g->refcount); - gk20a_dbg(gpu_dbg_shutdown, "GET: refs currently %d %s", + nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s", nvgpu_atomic_read(&g->refcount.refcount), success ? "" : "(FAILED)"); @@ -490,7 +490,7 @@ void gk20a_put(struct gk20a *g) * ... PUT: refs currently 2 * ... Freeing GK20A struct! */ - gk20a_dbg(gpu_dbg_shutdown, "PUT: refs currently %d", + nvgpu_log(g, gpu_dbg_shutdown, "PUT: refs currently %d", nvgpu_atomic_read(&g->refcount.refcount)); nvgpu_ref_put(&g->refcount, gk20a_free_cb); diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c index 7120059c..f829cb3a 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c @@ -1,9 +1,7 @@ /* - * drivers/video/tegra/host/gk20a/gr_ctx_gk20a.c - * * GK20A Graphics Context * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -79,7 +77,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) u32 i, major_v = ~0, major_v_hw, netlist_num; int net, max, err = -ENOENT; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->ops.gr_ctx.is_fw_defined()) { net = NETLIST_FINAL; @@ -114,63 +112,63 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) switch (netlist->regions[i].region_id) { case NETLIST_REGIONID_FECS_UCODE_DATA: - gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA"); + nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.fecs.data); if (err) goto clean_up; break; case NETLIST_REGIONID_FECS_UCODE_INST: - gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST"); + nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.fecs.inst); if (err) goto clean_up; break; case NETLIST_REGIONID_GPCCS_UCODE_DATA: - gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA"); + nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.gpccs.data); if (err) goto clean_up; break; case NETLIST_REGIONID_GPCCS_UCODE_INST: - gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST"); + nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.gpccs.inst); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_BUNDLE_INIT: - gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_bundle_init); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_METHOD_INIT: - gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_method_init); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_CTX_LOAD: - gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, 
&g->gr.ctx_vars.sw_ctx_load); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_NON_CTX_LOAD: - gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_non_ctx_load); if (err) goto clean_up; break; case NETLIST_REGIONID_SWVEIDBUNDLEINIT: - gk20a_dbg_info( + nvgpu_log_info(g, "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, @@ -179,56 +177,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) goto clean_up; break; case NETLIST_REGIONID_CTXREG_SYS: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.sys); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_GPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_TPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_ZCULL_GPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PM_SYS: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PM_GPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PM_TPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); if (err) @@ -236,110 +234,110 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) break; case NETLIST_REGIONID_BUFFER_SIZE: g->gr.ctx_vars.buffer_size = *src; - gk20a_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d", + nvgpu_log_info(g, "NETLIST_REGIONID_BUFFER_SIZE : %d", g->gr.ctx_vars.buffer_size); break; case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX: g->gr.ctx_vars.regs_base_index = *src; - gk20a_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", + nvgpu_log_info(g, "NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", g->gr.ctx_vars.regs_base_index); break; case NETLIST_REGIONID_MAJORV: major_v = *src; - gk20a_dbg_info("NETLIST_REGIONID_MAJORV : %d", + nvgpu_log_info(g, "NETLIST_REGIONID_MAJORV : %d", major_v); break; case NETLIST_REGIONID_NETLIST_NUM: netlist_num = *src; - gk20a_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d", + nvgpu_log_info(g, 
"NETLIST_REGIONID_NETLIST_NUM : %d", netlist_num); break; case NETLIST_REGIONID_CTXREG_PMPPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_CTXREG_SYS: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_SYS"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_FBP_CTXREGS: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_CTXREG_GPC: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_FBP_ROUTER: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_ROUTER"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_GPC_ROUTER: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_GPC_ROUTER"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMLTC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMLTC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMFBPA: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMFBPA"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_SYS_ROUTER: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_SYS_ROUTER"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_PMA: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_PMA"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMROP: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMROP"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMUCGPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMUCGPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_ETPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ETPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); if (err) @@ -347,13 +345,13 @@ 
static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) break; default: - gk20a_dbg_info("unrecognized region %d skipped", i); + nvgpu_log_info(g, "unrecognized region %d skipped", i); break; } } if (net != NETLIST_FINAL && major_v != major_v_hw) { - gk20a_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x", + nvgpu_log_info(g, "skip %s: major_v 0x%08x doesn't match hw 0x%08x", name, major_v, major_v_hw); goto clean_up; } @@ -362,7 +360,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) g->gr.netlist = net; nvgpu_release_firmware(g, netlist_fw); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); goto done; clean_up: @@ -403,7 +401,7 @@ clean_up: done: if (g->gr.ctx_vars.valid) { - gk20a_dbg_info("netlist image %s loaded", name); + nvgpu_log_info(g, "netlist image %s loaded", name); return 0; } else { nvgpu_err(g, "failed to load netlist image!!"); diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c index 9674e2d6..01c7ed3c 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c @@ -1,9 +1,7 @@ /* - * drivers/video/tegra/host/gk20a/gr_ctx_sim_gk20a.c - * * GK20A Graphics Context for Simulation * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -35,7 +33,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) int err = 0; u32 i, temp; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, "querying grctx info from chiplib"); g->gr.ctx_vars.dynamic = true; @@ -250,7 +248,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) i, &l[i].value); } - gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); + nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) { struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l; g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR", @@ -259,7 +257,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) i, &l[i].index); g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE", i, &l[i].value); - gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, + nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "addr:0x%#08x index:0x%08x value:0x%08x", l[i].addr, l[i].index, l[i].value); } @@ -269,7 +267,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0, &g->gr.ctx_vars.regs_base_index); - gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); + nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); return 0; fail: nvgpu_err(g, "failed querying grctx info from chiplib"); diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 86111321..00f26650 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -122,7 +122,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g, *ctx_id = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_context_id_o()); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); nvgpu_mem_end(g, mem); @@ -220,7 +220,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a 
*g) const u32 *ucode_u32_data; u32 checksum; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) | gr_gpccs_dmemc_blk_f(0) | @@ -245,7 +245,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g) gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); checksum += ucode_u32_data[i]; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static void gr_gk20a_load_falcon_imem(struct gk20a *g) @@ -255,7 +255,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g) u32 tag, i, pad_start, pad_end; u32 checksum; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); cfg = gk20a_readl(g, gr_fecs_cfg_r()); fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); @@ -343,7 +343,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, bool ctx_status_invalid; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_engine_id = gk20a_fifo_get_gr_engine_id(g); @@ -372,7 +372,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, if (!gr_enabled || ctx_status_invalid || (!gr_busy && !ctxsw_active)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -398,7 +398,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) return 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); @@ -406,7 +406,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, val = gk20a_readl(g, gr_status_r()); if (!gr_status_fe_method_lower_v(val)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -430,7 +430,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, u32 check = WAIT_UCODE_LOOP; u32 reg; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (sleepduringwait) delay = GR_IDLE_CHECK_DEFAULT; @@ -532,7 +532,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, return -1; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -618,7 +618,7 @@ int gr_gk20a_disable_ctxsw(struct gk20a *g) { int err = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); nvgpu_mutex_acquire(&g->ctxsw_disable_lock); g->ctxsw_disable_count++; @@ -635,7 +635,7 @@ int gr_gk20a_enable_ctxsw(struct gk20a *g) { int err = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); nvgpu_mutex_acquire(&g->ctxsw_disable_lock); g->ctxsw_disable_count--; @@ -669,7 +669,7 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va) u32 addr_lo; u32 addr_hi; - gk20a_dbg_fn(""); + nvgpu_log_fn(c->g, " "); addr_lo = u64_lo32(gpu_va) >> 12; addr_hi = u64_hi32(gpu_va); @@ -775,7 +775,7 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, u32 data = fecs_current_ctx_data(g, &c->inst_block); u32 ret; - gk20a_dbg_info("bind channel %d inst ptr 0x%08x", + nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x", c->chid, inst_base_ptr); ret = gr_gk20a_submit_fecs_method_op(g, @@ -823,7 +823,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c) struct nvgpu_mem *ctxheader = &ctx->mem; int ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -905,7 +905,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, u64 addr; u32 size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -931,7 +931,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, if (size == 
g->ops.gr.pagepool_default_size(g)) size = gr_scc_pagepool_total_pages_hwmax_v(); - gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d", + nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d", addr, size); g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch); @@ -944,7 +944,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, size = gr->bundle_cb_default_size; - gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d", + nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %d", addr, size); g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch); @@ -955,7 +955,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) << (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); - gk20a_dbg_info("attrib cb addr : 0x%016llx", addr); + nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr); g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch); g->ops.gr.commit_global_cb_manager(g, c, patch); @@ -976,7 +976,7 @@ int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c) u32 pe_vaf; u32 pe_vsc_vpc; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r()); pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); @@ -1036,7 +1036,7 @@ int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr) if (!gr->map_tiles) return -1; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, gr_crstr_map_table_cfg_r(), gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) | @@ -1219,7 +1219,7 @@ int gr_gk20a_init_fs_state(struct gk20a *g) u32 reg_index; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->ops.gr.init_sm_id_table) { err = g->ops.gr.init_sm_id_table(g); @@ -1302,7 +1302,7 @@ int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type) struct gk20a *g = c->g; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ret = gr_gk20a_submit_fecs_method_op(g, (struct fecs_method_op_gk20a) { @@ -1411,7 +1411,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g, struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init; u32 last_method_data = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1647,7 +1647,7 @@ clean_up: if (err) nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); nvgpu_mem_end(g, gold_mem); nvgpu_mem_end(g, gr_mem); @@ -1666,7 +1666,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, u32 data; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1732,7 +1732,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, struct nvgpu_mem *ctxheader = &ctx->mem; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1884,7 +1884,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g, int ret = 0; struct nvgpu_mem *mem; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1991,7 +1991,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g, static void gr_gk20a_start_falcon_ucode(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), gr_fecs_ctxsw_mailbox_clear_value_f(~0)); @@ -2002,7 +2002,7 @@ static void gr_gk20a_start_falcon_ucode(struct gk20a *g) gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1)); gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, 
"done"); } static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) @@ -2392,7 +2392,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), @@ -2419,7 +2419,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) gr_gk20a_load_falcon_with_bootloader(g); g->gr.skip_ucode_init = true; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2427,7 +2427,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) { u32 ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL, GR_IS_UCODE_OP_EQUAL, @@ -2448,7 +2448,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) gk20a_writel(g, gr_fecs_method_push_r(), gr_fecs_method_push_adr_set_watchdog_timeout_f()); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2463,7 +2463,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) .cond.fail = GR_IS_UCODE_OP_SKIP, }; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* query ctxsw image sizes, if golden context is not created */ if (!g->gr.ctx_vars.golden_image_initialized) { op.method.addr = @@ -2496,7 +2496,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) g->gr.ctx_vars.priv_access_map_size = 512 * 1024; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2543,7 +2543,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) @@ -2557,11 +2557,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * gr_scc_pagepool_total_pages_byte_granularity_v(); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); - gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); + nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size); err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR], cb_buffer_size); @@ -2576,7 +2576,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) goto clean_up; } - gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); + nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size); err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL], pagepool_buffer_size); @@ -2591,7 +2591,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) goto clean_up; } - gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); + nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size); err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE], attr_buffer_size); @@ -2606,7 +2606,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) goto clean_up; } - gk20a_dbg_info("golden_image_size : %d", + nvgpu_log_info(g, "golden_image_size : %d", gr->ctx_vars.golden_image_size); err = gk20a_gr_alloc_ctx_buffer(g, @@ -2615,7 +2615,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) if (err) goto clean_up; - gk20a_dbg_info("priv_access_map_size : %d", + nvgpu_log_info(g, "priv_access_map_size : %d", gr->ctx_vars.priv_access_map_size); err = gk20a_gr_alloc_ctx_buffer(g, @@ -2625,7 +2625,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) if (err) goto clean_up; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: @@ -2643,7 +2643,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct gk20a *g, int *g_bfr_index = 
gr_ctx->global_ctx_buffer_index; u32 i; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { if (g_bfr_index[i]) { @@ -2679,7 +2679,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_mem *mem; u64 gpu_va; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -2780,7 +2780,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g, struct gr_gk20a *gr = &g->gr; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr->ctx_vars.buffer_size == 0) return 0; @@ -2835,7 +2835,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g, void gr_gk20a_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr_ctx->mem.gpu_va) { gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx); @@ -2881,7 +2881,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, u32 alloc_size; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -2899,7 +2899,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, if (err) return err; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2909,7 +2909,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g, { struct patch_desc *patch_ctx = &gr_ctx->patch_ctx; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (patch_ctx->mem.gpu_va) nvgpu_gmmu_unmap(vm, &patch_ctx->mem, @@ -2925,7 +2925,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g, { struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (pm_ctx->mem.gpu_va) { nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va); @@ -2942,7 +2942,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) struct tsg_gk20a *tsg = NULL; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* an address space needs to have been bound at this point.*/ if (!gk20a_channel_as_bound(c) && !c->vm) { @@ -3047,7 +3047,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; out: /* 1. 
gr_ctx, patch_ctx and global ctx buffer mapping @@ -3062,7 +3062,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr) { struct gk20a *g = gr->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_gk20a_free_cyclestats_snapshot_data(g); @@ -3322,35 +3322,35 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) sm_per_tpc * sizeof(struct sm_info)); gr->no_of_sm = 0; - gk20a_dbg_info("fbps: %d", gr->num_fbps); - gk20a_dbg_info("max_gpc_count: %d", gr->max_gpc_count); - gk20a_dbg_info("max_fbps_count: %d", gr->max_fbps_count); - gk20a_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); - gk20a_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); - gk20a_dbg_info("max_tpc_count: %d", gr->max_tpc_count); - gk20a_dbg_info("sys_count: %d", gr->sys_count); - gk20a_dbg_info("gpc_count: %d", gr->gpc_count); - gk20a_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc); - gk20a_dbg_info("tpc_count: %d", gr->tpc_count); - gk20a_dbg_info("ppc_count: %d", gr->ppc_count); + nvgpu_log_info(g, "fbps: %d", gr->num_fbps); + nvgpu_log_info(g, "max_gpc_count: %d", gr->max_gpc_count); + nvgpu_log_info(g, "max_fbps_count: %d", gr->max_fbps_count); + nvgpu_log_info(g, "max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); + nvgpu_log_info(g, "max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); + nvgpu_log_info(g, "max_tpc_count: %d", gr->max_tpc_count); + nvgpu_log_info(g, "sys_count: %d", gr->sys_count); + nvgpu_log_info(g, "gpc_count: %d", gr->gpc_count); + nvgpu_log_info(g, "pe_count_per_gpc: %d", gr->pe_count_per_gpc); + nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count); + nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_tpc_count[%d] : %d", + nvgpu_log_info(g, "gpc_tpc_count[%d] : %d", gpc_index, gr->gpc_tpc_count[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_zcb_count[%d] : %d", + nvgpu_log_info(g, "gpc_zcb_count[%d] : %d", gpc_index, gr->gpc_zcb_count[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_ppc_count[%d] : %d", + nvgpu_log_info(g, "gpc_ppc_count[%d] : %d", gpc_index, gr->gpc_ppc_count[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_skip_mask[%d] : %d", + nvgpu_log_info(g, "gpc_skip_mask[%d] : %d", gpc_index, gr->gpc_skip_mask[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) - gk20a_dbg_info("pes_tpc_count[%d][%d] : %d", + nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d", pes_index, gpc_index, gr->pes_tpc_count[pes_index][gpc_index]); @@ -3358,7 +3358,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) - gk20a_dbg_info("pes_tpc_mask[%d][%d] : %d", + nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d", pes_index, gpc_index, gr->pes_tpc_mask[pes_index][gpc_index]); @@ -3367,16 +3367,16 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) g->ops.gr.calc_global_ctx_buffer_size(g); gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(); - gk20a_dbg_info("bundle_cb_default_size: %d", + nvgpu_log_info(g, "bundle_cb_default_size: %d", gr->bundle_cb_default_size); - gk20a_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); - gk20a_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); - 
gk20a_dbg_info("attrib_cb_default_size: %d", + nvgpu_log_info(g, "min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); + nvgpu_log_info(g, "bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); + nvgpu_log_info(g, "attrib_cb_default_size: %d", gr->attrib_cb_default_size); - gk20a_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size); - gk20a_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size); - gk20a_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size); - gk20a_dbg_info("timeslice_mode: %d", gr->timeslice_mode); + nvgpu_log_info(g, "attrib_cb_size: %d", gr->attrib_cb_size); + nvgpu_log_info(g, "alpha_cb_default_size: %d", gr->alpha_cb_default_size); + nvgpu_log_info(g, "alpha_cb_size: %d", gr->alpha_cb_size); + nvgpu_log_info(g, "timeslice_mode: %d", gr->timeslice_mode); return 0; @@ -3582,7 +3582,7 @@ clean_up: if (ret) nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -4094,7 +4094,7 @@ clean_up: int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, struct zbc_entry *zbc_val) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return gr_gk20a_elpg_protected_call(g, gr_gk20a_add_zbc(g, gr, zbc_val)); @@ -4197,10 +4197,10 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (zcull_num_entries >= 8) { - gk20a_dbg_fn("map0"); + nvgpu_log_fn(g, "map0"); val = gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f( zcull_map_tiles[0]) | @@ -4223,7 +4223,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, } if (zcull_num_entries >= 16) { - gk20a_dbg_fn("map1"); + nvgpu_log_fn(g, "map1"); val = gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f( zcull_map_tiles[8]) | @@ -4246,7 +4246,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, } if (zcull_num_entries >= 24) { - gk20a_dbg_fn("map2"); + nvgpu_log_fn(g, "map2"); val = gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f( zcull_map_tiles[16]) | @@ -4269,7 +4269,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, } if (zcull_num_entries >= 32) { - gk20a_dbg_fn("map3"); + nvgpu_log_fn(g, "map3"); val = gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f( zcull_map_tiles[24]) | @@ -4452,7 +4452,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g) u32 last_method_data = 0; u32 i, err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* init mmu debug buffer */ addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem); @@ -4613,13 +4613,13 @@ restore_fe_go_idle: } out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } static void gr_gk20a_load_gating_prod(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* slcg prod values */ if (g->ops.clock_gating.slcg_bus_load_gating_prod) @@ -4657,7 +4657,7 @@ static void gr_gk20a_load_gating_prod(struct gk20a *g) if (g->ops.clock_gating.pg_gr_load_gating_prod) g->ops.clock_gating.pg_gr_load_gating_prod(g, true); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static int gk20a_init_gr_prepare(struct gk20a *g) @@ -4703,7 +4703,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) bool fecs_scrubbing; bool gpccs_scrubbing; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / @@ -4719,7 +4719,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) gr_gpccs_dmactl_imem_scrubbing_m()); if (!fecs_scrubbing && !gpccs_scrubbing) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -4746,7 +4746,7 @@ out: if (err) 
nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -4756,7 +4756,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g) struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; u32 i, err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* enable interrupts */ gk20a_writel(g, gr_intr_r(), ~0); @@ -4780,7 +4780,7 @@ out: if (err) nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -4810,7 +4810,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g) map_bit = whitelist[w] >> 2; map_byte = map_bit >> 3; map_shift = map_bit & 0x7; /* i.e. 0-7 */ - gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d", + nvgpu_log_info(g, "access map addr:0x%x byte:0x%x bit:%d", whitelist[w], map_byte, map_shift); x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32)); x |= 1 << ( @@ -4828,10 +4828,10 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) struct gr_gk20a *gr = &g->gr; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -4888,7 +4888,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) if (g->ops.gr.create_gr_sysfs) g->ops.gr.create_gr_sysfs(g); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: @@ -4906,7 +4906,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) u32 size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); size = 0; @@ -4947,7 +4947,7 @@ int gk20a_init_gr_support(struct gk20a *g) { u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* this is required before gr_gk20a_init_ctx_state */ nvgpu_mutex_init(&g->gr.fecs_mutex); @@ -4999,7 +4999,7 @@ void gk20a_gr_wait_initialized(struct gk20a *g) void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { gk20a_writel(g, @@ -5046,7 +5046,7 @@ int gk20a_enable_gr_hw(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_init_gr_prepare(g); if (err) @@ -5056,7 +5056,7 @@ int gk20a_enable_gr_hw(struct gk20a *g) if (err) return err; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -5163,7 +5163,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g, static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); nvgpu_err(g, @@ -5174,7 +5174,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); /* This is an unrecoverable error, reset is needed */ @@ -5202,7 +5202,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g, static int gk20a_gr_handle_illegal_class(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); nvgpu_err(g, @@ -5243,7 +5243,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g, { u32 gr_class_error; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_class_error = gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); @@ -5274,7 +5274,7 @@ static int gk20a_gr_handle_class_error(struct gk20a 
*g, static int gk20a_gr_handle_firmware_method(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); @@ -5450,7 +5450,7 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g, } nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); #endif - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); return 0; } @@ -5543,7 +5543,7 @@ int gk20a_gr_lock_down_sm(struct gk20a *g, u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); u32 dbgr_control0; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); /* assert stop trigger */ @@ -5582,7 +5582,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, bool sm_debugger_attached; u32 global_esr, warp_esr, global_mask; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); sm_debugger_attached = g->ops.gr.sm_debugger_attached(g); @@ -5597,7 +5597,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, return -EFAULT; } - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr); gr_gk20a_elpg_protected_call(g, @@ -5617,7 +5617,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, } if (early_exit) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "returning early"); return ret; } @@ -5640,13 +5640,13 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, gk20a_writel(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset, tpc_exception_en); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); } /* if a debugger is present and an error has occurred, do a warp sync */ if (!ignore_debugger && ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) { - gk20a_dbg(gpu_dbg_intr, "warp sync needed"); + nvgpu_log(g, gpu_dbg_intr, "warp sync needed"); do_warp_sync = true; } @@ -5660,7 +5660,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, } if (ignore_debugger) - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "ignore_debugger set, skipping event posting"); else *post_event |= true; @@ -5677,11 +5677,11 @@ int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; u32 esr; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); esr = gk20a_readl(g, gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); gk20a_writel(g, gr_gpc0_tpc0_tex_m_hww_esr_r() + offset, @@ -5706,7 +5706,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, + offset); u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: pending exception 0x%x", gpc, tpc, tpc_exception); @@ -5715,7 +5715,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) { u32 
esr_sm_sel, sm; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: SM exception pending", gpc, tpc); if (g->ops.gr.handle_tpc_sm_ecc_exception) @@ -5729,7 +5729,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, if (!(esr_sm_sel & (1 << sm))) continue; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: SM%d exception pending", gpc, tpc, sm); @@ -5750,7 +5750,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, /* check if a tex exeption is pending */ if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) == gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: TEX exception pending", gpc, tpc); ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event); } @@ -5771,13 +5771,13 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, u32 exception1 = gk20a_readl(g, gr_exception1_r()); u32 gpc_exception; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, " "); for (gpc = 0; gpc < gr->gpc_count; gpc++) { if ((exception1 & (1 << gpc)) == 0) continue; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d exception pending", gpc); gpc_offset = gk20a_gr_gpc_offset(g, gpc); @@ -5791,7 +5791,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, (1 << tpc)) == 0) continue; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d: TPC%d exception pending", gpc, tpc); ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc, @@ -5860,8 +5860,8 @@ int gk20a_gr_isr(struct gk20a *g) u32 gr_engine_id; u32 global_esr = 0; - gk20a_dbg_fn(""); - gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr); + nvgpu_log_fn(g, " "); + nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr); if (!gr_intr) return 0; @@ -5896,7 +5896,7 @@ int gk20a_gr_isr(struct gk20a *g) nvgpu_err(g, "ch id is INVALID 0xffffffff"); } - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "channel %d: addr 0x%08x, " "data 0x%08x 0x%08x," "ctx 0x%08x, offset 0x%08x, " @@ -5968,7 +5968,7 @@ int gk20a_gr_isr(struct gk20a *g) * register using set_falcon[4] */ if (gr_intr & gr_intr_firmware_method_pending_f()) { need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); gk20a_writel(g, gr_intr_r(), gr_intr_firmware_method_reset_f()); gr_intr &= ~gr_intr_firmware_method_pending_f(); @@ -5977,7 +5977,7 @@ int gk20a_gr_isr(struct gk20a *g) if (gr_intr & gr_intr_exception_pending_f()) { u32 exception = gk20a_readl(g, gr_exception_r()); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); if (exception & gr_exception_fe_m()) { u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); @@ -6057,7 +6057,7 @@ int gk20a_gr_isr(struct gk20a *g) if (exception & gr_exception_gpc_m() && need_reset == 0) { bool post_event = false; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC exception pending"); fault_ch = gk20a_fifo_channel_from_chid(g, @@ -6133,7 +6133,7 @@ int 
gk20a_gr_nonstall_isr(struct gk20a *g) int ops = 0; u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r()); - gk20a_dbg(gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); + nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); if (gr_intr & gr_intr_nonstall_trap_pending_f()) { /* Clear the interrupt */ @@ -6201,7 +6201,7 @@ int gk20a_gr_suspend(struct gk20a *g) { u32 ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); @@ -6227,7 +6227,7 @@ int gk20a_gr_suspend(struct gk20a *g) g->gr.initialized = false; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -6250,7 +6250,7 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, { u32 gpc_addr; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* setup defaults */ *addr_type = CTXSW_ADDR_TYPE_SYS; @@ -6338,7 +6338,7 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr, { u32 ppc_num; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++) priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), @@ -6369,12 +6369,12 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, t = 0; *num_registers = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, &gpc_num, &tpc_num, &ppc_num, &be_num, &broadcast_flags); - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); if (err) return err; @@ -6428,7 +6428,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || (addr_type == CTXSW_ADDR_TYPE_ETPC)) && g->ops.gr.egpc_etpc_priv_addr_table) { - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, broadcast_flags, priv_addr_table, &t); } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { @@ -6477,11 +6477,11 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * sm_per_tpc; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* implementation is crossed-up if either of these happen */ if (max_offsets > potential_offsets) { - gk20a_dbg_fn("max_offsets > potential_offsets"); + nvgpu_log_fn(g, "max_offsets > potential_offsets"); return -EINVAL; } @@ -6490,7 +6490,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); if (!priv_registers) { - gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); + nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); err = PTR_ERR(priv_registers); goto cleanup; } @@ -6502,7 +6502,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, &num_registers); if ((max_offsets > 1) && (num_registers > max_offsets)) { - gk20a_dbg_fn("max_offsets = %d, num_registers = %d", + nvgpu_log_fn(g, "max_offsets = %d, num_registers = %d", max_offsets, num_registers); err = -EINVAL; goto cleanup; @@ -6512,7 +6512,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, 
num_registers = 1; if (!g->gr.ctx_vars.local_golden_image) { - gk20a_dbg_fn("no context switch header info to work with"); + nvgpu_log_fn(g, "no context switch header info to work with"); err = -EINVAL; goto cleanup; } @@ -6525,7 +6525,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, g->gr.ctx_vars.golden_image_size, &priv_offset); if (err) { - gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", + nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", addr); /*, grPriRegStr(addr)));*/ goto cleanup; } @@ -6558,7 +6558,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * sm_per_tpc; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* implementation is crossed-up if either of these happen */ if (max_offsets > potential_offsets) @@ -6569,7 +6569,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); if (!priv_registers) { - gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); + nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); return -ENOMEM; } memset(offsets, 0, sizeof(u32) * max_offsets); @@ -6588,7 +6588,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, num_registers = 1; if (!g->gr.ctx_vars.local_golden_image) { - gk20a_dbg_fn("no context switch header info to work with"); + nvgpu_log_fn(g, "no context switch header info to work with"); err = -EINVAL; goto cleanup; } @@ -6598,7 +6598,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, priv_registers[i], &priv_offset); if (err) { - gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", + nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", addr); /*, grPriRegStr(addr)));*/ goto cleanup; } @@ -6684,7 +6684,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, g->ops.gr.init_sm_dsm_reg_info(); g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); for (reg = 0; reg < num_ovr_perf_regs; reg++) { for (gpc = 0; gpc < num_gpc; gpc++) { @@ -6754,13 +6754,11 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, static inline bool check_main_image_header_magic(u8 *context) { u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o()); - gk20a_dbg(gpu_dbg_gpu_dbg, "main image magic=0x%x", magic); return magic == ctxsw_prog_main_image_magic_value_v_value_v(); } static inline bool check_local_header_magic(u8 *context) { u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o()); - gk20a_dbg(gpu_dbg_gpu_dbg, "local magic=0x%x", magic); return magic == ctxsw_prog_local_magic_value_v_value_v(); } @@ -6823,14 +6821,14 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, else return -EINVAL; - gk20a_dbg_info(" gpc = %d tpc = %d", + nvgpu_log_info(g, " gpc = %d tpc = %d", gpc_num, tpc_num); } else if ((g->ops.gr.is_etpc_addr) && g->ops.gr.is_etpc_addr(g, addr)) { g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); gpc_base = g->ops.gr.get_egpc_base(g); } else { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "does not exist in extended region"); return -EINVAL; } @@ -6857,7 +6855,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, data32 = *(u32 *)(context + 
ctxsw_prog_main_extended_buffer_ctl_o()); ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32); if (0 == ext_priv_size) { - gk20a_dbg_info(" No extended memory in context buffer"); + nvgpu_log_info(g, " No extended memory in context buffer"); return -EINVAL; } ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32); @@ -6891,7 +6889,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) { sm_dsm_perf_reg_id = i; - gk20a_dbg_info("register match: 0x%08x", + nvgpu_log_info(g, "register match: 0x%08x", sm_dsm_perf_regs[i]); chk_addr = (gpc_base + gpc_stride * gpc_num) + @@ -6921,7 +6919,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) { sm_dsm_perf_ctrl_reg_id = i; - gk20a_dbg_info("register match: 0x%08x", + nvgpu_log_info(g, "register match: 0x%08x", sm_dsm_perf_ctrl_regs[i]); chk_addr = (gpc_base + gpc_stride * gpc_num) + @@ -7032,7 +7030,7 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g, u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE); u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); if (!g->gr.ctx_vars.valid) return -EINVAL; @@ -7215,12 +7213,12 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, u8 *context; u32 offset_to_segment; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, &gpc_num, &tpc_num, &ppc_num, &be_num, &broadcast_flags); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr_type = %d, broadcast_flags: %08x", addr_type, broadcast_flags); if (err) @@ -7243,7 +7241,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, } data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); /* If found in Ext buffer, ok. * If it failed and we expected to find it there (quad offset) @@ -7253,7 +7251,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, addr, is_quad, quad, context_buffer, context_buffer_size, priv_offset); if (!err || (err && is_quad)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "err = %d, is_quad = %s", err, is_quad ? 
"true" : false); return err; @@ -7357,7 +7355,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, num_tpcs) << 2); } } else { - gk20a_dbg_fn("Unknown address type."); + nvgpu_log_fn(g, "Unknown address type."); return -EINVAL; } err = gr_gk20a_process_context_buffer_priv_segment(g, @@ -7668,7 +7666,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps; if (hwpm_ctxsw_buffer_size == 0) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "no PM Ctxsw buffer memory in context buffer"); return -EINVAL; } @@ -7760,10 +7758,10 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map; g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count; - gk20a_dbg_info("Reg Addr => HWPM Ctxt switch buffer offset"); + nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset"); for (i = 0; i < count; i++) - gk20a_dbg_info("%08x => %08x", map[i].addr, map[i].offset); + nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset); return 0; cleanup: @@ -7785,7 +7783,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g, u32 count; struct ctxsw_buf_offset_map_entry *map, *result, map_key; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* Create map of pri address and pm offset if necessary */ if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { @@ -7831,7 +7829,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch) curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx, &curr_gr_tsgid); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" " ch->chid=%d", curr_ch ? 
curr_ch->chid : -1, @@ -7873,7 +7871,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops}; int err = 0, pass; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", num_ctx_wr_ops, num_ctx_rd_ops); tsg = tsg_gk20a_from_ch(ch); @@ -7906,7 +7904,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_lo; gk20a_writel(g, offset, v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct wr: offset=0x%x v=0x%x", offset, v); @@ -7916,7 +7914,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_hi; gk20a_writel(g, offset + 4, v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct wr: offset=0x%x v=0x%x", offset + 4, v); } @@ -7925,7 +7923,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ctx_ops[i].value_lo = gk20a_readl(g, offset); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct rd: offset=0x%x v=0x%x", offset, ctx_ops[i].value_lo); @@ -7933,7 +7931,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ctx_ops[i].value_hi = gk20a_readl(g, offset + 4); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct rd: offset=0x%x v=0x%x", offset, ctx_ops[i].value_lo); } else @@ -8001,7 +7999,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, offsets, offset_addrs, &num_offsets); if (err) { - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx op invalid offset: offset=0x%x", ctx_ops[i].offset); ctx_ops[i].status = @@ -8044,7 +8042,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_lo; nvgpu_mem_wr(g, current_mem, offsets[j], v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "context wr: offset=0x%x v=0x%x", offsets[j], v); @@ -8054,7 +8052,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_hi; nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "context wr: offset=0x%x v=0x%x", offsets[j] + 4, v); } @@ -8068,14 +8066,14 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ctx_ops[i].value_lo = nvgpu_mem_rd(g, current_mem, offsets[0]); - gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", + nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", offsets[0], ctx_ops[i].value_lo); if (ctx_ops[i].op == REGOP(READ_64)) { ctx_ops[i].value_hi = nvgpu_mem_rd(g, current_mem, offsets[0] + 4); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", offsets[0] + 4, ctx_ops[i].value_hi); } else @@ -8121,7 +8119,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", ch_is_curr_ctx); err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops, @@ -8176,7 +8174,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, struct nvgpu_timeout timeout; u32 warp_esr; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm); nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), @@ -8201,7 +8199,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, ((global_esr & ~global_esr_mask) == 0); if (locked_down || 
no_error_pending) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: locked down SM", gpc, tpc, sm); return 0; diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c index 939567e7..1787f573 100644 --- a/drivers/gpu/nvgpu/gk20a/hal.c +++ b/drivers/gpu/nvgpu/gk20a/hal.c @@ -41,7 +41,7 @@ int gpu_init_hal(struct gk20a *g) switch (ver) { case GK20A_GPUID_GM20B: case GK20A_GPUID_GM20B_B: - gk20a_dbg_info("gm20b detected"); + nvgpu_log_info(g, "gm20b detected"); if (gm20b_init_hal(g)) return -ENODEV; break; diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c index 7fed410e..9473ad4f 100644 --- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A Master Control * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -40,7 +40,7 @@ void mc_gk20a_isr_stall(struct gk20a *g) mc_intr_0 = g->ops.mc.intr_stall(g); - gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); + nvgpu_log(g, gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { active_engine_id = g->fifo.active_engines_list[engine_id_idx]; @@ -200,7 +200,7 @@ void gk20a_mc_disable(struct gk20a *g, u32 units) { u32 pmc; - gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units); + nvgpu_log(g, gpu_dbg_info, "pmc disable: %08x\n", units); nvgpu_spinlock_acquire(&g->mc_enable_lock); pmc = gk20a_readl(g, mc_enable_r()); @@ -213,7 +213,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units) { u32 pmc; - gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units); + nvgpu_log(g, gpu_dbg_info, "pmc enable: %08x\n", units); nvgpu_spinlock_acquire(&g->mc_enable_lock); pmc = gk20a_readl(g, mc_enable_r()); diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 14876296..dfdcc3a4 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c @@ -91,7 +91,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g) struct mm_gk20a *mm = &g->mm; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fb.set_mmu_page_size(g); if (g->ops.fb.set_use_full_comp_tag_line) @@ -112,7 +112,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g) if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) return -EBUSY; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -336,7 +336,7 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch) { int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(ch->g, " "); nvgpu_vm_get(vm); ch->vm = vm; @@ -357,7 +357,7 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block, u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); u32 pdb_addr_hi = u64_hi32(pdb_addr); - gk20a_dbg_info("pde pa=0x%llx", pdb_addr); + nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr); nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), nvgpu_aperture_mask(g, vm->pdb.mem, @@ -376,7 +376,7 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm, { struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", + nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p", nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); g->ops.mm.init_pdb(g, 
inst_block, vm); @@ -395,7 +395,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); if (err) { @@ -403,7 +403,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) return err; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -415,7 +415,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) struct nvgpu_timeout timeout; u32 retries; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_busy_noresume(g); if (!g->power_on) { @@ -448,7 +448,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) flush_fb_flush_outstanding_true_v() || flush_fb_flush_pending_v(data) == flush_fb_flush_pending_busy_v()) { - gk20a_dbg_info("fb_flush 0x%x", data); + nvgpu_log_info(g, "fb_flush 0x%x", data); nvgpu_udelay(5); } else break; @@ -494,7 +494,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g) flush_l2_system_invalidate_outstanding_true_v() || flush_l2_system_invalidate_pending_v(data) == flush_l2_system_invalidate_pending_busy_v()) { - gk20a_dbg_info("l2_system_invalidate 0x%x", + nvgpu_log_info(g, "l2_system_invalidate 0x%x", data); nvgpu_udelay(5); } else @@ -526,7 +526,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) struct nvgpu_timeout timeout; u32 retries = 2000; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_busy_noresume(g); if (!g->power_on) @@ -553,7 +553,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) flush_l2_flush_dirty_outstanding_true_v() || flush_l2_flush_dirty_pending_v(data) == flush_l2_flush_dirty_pending_busy_v()) { - gk20a_dbg_info("l2_flush_dirty 0x%x", data); + nvgpu_log_info(g, "l2_flush_dirty 0x%x", data); nvgpu_udelay(5); } else break; @@ -578,7 +578,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) struct nvgpu_timeout timeout; u32 retries = 200; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_busy_noresume(g); if (!g->power_on) @@ -602,7 +602,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) flush_l2_clean_comptags_outstanding_true_v() || flush_l2_clean_comptags_pending_v(data) == flush_l2_clean_comptags_pending_busy_v()) { - gk20a_dbg_info("l2_clean_comptags 0x%x", data); + nvgpu_log_info(g, "l2_clean_comptags 0x%x", data); nvgpu_udelay(5); } else break; diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 0531b387..400a49a3 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c @@ -39,8 +39,8 @@ #include #include -#define gk20a_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gk20a_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) { @@ -139,7 +139,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) u32 intr_mask; u32 intr_dest; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true, mc_intr_mask_0_pmu_enabled_f()); @@ -166,7 +166,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) mc_intr_mask_0_pmu_enabled_f()); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } @@ -179,7 +179,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu) u64 addr_code, addr_data, addr_load; u32 i, blocks, addr_args; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | @@ -286,7 +286,7 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) { BUG_ON(mutex->ref_cnt == 0); - gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token); + gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token); mutex->ref_cnt++; return 0; } @@ -313,12 +313,12 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) if (owner == data) { mutex->ref_cnt = 1; - gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x", + gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x", mutex->index, *token); *token = owner; return 0; } else { - gk20a_dbg_info("fail to acquire mutex idx=0x%08x", + nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x", mutex->index); data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); @@ -370,7 +370,7 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) pwr_pmu_mutex_id_release_value_f(owner)); gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); - gk20a_dbg_pmu("mutex released: id=%d, token=0x%x", + gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x", mutex->index, *token); return 0; @@ -475,7 +475,7 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g) struct nvgpu_pmu *pmu = &g->pmu; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -554,7 +554,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { struct nvgpu_pmu *pmu = param; - gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); + gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE"); pmu->zbc_save_done = 1; } @@ -575,7 +575,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) pmu->zbc_save_done = 0; - gk20a_dbg_pmu("cmd post ZBC_TABLE_UPDATE"); + gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_zbc_msg, pmu, &seq, ~0); pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), @@ -587,18 +587,20 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu, struct nv_pmu_therm_msg *msg) { - gk20a_dbg_fn(""); + struct gk20a *g = gk20a_from_pmu(pmu); + + nvgpu_log_fn(g, " "); switch (msg->msg_type) { case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION: if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) nvgpu_clk_arb_send_thermal_alarm(pmu->g); else - gk20a_dbg_pmu("Unwanted/Unregistered thermal event received %d", + gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d", msg->hw_slct_msg.mask); break; default: - gk20a_dbg_pmu("unkown therm event received %d", msg->msg_type); + gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type); break; } @@ -609,22 +611,22 @@ 
 void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 
-	gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
 		gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0)));
-	gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x",
 		gk20a_readl(g, pwr_pmu_pg_intren_r(0)));
 
-	gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_count_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_count_r(4)));
-	gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_count_r(7)));
 }
 
@@ -693,7 +695,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 	u32 intr, mask;
 	bool recheck = false;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	if (!pmu->isr_enabled) {
@@ -706,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 
 	intr = gk20a_readl(g, pwr_falcon_irqstat_r());
 
-	gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr);
+	gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);
 
 	intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
 	if (!intr || pmu->pmu_state == PMU_STATE_OFF) {
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index a76e2580..8dde61a2 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -45,7 +45,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 		     bus_bar0_window_target_vid_mem_f()) |
 		     bus_bar0_window_base_f(hi);
 
-	gk20a_dbg(gpu_dbg_mem,
+	nvgpu_log(g, gpu_dbg_mem,
 			"0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)",
 			hi, lo, mem, sgl, bufbase,
 			bufbase + nvgpu_sgt_get_phys(g, sgt, sgl),
@@ -67,7 +67,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
 		struct nvgpu_sgl *sgl)
 {
-	gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl);
+	nvgpu_log(g, gpu_dbg_mem, "end for %p,%p", mem, sgl);
 
 	nvgpu_spinlock_release(&g->mm.pramin_window_lock);
 }
diff --git a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
index ed5327cb..dea42b55 100644
--- a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
@@ -1,7 +1,7 @@
 /*
  * GK20A priv ring
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -66,11 +66,11 @@ void gk20a_priv_ring_isr(struct gk20a *g)
 	status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
 	status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
 
-	gk20a_dbg(gpu_dbg_intr, "ringmaster intr status0: 0x%08x,"
+	nvgpu_log(g, gpu_dbg_intr, "ringmaster intr status0: 0x%08x,"
 		"status1: 0x%08x", status0, status1);
 
 	if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) {
-		gk20a_dbg(gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x",
+		nvgpu_log(g, gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x",
 			gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()),
 			gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()),
 			gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()),
@@ -79,7 +79,7 @@ void gk20a_priv_ring_isr(struct gk20a *g)
 
 	for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
 		if (status1 & BIT(gpc)) {
-			gk20a_dbg(gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc,
+			nvgpu_log(g, gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc,
 			gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride),
 			gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride),
 			gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride),
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 60162f9d..5b9f973b 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -1,7 +1,7 @@
 /*
  * Tegra GK20A GPU Debugger Driver Register Ops
  *
- * Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -72,7 +72,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 	bool skip_read_lo, skip_read_hi;
 	bool ok;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 
@@ -108,7 +108,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 		case REGOP(READ_32):
 			ops[i].value_hi = 0;
 			ops[i].value_lo = gk20a_readl(g, ops[i].offset);
-			gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
+			nvgpu_log(g, gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
 				  ops[i].value_lo, ops[i].offset);
 
 			break;
@@ -118,7 +118,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 			ops[i].value_hi =
 				gk20a_readl(g, ops[i].offset + 4);
 
-			gk20a_dbg(gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
+			nvgpu_log(g, gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
 				  ops[i].value_hi, ops[i].value_lo,
 				  ops[i].offset);
 			break;
@@ -157,12 +157,12 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 
 			/* now update first 32bits */
 			gk20a_writel(g, ops[i].offset, data32_lo);
-			gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
+			nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
				       data32_lo, ops[i].offset);
 			/* if desired, update second 32bits */
 			if (ops[i].op == REGOP(WRITE_64)) {
 				gk20a_writel(g, ops[i].offset + 4, data32_hi);
-				gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
+				nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
					       data32_hi, ops[i].offset + 4);
 			}
 
@@ -189,7 +189,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 	}
 
  clean_up:
-	gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);
 	return err;
 }
 
@@ -395,7 +395,7 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
 		}
 	}
 
-	gk20a_dbg(gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d",
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d",
 		  *ctx_wr_count, *ctx_rd_count);
 
 	return ok;
diff --git a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c
index de5d0f78..b08f3e0a 100644
--- a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Therm
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ int gk20a_init_therm_support(struct gk20a *g)
 {
 	u32 err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gk20a_init_therm_reset_enable_hw(g);
 	if (err)
@@ -73,7 +73,7 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g)
 	u32 active_engine_id = 0;
 	struct fifo_gk20a *f = &g->fifo;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
 		active_engine_id = f->active_engines_list[engine_id];
@@ -104,6 +104,6 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g)
 	idle_filter &= ~therm_hubmmu_idle_filter_value_m();
 	gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 05b8fc61..62763da3 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -107,7 +107,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 			struct channel_gk20a *ch)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = ch->g;
+
+	nvgpu_log_fn(g, " ");
 
 	/* check if channel is already bound to some TSG */
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
@@ -137,10 +139,10 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 
 	nvgpu_ref_get(&tsg->refcount);
 
-	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
+	nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
 					tsg->tsgid, ch->chid);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -167,7 +169,7 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
 	ch->tsgid = NVGPU_INVALID_TSG_ID;
 
-	gk20a_dbg(gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
+	nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
 					tsg->tsgid, ch->chid);
 
 	return 0;
@@ -204,7 +206,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
 	struct gk20a *g = tsg->g;
 	int ret;
 
-	gk20a_dbg(gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);
+	nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);
 
 	switch (level) {
 	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW:
@@ -227,7 +229,7 @@ int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 {
 	struct gk20a *g = tsg->g;
 
-	gk20a_dbg(gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice);
+	nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice);
 
 	return g->ops.fifo.tsg_set_timeslice(tsg, timeslice);
 }
@@ -300,7 +302,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 		}
 	}
 
-	gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);
+	nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);
 
 	return tsg;
 
@@ -343,7 +345,7 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
 
 	tsg->runlist_id = ~0;
 
-	gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
+	nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
 }
 
 struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
-- 
cgit v1.2.2
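
The conversion applied throughout these hunks is mechanical: nvgpu_log()/nvgpu_log_fn() take a struct gk20a pointer as their first argument, so call sites that previously relied on gk20a_dbg*() having no device context must now fetch g explicitly, as gk20a_tsg_bind_channel() does with "struct gk20a *g = ch->g;". The standalone sketch below models that pattern only; the struct layout, the gpu_dbg_* flag values, and the macro bodies are simplified stand-ins for illustration, not the real nvgpu definitions.

/*
 * Minimal userspace sketch of the gk20a_dbg* -> nvgpu_log* conversion.
 * Everything here (struct gk20a, the gpu_dbg_* flags, the macro bodies)
 * is a simplified stand-in for illustration, not the real nvgpu code.
 */
#include <stdio.h>

struct gk20a {
	const char *name;	/* stand-in for the real, much larger struct */
	unsigned int log_mask;	/* hypothetical per-device debug mask */
};

#define gpu_dbg_fn	(1U << 0)
#define gpu_dbg_intr	(1U << 1)

/* Old style: no device pointer, so nothing device-specific can be printed. */
#define gk20a_dbg(mask, fmt, ...) \
	printf("[gk20a] " fmt "\n", ##__VA_ARGS__)

/* New style: the caller supplies g, so messages can be tagged and filtered
 * per device. */
#define nvgpu_log(g, mask, fmt, ...) \
	do { \
		if ((g)->log_mask & (mask)) \
			printf("[%s] " fmt "\n", (g)->name, ##__VA_ARGS__); \
	} while (0)
#define nvgpu_log_fn(g, fmt, ...) \
	nvgpu_log(g, gpu_dbg_fn, "%s: " fmt, __func__, ##__VA_ARGS__)

/* Converted call site, mirroring the gk20a_tsg_bind_channel() hunk above. */
static void bind_channel(struct gk20a *g, int tsgid, int chid)
{
	nvgpu_log_fn(g, " ");
	nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d", tsgid, chid);
	nvgpu_log_fn(g, "done");
}

int main(void)
{
	struct gk20a gpu = { .name = "gk20a", .log_mask = gpu_dbg_fn };

	bind_channel(&gpu, 0, 507);
	return 0;
}

Requiring g at every call site is what allows per-device tagging and filtering, and it is also why this commit drops the few messages for which no gk20a pointer was conveniently available rather than piping one through just for logging.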