Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ce2_gk20a.c        |  14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c    |  46
-rw-r--r--  drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c     |   6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c    |  17
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fb_gk20a.c         |   6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c |  36
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c       | 150
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c            |  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c     |  82
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c |  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c         | 364
-rw-r--r--  drivers/gpu/nvgpu/gk20a/hal.c              |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mc_gk20a.c         |   8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c         |  28
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c        |  52
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pramin_gk20a.c     |   4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c  |   8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c     |  16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/therm_gk20a.c      |   8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c        |  18
20 files changed, 446 insertions(+), 443 deletions(-)
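
Every hunk in this change applies the same mechanical substitution: the legacy gk20a_dbg*() helpers, which resolve the GPU device implicitly, are replaced by the nvgpu_log*() helpers, which take the struct gk20a pointer explicitly. A minimal before/after sketch of the call shapes (the surrounding function is hypothetical; only the logging calls mirror the hunks below):

/* Hypothetical function; only the logging calls reflect the
 * conversion pattern applied throughout this diff. */
static void logging_sketch(struct gk20a *g)
{
	/* before: device implicit, empty string for function tracing */
	gk20a_dbg_fn("");
	gk20a_dbg_info("reg val = 0x%08x", 0u);
	gk20a_dbg(gpu_dbg_intr, "isr %08x\n", 0u);

	/* after: device passed explicitly as the first argument, and the
	 * empty fn-trace format string becomes a single space, " " */
	nvgpu_log_fn(g, " ");
	nvgpu_log_info(g, "reg val = 0x%08x", 0u);
	nvgpu_log(g, gpu_dbg_intr, "isr %08x\n", 0u);
}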
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 0280bbbb..086d4e7b 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -50,21 +50,21 @@

 static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr)
 {
-	gk20a_dbg(gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n");
+	nvgpu_log(g, gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n");

 	return ce2_intr_status_nonblockpipe_pending_f();
 }

 static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr)
 {
-	gk20a_dbg(gpu_dbg_intr, "ce2 blocking pipe interrupt\n");
+	nvgpu_log(g, gpu_dbg_intr, "ce2 blocking pipe interrupt\n");

 	return ce2_intr_status_blockpipe_pending_f();
 }

 static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr)
 {
-	gk20a_dbg(gpu_dbg_intr, "ce2 launch error interrupt\n");
+	nvgpu_log(g, gpu_dbg_intr, "ce2 launch error interrupt\n");

 	return ce2_intr_status_launcherr_pending_f();
 }
@@ -74,7 +74,7 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 	u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());
 	u32 clear_intr = 0;

-	gk20a_dbg(gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);
+	nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);

 	/* clear blocking interrupts: they exibit broken behavior */
 	if (ce2_intr & ce2_intr_status_blockpipe_pending_f())
@@ -92,7 +92,7 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 	int ops = 0;
 	u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());

-	gk20a_dbg(gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr);
+	nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr);

 	if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) {
 		gk20a_writel(g, ce2_intr_status_r(),
@@ -340,7 +340,7 @@ int gk20a_init_ce_support(struct gk20a *g)
 		return 0;
 	}

-	gk20a_dbg(gpu_dbg_fn, "ce: init");
+	nvgpu_log(g, gpu_dbg_fn, "ce: init");

 	err = nvgpu_mutex_init(&ce_app->app_mutex);
 	if (err)
@@ -355,7 +355,7 @@ int gk20a_init_ce_support(struct gk20a *g)
 	ce_app->app_state = NVGPU_CE_ACTIVE;

 	nvgpu_mutex_release(&ce_app->app_mutex);
-	gk20a_dbg(gpu_dbg_cde_ctx, "ce: init finished");
+	nvgpu_log(g, gpu_dbg_cde_ctx, "ce: init finished");

 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index e65ed278..21abdf9a 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -116,7 +116,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
 {
 	struct gk20a *g = c->g;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	g->ops.mm.init_inst_block(&c->inst_block, c->vm,
 			c->vm->gmmu_page_sizes[gmmu_page_size_big]);
@@ -208,7 +208,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)

 void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(ch->g, " ");

 	if (gk20a_is_channel_marked_as_tsg(ch))
 		return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt);
@@ -291,7 +291,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	struct dbg_session_channel_data *ch_data, *tmp;
 	int err;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	WARN_ON(ch->g == NULL);

@@ -351,7 +351,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	/* if engine reset was deferred, perform it now */
 	nvgpu_mutex_acquire(&f->deferred_reset_mutex);
 	if (g->fifo.deferred_reset_pending) {
-		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
+		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
			" deferred, running now");
 		/* if lock is already taken, a reset is taking place
 		so no need to repeat */
@@ -365,7 +365,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	if (!gk20a_channel_as_bound(ch))
 		goto unbind;

-	gk20a_dbg_info("freeing bound channel context, timeout=%ld",
+	nvgpu_log_info(g, "freeing bound channel context, timeout=%ld",
			timeout);

 #ifdef CONFIG_GK20A_CTXSW_TRACE
@@ -626,7 +626,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 		runlist_id = gk20a_fifo_get_gr_runlist_id(g);
 	}

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	ch = allocate_channel(f);
 	if (ch == NULL) {
@@ -765,7 +765,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	u32 free_count;
 	u32 size = orig_size;

-	gk20a_dbg_fn("size %d", orig_size);
+	nvgpu_log_fn(c->g, "size %d", orig_size);

 	if (!e) {
 		nvgpu_err(c->g,
@@ -779,7 +779,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	if (q->put + size > q->size)
 		size = orig_size + (q->size - q->put);

-	gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d",
+	nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d",
			c->chid, q->get, q->put);

 	free_count = (q->size - (q->put - q->get) - 1) % q->size;
@@ -812,7 +812,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	nvgpu_smp_wmb();

 	e->valid = true;
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(c->g, "done");

 	return 0;
 }
@@ -1132,7 +1132,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	c->gpfifo.entry_num = gpfifo_size;
 	c->gpfifo.get = c->gpfifo.put = 0;

-	gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
+	nvgpu_log_info(g, "channel %d : gpfifo_base 0x%016llx, size %d",
		c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);

 	g->ops.fifo.setup_userd(c);
@@ -1184,7 +1184,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,

 	g->ops.fifo.bind_channel(c);

-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;

 clean_up_priv_cmd:
@@ -1400,7 +1400,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 	u64 pb_get;
 	u64 new_pb_get;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	/* Get status and clear the timer */
 	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
@@ -1480,7 +1480,7 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g)
  */
 static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(ch->g, " ");

 	gk20a_channel_clean_up_jobs(ch, true);

@@ -1499,7 +1499,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
 {
 	int put;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	/*
 	 * Currently, the only work type is associated with a lock, which deals
@@ -1596,7 +1596,7 @@ static int gk20a_channel_poll_worker(void *arg)
 	struct nvgpu_timeout timeout;
 	int get = 0;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	nvgpu_timeout_init(g, &timeout, watchdog_interval,
			NVGPU_TIMER_CPU_TIMER);
@@ -1699,7 +1699,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 {
 	struct gk20a *g = ch->g;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	/*
 	 * Warn if worker thread cannot run
@@ -2142,12 +2142,12 @@ int gk20a_channel_suspend(struct gk20a *g)
 	bool channels_in_use = false;
 	u32 active_runlist_ids = 0;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 		if (gk20a_channel_get(ch)) {
-			gk20a_dbg_info("suspend channel %d", chid);
+			nvgpu_log_info(g, "suspend channel %d", chid);
 			/* disable channel */
 			gk20a_disable_channel_tsg(g, ch);
 			/* preempt the channel */
@@ -2175,7 +2175,7 @@ int gk20a_channel_suspend(struct gk20a *g)
 		}
 	}

-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }

@@ -2186,11 +2186,11 @@ int gk20a_channel_resume(struct gk20a *g)
 	bool channels_in_use = false;
 	u32 active_runlist_ids = 0;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	for (chid = 0; chid < f->num_channels; chid++) {
 		if (gk20a_channel_get(&f->channel[chid])) {
-			gk20a_dbg_info("resume channel %d", chid);
+			nvgpu_log_info(g, "resume channel %d", chid);
 			g->ops.fifo.bind_channel(&f->channel[chid]);
 			channels_in_use = true;
 			active_runlist_ids |= BIT(f->channel[chid].runlist_id);
@@ -2201,7 +2201,7 @@ int gk20a_channel_resume(struct gk20a *g)
 	if (channels_in_use)
 		gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true);

-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }

@@ -2210,7 +2210,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 	struct fifo_gk20a *f = &g->fifo;
 	u32 chid;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	/*
 	 * Ensure that all pending writes are actually done before trying to
diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
index 114386a2..0fc39bf4 100644
--- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Cycle stats snapshots support (subsystem for gr_gk20a).
  *
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -189,7 +189,7 @@ int css_hw_enable_snapshot(struct channel_gk20a *ch,
			perf_pmasys_mem_block_valid_true_f() |
			perf_pmasys_mem_block_target_lfb_f());

-	gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n");
+	nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n");

 	return 0;

@@ -227,7 +227,7 @@ void css_hw_disable_snapshot(struct gr_gk20a *gr)
 	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	data->hw_snapshot = NULL;

-	gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n");
+	nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");
 }

 static void css_gr_free_shared_data(struct gr_gk20a *gr)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index ce06e78b..97de7138 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -90,8 +90,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
 {
 	struct dbg_session_data *session_data;
 	struct dbg_session_gk20a *dbg_s;
+	struct gk20a *g = ch->g;

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

 	/* guard against the session list being modified */
 	nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -100,9 +101,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
			dbg_session_data, dbg_s_entry) {
 		dbg_s = session_data->dbg_s;
 		if (dbg_s->dbg_events.events_enabled) {
-			gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d",
+			nvgpu_log(g, gpu_dbg_gpu_dbg, "posting event on session id %d",
				dbg_s->id);
-			gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
+			nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
				dbg_s->dbg_events.num_pending_events);

 			dbg_s->dbg_events.num_pending_events++;
@@ -119,8 +120,9 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
 	struct dbg_session_data *session_data;
 	struct dbg_session_gk20a *dbg_s;
 	bool broadcast = false;
+	struct gk20a *g = ch->g;

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");

 	/* guard against the session list being modified */
 	nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -129,7 +131,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
			dbg_session_data, dbg_s_entry) {
 		dbg_s = session_data->dbg_s;
 		if (dbg_s->broadcast_stop_trigger) {
-			gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
+			nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
				"stop trigger broadcast enabled");
 			broadcast = true;
 			break;
@@ -145,8 +147,9 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
 {
 	struct dbg_session_data *session_data;
 	struct dbg_session_gk20a *dbg_s;
+	struct gk20a *g = ch->g;

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");

 	/* guard against the session list being modified */
 	nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -155,7 +158,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
			dbg_session_data, dbg_s_entry) {
 		dbg_s = session_data->dbg_s;
 		if (dbg_s->broadcast_stop_trigger) {
-			gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
+			nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
				"stop trigger broadcast disabled");
 			dbg_s->broadcast_stop_trigger = false;
 		}
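
As the dbg_gpu_gk20a.c hunks above show, functions that receive only a channel pointer first materialize a local device pointer before logging; the same pattern appears later in fifo_gk20a.c for a TSG (g = tsg->g). A sketch of that shape (the function itself is hypothetical; the declaration and log call mirror the hunks above):

/* Hypothetical channel-scoped function mirroring the hunks above
 * that add a local "g" before converting the log call. */
static void channel_logging_sketch(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;	/* added so nvgpu_log() has a device */

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
}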
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
index e3052701..c4be3313 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
@@ -1,7 +1,7 @@
 /*
  * GK20A memory interface
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@ void fb_gk20a_reset(struct gk20a *g)
 {
 	u32 val;

-	gk20a_dbg_info("reset gk20a fb");
+	nvgpu_log_info(g, "reset gk20a fb");

 	g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
			mc_enable_l2_enabled_f() |
@@ -63,7 +63,7 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	u32 addr_lo;
 	u32 data;

-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");

 	/* pagetables are considered sw states which are preserved after
	   prepare_poweroff. When gk20a deinit releases those pagetables,
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 4fda0d2e..c9d7ea06 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -137,7 +137,7 @@ static int gk20a_fecs_trace_get_write_index(struct gk20a *g)

 static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index)
 {
-	gk20a_dbg(gpu_dbg_ctxsw, "set read=%d", index);
+	nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index);
 	return gr_gk20a_elpg_protected_call(g,
			(gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0));
 }
@@ -148,12 +148,12 @@ void gk20a_fecs_trace_hash_dump(struct gk20a *g)
 	struct gk20a_fecs_trace_hash_ent *ent;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;

-	gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table");
+	nvgpu_log(g, gpu_dbg_ctxsw, "dumping hash table");

 	nvgpu_mutex_acquire(&trace->hash_lock);
 	hash_for_each(trace->pid_hash_table, bkt, ent, node)
 	{
-		gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d",
+		nvgpu_log(g, gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d",
			ent, bkt, ent->context_ptr, ent->pid);

 	}
@@ -165,7 +165,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid
 	struct gk20a_fecs_trace_hash_ent *he;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
		"adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid);

 	he = nvgpu_kzalloc(g, sizeof(*he));
@@ -190,7 +190,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
 	struct gk20a_fecs_trace_hash_ent *ent;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
		"freeing hash entry context_ptr=%x", context_ptr);

 	nvgpu_mutex_acquire(&trace->hash_lock);
@@ -198,7 +198,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
			context_ptr) {
 		if (ent->context_ptr == context_ptr) {
 			hash_del(&ent->node);
-			gk20a_dbg(gpu_dbg_ctxsw,
+			nvgpu_log(g, gpu_dbg_ctxsw,
				"freed hash entry=%p context_ptr=%x", ent,
				ent->context_ptr);
 			nvgpu_kfree(g, ent);
@@ -215,7 +215,7 @@ static void gk20a_fecs_trace_free_hash_table(struct gk20a *g)
 	struct gk20a_fecs_trace_hash_ent *ent;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace);

 	nvgpu_mutex_acquire(&trace->hash_lock);
 	hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) {
@@ -235,7 +235,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr)
 	nvgpu_mutex_acquire(&trace->hash_lock);
 	hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) {
 		if (ent->context_ptr == context_ptr) {
-			gk20a_dbg(gpu_dbg_ctxsw,
+			nvgpu_log(g, gpu_dbg_ctxsw,
				"found context_ptr=%x -> pid=%d",
				ent->context_ptr, ent->pid);
 			pid = ent->pid;
@@ -265,7 +265,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
 	struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record(
		trace, index);

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
		"consuming record trace=%p read=%d record=%p", trace, index, r);

 	if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) {
@@ -284,7 +284,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
 	cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr);
 	new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr);

-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
		"context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)",
		r->context_ptr, cur_pid, r->new_context_ptr, new_pid);

@@ -298,7 +298,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
 		entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]);
 		entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT;

-		gk20a_dbg(gpu_dbg_ctxsw,
+		nvgpu_log(g, gpu_dbg_ctxsw,
			"tag=%x timestamp=%llx context_id=%08x new_context_id=%08x",
			entry.tag, entry.timestamp, r->context_id,
			r->new_context_id);
@@ -327,7 +327,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
			continue;
 		}

-		gk20a_dbg(gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld",
+		nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld",
			entry.tag, entry.context_id, entry.pid);

 		if (!entry.context_id)
@@ -368,7 +368,7 @@ int gk20a_fecs_trace_poll(struct gk20a *g)
 	if (!cnt)
 		goto done;

-	gk20a_dbg(gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_ctxsw,
		"circular buffer: read=%d (mailbox=%d) write=%d cnt=%d",
		read, gk20a_fecs_trace_get_read_index(g), write, cnt);

@@ -633,7 +633,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	pid_t pid;
 	u32 aperture;

-	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
		"chid=%d context_ptr=%x inst_block=%llx",
		ch->chid, context_ptr,
		nvgpu_inst_block_addr(g, &ch->inst_block));
@@ -662,7 +662,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	lo = u64_lo32(pa);
 	hi = u64_hi32(pa);

-	gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
+	nvgpu_log(g, gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
		lo, GK20A_FECS_TRACE_NUM_RECORDS);

 	nvgpu_mem_wr(g, mem,
@@ -696,7 +696,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)
 	u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch);

 	if (g->fecs_trace) {
-		gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw,
+		nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
			"ch=%p context_ptr=%x", ch, context_ptr);

 		if (g->ops.fecs_trace.is_enabled(g)) {
@@ -711,7 +711,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)

 int gk20a_fecs_trace_reset(struct gk20a *g)
 {
-	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "");
+	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");

 	if (!g->ops.fecs_trace.is_enabled(g))
 		return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 75d66968..cc63c3b8 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -94,7 +94,7 @@ u32 gk20a_fifo_get_engine_ids(struct gk20a *g, | |||
94 | engine_id[instance_cnt] = active_engine_id; | 94 | engine_id[instance_cnt] = active_engine_id; |
95 | ++instance_cnt; | 95 | ++instance_cnt; |
96 | } else { | 96 | } else { |
97 | gk20a_dbg_info("warning engine_id table sz is small %d", | 97 | nvgpu_log_info(g, "warning engine_id table sz is small %d", |
98 | engine_id_sz); | 98 | engine_id_sz); |
99 | } | 99 | } |
100 | } | 100 | } |
@@ -320,7 +320,7 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, | |||
320 | { | 320 | { |
321 | int ret = ENGINE_INVAL_GK20A; | 321 | int ret = ENGINE_INVAL_GK20A; |
322 | 322 | ||
323 | gk20a_dbg_info("engine type %d", engine_type); | 323 | nvgpu_log_info(g, "engine type %d", engine_type); |
324 | if (engine_type == top_device_info_type_enum_graphics_v()) | 324 | if (engine_type == top_device_info_type_enum_graphics_v()) |
325 | ret = ENGINE_GR_GK20A; | 325 | ret = ENGINE_GR_GK20A; |
326 | else if ((engine_type >= top_device_info_type_enum_copy0_v()) && | 326 | else if ((engine_type >= top_device_info_type_enum_copy0_v()) && |
@@ -354,7 +354,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) | |||
354 | u32 gr_runlist_id = ~0; | 354 | u32 gr_runlist_id = ~0; |
355 | bool found_pbdma_for_runlist = false; | 355 | bool found_pbdma_for_runlist = false; |
356 | 356 | ||
357 | gk20a_dbg_fn(""); | 357 | nvgpu_log_fn(g, " "); |
358 | 358 | ||
359 | f->num_engines = 0; | 359 | f->num_engines = 0; |
360 | 360 | ||
@@ -367,7 +367,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) | |||
367 | if (top_device_info_engine_v(table_entry)) { | 367 | if (top_device_info_engine_v(table_entry)) { |
368 | engine_id = | 368 | engine_id = |
369 | top_device_info_engine_enum_v(table_entry); | 369 | top_device_info_engine_enum_v(table_entry); |
370 | gk20a_dbg_info("info: engine_id %d", | 370 | nvgpu_log_info(g, "info: engine_id %d", |
371 | top_device_info_engine_enum_v(table_entry)); | 371 | top_device_info_engine_enum_v(table_entry)); |
372 | } | 372 | } |
373 | 373 | ||
@@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) | |||
375 | if (top_device_info_runlist_v(table_entry)) { | 375 | if (top_device_info_runlist_v(table_entry)) { |
376 | runlist_id = | 376 | runlist_id = |
377 | top_device_info_runlist_enum_v(table_entry); | 377 | top_device_info_runlist_enum_v(table_entry); |
378 | gk20a_dbg_info("gr info: runlist_id %d", runlist_id); | 378 | nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id); |
379 | 379 | ||
380 | runlist_bit = BIT(runlist_id); | 380 | runlist_bit = BIT(runlist_id); |
381 | 381 | ||
@@ -384,7 +384,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) | |||
384 | pbdma_id++) { | 384 | pbdma_id++) { |
385 | if (f->pbdma_map[pbdma_id] & | 385 | if (f->pbdma_map[pbdma_id] & |
386 | runlist_bit) { | 386 | runlist_bit) { |
387 | gk20a_dbg_info( | 387 | nvgpu_log_info(g, |
388 | "gr info: pbdma_map[%d]=%d", | 388 | "gr info: pbdma_map[%d]=%d", |
389 | pbdma_id, | 389 | pbdma_id, |
390 | f->pbdma_map[pbdma_id]); | 390 | f->pbdma_map[pbdma_id]); |
@@ -402,13 +402,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) | |||
402 | if (top_device_info_intr_v(table_entry)) { | 402 | if (top_device_info_intr_v(table_entry)) { |
403 | intr_id = | 403 | intr_id = |
404 | top_device_info_intr_enum_v(table_entry); | 404 | top_device_info_intr_enum_v(table_entry); |
405 | gk20a_dbg_info("gr info: intr_id %d", intr_id); | 405 | nvgpu_log_info(g, "gr info: intr_id %d", intr_id); |
406 | } | 406 | } |
407 | 407 | ||
408 | if (top_device_info_reset_v(table_entry)) { | 408 | if (top_device_info_reset_v(table_entry)) { |
409 | reset_id = | 409 | reset_id = |
410 | top_device_info_reset_enum_v(table_entry); | 410 | top_device_info_reset_enum_v(table_entry); |
411 | gk20a_dbg_info("gr info: reset_id %d", | 411 | nvgpu_log_info(g, "gr info: reset_id %d", |
412 | reset_id); | 412 | reset_id); |
413 | } | 413 | } |
414 | } else if (entry == top_device_info_entry_engine_type_v()) { | 414 | } else if (entry == top_device_info_entry_engine_type_v()) { |
@@ -538,7 +538,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f) | |||
538 | struct gk20a *g = f->g; | 538 | struct gk20a *g = f->g; |
539 | unsigned int i = 0; | 539 | unsigned int i = 0; |
540 | 540 | ||
541 | gk20a_dbg_fn(""); | 541 | nvgpu_log_fn(g, " "); |
542 | 542 | ||
543 | nvgpu_channel_worker_deinit(g); | 543 | nvgpu_channel_worker_deinit(g); |
544 | /* | 544 | /* |
@@ -616,7 +616,7 @@ static void fifo_pbdma_exception_status(struct gk20a *g, | |||
616 | get_exception_pbdma_info(g, eng_info); | 616 | get_exception_pbdma_info(g, eng_info); |
617 | e = &eng_info->pbdma_exception_info; | 617 | e = &eng_info->pbdma_exception_info; |
618 | 618 | ||
619 | gk20a_dbg_fn("pbdma_id %d, " | 619 | nvgpu_log_fn(g, "pbdma_id %d, " |
620 | "id_type %s, id %d, chan_status %d, " | 620 | "id_type %s, id %d, chan_status %d, " |
621 | "next_id_type %s, next_id %d, " | 621 | "next_id_type %s, next_id %d, " |
622 | "chsw_in_progress %d", | 622 | "chsw_in_progress %d", |
@@ -657,7 +657,7 @@ static void fifo_engine_exception_status(struct gk20a *g, | |||
657 | get_exception_engine_info(g, eng_info); | 657 | get_exception_engine_info(g, eng_info); |
658 | e = &eng_info->engine_exception_info; | 658 | e = &eng_info->engine_exception_info; |
659 | 659 | ||
660 | gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, " | 660 | nvgpu_log_fn(g, "engine_id %d, id_type %s, id %d, ctx_status %d, " |
661 | "faulted %d, idle %d, ctxsw_in_progress %d, ", | 661 | "faulted %d, idle %d, ctxsw_in_progress %d, ", |
662 | eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid", | 662 | eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid", |
663 | e->id, e->ctx_status_v, | 663 | e->id, e->ctx_status_v, |
@@ -745,7 +745,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) | |||
745 | 745 | ||
746 | clean_up_runlist: | 746 | clean_up_runlist: |
747 | gk20a_fifo_delete_runlist(f); | 747 | gk20a_fifo_delete_runlist(f); |
748 | gk20a_dbg_fn("fail"); | 748 | nvgpu_log_fn(g, "fail"); |
749 | return -ENOMEM; | 749 | return -ENOMEM; |
750 | } | 750 | } |
751 | 751 | ||
@@ -784,7 +784,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) | |||
784 | unsigned int i; | 784 | unsigned int i; |
785 | u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); | 785 | u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); |
786 | 786 | ||
787 | gk20a_dbg_fn(""); | 787 | nvgpu_log_fn(g, " "); |
788 | 788 | ||
789 | /* enable pmc pfifo */ | 789 | /* enable pmc pfifo */ |
790 | g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); | 790 | g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); |
@@ -805,7 +805,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) | |||
805 | timeout = gk20a_readl(g, fifo_fb_timeout_r()); | 805 | timeout = gk20a_readl(g, fifo_fb_timeout_r()); |
806 | timeout = set_field(timeout, fifo_fb_timeout_period_m(), | 806 | timeout = set_field(timeout, fifo_fb_timeout_period_m(), |
807 | fifo_fb_timeout_period_max_f()); | 807 | fifo_fb_timeout_period_max_f()); |
808 | gk20a_dbg_info("fifo_fb_timeout reg val = 0x%08x", timeout); | 808 | nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout); |
809 | gk20a_writel(g, fifo_fb_timeout_r(), timeout); | 809 | gk20a_writel(g, fifo_fb_timeout_r(), timeout); |
810 | 810 | ||
811 | /* write pbdma timeout value */ | 811 | /* write pbdma timeout value */ |
@@ -813,7 +813,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) | |||
813 | timeout = gk20a_readl(g, pbdma_timeout_r(i)); | 813 | timeout = gk20a_readl(g, pbdma_timeout_r(i)); |
814 | timeout = set_field(timeout, pbdma_timeout_period_m(), | 814 | timeout = set_field(timeout, pbdma_timeout_period_m(), |
815 | pbdma_timeout_period_max_f()); | 815 | pbdma_timeout_period_max_f()); |
816 | gk20a_dbg_info("pbdma_timeout reg val = 0x%08x", timeout); | 816 | nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", timeout); |
817 | gk20a_writel(g, pbdma_timeout_r(i), timeout); | 817 | gk20a_writel(g, pbdma_timeout_r(i), timeout); |
818 | } | 818 | } |
819 | if (g->ops.fifo.apply_pb_timeout) | 819 | if (g->ops.fifo.apply_pb_timeout) |
@@ -837,10 +837,10 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) | |||
837 | intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); | 837 | intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); |
838 | intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f(); | 838 | intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f(); |
839 | gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall); | 839 | gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall); |
840 | gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); | 840 | nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); |
841 | gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); | 841 | gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); |
842 | 842 | ||
843 | gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, | 843 | nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, |
844 | ~pbdma_intr_en_0_lbreq_enabled_f()); | 844 | ~pbdma_intr_en_0_lbreq_enabled_f()); |
845 | gk20a_writel(g, pbdma_intr_en_1_r(i), | 845 | gk20a_writel(g, pbdma_intr_en_1_r(i), |
846 | ~pbdma_intr_en_0_lbreq_enabled_f()); | 846 | ~pbdma_intr_en_0_lbreq_enabled_f()); |
@@ -852,12 +852,12 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) | |||
852 | /* clear and enable pfifo interrupt */ | 852 | /* clear and enable pfifo interrupt */ |
853 | gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); | 853 | gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); |
854 | mask = gk20a_fifo_intr_0_en_mask(g); | 854 | mask = gk20a_fifo_intr_0_en_mask(g); |
855 | gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); | 855 | nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask); |
856 | gk20a_writel(g, fifo_intr_en_0_r(), mask); | 856 | gk20a_writel(g, fifo_intr_en_0_r(), mask); |
857 | gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); | 857 | nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000"); |
858 | gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); | 858 | gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); |
859 | 859 | ||
860 | gk20a_dbg_fn("done"); | 860 | nvgpu_log_fn(g, "done"); |
861 | 861 | ||
862 | return 0; | 862 | return 0; |
863 | } | 863 | } |
@@ -868,7 +868,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) | |||
868 | unsigned int chid, i; | 868 | unsigned int chid, i; |
869 | int err = 0; | 869 | int err = 0; |
870 | 870 | ||
871 | gk20a_dbg_fn(""); | 871 | nvgpu_log_fn(g, " "); |
872 | 872 | ||
873 | f->g = g; | 873 | f->g = g; |
874 | 874 | ||
@@ -945,7 +945,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) | |||
945 | goto clean_up; | 945 | goto clean_up; |
946 | } | 946 | } |
947 | 947 | ||
948 | gk20a_dbg_fn("done"); | 948 | nvgpu_log_fn(g, "done"); |
949 | return 0; | 949 | return 0; |
950 | 950 | ||
951 | clean_up: | 951 | clean_up: |
@@ -972,10 +972,10 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) | |||
972 | u64 userd_base; | 972 | u64 userd_base; |
973 | int err = 0; | 973 | int err = 0; |
974 | 974 | ||
975 | gk20a_dbg_fn(""); | 975 | nvgpu_log_fn(g, " "); |
976 | 976 | ||
977 | if (f->sw_ready) { | 977 | if (f->sw_ready) { |
978 | gk20a_dbg_fn("skip init"); | 978 | nvgpu_log_fn(g, "skip init"); |
979 | return 0; | 979 | return 0; |
980 | } | 980 | } |
981 | 981 | ||
@@ -997,7 +997,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) | |||
997 | nvgpu_err(g, "userd memory allocation failed"); | 997 | nvgpu_err(g, "userd memory allocation failed"); |
998 | goto clean_up; | 998 | goto clean_up; |
999 | } | 999 | } |
1000 | gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); | 1000 | nvgpu_log(g, gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); |
1001 | 1001 | ||
1002 | userd_base = nvgpu_mem_get_addr(g, &f->userd); | 1002 | userd_base = nvgpu_mem_get_addr(g, &f->userd); |
1003 | for (chid = 0; chid < f->num_channels; chid++) { | 1003 | for (chid = 0; chid < f->num_channels; chid++) { |
@@ -1013,11 +1013,11 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) | |||
1013 | 1013 | ||
1014 | f->sw_ready = true; | 1014 | f->sw_ready = true; |
1015 | 1015 | ||
1016 | gk20a_dbg_fn("done"); | 1016 | nvgpu_log_fn(g, "done"); |
1017 | return 0; | 1017 | return 0; |
1018 | 1018 | ||
1019 | clean_up: | 1019 | clean_up: |
1020 | gk20a_dbg_fn("fail"); | 1020 | nvgpu_log_fn(g, "fail"); |
1021 | if (nvgpu_mem_is_valid(&f->userd)) { | 1021 | if (nvgpu_mem_is_valid(&f->userd)) { |
1022 | if (g->ops.mm.is_bar1_supported(g)) | 1022 | if (g->ops.mm.is_bar1_supported(g)) |
1023 | nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd); | 1023 | nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd); |
@@ -1032,7 +1032,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g) | |||
1032 | { | 1032 | { |
1033 | u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r()); | 1033 | u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r()); |
1034 | 1034 | ||
1035 | gk20a_dbg(gpu_dbg_intr, "runlist event %08x", | 1035 | nvgpu_log(g, gpu_dbg_intr, "runlist event %08x", |
1036 | runlist_event); | 1036 | runlist_event); |
1037 | 1037 | ||
1038 | gk20a_writel(g, fifo_intr_runlist_r(), runlist_event); | 1038 | gk20a_writel(g, fifo_intr_runlist_r(), runlist_event); |
@@ -1042,7 +1042,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) | |||
1042 | { | 1042 | { |
1043 | struct fifo_gk20a *f = &g->fifo; | 1043 | struct fifo_gk20a *f = &g->fifo; |
1044 | 1044 | ||
1045 | gk20a_dbg_fn(""); | 1045 | nvgpu_log_fn(g, " "); |
1046 | 1046 | ||
1047 | /* test write, read through bar1 @ userd region before | 1047 | /* test write, read through bar1 @ userd region before |
1048 | * turning on the snooping */ | 1048 | * turning on the snooping */ |
@@ -1053,7 +1053,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) | |||
1053 | u32 bar1_vaddr = f->userd.gpu_va; | 1053 | u32 bar1_vaddr = f->userd.gpu_va; |
1054 | volatile u32 *cpu_vaddr = f->userd.cpu_va; | 1054 | volatile u32 *cpu_vaddr = f->userd.cpu_va; |
1055 | 1055 | ||
1056 | gk20a_dbg_info("test bar1 @ vaddr 0x%x", | 1056 | nvgpu_log_info(g, "test bar1 @ vaddr 0x%x", |
1057 | bar1_vaddr); | 1057 | bar1_vaddr); |
1058 | 1058 | ||
1059 | v = gk20a_bar1_readl(g, bar1_vaddr); | 1059 | v = gk20a_bar1_readl(g, bar1_vaddr); |
@@ -1093,7 +1093,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) | |||
1093 | fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) | | 1093 | fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) | |
1094 | fifo_bar1_base_valid_true_f()); | 1094 | fifo_bar1_base_valid_true_f()); |
1095 | 1095 | ||
1096 | gk20a_dbg_fn("done"); | 1096 | nvgpu_log_fn(g, "done"); |
1097 | 1097 | ||
1098 | return 0; | 1098 | return 0; |
1099 | } | 1099 | } |
@@ -1261,7 +1261,7 @@ void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, | |||
1261 | u32 fault_info; | 1261 | u32 fault_info; |
1262 | u32 addr_lo, addr_hi; | 1262 | u32 addr_lo, addr_hi; |
1263 | 1263 | ||
1264 | gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); | 1264 | nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id); |
1265 | 1265 | ||
1266 | memset(mmfault, 0, sizeof(*mmfault)); | 1266 | memset(mmfault, 0, sizeof(*mmfault)); |
1267 | 1267 | ||
@@ -1291,7 +1291,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) | |||
1291 | u32 engine_enum = ENGINE_INVAL_GK20A; | 1291 | u32 engine_enum = ENGINE_INVAL_GK20A; |
1292 | struct fifo_engine_info_gk20a *engine_info; | 1292 | struct fifo_engine_info_gk20a *engine_info; |
1293 | 1293 | ||
1294 | gk20a_dbg_fn(""); | 1294 | nvgpu_log_fn(g, " "); |
1295 | 1295 | ||
1296 | if (!g) | 1296 | if (!g) |
1297 | return; | 1297 | return; |
@@ -1489,7 +1489,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt) | |||
1489 | struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid]; | 1489 | struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid]; |
1490 | struct channel_gk20a *ch; | 1490 | struct channel_gk20a *ch; |
1491 | 1491 | ||
1492 | gk20a_dbg_fn(""); | 1492 | nvgpu_log_fn(g, " "); |
1493 | 1493 | ||
1494 | g->ops.fifo.disable_tsg(tsg); | 1494 | g->ops.fifo.disable_tsg(tsg); |
1495 | 1495 | ||
@@ -1556,7 +1556,7 @@ static bool gk20a_fifo_handle_mmu_fault( | |||
1556 | bool verbose = true; | 1556 | bool verbose = true; |
1557 | u32 grfifo_ctl; | 1557 | u32 grfifo_ctl; |
1558 | 1558 | ||
1559 | gk20a_dbg_fn(""); | 1559 | nvgpu_log_fn(g, " "); |
1560 | 1560 | ||
1561 | g->fifo.deferred_reset_pending = false; | 1561 | g->fifo.deferred_reset_pending = false; |
1562 | 1562 | ||
@@ -1693,7 +1693,7 @@ static bool gk20a_fifo_handle_mmu_fault( | |||
1693 | 1693 | ||
1694 | /* handled during channel free */ | 1694 | /* handled during channel free */ |
1695 | g->fifo.deferred_reset_pending = true; | 1695 | g->fifo.deferred_reset_pending = true; |
1696 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 1696 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
1697 | "sm debugger attached," | 1697 | "sm debugger attached," |
1698 | " deferring channel recovery to channel free"); | 1698 | " deferring channel recovery to channel free"); |
1699 | } else { | 1699 | } else { |
@@ -2196,6 +2196,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
2196 | struct channel_gk20a *ch; | 2196 | struct channel_gk20a *ch; |
2197 | bool recover = false; | 2197 | bool recover = false; |
2198 | bool progress = false; | 2198 | bool progress = false; |
2199 | struct gk20a *g = tsg->g; | ||
2199 | 2200 | ||
2200 | *verbose = false; | 2201 | *verbose = false; |
2201 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; | 2202 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; |
@@ -2221,7 +2222,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
2221 | * this resets timeout for channels that already completed their work | 2222 | * this resets timeout for channels that already completed their work |
2222 | */ | 2223 | */ |
2223 | if (progress) { | 2224 | if (progress) { |
2224 | gk20a_dbg_info("progress on tsg=%d ch=%d", | 2225 | nvgpu_log_info(g, "progress on tsg=%d ch=%d", |
2225 | tsg->tsgid, ch->chid); | 2226 | tsg->tsgid, ch->chid); |
2226 | gk20a_channel_put(ch); | 2227 | gk20a_channel_put(ch); |
2227 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; | 2228 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; |
@@ -2239,7 +2240,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
2239 | * caused the problem, so set timeout error notifier for all channels. | 2240 | * caused the problem, so set timeout error notifier for all channels. |
2240 | */ | 2241 | */ |
2241 | if (recover) { | 2242 | if (recover) { |
2242 | gk20a_dbg_info("timeout on tsg=%d ch=%d", | 2243 | nvgpu_log_info(g, "timeout on tsg=%d ch=%d", |
2243 | tsg->tsgid, ch->chid); | 2244 | tsg->tsgid, ch->chid); |
2244 | *ms = ch->timeout_accumulated_ms; | 2245 | *ms = ch->timeout_accumulated_ms; |
2245 | gk20a_channel_put(ch); | 2246 | gk20a_channel_put(ch); |
@@ -2311,7 +2312,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g) | |||
2311 | is_tsg, true, verbose, | 2312 | is_tsg, true, verbose, |
2312 | RC_TYPE_CTXSW_TIMEOUT); | 2313 | RC_TYPE_CTXSW_TIMEOUT); |
2313 | } else { | 2314 | } else { |
2314 | gk20a_dbg_info( | 2315 | nvgpu_log_info(g, |
2315 | "fifo is waiting for ctx switch for %d ms, " | 2316 | "fifo is waiting for ctx switch for %d ms, " |
2316 | "%s=%d", ms, is_tsg ? "tsg" : "ch", id); | 2317 | "%s=%d", ms, is_tsg ? "tsg" : "ch", id); |
2317 | } | 2318 | } |
@@ -2330,7 +2331,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) | |||
2330 | bool print_channel_reset_log = false; | 2331 | bool print_channel_reset_log = false; |
2331 | u32 handled = 0; | 2332 | u32 handled = 0; |
2332 | 2333 | ||
2333 | gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); | 2334 | nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr); |
2334 | 2335 | ||
2335 | if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { | 2336 | if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { |
2336 | /* pio mode is unused. this shouldn't happen, ever. */ | 2337 | /* pio mode is unused. this shouldn't happen, ever. */ |
@@ -2381,7 +2382,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) | |||
2381 | engine_id++) { | 2382 | engine_id++) { |
2382 | u32 active_engine_id = g->fifo.active_engines_list[engine_id]; | 2383 | u32 active_engine_id = g->fifo.active_engines_list[engine_id]; |
2383 | u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; | 2384 | u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; |
2384 | gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum, | 2385 | nvgpu_log_fn(g, "enum:%d -> engine_id:%d", engine_enum, |
2385 | active_engine_id); | 2386 | active_engine_id); |
2386 | fifo_pbdma_exception_status(g, | 2387 | fifo_pbdma_exception_status(g, |
2387 | &g->fifo.engine_info[active_engine_id]); | 2388 | &g->fifo.engine_info[active_engine_id]); |
@@ -2632,7 +2633,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr) | |||
2632 | 2633 | ||
2633 | for (i = 0; i < host_num_pbdma; i++) { | 2634 | for (i = 0; i < host_num_pbdma; i++) { |
2634 | if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { | 2635 | if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { |
2635 | gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); | 2636 | nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i); |
2636 | clear_intr |= | 2637 | clear_intr |= |
2637 | gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES); | 2638 | gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES); |
2638 | } | 2639 | } |
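For context on the loop above, fifo_pbdma_isr() fans a pending-bit mask out to per-unit handlers and accumulates the bits to acknowledge. A small stand-alone sketch of that shape, with fifo_intr_pbdma_id_status_v() reduced to a hypothetical plain bit test:

#include <stdint.h>
#include <stdio.h>

/* Each pending bit selects one PBDMA unit whose handler returns the
 * bits it acknowledged. */
static uint32_t handle_pbdma_intr(unsigned int id)
{
        printf("pbdma id %u intr pending\n", id);
        return 1u << id;
}

int main(void)
{
        uint32_t pbdma_pending = 0x5;   /* example: units 0 and 2 pending */
        unsigned int host_num_pbdma = 3;
        uint32_t clear_intr = 0;
        unsigned int i;

        for (i = 0; i < host_num_pbdma; i++)
                if (pbdma_pending & (1u << i))
                        clear_intr |= handle_pbdma_intr(i);

        printf("clear 0x%x\n", clear_intr);
        return 0;
}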
@@ -2653,7 +2654,7 @@ void gk20a_fifo_isr(struct gk20a *g) | |||
2653 | * in a threaded interrupt context... */ | 2654 | * in a threaded interrupt context... */ |
2654 | nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); | 2655 | nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); |
2655 | 2656 | ||
2656 | gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); | 2657 | nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); |
2657 | 2658 | ||
2658 | /* handle runlist update */ | 2659 | /* handle runlist update */ |
2659 | if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { | 2660 | if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { |
@@ -2681,7 +2682,7 @@ int gk20a_fifo_nonstall_isr(struct gk20a *g) | |||
2681 | u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); | 2682 | u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); |
2682 | u32 clear_intr = 0; | 2683 | u32 clear_intr = 0; |
2683 | 2684 | ||
2684 | gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); | 2685 | nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); |
2685 | 2686 | ||
2686 | if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) | 2687 | if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) |
2687 | clear_intr = fifo_intr_0_channel_intr_pending_f(); | 2688 | clear_intr = fifo_intr_0_channel_intr_pending_f(); |
@@ -2769,7 +2770,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) | |||
2769 | int ret; | 2770 | int ret; |
2770 | unsigned int id_type; | 2771 | unsigned int id_type; |
2771 | 2772 | ||
2772 | gk20a_dbg_fn("%d", id); | 2773 | nvgpu_log_fn(g, "%d", id); |
2773 | 2774 | ||
2774 | /* issue preempt */ | 2775 | /* issue preempt */ |
2775 | gk20a_fifo_issue_preempt(g, id, is_tsg); | 2776 | gk20a_fifo_issue_preempt(g, id, is_tsg); |
@@ -2794,7 +2795,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) | |||
2794 | u32 mutex_ret = 0; | 2795 | u32 mutex_ret = 0; |
2795 | u32 i; | 2796 | u32 i; |
2796 | 2797 | ||
2797 | gk20a_dbg_fn("%d", chid); | 2798 | nvgpu_log_fn(g, "%d", chid); |
2798 | 2799 | ||
2799 | /* we have no idea which runlist we are using. lock all */ | 2800 | /* we have no idea which runlist we are using. lock all */ |
2800 | for (i = 0; i < g->fifo.max_runlists; i++) | 2801 | for (i = 0; i < g->fifo.max_runlists; i++) |
@@ -2821,7 +2822,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) | |||
2821 | u32 mutex_ret = 0; | 2822 | u32 mutex_ret = 0; |
2822 | u32 i; | 2823 | u32 i; |
2823 | 2824 | ||
2824 | gk20a_dbg_fn("%d", tsgid); | 2825 | nvgpu_log_fn(g, "%d", tsgid); |
2825 | 2826 | ||
2826 | /* we have no idea which runlist we are using. lock all */ | 2827 | /* we have no idea which runlist we are using. lock all */ |
2827 | for (i = 0; i < g->fifo.max_runlists; i++) | 2828 | for (i = 0; i < g->fifo.max_runlists; i++) |
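Both preempt paths above take every runlist lock because the target's runlist is unknown at this point. A sketch of that lock-all pattern, using pthread mutexes as stand-ins for nvgpu_mutex_acquire()/nvgpu_mutex_release():

#include <pthread.h>
#include <stdio.h>

#define MAX_RUNLISTS 2

static pthread_mutex_t runlist_lock[MAX_RUNLISTS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

static void preempt_locked(unsigned int id)
{
        printf("preempt id %u with all runlists locked\n", id);
}

int main(void)
{
        unsigned int i;

        /* target runlist unknown: take every runlist lock first */
        for (i = 0; i < MAX_RUNLISTS; i++)
                pthread_mutex_lock(&runlist_lock[i]);

        preempt_locked(3);

        for (i = 0; i < MAX_RUNLISTS; i++)
                pthread_mutex_unlock(&runlist_lock[i]);
        return 0;
}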
@@ -2938,7 +2939,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, | |||
2938 | u32 mutex_ret; | 2939 | u32 mutex_ret; |
2939 | u32 err = 0; | 2940 | u32 err = 0; |
2940 | 2941 | ||
2941 | gk20a_dbg_fn(""); | 2942 | nvgpu_log_fn(g, " "); |
2942 | 2943 | ||
2943 | gr_stat = | 2944 | gr_stat = |
2944 | gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); | 2945 | gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); |
@@ -2988,12 +2989,12 @@ clean_up: | |||
2988 | nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | 2989 | nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); |
2989 | 2990 | ||
2990 | if (err) { | 2991 | if (err) { |
2991 | gk20a_dbg_fn("failed"); | 2992 | nvgpu_log_fn(g, "failed"); |
2992 | if (gk20a_fifo_enable_engine_activity(g, eng_info)) | 2993 | if (gk20a_fifo_enable_engine_activity(g, eng_info)) |
2993 | nvgpu_err(g, | 2994 | nvgpu_err(g, |
2994 | "failed to enable gr engine activity"); | 2995 | "failed to enable gr engine activity"); |
2995 | } else { | 2996 | } else { |
2996 | gk20a_dbg_fn("done"); | 2997 | nvgpu_log_fn(g, "done"); |
2997 | } | 2998 | } |
2998 | return err; | 2999 | return err; |
2999 | } | 3000 | } |
@@ -3129,8 +3130,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, | |||
3129 | bool skip_next = false; | 3130 | bool skip_next = false; |
3130 | u32 tsgid, count = 0; | 3131 | u32 tsgid, count = 0; |
3131 | u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); | 3132 | u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); |
3133 | struct gk20a *g = f->g; | ||
3132 | 3134 | ||
3133 | gk20a_dbg_fn(""); | 3135 | nvgpu_log_fn(g, " "); |
3134 | 3136 | ||
3135 | /* for each TSG, T, on this level, insert all higher-level channels | 3137 | /* for each TSG, T, on this level, insert all higher-level channels |
3136 | and TSGs before inserting T. */ | 3138 | and TSGs before inserting T. */ |
@@ -3156,9 +3158,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, | |||
3156 | return NULL; | 3158 | return NULL; |
3157 | 3159 | ||
3158 | /* add TSG entry */ | 3160 | /* add TSG entry */ |
3159 | gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid); | 3161 | nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid); |
3160 | f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry); | 3162 | f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry); |
3161 | gk20a_dbg_info("tsg runlist count %d runlist [0] %x [1] %x\n", | 3163 | nvgpu_log_info(g, "tsg runlist count %d runlist [0] %x [1] %x\n", |
3162 | count, runlist_entry[0], runlist_entry[1]); | 3164 | count, runlist_entry[0], runlist_entry[1]); |
3163 | runlist_entry += runlist_entry_words; | 3165 | runlist_entry += runlist_entry_words; |
3164 | count++; | 3166 | count++; |
@@ -3177,10 +3179,10 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, | |||
3177 | return NULL; | 3179 | return NULL; |
3178 | } | 3180 | } |
3179 | 3181 | ||
3180 | gk20a_dbg_info("add channel %d to runlist", | 3182 | nvgpu_log_info(g, "add channel %d to runlist", |
3181 | ch->chid); | 3183 | ch->chid); |
3182 | f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); | 3184 | f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); |
3183 | gk20a_dbg_info( | 3185 | nvgpu_log_info(g, |
3184 | "run list count %d runlist [0] %x [1] %x\n", | 3186 | "run list count %d runlist [0] %x [1] %x\n", |
3185 | count, runlist_entry[0], runlist_entry[1]); | 3187 | count, runlist_entry[0], runlist_entry[1]); |
3186 | count++; | 3188 | count++; |
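The construct loop above appends a TSG header entry and then one entry per channel, advancing the write pointer by runlist_entry_words each time. A self-contained sketch of that append loop; the w0/w1 encodings below are invented placeholders, not the hardware runlist format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_WORDS 2   /* stand-in for runlist_entry_size / sizeof(u32) */

/* Write one entry and advance by ENTRY_WORDS, as the driver loop does. */
static uint32_t *add_entry(uint32_t *entry, uint32_t w0, uint32_t w1,
                           unsigned int *count)
{
        entry[0] = w0;
        entry[1] = w1;
        (*count)++;
        return entry + ENTRY_WORDS;
}

int main(void)
{
        uint32_t runlist[4 * ENTRY_WORDS];
        uint32_t *entry = runlist;
        unsigned int count = 0;

        memset(runlist, 0, sizeof(runlist));
        entry = add_entry(entry, 0xE0000000u, 5, &count);   /* "TSG 5" header */
        entry = add_entry(entry, 0x00000000u, 12, &count);  /* "channel 12" */
        entry = add_entry(entry, 0x00000000u, 13, &count);  /* "channel 13" */

        printf("runlist count %u\n", count);
        return 0;
}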
@@ -3222,7 +3224,7 @@ int gk20a_fifo_set_runlist_interleave(struct gk20a *g, | |||
3222 | u32 runlist_id, | 3224 | u32 runlist_id, |
3223 | u32 new_level) | 3225 | u32 new_level) |
3224 | { | 3226 | { |
3225 | gk20a_dbg_fn(""); | 3227 | nvgpu_log_fn(g, " "); |
3226 | 3228 | ||
3227 | g->fifo.tsg[id].interleave_level = new_level; | 3229 | g->fifo.tsg[id].interleave_level = new_level; |
3228 | 3230 | ||
@@ -3313,7 +3315,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
3313 | 3315 | ||
3314 | runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); | 3316 | runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); |
3315 | 3317 | ||
3316 | gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx", | 3318 | nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx", |
3317 | runlist_id, (u64)runlist_iova); | 3319 | runlist_id, (u64)runlist_iova); |
3318 | 3320 | ||
3319 | if (!runlist_iova) { | 3321 | if (!runlist_iova) { |
@@ -3445,7 +3447,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid, | |||
3445 | u32 mutex_ret; | 3447 | u32 mutex_ret; |
3446 | u32 ret = 0; | 3448 | u32 ret = 0; |
3447 | 3449 | ||
3448 | gk20a_dbg_fn(""); | 3450 | nvgpu_log_fn(g, " "); |
3449 | 3451 | ||
3450 | runlist = &f->runlist_info[runlist_id]; | 3452 | runlist = &f->runlist_info[runlist_id]; |
3451 | 3453 | ||
@@ -3465,7 +3467,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid, | |||
3465 | 3467 | ||
3466 | int gk20a_fifo_suspend(struct gk20a *g) | 3468 | int gk20a_fifo_suspend(struct gk20a *g) |
3467 | { | 3469 | { |
3468 | gk20a_dbg_fn(""); | 3470 | nvgpu_log_fn(g, " "); |
3469 | 3471 | ||
3470 | /* stop bar1 snooping */ | 3472 | /* stop bar1 snooping */ |
3471 | if (g->ops.mm.is_bar1_supported(g)) | 3473 | if (g->ops.mm.is_bar1_supported(g)) |
@@ -3476,7 +3478,7 @@ int gk20a_fifo_suspend(struct gk20a *g) | |||
3476 | gk20a_writel(g, fifo_intr_en_0_r(), 0); | 3478 | gk20a_writel(g, fifo_intr_en_0_r(), 0); |
3477 | gk20a_writel(g, fifo_intr_en_1_r(), 0); | 3479 | gk20a_writel(g, fifo_intr_en_1_r(), 0); |
3478 | 3480 | ||
3479 | gk20a_dbg_fn("done"); | 3481 | nvgpu_log_fn(g, "done"); |
3480 | return 0; | 3482 | return 0; |
3481 | } | 3483 | } |
3482 | 3484 | ||
@@ -3511,7 +3513,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) | |||
3511 | int ret = -ETIMEDOUT; | 3513 | int ret = -ETIMEDOUT; |
3512 | u32 i, host_num_engines; | 3514 | u32 i, host_num_engines; |
3513 | 3515 | ||
3514 | gk20a_dbg_fn(""); | 3516 | nvgpu_log_fn(g, " "); |
3515 | 3517 | ||
3516 | host_num_engines = | 3518 | host_num_engines = |
3517 | nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); | 3519 | nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); |
@@ -3533,12 +3535,12 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) | |||
3533 | } while (!nvgpu_timeout_expired(&timeout)); | 3535 | } while (!nvgpu_timeout_expired(&timeout)); |
3534 | 3536 | ||
3535 | if (ret) { | 3537 | if (ret) { |
3536 | gk20a_dbg_info("cannot idle engine %u", i); | 3538 | nvgpu_log_info(g, "cannot idle engine %u", i); |
3537 | break; | 3539 | break; |
3538 | } | 3540 | } |
3539 | } | 3541 | } |
3540 | 3542 | ||
3541 | gk20a_dbg_fn("done"); | 3543 | nvgpu_log_fn(g, "done"); |
3542 | 3544 | ||
3543 | return ret; | 3545 | return ret; |
3544 | } | 3546 | } |
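gk20a_fifo_wait_engine_idle() above is the usual poll-until-idle-or-deadline shape: ret starts at -ETIMEDOUT and is cleared only if the engine reads idle in time. A mock of that control flow, with the status read and the nvgpu_timeout replaced by hypothetical counters:

#include <stdbool.h>
#include <stdio.h>

#define ETIMEDOUT 110

static int polls_left = 3;

/* Mock status read: the engine goes idle on the third poll. */
static bool engine_busy(void)
{
        return --polls_left > 0;
}

int main(void)
{
        int budget = 10;        /* stand-in for an nvgpu_timeout deadline */
        int ret = -ETIMEDOUT;   /* cleared only if idle is seen in time */

        do {
                if (!engine_busy()) {
                        ret = 0;
                        break;
                }
        } while (--budget > 0);

        printf("wait engine idle: %d\n", ret);
        return 0;
}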
@@ -3839,7 +3841,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a) | |||
3839 | { | 3841 | { |
3840 | struct gk20a *g = ch_gk20a->g; | 3842 | struct gk20a *g = ch_gk20a->g; |
3841 | 3843 | ||
3842 | gk20a_dbg_fn(""); | 3844 | nvgpu_log_fn(g, " "); |
3843 | 3845 | ||
3844 | if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { | 3846 | if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { |
3845 | gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), | 3847 | gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), |
@@ -3854,12 +3856,12 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c) | |||
3854 | u32 addr_hi; | 3856 | u32 addr_hi; |
3855 | struct gk20a *g = c->g; | 3857 | struct gk20a *g = c->g; |
3856 | 3858 | ||
3857 | gk20a_dbg_fn(""); | 3859 | nvgpu_log_fn(g, " "); |
3858 | 3860 | ||
3859 | addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); | 3861 | addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); |
3860 | addr_hi = u64_hi32(c->userd_iova); | 3862 | addr_hi = u64_hi32(c->userd_iova); |
3861 | 3863 | ||
3862 | gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", | 3864 | nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx", |
3863 | c->chid, (u64)c->userd_iova); | 3865 | c->chid, (u64)c->userd_iova); |
3864 | 3866 | ||
3865 | nvgpu_mem_wr32(g, &c->inst_block, | 3867 | nvgpu_mem_wr32(g, &c->inst_block, |
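A worked example of the userd address split above: the low word is the IOVA shifted down by the userd alignment, the high word is the raw upper 32 bits. The shift of 9 (512-byte alignment) is an assumption standing in for ram_userd_base_shift_v():

#include <stdint.h>
#include <stdio.h>

#define USERD_BASE_SHIFT 9      /* assumed value of ram_userd_base_shift_v() */

static uint32_t u64_lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t u64_hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t userd_iova = 0x1234567800ULL;  /* example 512 B aligned IOVA */
        uint32_t addr_lo = u64_lo32(userd_iova >> USERD_BASE_SHIFT);
        uint32_t addr_hi = u64_hi32(userd_iova);

        /* prints: userd lo 0x091a2b3c hi 0x00000012 */
        printf("userd lo 0x%08x hi 0x%08x\n", addr_lo, addr_hi);
        return 0;
}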
@@ -3885,7 +3887,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c, | |||
3885 | struct gk20a *g = c->g; | 3887 | struct gk20a *g = c->g; |
3886 | struct nvgpu_mem *mem = &c->inst_block; | 3888 | struct nvgpu_mem *mem = &c->inst_block; |
3887 | 3889 | ||
3888 | gk20a_dbg_fn(""); | 3890 | nvgpu_log_fn(g, " "); |
3889 | 3891 | ||
3890 | nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); | 3892 | nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); |
3891 | 3893 | ||
@@ -3946,7 +3948,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c) | |||
3946 | struct gk20a *g = c->g; | 3948 | struct gk20a *g = c->g; |
3947 | struct nvgpu_mem *mem = &c->inst_block; | 3949 | struct nvgpu_mem *mem = &c->inst_block; |
3948 | 3950 | ||
3949 | gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid); | 3951 | nvgpu_log_info(g, "channel %d : set ramfc privileged_channel", c->chid); |
3950 | 3952 | ||
3951 | /* Enable HCE priv mode for phys mode transfer */ | 3953 | /* Enable HCE priv mode for phys mode transfer */ |
3952 | nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), | 3954 | nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), |
@@ -3959,7 +3961,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c) | |||
3959 | struct nvgpu_mem *mem; | 3961 | struct nvgpu_mem *mem; |
3960 | u32 offset; | 3962 | u32 offset; |
3961 | 3963 | ||
3962 | gk20a_dbg_fn(""); | 3964 | nvgpu_log_fn(g, " "); |
3963 | 3965 | ||
3964 | if (nvgpu_mem_is_valid(&c->usermode_userd)) { | 3966 | if (nvgpu_mem_is_valid(&c->usermode_userd)) { |
3965 | mem = &c->usermode_userd; | 3967 | mem = &c->usermode_userd; |
@@ -3987,16 +3989,16 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) | |||
3987 | { | 3989 | { |
3988 | int err; | 3990 | int err; |
3989 | 3991 | ||
3990 | gk20a_dbg_fn(""); | 3992 | nvgpu_log_fn(g, " "); |
3991 | 3993 | ||
3992 | err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); | 3994 | err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); |
3993 | if (err) | 3995 | if (err) |
3994 | return err; | 3996 | return err; |
3995 | 3997 | ||
3996 | gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx", | 3998 | nvgpu_log_info(g, "channel %d inst block physical addr: 0x%16llx", |
3997 | ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); | 3999 | ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); |
3998 | 4000 | ||
3999 | gk20a_dbg_fn("done"); | 4001 | nvgpu_log_fn(g, "done"); |
4000 | return 0; | 4002 | return 0; |
4001 | } | 4003 | } |
4002 | 4004 | ||
@@ -4086,7 +4088,7 @@ void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g, | |||
4086 | struct priv_cmd_entry *cmd, u32 off, | 4088 | struct priv_cmd_entry *cmd, u32 off, |
4087 | u32 id, u32 thresh, u64 gpu_va) | 4089 | u32 id, u32 thresh, u64 gpu_va) |
4088 | { | 4090 | { |
4089 | gk20a_dbg_fn(""); | 4091 | nvgpu_log_fn(g, " "); |
4090 | 4092 | ||
4091 | off = cmd->off + off; | 4093 | off = cmd->off + off; |
4092 | /* syncpoint_a */ | 4094 | /* syncpoint_a */ |
@@ -4115,7 +4117,7 @@ void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g, | |||
4115 | { | 4117 | { |
4116 | u32 off = cmd->off; | 4118 | u32 off = cmd->off; |
4117 | 4119 | ||
4118 | gk20a_dbg_fn(""); | 4120 | nvgpu_log_fn(g, " "); |
4119 | if (wfi_cmd) { | 4121 | if (wfi_cmd) { |
4120 | /* wfi */ | 4122 | /* wfi */ |
4121 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); | 4123 | nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); |
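The syncpoint helpers above build privileged command buffers one u32 at a time at an advancing offset. A sketch of that style with nvgpu_mem_wr32() reduced to an array store; 0x2001001E is the wfi word copied from the hunk, the other values are placeholders:

#include <stdint.h>
#include <stdio.h>

/* nvgpu_mem_wr32() reduced to an array store. */
static void mem_wr32(uint32_t *mem, uint32_t off, uint32_t val)
{
        mem[off] = val;
}

int main(void)
{
        uint32_t cmdbuf[8] = { 0 };
        uint32_t off = 0;
        int wfi_cmd = 1;

        if (wfi_cmd)
                mem_wr32(cmdbuf, off++, 0x2001001E);    /* wfi (from the hunk) */
        mem_wr32(cmdbuf, off++, 0x20010000);            /* placeholder method */
        mem_wr32(cmdbuf, off++, 42);                    /* placeholder data */

        printf("%u words emitted\n", off);
        return 0;
}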
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index e8008937..e862f2e4 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c | |||
@@ -77,7 +77,7 @@ int gk20a_detect_chip(struct gk20a *g) | |||
77 | 77 | ||
78 | gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev); | 78 | gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev); |
79 | 79 | ||
80 | gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n", | 80 | nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n", |
81 | g->params.gpu_arch, | 81 | g->params.gpu_arch, |
82 | g->params.gpu_impl, | 82 | g->params.gpu_impl, |
83 | g->params.gpu_rev); | 83 | g->params.gpu_rev); |
@@ -89,7 +89,7 @@ int gk20a_prepare_poweroff(struct gk20a *g) | |||
89 | { | 89 | { |
90 | int ret = 0; | 90 | int ret = 0; |
91 | 91 | ||
92 | gk20a_dbg_fn(""); | 92 | nvgpu_log_fn(g, " "); |
93 | 93 | ||
94 | if (g->ops.fifo.channel_suspend) { | 94 | if (g->ops.fifo.channel_suspend) { |
95 | ret = g->ops.fifo.channel_suspend(g); | 95 | ret = g->ops.fifo.channel_suspend(g); |
@@ -126,7 +126,7 @@ int gk20a_finalize_poweron(struct gk20a *g) | |||
126 | u32 nr_pages; | 126 | u32 nr_pages; |
127 | #endif | 127 | #endif |
128 | 128 | ||
129 | gk20a_dbg_fn(""); | 129 | nvgpu_log_fn(g, " "); |
130 | 130 | ||
131 | if (g->power_on) | 131 | if (g->power_on) |
132 | return 0; | 132 | return 0; |
@@ -434,7 +434,7 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount) | |||
434 | struct gk20a *g = container_of(refcount, | 434 | struct gk20a *g = container_of(refcount, |
435 | struct gk20a, refcount); | 435 | struct gk20a, refcount); |
436 | 436 | ||
437 | gk20a_dbg(gpu_dbg_shutdown, "Freeing GK20A struct!"); | 437 | nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!"); |
438 | 438 | ||
439 | gk20a_ce_destroy(g); | 439 | gk20a_ce_destroy(g); |
440 | 440 | ||
@@ -465,7 +465,7 @@ struct gk20a * __must_check gk20a_get(struct gk20a *g) | |||
465 | */ | 465 | */ |
466 | success = nvgpu_ref_get_unless_zero(&g->refcount); | 466 | success = nvgpu_ref_get_unless_zero(&g->refcount); |
467 | 467 | ||
468 | gk20a_dbg(gpu_dbg_shutdown, "GET: refs currently %d %s", | 468 | nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s", |
469 | nvgpu_atomic_read(&g->refcount.refcount), | 469 | nvgpu_atomic_read(&g->refcount.refcount), |
470 | success ? "" : "(FAILED)"); | 470 | success ? "" : "(FAILED)"); |
471 | 471 | ||
@@ -490,7 +490,7 @@ void gk20a_put(struct gk20a *g) | |||
490 | * ... PUT: refs currently 2 | 490 | * ... PUT: refs currently 2 |
491 | * ... Freeing GK20A struct! | 491 | * ... Freeing GK20A struct! |
492 | */ | 492 | */ |
493 | gk20a_dbg(gpu_dbg_shutdown, "PUT: refs currently %d", | 493 | nvgpu_log(g, gpu_dbg_shutdown, "PUT: refs currently %d", |
494 | nvgpu_atomic_read(&g->refcount.refcount)); | 494 | nvgpu_atomic_read(&g->refcount.refcount)); |
495 | 495 | ||
496 | nvgpu_ref_put(&g->refcount, gk20a_free_cb); | 496 | nvgpu_ref_put(&g->refcount, gk20a_free_cb); |
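The GET/PUT logging above traces a conditional refcount: a get succeeds only while the count is nonzero, and the final put invokes the free callback. A single-threaded sketch of that lifetime (the driver uses nvgpu_ref atomics; plain ints here are a simplification):

#include <stdbool.h>
#include <stdio.h>

struct obj { int refs; };       /* plain int instead of nvgpu_ref atomics */

static bool get_unless_zero(struct obj *o)
{
        if (o->refs == 0)
                return false;   /* object already on its way out */
        o->refs++;
        return true;
}

static void put(struct obj *o, void (*release)(struct obj *))
{
        if (--o->refs == 0)
                release(o);     /* last put frees the object */
}

static void free_cb(struct obj *o)
{
        printf("Freeing struct! (refs %d)\n", o->refs);
}

int main(void)
{
        struct obj g = { .refs = 1 };

        if (get_unless_zero(&g))
                printf("GET: refs currently %d\n", g.refs);
        put(&g, free_cb);       /* back to 1 */
        put(&g, free_cb);       /* 0: free_cb runs */
        return 0;
}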
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c index 7120059c..f829cb3a 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c | |||
@@ -1,9 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/video/tegra/host/gk20a/gr_ctx_gk20a.c | ||
3 | * | ||
4 | * GK20A Graphics Context | 2 | * GK20A Graphics Context |
5 | * | 3 | * |
6 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. |
7 | * | 5 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
9 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -79,7 +77,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) | |||
79 | u32 i, major_v = ~0, major_v_hw, netlist_num; | 77 | u32 i, major_v = ~0, major_v_hw, netlist_num; |
80 | int net, max, err = -ENOENT; | 78 | int net, max, err = -ENOENT; |
81 | 79 | ||
82 | gk20a_dbg_fn(""); | 80 | nvgpu_log_fn(g, " "); |
83 | 81 | ||
84 | if (g->ops.gr_ctx.is_fw_defined()) { | 82 | if (g->ops.gr_ctx.is_fw_defined()) { |
85 | net = NETLIST_FINAL; | 83 | net = NETLIST_FINAL; |
@@ -114,63 +112,63 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) | |||
114 | 112 | ||
115 | switch (netlist->regions[i].region_id) { | 113 | switch (netlist->regions[i].region_id) { |
116 | case NETLIST_REGIONID_FECS_UCODE_DATA: | 114 | case NETLIST_REGIONID_FECS_UCODE_DATA: |
117 | gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA"); | 115 | nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA"); |
118 | err = gr_gk20a_alloc_load_netlist_u32(g, | 116 | err = gr_gk20a_alloc_load_netlist_u32(g, |
119 | src, size, &g->gr.ctx_vars.ucode.fecs.data); | 117 | src, size, &g->gr.ctx_vars.ucode.fecs.data); |
120 | if (err) | 118 | if (err) |
121 | goto clean_up; | 119 | goto clean_up; |
122 | break; | 120 | break; |
123 | case NETLIST_REGIONID_FECS_UCODE_INST: | 121 | case NETLIST_REGIONID_FECS_UCODE_INST: |
124 | gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST"); | 122 | nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST"); |
125 | err = gr_gk20a_alloc_load_netlist_u32(g, | 123 | err = gr_gk20a_alloc_load_netlist_u32(g, |
126 | src, size, &g->gr.ctx_vars.ucode.fecs.inst); | 124 | src, size, &g->gr.ctx_vars.ucode.fecs.inst); |
127 | if (err) | 125 | if (err) |
128 | goto clean_up; | 126 | goto clean_up; |
129 | break; | 127 | break; |
130 | case NETLIST_REGIONID_GPCCS_UCODE_DATA: | 128 | case NETLIST_REGIONID_GPCCS_UCODE_DATA: |
131 | gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA"); | 129 | nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA"); |
132 | err = gr_gk20a_alloc_load_netlist_u32(g, | 130 | err = gr_gk20a_alloc_load_netlist_u32(g, |
133 | src, size, &g->gr.ctx_vars.ucode.gpccs.data); | 131 | src, size, &g->gr.ctx_vars.ucode.gpccs.data); |
134 | if (err) | 132 | if (err) |
135 | goto clean_up; | 133 | goto clean_up; |
136 | break; | 134 | break; |
137 | case NETLIST_REGIONID_GPCCS_UCODE_INST: | 135 | case NETLIST_REGIONID_GPCCS_UCODE_INST: |
138 | gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST"); | 136 | nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST"); |
139 | err = gr_gk20a_alloc_load_netlist_u32(g, | 137 | err = gr_gk20a_alloc_load_netlist_u32(g, |
140 | src, size, &g->gr.ctx_vars.ucode.gpccs.inst); | 138 | src, size, &g->gr.ctx_vars.ucode.gpccs.inst); |
141 | if (err) | 139 | if (err) |
142 | goto clean_up; | 140 | goto clean_up; |
143 | break; | 141 | break; |
144 | case NETLIST_REGIONID_SW_BUNDLE_INIT: | 142 | case NETLIST_REGIONID_SW_BUNDLE_INIT: |
145 | gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT"); | 143 | nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT"); |
146 | err = gr_gk20a_alloc_load_netlist_av(g, | 144 | err = gr_gk20a_alloc_load_netlist_av(g, |
147 | src, size, &g->gr.ctx_vars.sw_bundle_init); | 145 | src, size, &g->gr.ctx_vars.sw_bundle_init); |
148 | if (err) | 146 | if (err) |
149 | goto clean_up; | 147 | goto clean_up; |
150 | break; | 148 | break; |
151 | case NETLIST_REGIONID_SW_METHOD_INIT: | 149 | case NETLIST_REGIONID_SW_METHOD_INIT: |
152 | gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT"); | 150 | nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT"); |
153 | err = gr_gk20a_alloc_load_netlist_av(g, | 151 | err = gr_gk20a_alloc_load_netlist_av(g, |
154 | src, size, &g->gr.ctx_vars.sw_method_init); | 152 | src, size, &g->gr.ctx_vars.sw_method_init); |
155 | if (err) | 153 | if (err) |
156 | goto clean_up; | 154 | goto clean_up; |
157 | break; | 155 | break; |
158 | case NETLIST_REGIONID_SW_CTX_LOAD: | 156 | case NETLIST_REGIONID_SW_CTX_LOAD: |
159 | gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD"); | 157 | nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD"); |
160 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 158 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
161 | src, size, &g->gr.ctx_vars.sw_ctx_load); | 159 | src, size, &g->gr.ctx_vars.sw_ctx_load); |
162 | if (err) | 160 | if (err) |
163 | goto clean_up; | 161 | goto clean_up; |
164 | break; | 162 | break; |
165 | case NETLIST_REGIONID_SW_NON_CTX_LOAD: | 163 | case NETLIST_REGIONID_SW_NON_CTX_LOAD: |
166 | gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD"); | 164 | nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD"); |
167 | err = gr_gk20a_alloc_load_netlist_av(g, | 165 | err = gr_gk20a_alloc_load_netlist_av(g, |
168 | src, size, &g->gr.ctx_vars.sw_non_ctx_load); | 166 | src, size, &g->gr.ctx_vars.sw_non_ctx_load); |
169 | if (err) | 167 | if (err) |
170 | goto clean_up; | 168 | goto clean_up; |
171 | break; | 169 | break; |
172 | case NETLIST_REGIONID_SWVEIDBUNDLEINIT: | 170 | case NETLIST_REGIONID_SWVEIDBUNDLEINIT: |
173 | gk20a_dbg_info( | 171 | nvgpu_log_info(g, |
174 | "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT"); | 172 | "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT"); |
175 | err = gr_gk20a_alloc_load_netlist_av(g, | 173 | err = gr_gk20a_alloc_load_netlist_av(g, |
176 | src, size, | 174 | src, size, |
@@ -179,56 +177,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) | |||
179 | goto clean_up; | 177 | goto clean_up; |
180 | break; | 178 | break; |
181 | case NETLIST_REGIONID_CTXREG_SYS: | 179 | case NETLIST_REGIONID_CTXREG_SYS: |
182 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS"); | 180 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS"); |
183 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 181 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
184 | src, size, &g->gr.ctx_vars.ctxsw_regs.sys); | 182 | src, size, &g->gr.ctx_vars.ctxsw_regs.sys); |
185 | if (err) | 183 | if (err) |
186 | goto clean_up; | 184 | goto clean_up; |
187 | break; | 185 | break; |
188 | case NETLIST_REGIONID_CTXREG_GPC: | 186 | case NETLIST_REGIONID_CTXREG_GPC: |
189 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC"); | 187 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC"); |
190 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 188 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
191 | src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); | 189 | src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); |
192 | if (err) | 190 | if (err) |
193 | goto clean_up; | 191 | goto clean_up; |
194 | break; | 192 | break; |
195 | case NETLIST_REGIONID_CTXREG_TPC: | 193 | case NETLIST_REGIONID_CTXREG_TPC: |
196 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC"); | 194 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC"); |
197 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 195 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
198 | src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); | 196 | src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); |
199 | if (err) | 197 | if (err) |
200 | goto clean_up; | 198 | goto clean_up; |
201 | break; | 199 | break; |
202 | case NETLIST_REGIONID_CTXREG_ZCULL_GPC: | 200 | case NETLIST_REGIONID_CTXREG_ZCULL_GPC: |
203 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC"); | 201 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC"); |
204 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 202 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
205 | src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); | 203 | src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); |
206 | if (err) | 204 | if (err) |
207 | goto clean_up; | 205 | goto clean_up; |
208 | break; | 206 | break; |
209 | case NETLIST_REGIONID_CTXREG_PPC: | 207 | case NETLIST_REGIONID_CTXREG_PPC: |
210 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC"); | 208 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC"); |
211 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 209 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
212 | src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); | 210 | src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); |
213 | if (err) | 211 | if (err) |
214 | goto clean_up; | 212 | goto clean_up; |
215 | break; | 213 | break; |
216 | case NETLIST_REGIONID_CTXREG_PM_SYS: | 214 | case NETLIST_REGIONID_CTXREG_PM_SYS: |
217 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS"); | 215 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS"); |
218 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 216 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
219 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); | 217 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); |
220 | if (err) | 218 | if (err) |
221 | goto clean_up; | 219 | goto clean_up; |
222 | break; | 220 | break; |
223 | case NETLIST_REGIONID_CTXREG_PM_GPC: | 221 | case NETLIST_REGIONID_CTXREG_PM_GPC: |
224 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC"); | 222 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC"); |
225 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 223 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
226 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); | 224 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); |
227 | if (err) | 225 | if (err) |
228 | goto clean_up; | 226 | goto clean_up; |
229 | break; | 227 | break; |
230 | case NETLIST_REGIONID_CTXREG_PM_TPC: | 228 | case NETLIST_REGIONID_CTXREG_PM_TPC: |
231 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC"); | 229 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC"); |
232 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 230 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
233 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); | 231 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); |
234 | if (err) | 232 | if (err) |
@@ -236,110 +234,110 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) | |||
236 | break; | 234 | break; |
237 | case NETLIST_REGIONID_BUFFER_SIZE: | 235 | case NETLIST_REGIONID_BUFFER_SIZE: |
238 | g->gr.ctx_vars.buffer_size = *src; | 236 | g->gr.ctx_vars.buffer_size = *src; |
239 | gk20a_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d", | 237 | nvgpu_log_info(g, "NETLIST_REGIONID_BUFFER_SIZE : %d", |
240 | g->gr.ctx_vars.buffer_size); | 238 | g->gr.ctx_vars.buffer_size); |
241 | break; | 239 | break; |
242 | case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX: | 240 | case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX: |
243 | g->gr.ctx_vars.regs_base_index = *src; | 241 | g->gr.ctx_vars.regs_base_index = *src; |
244 | gk20a_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", | 242 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", |
245 | g->gr.ctx_vars.regs_base_index); | 243 | g->gr.ctx_vars.regs_base_index); |
246 | break; | 244 | break; |
247 | case NETLIST_REGIONID_MAJORV: | 245 | case NETLIST_REGIONID_MAJORV: |
248 | major_v = *src; | 246 | major_v = *src; |
249 | gk20a_dbg_info("NETLIST_REGIONID_MAJORV : %d", | 247 | nvgpu_log_info(g, "NETLIST_REGIONID_MAJORV : %d", |
250 | major_v); | 248 | major_v); |
251 | break; | 249 | break; |
252 | case NETLIST_REGIONID_NETLIST_NUM: | 250 | case NETLIST_REGIONID_NETLIST_NUM: |
253 | netlist_num = *src; | 251 | netlist_num = *src; |
254 | gk20a_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d", | 252 | nvgpu_log_info(g, "NETLIST_REGIONID_NETLIST_NUM : %d", |
255 | netlist_num); | 253 | netlist_num); |
256 | break; | 254 | break; |
257 | case NETLIST_REGIONID_CTXREG_PMPPC: | 255 | case NETLIST_REGIONID_CTXREG_PMPPC: |
258 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC"); | 256 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC"); |
259 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 257 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
260 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); | 258 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); |
261 | if (err) | 259 | if (err) |
262 | goto clean_up; | 260 | goto clean_up; |
263 | break; | 261 | break; |
264 | case NETLIST_REGIONID_NVPERF_CTXREG_SYS: | 262 | case NETLIST_REGIONID_NVPERF_CTXREG_SYS: |
265 | gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_SYS"); | 263 | nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS"); |
266 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 264 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
267 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys); | 265 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys); |
268 | if (err) | 266 | if (err) |
269 | goto clean_up; | 267 | goto clean_up; |
270 | break; | 268 | break; |
271 | case NETLIST_REGIONID_NVPERF_FBP_CTXREGS: | 269 | case NETLIST_REGIONID_NVPERF_FBP_CTXREGS: |
272 | gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); | 270 | nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); |
273 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 271 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
274 | src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); | 272 | src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); |
275 | if (err) | 273 | if (err) |
276 | goto clean_up; | 274 | goto clean_up; |
277 | break; | 275 | break; |
278 | case NETLIST_REGIONID_NVPERF_CTXREG_GPC: | 276 | case NETLIST_REGIONID_NVPERF_CTXREG_GPC: |
279 | gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_GPC"); | 277 | nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC"); |
280 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 278 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
281 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); | 279 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); |
282 | if (err) | 280 | if (err) |
283 | goto clean_up; | 281 | goto clean_up; |
284 | break; | 282 | break; |
285 | case NETLIST_REGIONID_NVPERF_FBP_ROUTER: | 283 | case NETLIST_REGIONID_NVPERF_FBP_ROUTER: |
286 | gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_ROUTER"); | 284 | nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER"); |
287 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 285 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
288 | src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); | 286 | src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); |
289 | if (err) | 287 | if (err) |
290 | goto clean_up; | 288 | goto clean_up; |
291 | break; | 289 | break; |
292 | case NETLIST_REGIONID_NVPERF_GPC_ROUTER: | 290 | case NETLIST_REGIONID_NVPERF_GPC_ROUTER: |
293 | gk20a_dbg_info("NETLIST_REGIONID_NVPERF_GPC_ROUTER"); | 291 | nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER"); |
294 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 292 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
295 | src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); | 293 | src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); |
296 | if (err) | 294 | if (err) |
297 | goto clean_up; | 295 | goto clean_up; |
298 | break; | 296 | break; |
299 | case NETLIST_REGIONID_CTXREG_PMLTC: | 297 | case NETLIST_REGIONID_CTXREG_PMLTC: |
300 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMLTC"); | 298 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC"); |
301 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 299 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
302 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); | 300 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); |
303 | if (err) | 301 | if (err) |
304 | goto clean_up; | 302 | goto clean_up; |
305 | break; | 303 | break; |
306 | case NETLIST_REGIONID_CTXREG_PMFBPA: | 304 | case NETLIST_REGIONID_CTXREG_PMFBPA: |
307 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMFBPA"); | 305 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA"); |
308 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 306 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
309 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); | 307 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); |
310 | if (err) | 308 | if (err) |
311 | goto clean_up; | 309 | goto clean_up; |
312 | break; | 310 | break; |
313 | case NETLIST_REGIONID_NVPERF_SYS_ROUTER: | 311 | case NETLIST_REGIONID_NVPERF_SYS_ROUTER: |
314 | gk20a_dbg_info("NETLIST_REGIONID_NVPERF_SYS_ROUTER"); | 312 | nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER"); |
315 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 313 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
316 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); | 314 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); |
317 | if (err) | 315 | if (err) |
318 | goto clean_up; | 316 | goto clean_up; |
319 | break; | 317 | break; |
320 | case NETLIST_REGIONID_NVPERF_PMA: | 318 | case NETLIST_REGIONID_NVPERF_PMA: |
321 | gk20a_dbg_info("NETLIST_REGIONID_NVPERF_PMA"); | 319 | nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA"); |
322 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 320 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
323 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); | 321 | src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); |
324 | if (err) | 322 | if (err) |
325 | goto clean_up; | 323 | goto clean_up; |
326 | break; | 324 | break; |
327 | case NETLIST_REGIONID_CTXREG_PMROP: | 325 | case NETLIST_REGIONID_CTXREG_PMROP: |
328 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMROP"); | 326 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP"); |
329 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 327 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
330 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); | 328 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); |
331 | if (err) | 329 | if (err) |
332 | goto clean_up; | 330 | goto clean_up; |
333 | break; | 331 | break; |
334 | case NETLIST_REGIONID_CTXREG_PMUCGPC: | 332 | case NETLIST_REGIONID_CTXREG_PMUCGPC: |
335 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMUCGPC"); | 333 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC"); |
336 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 334 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
337 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); | 335 | src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); |
338 | if (err) | 336 | if (err) |
339 | goto clean_up; | 337 | goto clean_up; |
340 | break; | 338 | break; |
341 | case NETLIST_REGIONID_CTXREG_ETPC: | 339 | case NETLIST_REGIONID_CTXREG_ETPC: |
342 | gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ETPC"); | 340 | nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC"); |
343 | err = gr_gk20a_alloc_load_netlist_aiv(g, | 341 | err = gr_gk20a_alloc_load_netlist_aiv(g, |
344 | src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); | 342 | src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); |
345 | if (err) | 343 | if (err) |
@@ -347,13 +345,13 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) | |||
347 | break; | 345 | break; |
348 | 346 | ||
349 | default: | 347 | default: |
350 | gk20a_dbg_info("unrecognized region %d skipped", i); | 348 | nvgpu_log_info(g, "unrecognized region %d skipped", i); |
351 | break; | 349 | break; |
352 | } | 350 | } |
353 | } | 351 | } |
354 | 352 | ||
355 | if (net != NETLIST_FINAL && major_v != major_v_hw) { | 353 | if (net != NETLIST_FINAL && major_v != major_v_hw) { |
356 | gk20a_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x", | 354 | nvgpu_log_info(g, "skip %s: major_v 0x%08x doesn't match hw 0x%08x", |
357 | name, major_v, major_v_hw); | 355 | name, major_v, major_v_hw); |
358 | goto clean_up; | 356 | goto clean_up; |
359 | } | 357 | } |
@@ -362,7 +360,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) | |||
362 | g->gr.netlist = net; | 360 | g->gr.netlist = net; |
363 | 361 | ||
364 | nvgpu_release_firmware(g, netlist_fw); | 362 | nvgpu_release_firmware(g, netlist_fw); |
365 | gk20a_dbg_fn("done"); | 363 | nvgpu_log_fn(g, "done"); |
366 | goto done; | 364 | goto done; |
367 | 365 | ||
368 | clean_up: | 366 | clean_up: |
@@ -403,7 +401,7 @@ clean_up: | |||
403 | 401 | ||
404 | done: | 402 | done: |
405 | if (g->gr.ctx_vars.valid) { | 403 | if (g->gr.ctx_vars.valid) { |
406 | gk20a_dbg_info("netlist image %s loaded", name); | 404 | nvgpu_log_info(g, "netlist image %s loaded", name); |
407 | return 0; | 405 | return 0; |
408 | } else { | 406 | } else { |
409 | nvgpu_err(g, "failed to load netlist image!!"); | 407 | nvgpu_err(g, "failed to load netlist image!!"); |
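The long switch converted above all follows one skeleton: walk the firmware's region table, dispatch on region_id, load known regions, skip unknown ones, and bail to clean_up on any load failure. A compact stand-alone sketch with hypothetical region ids and loader:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical region ids standing in for NETLIST_REGIONID_*. */
enum { REGION_FECS_DATA, REGION_UNKNOWN = 99 };

struct region { int region_id; const uint32_t *src; size_t size; };

/* Stand-in for gr_gk20a_alloc_load_netlist_u32(). */
static int load_u32_list(const uint32_t *src, size_t size)
{
        printf("loaded %zu bytes, first word 0x%x\n", size, src[0]);
        return 0;
}

int main(void)
{
        static const uint32_t blob[4] = { 1, 2, 3, 4 };
        struct region regions[] = {
                { REGION_FECS_DATA, blob, sizeof(blob) },
                { REGION_UNKNOWN,   blob, sizeof(blob) },
        };
        size_t i;

        for (i = 0; i < 2; i++) {
                switch (regions[i].region_id) {
                case REGION_FECS_DATA:
                        if (load_u32_list(regions[i].src, regions[i].size))
                                return -1;      /* "goto clean_up" in the driver */
                        break;
                default:
                        printf("unrecognized region %zu skipped\n", i);
                        break;
                }
        }
        return 0;
}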
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c index 9674e2d6..01c7ed3c 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c | |||
@@ -1,9 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/video/tegra/host/gk20a/gr_ctx_sim_gk20a.c | ||
3 | * | ||
4 | * GK20A Graphics Context for Simulation | 2 | * GK20A Graphics Context for Simulation |
5 | * | 3 | * |
6 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. |
7 | * | 5 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
9 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -35,7 +33,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) | |||
35 | int err = 0; | 33 | int err = 0; |
36 | u32 i, temp; | 34 | u32 i, temp; |
37 | 35 | ||
38 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, | 36 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, |
39 | "querying grctx info from chiplib"); | 37 | "querying grctx info from chiplib"); |
40 | 38 | ||
41 | g->gr.ctx_vars.dynamic = true; | 39 | g->gr.ctx_vars.dynamic = true; |
@@ -250,7 +248,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) | |||
250 | i, &l[i].value); | 248 | i, &l[i].value); |
251 | } | 249 | } |
252 | 250 | ||
253 | gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); | 251 | nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); |
254 | for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) { | 252 | for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) { |
255 | struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l; | 253 | struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l; |
256 | g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR", | 254 | g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR", |
@@ -259,7 +257,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) | |||
259 | i, &l[i].index); | 257 | i, &l[i].index); |
260 | g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE", | 258 | g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE", |
261 | i, &l[i].value); | 259 | i, &l[i].value); |
262 | gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, | 260 | nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, |
263 | "addr:0x%#08x index:0x%08x value:0x%08x", | 261 | "addr:0x%#08x index:0x%08x value:0x%08x", |
264 | l[i].addr, l[i].index, l[i].value); | 262 | l[i].addr, l[i].index, l[i].value); |
265 | } | 263 | } |
@@ -269,7 +267,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) | |||
269 | g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0, | 267 | g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0, |
270 | &g->gr.ctx_vars.regs_base_index); | 268 | &g->gr.ctx_vars.regs_base_index); |
271 | 269 | ||
272 | gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); | 270 | nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); |
273 | return 0; | 271 | return 0; |
274 | fail: | 272 | fail: |
275 | nvgpu_err(g, "failed querying grctx info from chiplib"); | 273 | nvgpu_err(g, "failed querying grctx info from chiplib"); |
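Each register list in the simulator path above is pulled over as three escape reads per entry: ADDR, INDEX, and VALUE at the same index. A mock of that query loop with esc_readl() reduced to a stub:

#include <stdint.h>
#include <stdio.h>

struct aiv { uint32_t addr, index, value; };    /* as in aiv_gk20a */

/* Stub for the chiplib escape read. */
static void esc_readl(const char *key, uint32_t i, uint32_t *out)
{
        *out = i;
        printf("query %s[%u]\n", key, i);
}

int main(void)
{
        struct aiv l[2];
        uint32_t i;

        for (i = 0; i < 2; i++) {
                esc_readl("GRCTX_REG_LIST_ETPC:ADDR", i, &l[i].addr);
                esc_readl("GRCTX_REG_LIST_ETPC:INDEX", i, &l[i].index);
                esc_readl("GRCTX_REG_LIST_ETPC:VALUE", i, &l[i].value);
        }
        printf("addr:0x%08x index:0x%08x value:0x%08x\n",
               l[1].addr, l[1].index, l[1].value);
        return 0;
}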
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 86111321..00f26650 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c | |||
@@ -122,7 +122,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g, | |||
122 | 122 | ||
123 | *ctx_id = nvgpu_mem_rd(g, mem, | 123 | *ctx_id = nvgpu_mem_rd(g, mem, |
124 | ctxsw_prog_main_image_context_id_o()); | 124 | ctxsw_prog_main_image_context_id_o()); |
125 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); | 125 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); |
126 | 126 | ||
127 | nvgpu_mem_end(g, mem); | 127 | nvgpu_mem_end(g, mem); |
128 | 128 | ||
@@ -220,7 +220,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g) | |||
220 | const u32 *ucode_u32_data; | 220 | const u32 *ucode_u32_data; |
221 | u32 checksum; | 221 | u32 checksum; |
222 | 222 | ||
223 | gk20a_dbg_fn(""); | 223 | nvgpu_log_fn(g, " "); |
224 | 224 | ||
225 | gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) | | 225 | gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) | |
226 | gr_gpccs_dmemc_blk_f(0) | | 226 | gr_gpccs_dmemc_blk_f(0) | |
@@ -245,7 +245,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g) | |||
245 | gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); | 245 | gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); |
246 | checksum += ucode_u32_data[i]; | 246 | checksum += ucode_u32_data[i]; |
247 | } | 247 | } |
248 | gk20a_dbg_fn("done"); | 248 | nvgpu_log_fn(g, "done"); |
249 | } | 249 | } |
250 | 250 | ||
251 | static void gr_gk20a_load_falcon_imem(struct gk20a *g) | 251 | static void gr_gk20a_load_falcon_imem(struct gk20a *g) |
@@ -255,7 +255,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g) | |||
255 | u32 tag, i, pad_start, pad_end; | 255 | u32 tag, i, pad_start, pad_end; |
256 | u32 checksum; | 256 | u32 checksum; |
257 | 257 | ||
258 | gk20a_dbg_fn(""); | 258 | nvgpu_log_fn(g, " "); |
259 | 259 | ||
260 | cfg = gk20a_readl(g, gr_fecs_cfg_r()); | 260 | cfg = gk20a_readl(g, gr_fecs_cfg_r()); |
261 | fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); | 261 | fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); |
@@ -343,7 +343,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, | |||
343 | bool ctx_status_invalid; | 343 | bool ctx_status_invalid; |
344 | struct nvgpu_timeout timeout; | 344 | struct nvgpu_timeout timeout; |
345 | 345 | ||
346 | gk20a_dbg_fn(""); | 346 | nvgpu_log_fn(g, " "); |
347 | 347 | ||
348 | gr_engine_id = gk20a_fifo_get_gr_engine_id(g); | 348 | gr_engine_id = gk20a_fifo_get_gr_engine_id(g); |
349 | 349 | ||
@@ -372,7 +372,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, | |||
372 | 372 | ||
373 | if (!gr_enabled || ctx_status_invalid | 373 | if (!gr_enabled || ctx_status_invalid |
374 | || (!gr_busy && !ctxsw_active)) { | 374 | || (!gr_busy && !ctxsw_active)) { |
375 | gk20a_dbg_fn("done"); | 375 | nvgpu_log_fn(g, "done"); |
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
@@ -398,7 +398,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, | |||
398 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) | 398 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) |
399 | return 0; | 399 | return 0; |
400 | 400 | ||
401 | gk20a_dbg_fn(""); | 401 | nvgpu_log_fn(g, " "); |
402 | 402 | ||
403 | nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); | 403 | nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); |
404 | 404 | ||
@@ -406,7 +406,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, | |||
406 | val = gk20a_readl(g, gr_status_r()); | 406 | val = gk20a_readl(g, gr_status_r()); |
407 | 407 | ||
408 | if (!gr_status_fe_method_lower_v(val)) { | 408 | if (!gr_status_fe_method_lower_v(val)) { |
409 | gk20a_dbg_fn("done"); | 409 | nvgpu_log_fn(g, "done"); |
410 | return 0; | 410 | return 0; |
411 | } | 411 | } |
412 | 412 | ||
@@ -430,7 +430,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, | |||
430 | u32 check = WAIT_UCODE_LOOP; | 430 | u32 check = WAIT_UCODE_LOOP; |
431 | u32 reg; | 431 | u32 reg; |
432 | 432 | ||
433 | gk20a_dbg_fn(""); | 433 | nvgpu_log_fn(g, " "); |
434 | 434 | ||
435 | if (sleepduringwait) | 435 | if (sleepduringwait) |
436 | delay = GR_IDLE_CHECK_DEFAULT; | 436 | delay = GR_IDLE_CHECK_DEFAULT; |
@@ -532,7 +532,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, | |||
532 | return -1; | 532 | return -1; |
533 | } | 533 | } |
534 | 534 | ||
535 | gk20a_dbg_fn("done"); | 535 | nvgpu_log_fn(g, "done"); |
536 | return 0; | 536 | return 0; |
537 | } | 537 | } |
538 | 538 | ||
@@ -618,7 +618,7 @@ int gr_gk20a_disable_ctxsw(struct gk20a *g) | |||
618 | { | 618 | { |
619 | int err = 0; | 619 | int err = 0; |
620 | 620 | ||
621 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 621 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); |
622 | 622 | ||
623 | nvgpu_mutex_acquire(&g->ctxsw_disable_lock); | 623 | nvgpu_mutex_acquire(&g->ctxsw_disable_lock); |
624 | g->ctxsw_disable_count++; | 624 | g->ctxsw_disable_count++; |
@@ -635,7 +635,7 @@ int gr_gk20a_enable_ctxsw(struct gk20a *g) | |||
635 | { | 635 | { |
636 | int err = 0; | 636 | int err = 0; |
637 | 637 | ||
638 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 638 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); |
639 | 639 | ||
640 | nvgpu_mutex_acquire(&g->ctxsw_disable_lock); | 640 | nvgpu_mutex_acquire(&g->ctxsw_disable_lock); |
641 | g->ctxsw_disable_count--; | 641 | g->ctxsw_disable_count--; |
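The disable/enable pair above keeps a nesting count under ctxsw_disable_lock. Assuming the count gates the actual hardware access, as the surrounding code suggests, the shape is a classic refcounted disable (locking omitted for brevity):

#include <stdio.h>

static int ctxsw_disable_count;

static void disable_ctxsw(void)
{
        if (++ctxsw_disable_count == 1)
                printf("hw: ctxsw stopped\n");  /* only the outermost caller */
}

static void enable_ctxsw(void)
{
        if (--ctxsw_disable_count == 0)
                printf("hw: ctxsw resumed\n");  /* only the matching enable */
}

int main(void)
{
        disable_ctxsw();
        disable_ctxsw();        /* nested: count only */
        enable_ctxsw();
        enable_ctxsw();         /* back to zero: hardware resumes */
        return 0;
}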
@@ -669,7 +669,7 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va) | |||
669 | u32 addr_lo; | 669 | u32 addr_lo; |
670 | u32 addr_hi; | 670 | u32 addr_hi; |
671 | 671 | ||
672 | gk20a_dbg_fn(""); | 672 | nvgpu_log_fn(c->g, " "); |
673 | 673 | ||
674 | addr_lo = u64_lo32(gpu_va) >> 12; | 674 | addr_lo = u64_lo32(gpu_va) >> 12; |
675 | addr_hi = u64_hi32(gpu_va); | 675 | addr_hi = u64_hi32(gpu_va); |
@@ -775,7 +775,7 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, | |||
775 | u32 data = fecs_current_ctx_data(g, &c->inst_block); | 775 | u32 data = fecs_current_ctx_data(g, &c->inst_block); |
776 | u32 ret; | 776 | u32 ret; |
777 | 777 | ||
778 | gk20a_dbg_info("bind channel %d inst ptr 0x%08x", | 778 | nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x", |
779 | c->chid, inst_base_ptr); | 779 | c->chid, inst_base_ptr); |
780 | 780 | ||
781 | ret = gr_gk20a_submit_fecs_method_op(g, | 781 | ret = gr_gk20a_submit_fecs_method_op(g, |
@@ -823,7 +823,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c) | |||
823 | struct nvgpu_mem *ctxheader = &ctx->mem; | 823 | struct nvgpu_mem *ctxheader = &ctx->mem; |
824 | int ret = 0; | 824 | int ret = 0; |
825 | 825 | ||
826 | gk20a_dbg_fn(""); | 826 | nvgpu_log_fn(g, " "); |
827 | 827 | ||
828 | tsg = tsg_gk20a_from_ch(c); | 828 | tsg = tsg_gk20a_from_ch(c); |
829 | if (!tsg) | 829 | if (!tsg) |
@@ -905,7 +905,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, | |||
905 | u64 addr; | 905 | u64 addr; |
906 | u32 size; | 906 | u32 size; |
907 | 907 | ||
908 | gk20a_dbg_fn(""); | 908 | nvgpu_log_fn(g, " "); |
909 | 909 | ||
910 | tsg = tsg_gk20a_from_ch(c); | 910 | tsg = tsg_gk20a_from_ch(c); |
911 | if (!tsg) | 911 | if (!tsg) |
@@ -931,7 +931,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, | |||
931 | if (size == g->ops.gr.pagepool_default_size(g)) | 931 | if (size == g->ops.gr.pagepool_default_size(g)) |
932 | size = gr_scc_pagepool_total_pages_hwmax_v(); | 932 | size = gr_scc_pagepool_total_pages_hwmax_v(); |
933 | 933 | ||
934 | gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d", | 934 | nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d", |
935 | addr, size); | 935 | addr, size); |
936 | 936 | ||
937 | g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch); | 937 | g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch); |
@@ -944,7 +944,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, | |||
944 | 944 | ||
945 | size = gr->bundle_cb_default_size; | 945 | size = gr->bundle_cb_default_size; |
946 | 946 | ||
947 | gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d", | 947 | nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %d", |
948 | addr, size); | 948 | addr, size); |
949 | 949 | ||
950 | g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch); | 950 | g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch); |
@@ -955,7 +955,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, | |||
955 | (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) << | 955 | (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) << |
956 | (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); | 956 | (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); |
957 | 957 | ||
958 | gk20a_dbg_info("attrib cb addr : 0x%016llx", addr); | 958 | nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr); |
959 | g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch); | 959 | g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch); |
960 | g->ops.gr.commit_global_cb_manager(g, c, patch); | 960 | g->ops.gr.commit_global_cb_manager(g, c, patch); |
961 | 961 | ||
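A worked example of the attribute-CB address packing above: a 64-bit VA is squeezed into one 32-bit word holding VA[39:12] by dropping the alignment bits and folding in the upper word. ALIGN_BITS stands in for gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v() and is assumed to be 12, matching the 39_12 field name:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_BITS 12   /* assumed: addr_39_12 implies 4 KB alignment */

static uint32_t u64_lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t u64_hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t va = 0x0000003FFF456000ULL;    /* 4 KB aligned example VA */
        uint32_t packed = (u64_lo32(va) >> ALIGN_BITS) |
                          (u64_hi32(va) << (32 - ALIGN_BITS));

        /* prints: va 0x0000003fff456000 -> packed 0x03fff456 (= VA[39:12]) */
        printf("va 0x%016llx -> packed 0x%08x\n",
               (unsigned long long)va, packed);
        return 0;
}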
@@ -976,7 +976,7 @@ int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c) | |||
976 | u32 pe_vaf; | 976 | u32 pe_vaf; |
977 | u32 pe_vsc_vpc; | 977 | u32 pe_vsc_vpc; |
978 | 978 | ||
979 | gk20a_dbg_fn(""); | 979 | nvgpu_log_fn(g, " "); |
980 | 980 | ||
981 | gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r()); | 981 | gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r()); |
982 | pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); | 982 | pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); |
@@ -1036,7 +1036,7 @@ int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr) | |||
1036 | if (!gr->map_tiles) | 1036 | if (!gr->map_tiles) |
1037 | return -1; | 1037 | return -1; |
1038 | 1038 | ||
1039 | gk20a_dbg_fn(""); | 1039 | nvgpu_log_fn(g, " "); |
1040 | 1040 | ||
1041 | gk20a_writel(g, gr_crstr_map_table_cfg_r(), | 1041 | gk20a_writel(g, gr_crstr_map_table_cfg_r(), |
1042 | gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) | | 1042 | gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) | |
@@ -1219,7 +1219,7 @@ int gr_gk20a_init_fs_state(struct gk20a *g) | |||
1219 | u32 reg_index; | 1219 | u32 reg_index; |
1220 | int err; | 1220 | int err; |
1221 | 1221 | ||
1222 | gk20a_dbg_fn(""); | 1222 | nvgpu_log_fn(g, " "); |
1223 | 1223 | ||
1224 | if (g->ops.gr.init_sm_id_table) { | 1224 | if (g->ops.gr.init_sm_id_table) { |
1225 | err = g->ops.gr.init_sm_id_table(g); | 1225 | err = g->ops.gr.init_sm_id_table(g); |
@@ -1302,7 +1302,7 @@ int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type) | |||
1302 | struct gk20a *g = c->g; | 1302 | struct gk20a *g = c->g; |
1303 | int ret; | 1303 | int ret; |
1304 | 1304 | ||
1305 | gk20a_dbg_fn(""); | 1305 | nvgpu_log_fn(g, " "); |
1306 | 1306 | ||
1307 | ret = gr_gk20a_submit_fecs_method_op(g, | 1307 | ret = gr_gk20a_submit_fecs_method_op(g, |
1308 | (struct fecs_method_op_gk20a) { | 1308 | (struct fecs_method_op_gk20a) { |
@@ -1411,7 +1411,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g, | |||
1411 | struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init; | 1411 | struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init; |
1412 | u32 last_method_data = 0; | 1412 | u32 last_method_data = 0; |
1413 | 1413 | ||
1414 | gk20a_dbg_fn(""); | 1414 | nvgpu_log_fn(g, " "); |
1415 | 1415 | ||
1416 | tsg = tsg_gk20a_from_ch(c); | 1416 | tsg = tsg_gk20a_from_ch(c); |
1417 | if (!tsg) | 1417 | if (!tsg) |
@@ -1647,7 +1647,7 @@ clean_up: | |||
1647 | if (err) | 1647 | if (err) |
1648 | nvgpu_err(g, "fail"); | 1648 | nvgpu_err(g, "fail"); |
1649 | else | 1649 | else |
1650 | gk20a_dbg_fn("done"); | 1650 | nvgpu_log_fn(g, "done"); |
1651 | 1651 | ||
1652 | nvgpu_mem_end(g, gold_mem); | 1652 | nvgpu_mem_end(g, gold_mem); |
1653 | nvgpu_mem_end(g, gr_mem); | 1653 | nvgpu_mem_end(g, gr_mem); |
@@ -1666,7 +1666,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, | |||
1666 | u32 data; | 1666 | u32 data; |
1667 | int ret; | 1667 | int ret; |
1668 | 1668 | ||
1669 | gk20a_dbg_fn(""); | 1669 | nvgpu_log_fn(g, " "); |
1670 | 1670 | ||
1671 | tsg = tsg_gk20a_from_ch(c); | 1671 | tsg = tsg_gk20a_from_ch(c); |
1672 | if (!tsg) | 1672 | if (!tsg) |
@@ -1732,7 +1732,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, | |||
1732 | struct nvgpu_mem *ctxheader = &ctx->mem; | 1732 | struct nvgpu_mem *ctxheader = &ctx->mem; |
1733 | int ret; | 1733 | int ret; |
1734 | 1734 | ||
1735 | gk20a_dbg_fn(""); | 1735 | nvgpu_log_fn(g, " "); |
1736 | 1736 | ||
1737 | tsg = tsg_gk20a_from_ch(c); | 1737 | tsg = tsg_gk20a_from_ch(c); |
1738 | if (!tsg) | 1738 | if (!tsg) |
@@ -1884,7 +1884,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g, | |||
1884 | int ret = 0; | 1884 | int ret = 0; |
1885 | struct nvgpu_mem *mem; | 1885 | struct nvgpu_mem *mem; |
1886 | 1886 | ||
1887 | gk20a_dbg_fn(""); | 1887 | nvgpu_log_fn(g, " "); |
1888 | 1888 | ||
1889 | tsg = tsg_gk20a_from_ch(c); | 1889 | tsg = tsg_gk20a_from_ch(c); |
1890 | if (!tsg) | 1890 | if (!tsg) |
@@ -1991,7 +1991,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g, | |||
1991 | 1991 | ||
1992 | static void gr_gk20a_start_falcon_ucode(struct gk20a *g) | 1992 | static void gr_gk20a_start_falcon_ucode(struct gk20a *g) |
1993 | { | 1993 | { |
1994 | gk20a_dbg_fn(""); | 1994 | nvgpu_log_fn(g, " "); |
1995 | 1995 | ||
1996 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), | 1996 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), |
1997 | gr_fecs_ctxsw_mailbox_clear_value_f(~0)); | 1997 | gr_fecs_ctxsw_mailbox_clear_value_f(~0)); |
@@ -2002,7 +2002,7 @@ static void gr_gk20a_start_falcon_ucode(struct gk20a *g) | |||
2002 | gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1)); | 2002 | gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1)); |
2003 | gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1)); | 2003 | gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1)); |
2004 | 2004 | ||
2005 | gk20a_dbg_fn("done"); | 2005 | nvgpu_log_fn(g, "done"); |
2006 | } | 2006 | } |
2007 | 2007 | ||
2008 | static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) | 2008 | static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) |
@@ -2392,7 +2392,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) | |||
2392 | { | 2392 | { |
2393 | int err; | 2393 | int err; |
2394 | 2394 | ||
2395 | gk20a_dbg_fn(""); | 2395 | nvgpu_log_fn(g, " "); |
2396 | 2396 | ||
2397 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { | 2397 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { |
2398 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), | 2398 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), |
@@ -2419,7 +2419,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) | |||
2419 | gr_gk20a_load_falcon_with_bootloader(g); | 2419 | gr_gk20a_load_falcon_with_bootloader(g); |
2420 | g->gr.skip_ucode_init = true; | 2420 | g->gr.skip_ucode_init = true; |
2421 | } | 2421 | } |
2422 | gk20a_dbg_fn("done"); | 2422 | nvgpu_log_fn(g, "done"); |
2423 | return 0; | 2423 | return 0; |
2424 | } | 2424 | } |
2425 | 2425 | ||
@@ -2427,7 +2427,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) | |||
2427 | { | 2427 | { |
2428 | u32 ret; | 2428 | u32 ret; |
2429 | 2429 | ||
2430 | gk20a_dbg_fn(""); | 2430 | nvgpu_log_fn(g, " "); |
2431 | 2431 | ||
2432 | ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL, | 2432 | ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL, |
2433 | GR_IS_UCODE_OP_EQUAL, | 2433 | GR_IS_UCODE_OP_EQUAL, |
@@ -2448,7 +2448,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) | |||
2448 | gk20a_writel(g, gr_fecs_method_push_r(), | 2448 | gk20a_writel(g, gr_fecs_method_push_r(), |
2449 | gr_fecs_method_push_adr_set_watchdog_timeout_f()); | 2449 | gr_fecs_method_push_adr_set_watchdog_timeout_f()); |
2450 | 2450 | ||
2451 | gk20a_dbg_fn("done"); | 2451 | nvgpu_log_fn(g, "done"); |
2452 | return 0; | 2452 | return 0; |
2453 | } | 2453 | } |
2454 | 2454 | ||
@@ -2463,7 +2463,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) | |||
2463 | .cond.fail = GR_IS_UCODE_OP_SKIP, | 2463 | .cond.fail = GR_IS_UCODE_OP_SKIP, |
2464 | }; | 2464 | }; |
2465 | 2465 | ||
2466 | gk20a_dbg_fn(""); | 2466 | nvgpu_log_fn(g, " "); |
2467 | /* query ctxsw image sizes, if golden context is not created */ | 2467 | /* query ctxsw image sizes, if golden context is not created */ |
2468 | if (!g->gr.ctx_vars.golden_image_initialized) { | 2468 | if (!g->gr.ctx_vars.golden_image_initialized) { |
2469 | op.method.addr = | 2469 | op.method.addr = |
@@ -2496,7 +2496,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) | |||
2496 | g->gr.ctx_vars.priv_access_map_size = 512 * 1024; | 2496 | g->gr.ctx_vars.priv_access_map_size = 512 * 1024; |
2497 | } | 2497 | } |
2498 | 2498 | ||
2499 | gk20a_dbg_fn("done"); | 2499 | nvgpu_log_fn(g, "done"); |
2500 | return 0; | 2500 | return 0; |
2501 | } | 2501 | } |
2502 | 2502 | ||
@@ -2543,7 +2543,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g) | |||
2543 | } | 2543 | } |
2544 | } | 2544 | } |
2545 | 2545 | ||
2546 | gk20a_dbg_fn("done"); | 2546 | nvgpu_log_fn(g, "done"); |
2547 | } | 2547 | } |
2548 | 2548 | ||
2549 | static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) | 2549 | static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) |
@@ -2557,11 +2557,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) | |||
2557 | u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * | 2557 | u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * |
2558 | gr_scc_pagepool_total_pages_byte_granularity_v(); | 2558 | gr_scc_pagepool_total_pages_byte_granularity_v(); |
2559 | 2559 | ||
2560 | gk20a_dbg_fn(""); | 2560 | nvgpu_log_fn(g, " "); |
2561 | 2561 | ||
2562 | attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); | 2562 | attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); |
2563 | 2563 | ||
2564 | gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); | 2564 | nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size); |
2565 | 2565 | ||
2566 | err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR], | 2566 | err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR], |
2567 | cb_buffer_size); | 2567 | cb_buffer_size); |
@@ -2576,7 +2576,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) | |||
2576 | goto clean_up; | 2576 | goto clean_up; |
2577 | } | 2577 | } |
2578 | 2578 | ||
2579 | gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); | 2579 | nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size); |
2580 | 2580 | ||
2581 | err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL], | 2581 | err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL], |
2582 | pagepool_buffer_size); | 2582 | pagepool_buffer_size); |
@@ -2591,7 +2591,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) | |||
2591 | goto clean_up; | 2591 | goto clean_up; |
2592 | } | 2592 | } |
2593 | 2593 | ||
2594 | gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); | 2594 | nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size); |
2595 | 2595 | ||
2596 | err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE], | 2596 | err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE], |
2597 | attr_buffer_size); | 2597 | attr_buffer_size); |
@@ -2606,7 +2606,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) | |||
2606 | goto clean_up; | 2606 | goto clean_up; |
2607 | } | 2607 | } |
2608 | 2608 | ||
2609 | gk20a_dbg_info("golden_image_size : %d", | 2609 | nvgpu_log_info(g, "golden_image_size : %d", |
2610 | gr->ctx_vars.golden_image_size); | 2610 | gr->ctx_vars.golden_image_size); |
2611 | 2611 | ||
2612 | err = gk20a_gr_alloc_ctx_buffer(g, | 2612 | err = gk20a_gr_alloc_ctx_buffer(g, |
@@ -2615,7 +2615,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) | |||
2615 | if (err) | 2615 | if (err) |
2616 | goto clean_up; | 2616 | goto clean_up; |
2617 | 2617 | ||
2618 | gk20a_dbg_info("priv_access_map_size : %d", | 2618 | nvgpu_log_info(g, "priv_access_map_size : %d", |
2619 | gr->ctx_vars.priv_access_map_size); | 2619 | gr->ctx_vars.priv_access_map_size); |
2620 | 2620 | ||
2621 | err = gk20a_gr_alloc_ctx_buffer(g, | 2621 | err = gk20a_gr_alloc_ctx_buffer(g, |
@@ -2625,7 +2625,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) | |||
2625 | if (err) | 2625 | if (err) |
2626 | goto clean_up; | 2626 | goto clean_up; |
2627 | 2627 | ||
2628 | gk20a_dbg_fn("done"); | 2628 | nvgpu_log_fn(g, "done"); |
2629 | return 0; | 2629 | return 0; |
2630 | 2630 | ||
2631 | clean_up: | 2631 | clean_up: |
@@ -2643,7 +2643,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct gk20a *g, | |||
2643 | int *g_bfr_index = gr_ctx->global_ctx_buffer_index; | 2643 | int *g_bfr_index = gr_ctx->global_ctx_buffer_index; |
2644 | u32 i; | 2644 | u32 i; |
2645 | 2645 | ||
2646 | gk20a_dbg_fn(""); | 2646 | nvgpu_log_fn(g, " "); |
2647 | 2647 | ||
2648 | for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { | 2648 | for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { |
2649 | if (g_bfr_index[i]) { | 2649 | if (g_bfr_index[i]) { |
@@ -2679,7 +2679,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, | |||
2679 | struct nvgpu_mem *mem; | 2679 | struct nvgpu_mem *mem; |
2680 | u64 gpu_va; | 2680 | u64 gpu_va; |
2681 | 2681 | ||
2682 | gk20a_dbg_fn(""); | 2682 | nvgpu_log_fn(g, " "); |
2683 | 2683 | ||
2684 | tsg = tsg_gk20a_from_ch(c); | 2684 | tsg = tsg_gk20a_from_ch(c); |
2685 | if (!tsg) | 2685 | if (!tsg) |
@@ -2780,7 +2780,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g, | |||
2780 | struct gr_gk20a *gr = &g->gr; | 2780 | struct gr_gk20a *gr = &g->gr; |
2781 | int err = 0; | 2781 | int err = 0; |
2782 | 2782 | ||
2783 | gk20a_dbg_fn(""); | 2783 | nvgpu_log_fn(g, " "); |
2784 | 2784 | ||
2785 | if (gr->ctx_vars.buffer_size == 0) | 2785 | if (gr->ctx_vars.buffer_size == 0) |
2786 | return 0; | 2786 | return 0; |
@@ -2835,7 +2835,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g, | |||
2835 | void gr_gk20a_free_gr_ctx(struct gk20a *g, | 2835 | void gr_gk20a_free_gr_ctx(struct gk20a *g, |
2836 | struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) | 2836 | struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) |
2837 | { | 2837 | { |
2838 | gk20a_dbg_fn(""); | 2838 | nvgpu_log_fn(g, " "); |
2839 | 2839 | ||
2840 | if (gr_ctx->mem.gpu_va) { | 2840 | if (gr_ctx->mem.gpu_va) { |
2841 | gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx); | 2841 | gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx); |
@@ -2881,7 +2881,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, | |||
2881 | u32 alloc_size; | 2881 | u32 alloc_size; |
2882 | int err = 0; | 2882 | int err = 0; |
2883 | 2883 | ||
2884 | gk20a_dbg_fn(""); | 2884 | nvgpu_log_fn(g, " "); |
2885 | 2885 | ||
2886 | tsg = tsg_gk20a_from_ch(c); | 2886 | tsg = tsg_gk20a_from_ch(c); |
2887 | if (!tsg) | 2887 | if (!tsg) |
@@ -2899,7 +2899,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, | |||
2899 | if (err) | 2899 | if (err) |
2900 | return err; | 2900 | return err; |
2901 | 2901 | ||
2902 | gk20a_dbg_fn("done"); | 2902 | nvgpu_log_fn(g, "done"); |
2903 | return 0; | 2903 | return 0; |
2904 | } | 2904 | } |
2905 | 2905 | ||
@@ -2909,7 +2909,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g, | |||
2909 | { | 2909 | { |
2910 | struct patch_desc *patch_ctx = &gr_ctx->patch_ctx; | 2910 | struct patch_desc *patch_ctx = &gr_ctx->patch_ctx; |
2911 | 2911 | ||
2912 | gk20a_dbg_fn(""); | 2912 | nvgpu_log_fn(g, " "); |
2913 | 2913 | ||
2914 | if (patch_ctx->mem.gpu_va) | 2914 | if (patch_ctx->mem.gpu_va) |
2915 | nvgpu_gmmu_unmap(vm, &patch_ctx->mem, | 2915 | nvgpu_gmmu_unmap(vm, &patch_ctx->mem, |
@@ -2925,7 +2925,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g, | |||
2925 | { | 2925 | { |
2926 | struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx; | 2926 | struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx; |
2927 | 2927 | ||
2928 | gk20a_dbg_fn(""); | 2928 | nvgpu_log_fn(g, " "); |
2929 | 2929 | ||
2930 | if (pm_ctx->mem.gpu_va) { | 2930 | if (pm_ctx->mem.gpu_va) { |
2931 | nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va); | 2931 | nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va); |
@@ -2942,7 +2942,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) | |||
2942 | struct tsg_gk20a *tsg = NULL; | 2942 | struct tsg_gk20a *tsg = NULL; |
2943 | int err = 0; | 2943 | int err = 0; |
2944 | 2944 | ||
2945 | gk20a_dbg_fn(""); | 2945 | nvgpu_log_fn(g, " "); |
2946 | 2946 | ||
2947 | /* an address space needs to have been bound at this point. */ | 2947 | /* an address space needs to have been bound at this point. */ |
2948 | if (!gk20a_channel_as_bound(c) && !c->vm) { | 2948 | if (!gk20a_channel_as_bound(c) && !c->vm) { |
@@ -3047,7 +3047,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) | |||
3047 | } | 3047 | } |
3048 | } | 3048 | } |
3049 | 3049 | ||
3050 | gk20a_dbg_fn("done"); | 3050 | nvgpu_log_fn(g, "done"); |
3051 | return 0; | 3051 | return 0; |
3052 | out: | 3052 | out: |
3053 | /* 1. gr_ctx, patch_ctx and global ctx buffer mapping | 3053 | /* 1. gr_ctx, patch_ctx and global ctx buffer mapping |
@@ -3062,7 +3062,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr) | |||
3062 | { | 3062 | { |
3063 | struct gk20a *g = gr->g; | 3063 | struct gk20a *g = gr->g; |
3064 | 3064 | ||
3065 | gk20a_dbg_fn(""); | 3065 | nvgpu_log_fn(g, " "); |
3066 | 3066 | ||
3067 | gr_gk20a_free_cyclestats_snapshot_data(g); | 3067 | gr_gk20a_free_cyclestats_snapshot_data(g); |
3068 | 3068 | ||
@@ -3322,35 +3322,35 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) | |||
3322 | sm_per_tpc * sizeof(struct sm_info)); | 3322 | sm_per_tpc * sizeof(struct sm_info)); |
3323 | gr->no_of_sm = 0; | 3323 | gr->no_of_sm = 0; |
3324 | 3324 | ||
3325 | gk20a_dbg_info("fbps: %d", gr->num_fbps); | 3325 | nvgpu_log_info(g, "fbps: %d", gr->num_fbps); |
3326 | gk20a_dbg_info("max_gpc_count: %d", gr->max_gpc_count); | 3326 | nvgpu_log_info(g, "max_gpc_count: %d", gr->max_gpc_count); |
3327 | gk20a_dbg_info("max_fbps_count: %d", gr->max_fbps_count); | 3327 | nvgpu_log_info(g, "max_fbps_count: %d", gr->max_fbps_count); |
3328 | gk20a_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); | 3328 | nvgpu_log_info(g, "max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); |
3329 | gk20a_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); | 3329 | nvgpu_log_info(g, "max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); |
3330 | gk20a_dbg_info("max_tpc_count: %d", gr->max_tpc_count); | 3330 | nvgpu_log_info(g, "max_tpc_count: %d", gr->max_tpc_count); |
3331 | gk20a_dbg_info("sys_count: %d", gr->sys_count); | 3331 | nvgpu_log_info(g, "sys_count: %d", gr->sys_count); |
3332 | gk20a_dbg_info("gpc_count: %d", gr->gpc_count); | 3332 | nvgpu_log_info(g, "gpc_count: %d", gr->gpc_count); |
3333 | gk20a_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc); | 3333 | nvgpu_log_info(g, "pe_count_per_gpc: %d", gr->pe_count_per_gpc); |
3334 | gk20a_dbg_info("tpc_count: %d", gr->tpc_count); | 3334 | nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count); |
3335 | gk20a_dbg_info("ppc_count: %d", gr->ppc_count); | 3335 | nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count); |
3336 | 3336 | ||
3337 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) | 3337 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) |
3338 | gk20a_dbg_info("gpc_tpc_count[%d] : %d", | 3338 | nvgpu_log_info(g, "gpc_tpc_count[%d] : %d", |
3339 | gpc_index, gr->gpc_tpc_count[gpc_index]); | 3339 | gpc_index, gr->gpc_tpc_count[gpc_index]); |
3340 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) | 3340 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) |
3341 | gk20a_dbg_info("gpc_zcb_count[%d] : %d", | 3341 | nvgpu_log_info(g, "gpc_zcb_count[%d] : %d", |
3342 | gpc_index, gr->gpc_zcb_count[gpc_index]); | 3342 | gpc_index, gr->gpc_zcb_count[gpc_index]); |
3343 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) | 3343 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) |
3344 | gk20a_dbg_info("gpc_ppc_count[%d] : %d", | 3344 | nvgpu_log_info(g, "gpc_ppc_count[%d] : %d", |
3345 | gpc_index, gr->gpc_ppc_count[gpc_index]); | 3345 | gpc_index, gr->gpc_ppc_count[gpc_index]); |
3346 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) | 3346 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) |
3347 | gk20a_dbg_info("gpc_skip_mask[%d] : %d", | 3347 | nvgpu_log_info(g, "gpc_skip_mask[%d] : %d", |
3348 | gpc_index, gr->gpc_skip_mask[gpc_index]); | 3348 | gpc_index, gr->gpc_skip_mask[gpc_index]); |
3349 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) | 3349 | for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) |
3350 | for (pes_index = 0; | 3350 | for (pes_index = 0; |
3351 | pes_index < gr->pe_count_per_gpc; | 3351 | pes_index < gr->pe_count_per_gpc; |
3352 | pes_index++) | 3352 | pes_index++) |
3353 | gk20a_dbg_info("pes_tpc_count[%d][%d] : %d", | 3353 | nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d", |
3354 | pes_index, gpc_index, | 3354 | pes_index, gpc_index, |
3355 | gr->pes_tpc_count[pes_index][gpc_index]); | 3355 | gr->pes_tpc_count[pes_index][gpc_index]); |
3356 | 3356 | ||
@@ -3358,7 +3358,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) | |||
3358 | for (pes_index = 0; | 3358 | for (pes_index = 0; |
3359 | pes_index < gr->pe_count_per_gpc; | 3359 | pes_index < gr->pe_count_per_gpc; |
3360 | pes_index++) | 3360 | pes_index++) |
3361 | gk20a_dbg_info("pes_tpc_mask[%d][%d] : %d", | 3361 | nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d", |
3362 | pes_index, gpc_index, | 3362 | pes_index, gpc_index, |
3363 | gr->pes_tpc_mask[pes_index][gpc_index]); | 3363 | gr->pes_tpc_mask[pes_index][gpc_index]); |
3364 | 3364 | ||
@@ -3367,16 +3367,16 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) | |||
3367 | g->ops.gr.calc_global_ctx_buffer_size(g); | 3367 | g->ops.gr.calc_global_ctx_buffer_size(g); |
3368 | gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(); | 3368 | gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(); |
3369 | 3369 | ||
3370 | gk20a_dbg_info("bundle_cb_default_size: %d", | 3370 | nvgpu_log_info(g, "bundle_cb_default_size: %d", |
3371 | gr->bundle_cb_default_size); | 3371 | gr->bundle_cb_default_size); |
3372 | gk20a_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); | 3372 | nvgpu_log_info(g, "min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); |
3373 | gk20a_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); | 3373 | nvgpu_log_info(g, "bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); |
3374 | gk20a_dbg_info("attrib_cb_default_size: %d", | 3374 | nvgpu_log_info(g, "attrib_cb_default_size: %d", |
3375 | gr->attrib_cb_default_size); | 3375 | gr->attrib_cb_default_size); |
3376 | gk20a_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size); | 3376 | nvgpu_log_info(g, "attrib_cb_size: %d", gr->attrib_cb_size); |
3377 | gk20a_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size); | 3377 | nvgpu_log_info(g, "alpha_cb_default_size: %d", gr->alpha_cb_default_size); |
3378 | gk20a_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size); | 3378 | nvgpu_log_info(g, "alpha_cb_size: %d", gr->alpha_cb_size); |
3379 | gk20a_dbg_info("timeslice_mode: %d", gr->timeslice_mode); | 3379 | nvgpu_log_info(g, "timeslice_mode: %d", gr->timeslice_mode); |
3380 | 3380 | ||
3381 | return 0; | 3381 | return 0; |
3382 | 3382 | ||
@@ -3582,7 +3582,7 @@ clean_up: | |||
3582 | if (ret) | 3582 | if (ret) |
3583 | nvgpu_err(g, "fail"); | 3583 | nvgpu_err(g, "fail"); |
3584 | else | 3584 | else |
3585 | gk20a_dbg_fn("done"); | 3585 | nvgpu_log_fn(g, "done"); |
3586 | 3586 | ||
3587 | return ret; | 3587 | return ret; |
3588 | } | 3588 | } |
@@ -4094,7 +4094,7 @@ clean_up: | |||
4094 | int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, | 4094 | int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, |
4095 | struct zbc_entry *zbc_val) | 4095 | struct zbc_entry *zbc_val) |
4096 | { | 4096 | { |
4097 | gk20a_dbg_fn(""); | 4097 | nvgpu_log_fn(g, " "); |
4098 | 4098 | ||
4099 | return gr_gk20a_elpg_protected_call(g, | 4099 | return gr_gk20a_elpg_protected_call(g, |
4100 | gr_gk20a_add_zbc(g, gr, zbc_val)); | 4100 | gr_gk20a_add_zbc(g, gr, zbc_val)); |
@@ -4197,10 +4197,10 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, | |||
4197 | { | 4197 | { |
4198 | u32 val; | 4198 | u32 val; |
4199 | 4199 | ||
4200 | gk20a_dbg_fn(""); | 4200 | nvgpu_log_fn(g, " "); |
4201 | 4201 | ||
4202 | if (zcull_num_entries >= 8) { | 4202 | if (zcull_num_entries >= 8) { |
4203 | gk20a_dbg_fn("map0"); | 4203 | nvgpu_log_fn(g, "map0"); |
4204 | val = | 4204 | val = |
4205 | gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f( | 4205 | gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f( |
4206 | zcull_map_tiles[0]) | | 4206 | zcull_map_tiles[0]) | |
@@ -4223,7 +4223,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, | |||
4223 | } | 4223 | } |
4224 | 4224 | ||
4225 | if (zcull_num_entries >= 16) { | 4225 | if (zcull_num_entries >= 16) { |
4226 | gk20a_dbg_fn("map1"); | 4226 | nvgpu_log_fn(g, "map1"); |
4227 | val = | 4227 | val = |
4228 | gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f( | 4228 | gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f( |
4229 | zcull_map_tiles[8]) | | 4229 | zcull_map_tiles[8]) | |
@@ -4246,7 +4246,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, | |||
4246 | } | 4246 | } |
4247 | 4247 | ||
4248 | if (zcull_num_entries >= 24) { | 4248 | if (zcull_num_entries >= 24) { |
4249 | gk20a_dbg_fn("map2"); | 4249 | nvgpu_log_fn(g, "map2"); |
4250 | val = | 4250 | val = |
4251 | gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f( | 4251 | gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f( |
4252 | zcull_map_tiles[16]) | | 4252 | zcull_map_tiles[16]) | |
@@ -4269,7 +4269,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, | |||
4269 | } | 4269 | } |
4270 | 4270 | ||
4271 | if (zcull_num_entries >= 32) { | 4271 | if (zcull_num_entries >= 32) { |
4272 | gk20a_dbg_fn("map3"); | 4272 | nvgpu_log_fn(g, "map3"); |
4273 | val = | 4273 | val = |
4274 | gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f( | 4274 | gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f( |
4275 | zcull_map_tiles[24]) | | 4275 | zcull_map_tiles[24]) | |
@@ -4452,7 +4452,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g) | |||
4452 | u32 last_method_data = 0; | 4452 | u32 last_method_data = 0; |
4453 | u32 i, err; | 4453 | u32 i, err; |
4454 | 4454 | ||
4455 | gk20a_dbg_fn(""); | 4455 | nvgpu_log_fn(g, " "); |
4456 | 4456 | ||
4457 | /* init mmu debug buffer */ | 4457 | /* init mmu debug buffer */ |
4458 | addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem); | 4458 | addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem); |
@@ -4613,13 +4613,13 @@ restore_fe_go_idle: | |||
4613 | } | 4613 | } |
4614 | 4614 | ||
4615 | out: | 4615 | out: |
4616 | gk20a_dbg_fn("done"); | 4616 | nvgpu_log_fn(g, "done"); |
4617 | return err; | 4617 | return err; |
4618 | } | 4618 | } |
4619 | 4619 | ||
4620 | static void gr_gk20a_load_gating_prod(struct gk20a *g) | 4620 | static void gr_gk20a_load_gating_prod(struct gk20a *g) |
4621 | { | 4621 | { |
4622 | gk20a_dbg_fn(""); | 4622 | nvgpu_log_fn(g, " "); |
4623 | 4623 | ||
4624 | /* slcg prod values */ | 4624 | /* slcg prod values */ |
4625 | if (g->ops.clock_gating.slcg_bus_load_gating_prod) | 4625 | if (g->ops.clock_gating.slcg_bus_load_gating_prod) |
@@ -4657,7 +4657,7 @@ static void gr_gk20a_load_gating_prod(struct gk20a *g) | |||
4657 | if (g->ops.clock_gating.pg_gr_load_gating_prod) | 4657 | if (g->ops.clock_gating.pg_gr_load_gating_prod) |
4658 | g->ops.clock_gating.pg_gr_load_gating_prod(g, true); | 4658 | g->ops.clock_gating.pg_gr_load_gating_prod(g, true); |
4659 | 4659 | ||
4660 | gk20a_dbg_fn("done"); | 4660 | nvgpu_log_fn(g, "done"); |
4661 | } | 4661 | } |
4662 | 4662 | ||
4663 | static int gk20a_init_gr_prepare(struct gk20a *g) | 4663 | static int gk20a_init_gr_prepare(struct gk20a *g) |
@@ -4703,7 +4703,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) | |||
4703 | bool fecs_scrubbing; | 4703 | bool fecs_scrubbing; |
4704 | bool gpccs_scrubbing; | 4704 | bool gpccs_scrubbing; |
4705 | 4705 | ||
4706 | gk20a_dbg_fn(""); | 4706 | nvgpu_log_fn(g, " "); |
4707 | 4707 | ||
4708 | nvgpu_timeout_init(g, &timeout, | 4708 | nvgpu_timeout_init(g, &timeout, |
4709 | CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / | 4709 | CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / |
@@ -4719,7 +4719,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) | |||
4719 | gr_gpccs_dmactl_imem_scrubbing_m()); | 4719 | gr_gpccs_dmactl_imem_scrubbing_m()); |
4720 | 4720 | ||
4721 | if (!fecs_scrubbing && !gpccs_scrubbing) { | 4721 | if (!fecs_scrubbing && !gpccs_scrubbing) { |
4722 | gk20a_dbg_fn("done"); | 4722 | nvgpu_log_fn(g, "done"); |
4723 | return 0; | 4723 | return 0; |
4724 | } | 4724 | } |
4725 | 4725 | ||
@@ -4746,7 +4746,7 @@ out: | |||
4746 | if (err) | 4746 | if (err) |
4747 | nvgpu_err(g, "fail"); | 4747 | nvgpu_err(g, "fail"); |
4748 | else | 4748 | else |
4749 | gk20a_dbg_fn("done"); | 4749 | nvgpu_log_fn(g, "done"); |
4750 | 4750 | ||
4751 | return err; | 4751 | return err; |
4752 | } | 4752 | } |
@@ -4756,7 +4756,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g) | |||
4756 | struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; | 4756 | struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; |
4757 | u32 i, err = 0; | 4757 | u32 i, err = 0; |
4758 | 4758 | ||
4759 | gk20a_dbg_fn(""); | 4759 | nvgpu_log_fn(g, " "); |
4760 | 4760 | ||
4761 | /* enable interrupts */ | 4761 | /* enable interrupts */ |
4762 | gk20a_writel(g, gr_intr_r(), ~0); | 4762 | gk20a_writel(g, gr_intr_r(), ~0); |
@@ -4780,7 +4780,7 @@ out: | |||
4780 | if (err) | 4780 | if (err) |
4781 | nvgpu_err(g, "fail"); | 4781 | nvgpu_err(g, "fail"); |
4782 | else | 4782 | else |
4783 | gk20a_dbg_fn("done"); | 4783 | nvgpu_log_fn(g, "done"); |
4784 | 4784 | ||
4785 | return 0; | 4785 | return 0; |
4786 | } | 4786 | } |
@@ -4810,7 +4810,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g) | |||
4810 | map_bit = whitelist[w] >> 2; | 4810 | map_bit = whitelist[w] >> 2; |
4811 | map_byte = map_bit >> 3; | 4811 | map_byte = map_bit >> 3; |
4812 | map_shift = map_bit & 0x7; /* i.e. 0-7 */ | 4812 | map_shift = map_bit & 0x7; /* i.e. 0-7 */ |
4813 | gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d", | 4813 | nvgpu_log_info(g, "access map addr:0x%x byte:0x%x bit:%d", |
4814 | whitelist[w], map_byte, map_shift); | 4814 | whitelist[w], map_byte, map_shift); |
4815 | x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32)); | 4815 | x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32)); |
4816 | x |= 1 << ( | 4816 | x |= 1 << ( |
@@ -4828,10 +4828,10 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
4828 | struct gr_gk20a *gr = &g->gr; | 4828 | struct gr_gk20a *gr = &g->gr; |
4829 | int err; | 4829 | int err; |
4830 | 4830 | ||
4831 | gk20a_dbg_fn(""); | 4831 | nvgpu_log_fn(g, " "); |
4832 | 4832 | ||
4833 | if (gr->sw_ready) { | 4833 | if (gr->sw_ready) { |
4834 | gk20a_dbg_fn("skip init"); | 4834 | nvgpu_log_fn(g, "skip init"); |
4835 | return 0; | 4835 | return 0; |
4836 | } | 4836 | } |
4837 | 4837 | ||
@@ -4888,7 +4888,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
4888 | if (g->ops.gr.create_gr_sysfs) | 4888 | if (g->ops.gr.create_gr_sysfs) |
4889 | g->ops.gr.create_gr_sysfs(g); | 4889 | g->ops.gr.create_gr_sysfs(g); |
4890 | 4890 | ||
4891 | gk20a_dbg_fn("done"); | 4891 | nvgpu_log_fn(g, "done"); |
4892 | return 0; | 4892 | return 0; |
4893 | 4893 | ||
4894 | clean_up: | 4894 | clean_up: |
@@ -4906,7 +4906,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) | |||
4906 | 4906 | ||
4907 | u32 size; | 4907 | u32 size; |
4908 | 4908 | ||
4909 | gk20a_dbg_fn(""); | 4909 | nvgpu_log_fn(g, " "); |
4910 | 4910 | ||
4911 | size = 0; | 4911 | size = 0; |
4912 | 4912 | ||
@@ -4947,7 +4947,7 @@ int gk20a_init_gr_support(struct gk20a *g) | |||
4947 | { | 4947 | { |
4948 | u32 err; | 4948 | u32 err; |
4949 | 4949 | ||
4950 | gk20a_dbg_fn(""); | 4950 | nvgpu_log_fn(g, " "); |
4951 | 4951 | ||
4952 | /* this is required before gr_gk20a_init_ctx_state */ | 4952 | /* this is required before gr_gk20a_init_ctx_state */ |
4953 | nvgpu_mutex_init(&g->gr.fecs_mutex); | 4953 | nvgpu_mutex_init(&g->gr.fecs_mutex); |
@@ -4999,7 +4999,7 @@ void gk20a_gr_wait_initialized(struct gk20a *g) | |||
4999 | 4999 | ||
5000 | void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data) | 5000 | void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data) |
5001 | { | 5001 | { |
5002 | gk20a_dbg_fn(""); | 5002 | nvgpu_log_fn(g, " "); |
5003 | 5003 | ||
5004 | if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { | 5004 | if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { |
5005 | gk20a_writel(g, | 5005 | gk20a_writel(g, |
@@ -5046,7 +5046,7 @@ int gk20a_enable_gr_hw(struct gk20a *g) | |||
5046 | { | 5046 | { |
5047 | int err; | 5047 | int err; |
5048 | 5048 | ||
5049 | gk20a_dbg_fn(""); | 5049 | nvgpu_log_fn(g, " "); |
5050 | 5050 | ||
5051 | err = gk20a_init_gr_prepare(g); | 5051 | err = gk20a_init_gr_prepare(g); |
5052 | if (err) | 5052 | if (err) |
@@ -5056,7 +5056,7 @@ int gk20a_enable_gr_hw(struct gk20a *g) | |||
5056 | if (err) | 5056 | if (err) |
5057 | return err; | 5057 | return err; |
5058 | 5058 | ||
5059 | gk20a_dbg_fn("done"); | 5059 | nvgpu_log_fn(g, "done"); |
5060 | 5060 | ||
5061 | return 0; | 5061 | return 0; |
5062 | } | 5062 | } |
@@ -5163,7 +5163,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g, | |||
5163 | static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, | 5163 | static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, |
5164 | struct gr_gk20a_isr_data *isr_data) | 5164 | struct gr_gk20a_isr_data *isr_data) |
5165 | { | 5165 | { |
5166 | gk20a_dbg_fn(""); | 5166 | nvgpu_log_fn(g, " "); |
5167 | gk20a_gr_set_error_notifier(g, isr_data, | 5167 | gk20a_gr_set_error_notifier(g, isr_data, |
5168 | NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); | 5168 | NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); |
5169 | nvgpu_err(g, | 5169 | nvgpu_err(g, |
@@ -5174,7 +5174,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, | |||
5174 | static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, | 5174 | static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, |
5175 | struct gr_gk20a_isr_data *isr_data) | 5175 | struct gr_gk20a_isr_data *isr_data) |
5176 | { | 5176 | { |
5177 | gk20a_dbg_fn(""); | 5177 | nvgpu_log_fn(g, " "); |
5178 | gk20a_gr_set_error_notifier(g, isr_data, | 5178 | gk20a_gr_set_error_notifier(g, isr_data, |
5179 | NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); | 5179 | NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); |
5180 | /* This is an unrecoverable error, reset is needed */ | 5180 | /* This is an unrecoverable error, reset is needed */ |
@@ -5202,7 +5202,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g, | |||
5202 | static int gk20a_gr_handle_illegal_class(struct gk20a *g, | 5202 | static int gk20a_gr_handle_illegal_class(struct gk20a *g, |
5203 | struct gr_gk20a_isr_data *isr_data) | 5203 | struct gr_gk20a_isr_data *isr_data) |
5204 | { | 5204 | { |
5205 | gk20a_dbg_fn(""); | 5205 | nvgpu_log_fn(g, " "); |
5206 | gk20a_gr_set_error_notifier(g, isr_data, | 5206 | gk20a_gr_set_error_notifier(g, isr_data, |
5207 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); | 5207 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); |
5208 | nvgpu_err(g, | 5208 | nvgpu_err(g, |
@@ -5243,7 +5243,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g, | |||
5243 | { | 5243 | { |
5244 | u32 gr_class_error; | 5244 | u32 gr_class_error; |
5245 | 5245 | ||
5246 | gk20a_dbg_fn(""); | 5246 | nvgpu_log_fn(g, " "); |
5247 | 5247 | ||
5248 | gr_class_error = | 5248 | gr_class_error = |
5249 | gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); | 5249 | gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); |
@@ -5274,7 +5274,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g, | |||
5274 | static int gk20a_gr_handle_firmware_method(struct gk20a *g, | 5274 | static int gk20a_gr_handle_firmware_method(struct gk20a *g, |
5275 | struct gr_gk20a_isr_data *isr_data) | 5275 | struct gr_gk20a_isr_data *isr_data) |
5276 | { | 5276 | { |
5277 | gk20a_dbg_fn(""); | 5277 | nvgpu_log_fn(g, " "); |
5278 | 5278 | ||
5279 | gk20a_gr_set_error_notifier(g, isr_data, | 5279 | gk20a_gr_set_error_notifier(g, isr_data, |
5280 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); | 5280 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); |
@@ -5450,7 +5450,7 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g, | |||
5450 | } | 5450 | } |
5451 | nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); | 5451 | nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); |
5452 | #endif | 5452 | #endif |
5453 | gk20a_dbg_fn(""); | 5453 | nvgpu_log_fn(g, " "); |
5454 | nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); | 5454 | nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); |
5455 | return 0; | 5455 | return 0; |
5456 | } | 5456 | } |
@@ -5543,7 +5543,7 @@ int gk20a_gr_lock_down_sm(struct gk20a *g, | |||
5543 | u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); | 5543 | u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); |
5544 | u32 dbgr_control0; | 5544 | u32 dbgr_control0; |
5545 | 5545 | ||
5546 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5546 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5547 | "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); | 5547 | "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); |
5548 | 5548 | ||
5549 | /* assert stop trigger */ | 5549 | /* assert stop trigger */ |
@@ -5582,7 +5582,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
5582 | bool sm_debugger_attached; | 5582 | bool sm_debugger_attached; |
5583 | u32 global_esr, warp_esr, global_mask; | 5583 | u32 global_esr, warp_esr, global_mask; |
5584 | 5584 | ||
5585 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 5585 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); |
5586 | 5586 | ||
5587 | sm_debugger_attached = g->ops.gr.sm_debugger_attached(g); | 5587 | sm_debugger_attached = g->ops.gr.sm_debugger_attached(g); |
5588 | 5588 | ||
@@ -5597,7 +5597,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
5597 | return -EFAULT; | 5597 | return -EFAULT; |
5598 | } | 5598 | } |
5599 | 5599 | ||
5600 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5600 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5601 | "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr); | 5601 | "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr); |
5602 | 5602 | ||
5603 | gr_gk20a_elpg_protected_call(g, | 5603 | gr_gk20a_elpg_protected_call(g, |
@@ -5617,7 +5617,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
5617 | } | 5617 | } |
5618 | 5618 | ||
5619 | if (early_exit) { | 5619 | if (early_exit) { |
5620 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5620 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5621 | "returning early"); | 5621 | "returning early"); |
5622 | return ret; | 5622 | return ret; |
5623 | } | 5623 | } |
@@ -5640,13 +5640,13 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
5640 | gk20a_writel(g, | 5640 | gk20a_writel(g, |
5641 | gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset, | 5641 | gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset, |
5642 | tpc_exception_en); | 5642 | tpc_exception_en); |
5643 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); | 5643 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); |
5644 | } | 5644 | } |
5645 | 5645 | ||
5646 | /* if a debugger is present and an error has occurred, do a warp sync */ | 5646 | /* if a debugger is present and an error has occurred, do a warp sync */ |
5647 | if (!ignore_debugger && | 5647 | if (!ignore_debugger && |
5648 | ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) { | 5648 | ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) { |
5649 | gk20a_dbg(gpu_dbg_intr, "warp sync needed"); | 5649 | nvgpu_log(g, gpu_dbg_intr, "warp sync needed"); |
5650 | do_warp_sync = true; | 5650 | do_warp_sync = true; |
5651 | } | 5651 | } |
5652 | 5652 | ||
@@ -5660,7 +5660,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
5660 | } | 5660 | } |
5661 | 5661 | ||
5662 | if (ignore_debugger) | 5662 | if (ignore_debugger) |
5663 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5663 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5664 | "ignore_debugger set, skipping event posting"); | 5664 | "ignore_debugger set, skipping event posting"); |
5665 | else | 5665 | else |
5666 | *post_event |= true; | 5666 | *post_event |= true; |
@@ -5677,11 +5677,11 @@ int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
5677 | u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; | 5677 | u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; |
5678 | u32 esr; | 5678 | u32 esr; |
5679 | 5679 | ||
5680 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 5680 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); |
5681 | 5681 | ||
5682 | esr = gk20a_readl(g, | 5682 | esr = gk20a_readl(g, |
5683 | gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); | 5683 | gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); |
5684 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); | 5684 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); |
5685 | 5685 | ||
5686 | gk20a_writel(g, | 5686 | gk20a_writel(g, |
5687 | gr_gpc0_tpc0_tex_m_hww_esr_r() + offset, | 5687 | gr_gpc0_tpc0_tex_m_hww_esr_r() + offset, |
@@ -5706,7 +5706,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
5706 | + offset); | 5706 | + offset); |
5707 | u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); | 5707 | u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); |
5708 | 5708 | ||
5709 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5709 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5710 | "GPC%d TPC%d: pending exception 0x%x", | 5710 | "GPC%d TPC%d: pending exception 0x%x", |
5711 | gpc, tpc, tpc_exception); | 5711 | gpc, tpc, tpc_exception); |
5712 | 5712 | ||
@@ -5715,7 +5715,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
5715 | gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) { | 5715 | gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) { |
5716 | u32 esr_sm_sel, sm; | 5716 | u32 esr_sm_sel, sm; |
5717 | 5717 | ||
5718 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5718 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5719 | "GPC%d TPC%d: SM exception pending", gpc, tpc); | 5719 | "GPC%d TPC%d: SM exception pending", gpc, tpc); |
5720 | 5720 | ||
5721 | if (g->ops.gr.handle_tpc_sm_ecc_exception) | 5721 | if (g->ops.gr.handle_tpc_sm_ecc_exception) |
@@ -5729,7 +5729,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
5729 | if (!(esr_sm_sel & (1 << sm))) | 5729 | if (!(esr_sm_sel & (1 << sm))) |
5730 | continue; | 5730 | continue; |
5731 | 5731 | ||
5732 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5732 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5733 | "GPC%d TPC%d: SM%d exception pending", | 5733 | "GPC%d TPC%d: SM%d exception pending", |
5734 | gpc, tpc, sm); | 5734 | gpc, tpc, sm); |
5735 | 5735 | ||
@@ -5750,7 +5750,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
5750 | /* check if a tex exeption is pending */ | 5750 | /* check if a tex exeption is pending */ |
5751 | if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) == | 5751 | if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) == |
5752 | gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) { | 5752 | gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) { |
5753 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5753 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5754 | "GPC%d TPC%d: TEX exception pending", gpc, tpc); | 5754 | "GPC%d TPC%d: TEX exception pending", gpc, tpc); |
5755 | ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event); | 5755 | ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event); |
5756 | } | 5756 | } |
@@ -5771,13 +5771,13 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, | |||
5771 | u32 exception1 = gk20a_readl(g, gr_exception1_r()); | 5771 | u32 exception1 = gk20a_readl(g, gr_exception1_r()); |
5772 | u32 gpc_exception; | 5772 | u32 gpc_exception; |
5773 | 5773 | ||
5774 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, ""); | 5774 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, " "); |
5775 | 5775 | ||
5776 | for (gpc = 0; gpc < gr->gpc_count; gpc++) { | 5776 | for (gpc = 0; gpc < gr->gpc_count; gpc++) { |
5777 | if ((exception1 & (1 << gpc)) == 0) | 5777 | if ((exception1 & (1 << gpc)) == 0) |
5778 | continue; | 5778 | continue; |
5779 | 5779 | ||
5780 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5780 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5781 | "GPC%d exception pending", gpc); | 5781 | "GPC%d exception pending", gpc); |
5782 | 5782 | ||
5783 | gpc_offset = gk20a_gr_gpc_offset(g, gpc); | 5783 | gpc_offset = gk20a_gr_gpc_offset(g, gpc); |
@@ -5791,7 +5791,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, | |||
5791 | (1 << tpc)) == 0) | 5791 | (1 << tpc)) == 0) |
5792 | continue; | 5792 | continue; |
5793 | 5793 | ||
5794 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5794 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5795 | "GPC%d: TPC%d exception pending", gpc, tpc); | 5795 | "GPC%d: TPC%d exception pending", gpc, tpc); |
5796 | 5796 | ||
5797 | ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc, | 5797 | ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc, |
@@ -5860,8 +5860,8 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5860 | u32 gr_engine_id; | 5860 | u32 gr_engine_id; |
5861 | u32 global_esr = 0; | 5861 | u32 global_esr = 0; |
5862 | 5862 | ||
5863 | gk20a_dbg_fn(""); | 5863 | nvgpu_log_fn(g, " "); |
5864 | gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr); | 5864 | nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr); |
5865 | 5865 | ||
5866 | if (!gr_intr) | 5866 | if (!gr_intr) |
5867 | return 0; | 5867 | return 0; |
@@ -5896,7 +5896,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5896 | nvgpu_err(g, "ch id is INVALID 0xffffffff"); | 5896 | nvgpu_err(g, "ch id is INVALID 0xffffffff"); |
5897 | } | 5897 | } |
5898 | 5898 | ||
5899 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 5899 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
5900 | "channel %d: addr 0x%08x, " | 5900 | "channel %d: addr 0x%08x, " |
5901 | "data 0x%08x 0x%08x," | 5901 | "data 0x%08x 0x%08x," |
5902 | "ctx 0x%08x, offset 0x%08x, " | 5902 | "ctx 0x%08x, offset 0x%08x, " |
@@ -5968,7 +5968,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5968 | * register using set_falcon[4] */ | 5968 | * register using set_falcon[4] */ |
5969 | if (gr_intr & gr_intr_firmware_method_pending_f()) { | 5969 | if (gr_intr & gr_intr_firmware_method_pending_f()) { |
5970 | need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data); | 5970 | need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data); |
5971 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); | 5971 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); |
5972 | gk20a_writel(g, gr_intr_r(), | 5972 | gk20a_writel(g, gr_intr_r(), |
5973 | gr_intr_firmware_method_reset_f()); | 5973 | gr_intr_firmware_method_reset_f()); |
5974 | gr_intr &= ~gr_intr_firmware_method_pending_f(); | 5974 | gr_intr &= ~gr_intr_firmware_method_pending_f(); |
@@ -5977,7 +5977,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5977 | if (gr_intr & gr_intr_exception_pending_f()) { | 5977 | if (gr_intr & gr_intr_exception_pending_f()) { |
5978 | u32 exception = gk20a_readl(g, gr_exception_r()); | 5978 | u32 exception = gk20a_readl(g, gr_exception_r()); |
5979 | 5979 | ||
5980 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); | 5980 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); |
5981 | 5981 | ||
5982 | if (exception & gr_exception_fe_m()) { | 5982 | if (exception & gr_exception_fe_m()) { |
5983 | u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); | 5983 | u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); |
@@ -6057,7 +6057,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
6057 | if (exception & gr_exception_gpc_m() && need_reset == 0) { | 6057 | if (exception & gr_exception_gpc_m() && need_reset == 0) { |
6058 | bool post_event = false; | 6058 | bool post_event = false; |
6059 | 6059 | ||
6060 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 6060 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
6061 | "GPC exception pending"); | 6061 | "GPC exception pending"); |
6062 | 6062 | ||
6063 | fault_ch = gk20a_fifo_channel_from_chid(g, | 6063 | fault_ch = gk20a_fifo_channel_from_chid(g, |
@@ -6133,7 +6133,7 @@ int gk20a_gr_nonstall_isr(struct gk20a *g) | |||
6133 | int ops = 0; | 6133 | int ops = 0; |
6134 | u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r()); | 6134 | u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r()); |
6135 | 6135 | ||
6136 | gk20a_dbg(gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); | 6136 | nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); |
6137 | 6137 | ||
6138 | if (gr_intr & gr_intr_nonstall_trap_pending_f()) { | 6138 | if (gr_intr & gr_intr_nonstall_trap_pending_f()) { |
6139 | /* Clear the interrupt */ | 6139 | /* Clear the interrupt */ |
@@ -6201,7 +6201,7 @@ int gk20a_gr_suspend(struct gk20a *g) | |||
6201 | { | 6201 | { |
6202 | u32 ret = 0; | 6202 | u32 ret = 0; |
6203 | 6203 | ||
6204 | gk20a_dbg_fn(""); | 6204 | nvgpu_log_fn(g, " "); |
6205 | 6205 | ||
6206 | ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), | 6206 | ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), |
6207 | GR_IDLE_CHECK_DEFAULT); | 6207 | GR_IDLE_CHECK_DEFAULT); |
@@ -6227,7 +6227,7 @@ int gk20a_gr_suspend(struct gk20a *g) | |||
6227 | 6227 | ||
6228 | g->gr.initialized = false; | 6228 | g->gr.initialized = false; |
6229 | 6229 | ||
6230 | gk20a_dbg_fn("done"); | 6230 | nvgpu_log_fn(g, "done"); |
6231 | return ret; | 6231 | return ret; |
6232 | } | 6232 | } |
6233 | 6233 | ||
@@ -6250,7 +6250,7 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, | |||
6250 | { | 6250 | { |
6251 | u32 gpc_addr; | 6251 | u32 gpc_addr; |
6252 | 6252 | ||
6253 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 6253 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
6254 | 6254 | ||
6255 | /* setup defaults */ | 6255 | /* setup defaults */ |
6256 | *addr_type = CTXSW_ADDR_TYPE_SYS; | 6256 | *addr_type = CTXSW_ADDR_TYPE_SYS; |
@@ -6338,7 +6338,7 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr, | |||
6338 | { | 6338 | { |
6339 | u32 ppc_num; | 6339 | u32 ppc_num; |
6340 | 6340 | ||
6341 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 6341 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
6342 | 6342 | ||
6343 | for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++) | 6343 | for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++) |
6344 | priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), | 6344 | priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), |
@@ -6369,12 +6369,12 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, | |||
6369 | t = 0; | 6369 | t = 0; |
6370 | *num_registers = 0; | 6370 | *num_registers = 0; |
6371 | 6371 | ||
6372 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 6372 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
6373 | 6373 | ||
6374 | err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, | 6374 | err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, |
6375 | &gpc_num, &tpc_num, &ppc_num, &be_num, | 6375 | &gpc_num, &tpc_num, &ppc_num, &be_num, |
6376 | &broadcast_flags); | 6376 | &broadcast_flags); |
6377 | gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); | 6377 | nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); |
6378 | if (err) | 6378 | if (err) |
6379 | return err; | 6379 | return err; |
6380 | 6380 | ||
@@ -6428,7 +6428,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, | |||
6428 | } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || | 6428 | } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || |
6429 | (addr_type == CTXSW_ADDR_TYPE_ETPC)) && | 6429 | (addr_type == CTXSW_ADDR_TYPE_ETPC)) && |
6430 | g->ops.gr.egpc_etpc_priv_addr_table) { | 6430 | g->ops.gr.egpc_etpc_priv_addr_table) { |
6431 | gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); | 6431 | nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); |
6432 | g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, | 6432 | g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, |
6433 | broadcast_flags, priv_addr_table, &t); | 6433 | broadcast_flags, priv_addr_table, &t); |
6434 | } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { | 6434 | } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { |
@@ -6477,11 +6477,11 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6477 | u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * | 6477 | u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * |
6478 | sm_per_tpc; | 6478 | sm_per_tpc; |
6479 | 6479 | ||
6480 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 6480 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
6481 | 6481 | ||
6482 | /* implementation is crossed-up if either of these happen */ | 6482 | /* implementation is crossed-up if either of these happen */ |
6483 | if (max_offsets > potential_offsets) { | 6483 | if (max_offsets > potential_offsets) { |
6484 | gk20a_dbg_fn("max_offsets > potential_offsets"); | 6484 | nvgpu_log_fn(g, "max_offsets > potential_offsets"); |
6485 | return -EINVAL; | 6485 | return -EINVAL; |
6486 | } | 6486 | } |
6487 | 6487 | ||
@@ -6490,7 +6490,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6490 | 6490 | ||
6491 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); | 6491 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); |
6492 | if (!priv_registers) { | 6492 | if (!priv_registers) { |
6493 | gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); | 6493 | nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); |
6494 | err = PTR_ERR(priv_registers); | 6494 | err = PTR_ERR(priv_registers); |
6495 | goto cleanup; | 6495 | goto cleanup; |
6496 | } | 6496 | } |
@@ -6502,7 +6502,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6502 | &num_registers); | 6502 | &num_registers); |
6503 | 6503 | ||
6504 | if ((max_offsets > 1) && (num_registers > max_offsets)) { | 6504 | if ((max_offsets > 1) && (num_registers > max_offsets)) { |
6505 | gk20a_dbg_fn("max_offsets = %d, num_registers = %d", | 6505 | nvgpu_log_fn(g, "max_offsets = %d, num_registers = %d", |
6506 | max_offsets, num_registers); | 6506 | max_offsets, num_registers); |
6507 | err = -EINVAL; | 6507 | err = -EINVAL; |
6508 | goto cleanup; | 6508 | goto cleanup; |
@@ -6512,7 +6512,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6512 | num_registers = 1; | 6512 | num_registers = 1; |
6513 | 6513 | ||
6514 | if (!g->gr.ctx_vars.local_golden_image) { | 6514 | if (!g->gr.ctx_vars.local_golden_image) { |
6515 | gk20a_dbg_fn("no context switch header info to work with"); | 6515 | nvgpu_log_fn(g, "no context switch header info to work with"); |
6516 | err = -EINVAL; | 6516 | err = -EINVAL; |
6517 | goto cleanup; | 6517 | goto cleanup; |
6518 | } | 6518 | } |
@@ -6525,7 +6525,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6525 | g->gr.ctx_vars.golden_image_size, | 6525 | g->gr.ctx_vars.golden_image_size, |
6526 | &priv_offset); | 6526 | &priv_offset); |
6527 | if (err) { | 6527 | if (err) { |
6528 | gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", | 6528 | nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", |
6529 | addr); /*, grPriRegStr(addr)));*/ | 6529 | addr); /*, grPriRegStr(addr)));*/ |
6530 | goto cleanup; | 6530 | goto cleanup; |
6531 | } | 6531 | } |
@@ -6558,7 +6558,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, | |||
6558 | u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * | 6558 | u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * |
6559 | sm_per_tpc; | 6559 | sm_per_tpc; |
6560 | 6560 | ||
6561 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 6561 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
6562 | 6562 | ||
6563 | /* implementation is crossed-up if either of these happen */ | 6563 | /* implementation is crossed-up if either of these happen */ |
6564 | if (max_offsets > potential_offsets) | 6564 | if (max_offsets > potential_offsets) |
@@ -6569,7 +6569,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, | |||
6569 | 6569 | ||
6570 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); | 6570 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); |
6571 | if (!priv_registers) { | 6571 | if (!priv_registers) { |
6572 | gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); | 6572 | nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); |
6573 | return -ENOMEM; | 6573 | return -ENOMEM; |
6574 | } | 6574 | } |
6575 | memset(offsets, 0, sizeof(u32) * max_offsets); | 6575 | memset(offsets, 0, sizeof(u32) * max_offsets); |
@@ -6588,7 +6588,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, | |||
6588 | num_registers = 1; | 6588 | num_registers = 1; |
6589 | 6589 | ||
6590 | if (!g->gr.ctx_vars.local_golden_image) { | 6590 | if (!g->gr.ctx_vars.local_golden_image) { |
6591 | gk20a_dbg_fn("no context switch header info to work with"); | 6591 | nvgpu_log_fn(g, "no context switch header info to work with"); |
6592 | err = -EINVAL; | 6592 | err = -EINVAL; |
6593 | goto cleanup; | 6593 | goto cleanup; |
6594 | } | 6594 | } |
@@ -6598,7 +6598,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, | |||
6598 | priv_registers[i], | 6598 | priv_registers[i], |
6599 | &priv_offset); | 6599 | &priv_offset); |
6600 | if (err) { | 6600 | if (err) { |
6601 | gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", | 6601 | nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", |
6602 | addr); /*, grPriRegStr(addr)));*/ | 6602 | addr); /*, grPriRegStr(addr)));*/ |
6603 | goto cleanup; | 6603 | goto cleanup; |
6604 | } | 6604 | } |
@@ -6684,7 +6684,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, | |||
6684 | g->ops.gr.init_sm_dsm_reg_info(); | 6684 | g->ops.gr.init_sm_dsm_reg_info(); |
6685 | g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs); | 6685 | g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs); |
6686 | 6686 | ||
6687 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 6687 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
6688 | 6688 | ||
6689 | for (reg = 0; reg < num_ovr_perf_regs; reg++) { | 6689 | for (reg = 0; reg < num_ovr_perf_regs; reg++) { |
6690 | for (gpc = 0; gpc < num_gpc; gpc++) { | 6690 | for (gpc = 0; gpc < num_gpc; gpc++) { |
@@ -6754,13 +6754,11 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, | |||
6754 | static inline bool check_main_image_header_magic(u8 *context) | 6754 | static inline bool check_main_image_header_magic(u8 *context) |
6755 | { | 6755 | { |
6756 | u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o()); | 6756 | u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o()); |
6757 | gk20a_dbg(gpu_dbg_gpu_dbg, "main image magic=0x%x", magic); | ||
6758 | return magic == ctxsw_prog_main_image_magic_value_v_value_v(); | 6757 | return magic == ctxsw_prog_main_image_magic_value_v_value_v(); |
6759 | } | 6758 | } |
6760 | static inline bool check_local_header_magic(u8 *context) | 6759 | static inline bool check_local_header_magic(u8 *context) |
6761 | { | 6760 | { |
6762 | u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o()); | 6761 | u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o()); |
6763 | gk20a_dbg(gpu_dbg_gpu_dbg, "local magic=0x%x", magic); | ||
6764 | return magic == ctxsw_prog_local_magic_value_v_value_v(); | 6762 | return magic == ctxsw_prog_local_magic_value_v_value_v(); |
6765 | 6763 | ||
6766 | } | 6764 | } |
@@ -6823,14 +6821,14 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, | |||
6823 | else | 6821 | else |
6824 | return -EINVAL; | 6822 | return -EINVAL; |
6825 | 6823 | ||
6826 | gk20a_dbg_info(" gpc = %d tpc = %d", | 6824 | nvgpu_log_info(g, " gpc = %d tpc = %d", |
6827 | gpc_num, tpc_num); | 6825 | gpc_num, tpc_num); |
6828 | } else if ((g->ops.gr.is_etpc_addr) && | 6826 | } else if ((g->ops.gr.is_etpc_addr) && |
6829 | g->ops.gr.is_etpc_addr(g, addr)) { | 6827 | g->ops.gr.is_etpc_addr(g, addr)) { |
6830 | g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); | 6828 | g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); |
6831 | gpc_base = g->ops.gr.get_egpc_base(g); | 6829 | gpc_base = g->ops.gr.get_egpc_base(g); |
6832 | } else { | 6830 | } else { |
6833 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 6831 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
6834 | "does not exist in extended region"); | 6832 | "does not exist in extended region"); |
6835 | return -EINVAL; | 6833 | return -EINVAL; |
6836 | } | 6834 | } |
@@ -6857,7 +6855,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, | |||
6857 | data32 = *(u32 *)(context + ctxsw_prog_main_extended_buffer_ctl_o()); | 6855 | data32 = *(u32 *)(context + ctxsw_prog_main_extended_buffer_ctl_o()); |
6858 | ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32); | 6856 | ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32); |
6859 | if (0 == ext_priv_size) { | 6857 | if (0 == ext_priv_size) { |
6860 | gk20a_dbg_info(" No extended memory in context buffer"); | 6858 | nvgpu_log_info(g, " No extended memory in context buffer"); |
6861 | return -EINVAL; | 6859 | return -EINVAL; |
6862 | } | 6860 | } |
6863 | ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32); | 6861 | ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32); |
@@ -6891,7 +6889,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, | |||
6891 | if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) { | 6889 | if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) { |
6892 | sm_dsm_perf_reg_id = i; | 6890 | sm_dsm_perf_reg_id = i; |
6893 | 6891 | ||
6894 | gk20a_dbg_info("register match: 0x%08x", | 6892 | nvgpu_log_info(g, "register match: 0x%08x", |
6895 | sm_dsm_perf_regs[i]); | 6893 | sm_dsm_perf_regs[i]); |
6896 | 6894 | ||
6897 | chk_addr = (gpc_base + gpc_stride * gpc_num) + | 6895 | chk_addr = (gpc_base + gpc_stride * gpc_num) + |
@@ -6921,7 +6919,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, | |||
6921 | (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) { | 6919 | (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) { |
6922 | sm_dsm_perf_ctrl_reg_id = i; | 6920 | sm_dsm_perf_ctrl_reg_id = i; |
6923 | 6921 | ||
6924 | gk20a_dbg_info("register match: 0x%08x", | 6922 | nvgpu_log_info(g, "register match: 0x%08x", |
6925 | sm_dsm_perf_ctrl_regs[i]); | 6923 | sm_dsm_perf_ctrl_regs[i]); |
6926 | 6924 | ||
6927 | chk_addr = (gpc_base + gpc_stride * gpc_num) + | 6925 | chk_addr = (gpc_base + gpc_stride * gpc_num) + |
@@ -7032,7 +7030,7 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g, | |||
7032 | u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE); | 7030 | u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE); |
7033 | u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); | 7031 | u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); |
7034 | 7032 | ||
7035 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); | 7033 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); |
7036 | 7034 | ||
7037 | if (!g->gr.ctx_vars.valid) | 7035 | if (!g->gr.ctx_vars.valid) |
7038 | return -EINVAL; | 7036 | return -EINVAL; |
@@ -7215,12 +7213,12 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7215 | u8 *context; | 7213 | u8 *context; |
7216 | u32 offset_to_segment; | 7214 | u32 offset_to_segment; |
7217 | 7215 | ||
7218 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 7216 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
7219 | 7217 | ||
7220 | err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, | 7218 | err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, |
7221 | &gpc_num, &tpc_num, &ppc_num, &be_num, | 7219 | &gpc_num, &tpc_num, &ppc_num, &be_num, |
7222 | &broadcast_flags); | 7220 | &broadcast_flags); |
7223 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 7221 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
7224 | "addr_type = %d, broadcast_flags: %08x", | 7222 | "addr_type = %d, broadcast_flags: %08x", |
7225 | addr_type, broadcast_flags); | 7223 | addr_type, broadcast_flags); |
7226 | if (err) | 7224 | if (err) |
@@ -7243,7 +7241,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7243 | } | 7241 | } |
7244 | data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); | 7242 | data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); |
7245 | sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); | 7243 | sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); |
7246 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); | 7244 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); |
7247 | 7245 | ||
7248 | /* If found in Ext buffer, ok. | 7246 | /* If found in Ext buffer, ok. |
7249 | * If it failed and we expected to find it there (quad offset) | 7247 | * If it failed and we expected to find it there (quad offset) |
@@ -7253,7 +7251,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7253 | addr, is_quad, quad, context_buffer, | 7251 | addr, is_quad, quad, context_buffer, |
7254 | context_buffer_size, priv_offset); | 7252 | context_buffer_size, priv_offset); |
7255 | if (!err || (err && is_quad)) { | 7253 | if (!err || (err && is_quad)) { |
7256 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 7254 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
7257 | "err = %d, is_quad = %s", | 7255 | "err = %d, is_quad = %s", |
7258 | err, is_quad ? "true" : "false"); | 7256 | err, is_quad ? "true" : "false"); |
7259 | return err; | 7257 | return err; |
@@ -7357,7 +7355,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7357 | num_tpcs) << 2); | 7355 | num_tpcs) << 2); |
7358 | } | 7356 | } |
7359 | } else { | 7357 | } else { |
7360 | gk20a_dbg_fn("Unknown address type."); | 7358 | nvgpu_log_fn(g, "Unknown address type."); |
7361 | return -EINVAL; | 7359 | return -EINVAL; |
7362 | } | 7360 | } |
7363 | err = gr_gk20a_process_context_buffer_priv_segment(g, | 7361 | err = gr_gk20a_process_context_buffer_priv_segment(g, |
@@ -7668,7 +7666,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) | |||
7668 | u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps; | 7666 | u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps; |
7669 | 7667 | ||
7670 | if (hwpm_ctxsw_buffer_size == 0) { | 7668 | if (hwpm_ctxsw_buffer_size == 0) { |
7671 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 7669 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
7672 | "no PM Ctxsw buffer memory in context buffer"); | 7670 | "no PM Ctxsw buffer memory in context buffer"); |
7673 | return -EINVAL; | 7671 | return -EINVAL; |
7674 | } | 7672 | } |
@@ -7760,10 +7758,10 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) | |||
7760 | g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map; | 7758 | g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map; |
7761 | g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count; | 7759 | g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count; |
7762 | 7760 | ||
7763 | gk20a_dbg_info("Reg Addr => HWPM Ctxt switch buffer offset"); | 7761 | nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset"); |
7764 | 7762 | ||
7765 | for (i = 0; i < count; i++) | 7763 | for (i = 0; i < count; i++) |
7766 | gk20a_dbg_info("%08x => %08x", map[i].addr, map[i].offset); | 7764 | nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset); |
7767 | 7765 | ||
7768 | return 0; | 7766 | return 0; |
7769 | cleanup: | 7767 | cleanup: |
@@ -7785,7 +7783,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g, | |||
7785 | u32 count; | 7783 | u32 count; |
7786 | struct ctxsw_buf_offset_map_entry *map, *result, map_key; | 7784 | struct ctxsw_buf_offset_map_entry *map, *result, map_key; |
7787 | 7785 | ||
7788 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); | 7786 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); |
7789 | 7787 | ||
7790 | /* Create map of pri address and pm offset if necessary */ | 7788 | /* Create map of pri address and pm offset if necessary */ |
7791 | if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { | 7789 | if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { |
@@ -7831,7 +7829,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch) | |||
7831 | curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx, | 7829 | curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx, |
7832 | &curr_gr_tsgid); | 7830 | &curr_gr_tsgid); |
7833 | 7831 | ||
7834 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 7832 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
7835 | "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" | 7833 | "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" |
7836 | " ch->chid=%d", | 7834 | " ch->chid=%d", |
7837 | curr_ch ? curr_ch->chid : -1, | 7835 | curr_ch ? curr_ch->chid : -1, |
@@ -7873,7 +7871,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
7873 | u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops}; | 7871 | u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops}; |
7874 | int err = 0, pass; | 7872 | int err = 0, pass; |
7875 | 7873 | ||
7876 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", | 7874 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", |
7877 | num_ctx_wr_ops, num_ctx_rd_ops); | 7875 | num_ctx_wr_ops, num_ctx_rd_ops); |
7878 | 7876 | ||
7879 | tsg = tsg_gk20a_from_ch(ch); | 7877 | tsg = tsg_gk20a_from_ch(ch); |
@@ -7906,7 +7904,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
7906 | v |= ctx_ops[i].value_lo; | 7904 | v |= ctx_ops[i].value_lo; |
7907 | gk20a_writel(g, offset, v); | 7905 | gk20a_writel(g, offset, v); |
7908 | 7906 | ||
7909 | gk20a_dbg(gpu_dbg_gpu_dbg, | 7907 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
7910 | "direct wr: offset=0x%x v=0x%x", | 7908 | "direct wr: offset=0x%x v=0x%x", |
7911 | offset, v); | 7909 | offset, v); |
7912 | 7910 | ||
@@ -7916,7 +7914,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
7916 | v |= ctx_ops[i].value_hi; | 7914 | v |= ctx_ops[i].value_hi; |
7917 | gk20a_writel(g, offset + 4, v); | 7915 | gk20a_writel(g, offset + 4, v); |
7918 | 7916 | ||
7919 | gk20a_dbg(gpu_dbg_gpu_dbg, | 7917 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
7920 | "direct wr: offset=0x%x v=0x%x", | 7918 | "direct wr: offset=0x%x v=0x%x", |
7921 | offset + 4, v); | 7919 | offset + 4, v); |
7922 | } | 7920 | } |
@@ -7925,7 +7923,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
7925 | ctx_ops[i].value_lo = | 7923 | ctx_ops[i].value_lo = |
7926 | gk20a_readl(g, offset); | 7924 | gk20a_readl(g, offset); |
7927 | 7925 | ||
7928 | gk20a_dbg(gpu_dbg_gpu_dbg, | 7926 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
7929 | "direct rd: offset=0x%x v=0x%x", | 7927 | "direct rd: offset=0x%x v=0x%x", |
7930 | offset, ctx_ops[i].value_lo); | 7928 | offset, ctx_ops[i].value_lo); |
7931 | 7929 | ||
@@ -7933,7 +7931,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
7933 | ctx_ops[i].value_hi = | 7931 | ctx_ops[i].value_hi = |
7934 | gk20a_readl(g, offset + 4); | 7932 | gk20a_readl(g, offset + 4); |
7935 | 7933 | ||
7936 | gk20a_dbg(gpu_dbg_gpu_dbg, | 7934 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
7937 | "direct rd: offset=0x%x v=0x%x", | 7935 | "direct rd: offset=0x%x v=0x%x", |
7938 | offset + 4, ctx_ops[i].value_hi); | 7936 | offset + 4, ctx_ops[i].value_hi); |
7939 | } else | 7937 | } else |
@@ -8001,7 +7999,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8001 | offsets, offset_addrs, | 7999 | offsets, offset_addrs, |
8002 | &num_offsets); | 8000 | &num_offsets); |
8003 | if (err) { | 8001 | if (err) { |
8004 | gk20a_dbg(gpu_dbg_gpu_dbg, | 8002 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
8005 | "ctx op invalid offset: offset=0x%x", | 8003 | "ctx op invalid offset: offset=0x%x", |
8006 | ctx_ops[i].offset); | 8004 | ctx_ops[i].offset); |
8007 | ctx_ops[i].status = | 8005 | ctx_ops[i].status = |
@@ -8044,7 +8042,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8044 | v |= ctx_ops[i].value_lo; | 8042 | v |= ctx_ops[i].value_lo; |
8045 | nvgpu_mem_wr(g, current_mem, offsets[j], v); | 8043 | nvgpu_mem_wr(g, current_mem, offsets[j], v); |
8046 | 8044 | ||
8047 | gk20a_dbg(gpu_dbg_gpu_dbg, | 8045 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
8048 | "context wr: offset=0x%x v=0x%x", | 8046 | "context wr: offset=0x%x v=0x%x", |
8049 | offsets[j], v); | 8047 | offsets[j], v); |
8050 | 8048 | ||
@@ -8054,7 +8052,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8054 | v |= ctx_ops[i].value_hi; | 8052 | v |= ctx_ops[i].value_hi; |
8055 | nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v); | 8053 | nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v); |
8056 | 8054 | ||
8057 | gk20a_dbg(gpu_dbg_gpu_dbg, | 8055 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
8058 | "context wr: offset=0x%x v=0x%x", | 8056 | "context wr: offset=0x%x v=0x%x", |
8059 | offsets[j] + 4, v); | 8057 | offsets[j] + 4, v); |
8060 | } | 8058 | } |
@@ -8068,14 +8066,14 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8068 | ctx_ops[i].value_lo = | 8066 | ctx_ops[i].value_lo = |
8069 | nvgpu_mem_rd(g, current_mem, offsets[0]); | 8067 | nvgpu_mem_rd(g, current_mem, offsets[0]); |
8070 | 8068 | ||
8071 | gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", | 8069 | nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", |
8072 | offsets[0], ctx_ops[i].value_lo); | 8070 | offsets[0], ctx_ops[i].value_lo); |
8073 | 8071 | ||
8074 | if (ctx_ops[i].op == REGOP(READ_64)) { | 8072 | if (ctx_ops[i].op == REGOP(READ_64)) { |
8075 | ctx_ops[i].value_hi = | 8073 | ctx_ops[i].value_hi = |
8076 | nvgpu_mem_rd(g, current_mem, offsets[0] + 4); | 8074 | nvgpu_mem_rd(g, current_mem, offsets[0] + 4); |
8077 | 8075 | ||
8078 | gk20a_dbg(gpu_dbg_gpu_dbg, | 8076 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
8079 | "context rd: offset=0x%x v=0x%x", | 8077 | "context rd: offset=0x%x v=0x%x", |
8080 | offsets[0] + 4, ctx_ops[i].value_hi); | 8078 | offsets[0] + 4, ctx_ops[i].value_hi); |
8081 | } else | 8079 | } else |
@@ -8121,7 +8119,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8121 | 8119 | ||
8122 | ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch); | 8120 | ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch); |
8123 | 8121 | ||
8124 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", | 8122 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", |
8125 | ch_is_curr_ctx); | 8123 | ch_is_curr_ctx); |
8126 | 8124 | ||
8127 | err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops, | 8125 | err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops, |
@@ -8176,7 +8174,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
8176 | struct nvgpu_timeout timeout; | 8174 | struct nvgpu_timeout timeout; |
8177 | u32 warp_esr; | 8175 | u32 warp_esr; |
8178 | 8176 | ||
8179 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 8177 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
8180 | "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm); | 8178 | "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm); |
8181 | 8179 | ||
8182 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), | 8180 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), |
@@ -8201,7 +8199,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
8201 | ((global_esr & ~global_esr_mask) == 0); | 8199 | ((global_esr & ~global_esr_mask) == 0); |
8202 | 8200 | ||
8203 | if (locked_down || no_error_pending) { | 8201 | if (locked_down || no_error_pending) { |
8204 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 8202 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
8205 | "GPC%d TPC%d SM%d: locked down SM", | 8203 | "GPC%d TPC%d SM%d: locked down SM", |
8206 | gpc, tpc, sm); | 8204 | gpc, tpc, sm); |
8207 | return 0; | 8205 | return 0; |
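The gr_gk20a.c hunks above all apply one mechanical rewrite: every gk20a_dbg*() call gains an explicit struct gk20a pointer so log output can be attributed to a specific GPU instance, and empty function-trace strings become a single space. A minimal before/after sketch of the idiom (the variables shown are illustrative, not from the patch):

	/* before: logging macros resolve the device implicitly */
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
	gk20a_dbg_fn("");
	gk20a_dbg_info("register match: 0x%08x", reg);

	/* after: the struct gk20a pointer is passed explicitly */
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
	nvgpu_log_fn(g, " ");	/* "" becomes " " by convention */
	nvgpu_log_info(g, "register match: 0x%08x", reg);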
diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c index 939567e7..1787f573 100644 --- a/drivers/gpu/nvgpu/gk20a/hal.c +++ b/drivers/gpu/nvgpu/gk20a/hal.c | |||
@@ -41,7 +41,7 @@ int gpu_init_hal(struct gk20a *g) | |||
41 | switch (ver) { | 41 | switch (ver) { |
42 | case GK20A_GPUID_GM20B: | 42 | case GK20A_GPUID_GM20B: |
43 | case GK20A_GPUID_GM20B_B: | 43 | case GK20A_GPUID_GM20B_B: |
44 | gk20a_dbg_info("gm20b detected"); | 44 | nvgpu_log_info(g, "gm20b detected"); |
45 | if (gm20b_init_hal(g)) | 45 | if (gm20b_init_hal(g)) |
46 | return -ENODEV; | 46 | return -ENODEV; |
47 | break; | 47 | break; |
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c index 7fed410e..9473ad4f 100644 --- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GK20A Master Control | 2 | * GK20A Master Control |
3 | * | 3 | * |
4 | * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -40,7 +40,7 @@ void mc_gk20a_isr_stall(struct gk20a *g) | |||
40 | 40 | ||
41 | mc_intr_0 = g->ops.mc.intr_stall(g); | 41 | mc_intr_0 = g->ops.mc.intr_stall(g); |
42 | 42 | ||
43 | gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); | 43 | nvgpu_log(g, gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); |
44 | 44 | ||
45 | for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { | 45 | for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { |
46 | active_engine_id = g->fifo.active_engines_list[engine_id_idx]; | 46 | active_engine_id = g->fifo.active_engines_list[engine_id_idx]; |
@@ -200,7 +200,7 @@ void gk20a_mc_disable(struct gk20a *g, u32 units) | |||
200 | { | 200 | { |
201 | u32 pmc; | 201 | u32 pmc; |
202 | 202 | ||
203 | gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units); | 203 | nvgpu_log(g, gpu_dbg_info, "pmc disable: %08x\n", units); |
204 | 204 | ||
205 | nvgpu_spinlock_acquire(&g->mc_enable_lock); | 205 | nvgpu_spinlock_acquire(&g->mc_enable_lock); |
206 | pmc = gk20a_readl(g, mc_enable_r()); | 206 | pmc = gk20a_readl(g, mc_enable_r()); |
@@ -213,7 +213,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units) | |||
213 | { | 213 | { |
214 | u32 pmc; | 214 | u32 pmc; |
215 | 215 | ||
216 | gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units); | 216 | nvgpu_log(g, gpu_dbg_info, "pmc enable: %08x\n", units); |
217 | 217 | ||
218 | nvgpu_spinlock_acquire(&g->mc_enable_lock); | 218 | nvgpu_spinlock_acquire(&g->mc_enable_lock); |
219 | pmc = gk20a_readl(g, mc_enable_r()); | 219 | pmc = gk20a_readl(g, mc_enable_r()); |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 14876296..dfdcc3a4 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -91,7 +91,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g) | |||
91 | struct mm_gk20a *mm = &g->mm; | 91 | struct mm_gk20a *mm = &g->mm; |
92 | int err; | 92 | int err; |
93 | 93 | ||
94 | gk20a_dbg_fn(""); | 94 | nvgpu_log_fn(g, " "); |
95 | 95 | ||
96 | g->ops.fb.set_mmu_page_size(g); | 96 | g->ops.fb.set_mmu_page_size(g); |
97 | if (g->ops.fb.set_use_full_comp_tag_line) | 97 | if (g->ops.fb.set_use_full_comp_tag_line) |
@@ -112,7 +112,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g) | |||
112 | if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) | 112 | if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) |
113 | return -EBUSY; | 113 | return -EBUSY; |
114 | 114 | ||
115 | gk20a_dbg_fn("done"); | 115 | nvgpu_log_fn(g, "done"); |
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
@@ -336,7 +336,7 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch) | |||
336 | { | 336 | { |
337 | int err = 0; | 337 | int err = 0; |
338 | 338 | ||
339 | gk20a_dbg_fn(""); | 339 | nvgpu_log_fn(ch->g, " "); |
340 | 340 | ||
341 | nvgpu_vm_get(vm); | 341 | nvgpu_vm_get(vm); |
342 | ch->vm = vm; | 342 | ch->vm = vm; |
@@ -357,7 +357,7 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block, | |||
357 | u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); | 357 | u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); |
358 | u32 pdb_addr_hi = u64_hi32(pdb_addr); | 358 | u32 pdb_addr_hi = u64_hi32(pdb_addr); |
359 | 359 | ||
360 | gk20a_dbg_info("pde pa=0x%llx", pdb_addr); | 360 | nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr); |
361 | 361 | ||
362 | nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), | 362 | nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), |
363 | nvgpu_aperture_mask(g, vm->pdb.mem, | 363 | nvgpu_aperture_mask(g, vm->pdb.mem, |
@@ -376,7 +376,7 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm, | |||
376 | { | 376 | { |
377 | struct gk20a *g = gk20a_from_vm(vm); | 377 | struct gk20a *g = gk20a_from_vm(vm); |
378 | 378 | ||
379 | gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", | 379 | nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p", |
380 | nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); | 380 | nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); |
381 | 381 | ||
382 | g->ops.mm.init_pdb(g, inst_block, vm); | 382 | g->ops.mm.init_pdb(g, inst_block, vm); |
@@ -395,7 +395,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) | |||
395 | { | 395 | { |
396 | int err; | 396 | int err; |
397 | 397 | ||
398 | gk20a_dbg_fn(""); | 398 | nvgpu_log_fn(g, " "); |
399 | 399 | ||
400 | err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); | 400 | err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); |
401 | if (err) { | 401 | if (err) { |
@@ -403,7 +403,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) | |||
403 | return err; | 403 | return err; |
404 | } | 404 | } |
405 | 405 | ||
406 | gk20a_dbg_fn("done"); | 406 | nvgpu_log_fn(g, "done"); |
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
409 | 409 | ||
@@ -415,7 +415,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) | |||
415 | struct nvgpu_timeout timeout; | 415 | struct nvgpu_timeout timeout; |
416 | u32 retries; | 416 | u32 retries; |
417 | 417 | ||
418 | gk20a_dbg_fn(""); | 418 | nvgpu_log_fn(g, " "); |
419 | 419 | ||
420 | gk20a_busy_noresume(g); | 420 | gk20a_busy_noresume(g); |
421 | if (!g->power_on) { | 421 | if (!g->power_on) { |
@@ -448,7 +448,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) | |||
448 | flush_fb_flush_outstanding_true_v() || | 448 | flush_fb_flush_outstanding_true_v() || |
449 | flush_fb_flush_pending_v(data) == | 449 | flush_fb_flush_pending_v(data) == |
450 | flush_fb_flush_pending_busy_v()) { | 450 | flush_fb_flush_pending_busy_v()) { |
451 | gk20a_dbg_info("fb_flush 0x%x", data); | 451 | nvgpu_log_info(g, "fb_flush 0x%x", data); |
452 | nvgpu_udelay(5); | 452 | nvgpu_udelay(5); |
453 | } else | 453 | } else |
454 | break; | 454 | break; |
@@ -494,7 +494,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g) | |||
494 | flush_l2_system_invalidate_outstanding_true_v() || | 494 | flush_l2_system_invalidate_outstanding_true_v() || |
495 | flush_l2_system_invalidate_pending_v(data) == | 495 | flush_l2_system_invalidate_pending_v(data) == |
496 | flush_l2_system_invalidate_pending_busy_v()) { | 496 | flush_l2_system_invalidate_pending_busy_v()) { |
497 | gk20a_dbg_info("l2_system_invalidate 0x%x", | 497 | nvgpu_log_info(g, "l2_system_invalidate 0x%x", |
498 | data); | 498 | data); |
499 | nvgpu_udelay(5); | 499 | nvgpu_udelay(5); |
500 | } else | 500 | } else |
@@ -526,7 +526,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
526 | struct nvgpu_timeout timeout; | 526 | struct nvgpu_timeout timeout; |
527 | u32 retries = 2000; | 527 | u32 retries = 2000; |
528 | 528 | ||
529 | gk20a_dbg_fn(""); | 529 | nvgpu_log_fn(g, " "); |
530 | 530 | ||
531 | gk20a_busy_noresume(g); | 531 | gk20a_busy_noresume(g); |
532 | if (!g->power_on) | 532 | if (!g->power_on) |
@@ -553,7 +553,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
553 | flush_l2_flush_dirty_outstanding_true_v() || | 553 | flush_l2_flush_dirty_outstanding_true_v() || |
554 | flush_l2_flush_dirty_pending_v(data) == | 554 | flush_l2_flush_dirty_pending_v(data) == |
555 | flush_l2_flush_dirty_pending_busy_v()) { | 555 | flush_l2_flush_dirty_pending_busy_v()) { |
556 | gk20a_dbg_info("l2_flush_dirty 0x%x", data); | 556 | nvgpu_log_info(g, "l2_flush_dirty 0x%x", data); |
557 | nvgpu_udelay(5); | 557 | nvgpu_udelay(5); |
558 | } else | 558 | } else |
559 | break; | 559 | break; |
@@ -578,7 +578,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) | |||
578 | struct nvgpu_timeout timeout; | 578 | struct nvgpu_timeout timeout; |
579 | u32 retries = 200; | 579 | u32 retries = 200; |
580 | 580 | ||
581 | gk20a_dbg_fn(""); | 581 | nvgpu_log_fn(g, " "); |
582 | 582 | ||
583 | gk20a_busy_noresume(g); | 583 | gk20a_busy_noresume(g); |
584 | if (!g->power_on) | 584 | if (!g->power_on) |
@@ -602,7 +602,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) | |||
602 | flush_l2_clean_comptags_outstanding_true_v() || | 602 | flush_l2_clean_comptags_outstanding_true_v() || |
603 | flush_l2_clean_comptags_pending_v(data) == | 603 | flush_l2_clean_comptags_pending_v(data) == |
604 | flush_l2_clean_comptags_pending_busy_v()) { | 604 | flush_l2_clean_comptags_pending_busy_v()) { |
605 | gk20a_dbg_info("l2_clean_comptags 0x%x", data); | 605 | nvgpu_log_info(g, "l2_clean_comptags 0x%x", data); |
606 | nvgpu_udelay(5); | 606 | nvgpu_udelay(5); |
607 | } else | 607 | } else |
608 | break; | 608 | break; |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 0531b387..400a49a3 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -39,8 +39,8 @@ | |||
39 | #include <nvgpu/hw/gk20a/hw_pwr_gk20a.h> | 39 | #include <nvgpu/hw/gk20a/hw_pwr_gk20a.h> |
40 | #include <nvgpu/hw/gk20a/hw_top_gk20a.h> | 40 | #include <nvgpu/hw/gk20a/hw_top_gk20a.h> |
41 | 41 | ||
42 | #define gk20a_dbg_pmu(fmt, arg...) \ | 42 | #define gk20a_dbg_pmu(g, fmt, arg...) \ |
43 | gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) | 43 | nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) |
44 | 44 | ||
45 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) | 45 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) |
46 | { | 46 | { |
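The reworked gk20a_dbg_pmu() wrapper now threads the device pointer through to nvgpu_log(), so g must be in scope at every call site. A short usage sketch under that assumption (the caller name is hypothetical):

	#define gk20a_dbg_pmu(g, fmt, arg...) \
		nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

	/* hypothetical caller illustrating the new first argument */
	static void pmu_trace_intr(struct gk20a *g, u32 intr)
	{
		gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);
	}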
@@ -139,7 +139,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) | |||
139 | u32 intr_mask; | 139 | u32 intr_mask; |
140 | u32 intr_dest; | 140 | u32 intr_dest; |
141 | 141 | ||
142 | gk20a_dbg_fn(""); | 142 | nvgpu_log_fn(g, " "); |
143 | 143 | ||
144 | g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true, | 144 | g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true, |
145 | mc_intr_mask_0_pmu_enabled_f()); | 145 | mc_intr_mask_0_pmu_enabled_f()); |
@@ -166,7 +166,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) | |||
166 | mc_intr_mask_0_pmu_enabled_f()); | 166 | mc_intr_mask_0_pmu_enabled_f()); |
167 | } | 167 | } |
168 | 168 | ||
169 | gk20a_dbg_fn("done"); | 169 | nvgpu_log_fn(g, "done"); |
170 | } | 170 | } |
171 | 171 | ||
172 | 172 | ||
@@ -179,7 +179,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu) | |||
179 | u64 addr_code, addr_data, addr_load; | 179 | u64 addr_code, addr_data, addr_load; |
180 | u32 i, blocks, addr_args; | 180 | u32 i, blocks, addr_args; |
181 | 181 | ||
182 | gk20a_dbg_fn(""); | 182 | nvgpu_log_fn(g, " "); |
183 | 183 | ||
184 | gk20a_writel(g, pwr_falcon_itfen_r(), | 184 | gk20a_writel(g, pwr_falcon_itfen_r(), |
185 | gk20a_readl(g, pwr_falcon_itfen_r()) | | 185 | gk20a_readl(g, pwr_falcon_itfen_r()) | |
@@ -286,7 +286,7 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) | |||
286 | 286 | ||
287 | if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) { | 287 | if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) { |
288 | BUG_ON(mutex->ref_cnt == 0); | 288 | BUG_ON(mutex->ref_cnt == 0); |
289 | gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token); | 289 | gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token); |
290 | mutex->ref_cnt++; | 290 | mutex->ref_cnt++; |
291 | return 0; | 291 | return 0; |
292 | } | 292 | } |
@@ -313,12 +313,12 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) | |||
313 | 313 | ||
314 | if (owner == data) { | 314 | if (owner == data) { |
315 | mutex->ref_cnt = 1; | 315 | mutex->ref_cnt = 1; |
316 | gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x", | 316 | gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x", |
317 | mutex->index, *token); | 317 | mutex->index, *token); |
318 | *token = owner; | 318 | *token = owner; |
319 | return 0; | 319 | return 0; |
320 | } else { | 320 | } else { |
321 | gk20a_dbg_info("fail to acquire mutex idx=0x%08x", | 321 | nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x", |
322 | mutex->index); | 322 | mutex->index); |
323 | 323 | ||
324 | data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); | 324 | data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); |
@@ -370,7 +370,7 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) | |||
370 | pwr_pmu_mutex_id_release_value_f(owner)); | 370 | pwr_pmu_mutex_id_release_value_f(owner)); |
371 | gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); | 371 | gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); |
372 | 372 | ||
373 | gk20a_dbg_pmu("mutex released: id=%d, token=0x%x", | 373 | gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x", |
374 | mutex->index, *token); | 374 | mutex->index, *token); |
375 | 375 | ||
376 | return 0; | 376 | return 0; |
@@ -475,7 +475,7 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g) | |||
475 | struct nvgpu_pmu *pmu = &g->pmu; | 475 | struct nvgpu_pmu *pmu = &g->pmu; |
476 | int err = 0; | 476 | int err = 0; |
477 | 477 | ||
478 | gk20a_dbg_fn(""); | 478 | nvgpu_log_fn(g, " "); |
479 | 479 | ||
480 | nvgpu_mutex_acquire(&pmu->isr_mutex); | 480 | nvgpu_mutex_acquire(&pmu->isr_mutex); |
481 | nvgpu_flcn_reset(pmu->flcn); | 481 | nvgpu_flcn_reset(pmu->flcn); |
@@ -554,7 +554,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, | |||
554 | void *param, u32 handle, u32 status) | 554 | void *param, u32 handle, u32 status) |
555 | { | 555 | { |
556 | struct nvgpu_pmu *pmu = param; | 556 | struct nvgpu_pmu *pmu = param; |
557 | gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); | 557 | gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE"); |
558 | pmu->zbc_save_done = 1; | 558 | pmu->zbc_save_done = 1; |
559 | } | 559 | } |
560 | 560 | ||
@@ -575,7 +575,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) | |||
575 | 575 | ||
576 | pmu->zbc_save_done = 0; | 576 | pmu->zbc_save_done = 0; |
577 | 577 | ||
578 | gk20a_dbg_pmu("cmd post ZBC_TABLE_UPDATE"); | 578 | gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE"); |
579 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, | 579 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, |
580 | pmu_handle_zbc_msg, pmu, &seq, ~0); | 580 | pmu_handle_zbc_msg, pmu, &seq, ~0); |
581 | pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), | 581 | pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), |
@@ -587,18 +587,20 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) | |||
587 | int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu, | 587 | int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu, |
588 | struct nv_pmu_therm_msg *msg) | 588 | struct nv_pmu_therm_msg *msg) |
589 | { | 589 | { |
590 | gk20a_dbg_fn(""); | 590 | struct gk20a *g = gk20a_from_pmu(pmu); |
591 | |||
592 | nvgpu_log_fn(g, " "); | ||
591 | 593 | ||
592 | switch (msg->msg_type) { | 594 | switch (msg->msg_type) { |
593 | case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION: | 595 | case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION: |
594 | if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) | 596 | if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) |
595 | nvgpu_clk_arb_send_thermal_alarm(pmu->g); | 597 | nvgpu_clk_arb_send_thermal_alarm(pmu->g); |
596 | else | 598 | else |
597 | gk20a_dbg_pmu("Unwanted/Unregistered thermal event received %d", | 599 | gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d", |
598 | msg->hw_slct_msg.mask); | 600 | msg->hw_slct_msg.mask); |
599 | break; | 601 | break; |
600 | default: | 602 | default: |
601 | gk20a_dbg_pmu("unkown therm event received %d", msg->msg_type); | 603 | gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type); |
602 | break; | 604 | break; |
603 | } | 605 | } |
604 | 606 | ||
@@ -609,22 +611,22 @@ void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) | |||
609 | { | 611 | { |
610 | struct gk20a *g = gk20a_from_pmu(pmu); | 612 | struct gk20a *g = gk20a_from_pmu(pmu); |
611 | 613 | ||
612 | gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x", | 614 | gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x", |
613 | gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3))); | 615 | gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3))); |
614 | gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x", | 616 | gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x", |
615 | gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3))); | 617 | gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3))); |
616 | gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x", | 618 | gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x", |
617 | gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3))); | 619 | gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3))); |
618 | gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x", | 620 | gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x", |
619 | gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0))); | 621 | gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0))); |
620 | gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x", | 622 | gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x", |
621 | gk20a_readl(g, pwr_pmu_pg_intren_r(0))); | 623 | gk20a_readl(g, pwr_pmu_pg_intren_r(0))); |
622 | 624 | ||
623 | gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x", | 625 | gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x", |
624 | gk20a_readl(g, pwr_pmu_idle_count_r(3))); | 626 | gk20a_readl(g, pwr_pmu_idle_count_r(3))); |
625 | gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x", | 627 | gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x", |
626 | gk20a_readl(g, pwr_pmu_idle_count_r(4))); | 628 | gk20a_readl(g, pwr_pmu_idle_count_r(4))); |
627 | gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x", | 629 | gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x", |
628 | gk20a_readl(g, pwr_pmu_idle_count_r(7))); | 630 | gk20a_readl(g, pwr_pmu_idle_count_r(7))); |
629 | } | 631 | } |
630 | 632 | ||
@@ -693,7 +695,7 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
693 | u32 intr, mask; | 695 | u32 intr, mask; |
694 | bool recheck = false; | 696 | bool recheck = false; |
695 | 697 | ||
696 | gk20a_dbg_fn(""); | 698 | nvgpu_log_fn(g, " "); |
697 | 699 | ||
698 | nvgpu_mutex_acquire(&pmu->isr_mutex); | 700 | nvgpu_mutex_acquire(&pmu->isr_mutex); |
699 | if (!pmu->isr_enabled) { | 701 | if (!pmu->isr_enabled) { |
@@ -706,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
706 | 708 | ||
707 | intr = gk20a_readl(g, pwr_falcon_irqstat_r()); | 709 | intr = gk20a_readl(g, pwr_falcon_irqstat_r()); |
708 | 710 | ||
709 | gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr); | 711 | gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr); |
710 | 712 | ||
711 | intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; | 713 | intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; |
712 | if (!intr || pmu->pmu_state == PMU_STATE_OFF) { | 714 | if (!intr || pmu->pmu_state == PMU_STATE_OFF) { |
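Where a function only receives the PMU object, the patch derives the device pointer before the first log call, as nvgpu_pmu_handle_therm_event() does above. A condensed sketch of that idiom (the handler name is hypothetical):

	static int pmu_example_handler(struct nvgpu_pmu *pmu)
	{
		/* recover the per-GPU context from the PMU object */
		struct gk20a *g = gk20a_from_pmu(pmu);

		nvgpu_log_fn(g, " ");
		gk20a_dbg_pmu(g, "handling PMU event");
		return 0;
	}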
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c index a76e2580..8dde61a2 100644 --- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c | |||
@@ -45,7 +45,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem, | |||
45 | bus_bar0_window_target_vid_mem_f()) | | 45 | bus_bar0_window_target_vid_mem_f()) | |
46 | bus_bar0_window_base_f(hi); | 46 | bus_bar0_window_base_f(hi); |
47 | 47 | ||
48 | gk20a_dbg(gpu_dbg_mem, | 48 | nvgpu_log(g, gpu_dbg_mem, |
49 | "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)", | 49 | "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)", |
50 | hi, lo, mem, sgl, bufbase, | 50 | hi, lo, mem, sgl, bufbase, |
51 | bufbase + nvgpu_sgt_get_phys(g, sgt, sgl), | 51 | bufbase + nvgpu_sgt_get_phys(g, sgt, sgl), |
@@ -67,7 +67,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem, | |||
67 | void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem, | 67 | void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem, |
68 | struct nvgpu_sgl *sgl) | 68 | struct nvgpu_sgl *sgl) |
69 | { | 69 | { |
70 | gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl); | 70 | nvgpu_log(g, gpu_dbg_mem, "end for %p,%p", mem, sgl); |
71 | 71 | ||
72 | nvgpu_spinlock_release(&g->mm.pramin_window_lock); | 72 | nvgpu_spinlock_release(&g->mm.pramin_window_lock); |
73 | } | 73 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c index ed5327cb..dea42b55 100644 --- a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GK20A priv ring | 2 | * GK20A priv ring |
3 | * | 3 | * |
4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -66,11 +66,11 @@ void gk20a_priv_ring_isr(struct gk20a *g) | |||
66 | status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r()); | 66 | status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r()); |
67 | status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r()); | 67 | status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r()); |
68 | 68 | ||
69 | gk20a_dbg(gpu_dbg_intr, "ringmaster intr status0: 0x%08x," | 69 | nvgpu_log(g, gpu_dbg_intr, "ringmaster intr status0: 0x%08x," |
70 | "status1: 0x%08x", status0, status1); | 70 | "status1: 0x%08x", status0, status1); |
71 | 71 | ||
72 | if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) { | 72 | if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) { |
73 | gk20a_dbg(gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", | 73 | nvgpu_log(g, gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", |
74 | gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()), | 74 | gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()), |
75 | gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()), | 75 | gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()), |
76 | gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()), | 76 | gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()), |
@@ -79,7 +79,7 @@ void gk20a_priv_ring_isr(struct gk20a *g) | |||
79 | 79 | ||
80 | for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { | 80 | for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { |
81 | if (status1 & BIT(gpc)) { | 81 | if (status1 & BIT(gpc)) { |
82 | gk20a_dbg(gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc, | 82 | nvgpu_log(g, gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc, |
83 | gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride), | 83 | gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride), |
84 | gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride), | 84 | gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride), |
85 | gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride), | 85 | gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride), |
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c index 60162f9d..5b9f973b 100644 --- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Tegra GK20A GPU Debugger Driver Register Ops | 2 | * Tegra GK20A GPU Debugger Driver Register Ops |
3 | * | 3 | * |
4 | * Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -72,7 +72,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, | |||
72 | bool skip_read_lo, skip_read_hi; | 72 | bool skip_read_lo, skip_read_hi; |
73 | bool ok; | 73 | bool ok; |
74 | 74 | ||
75 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 75 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); |
76 | 76 | ||
77 | ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 77 | ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
78 | 78 | ||
@@ -108,7 +108,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, | |||
108 | case REGOP(READ_32): | 108 | case REGOP(READ_32): |
109 | ops[i].value_hi = 0; | 109 | ops[i].value_hi = 0; |
110 | ops[i].value_lo = gk20a_readl(g, ops[i].offset); | 110 | ops[i].value_lo = gk20a_readl(g, ops[i].offset); |
111 | gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", | 111 | nvgpu_log(g, gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", |
112 | ops[i].value_lo, ops[i].offset); | 112 | ops[i].value_lo, ops[i].offset); |
113 | 113 | ||
114 | break; | 114 | break; |
@@ -118,7 +118,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, | |||
118 | ops[i].value_hi = | 118 | ops[i].value_hi = |
119 | gk20a_readl(g, ops[i].offset + 4); | 119 | gk20a_readl(g, ops[i].offset + 4); |
120 | 120 | ||
121 | gk20a_dbg(gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x", | 121 | nvgpu_log(g, gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x", |
122 | ops[i].value_hi, ops[i].value_lo, | 122 | ops[i].value_hi, ops[i].value_lo, |
123 | ops[i].offset); | 123 | ops[i].offset); |
124 | break; | 124 | break; |
@@ -157,12 +157,12 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, | |||
157 | 157 | ||
158 | /* now update first 32bits */ | 158 | /* now update first 32bits */ |
159 | gk20a_writel(g, ops[i].offset, data32_lo); | 159 | gk20a_writel(g, ops[i].offset, data32_lo); |
160 | gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", | 160 | nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", |
161 | data32_lo, ops[i].offset); | 161 | data32_lo, ops[i].offset); |
162 | /* if desired, update second 32bits */ | 162 | /* if desired, update second 32bits */ |
163 | if (ops[i].op == REGOP(WRITE_64)) { | 163 | if (ops[i].op == REGOP(WRITE_64)) { |
164 | gk20a_writel(g, ops[i].offset + 4, data32_hi); | 164 | gk20a_writel(g, ops[i].offset + 4, data32_hi); |
165 | gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", | 165 | nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", |
166 | data32_hi, ops[i].offset + 4); | 166 | data32_hi, ops[i].offset + 4); |
167 | 167 | ||
168 | } | 168 | } |
@@ -189,7 +189,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, | |||
189 | } | 189 | } |
190 | 190 | ||
191 | clean_up: | 191 | clean_up: |
192 | gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); | 192 | nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err); |
193 | return err; | 193 | return err; |
194 | 194 | ||
195 | } | 195 | } |
@@ -395,7 +395,7 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
395 | } | 395 | } |
396 | } | 396 | } |
397 | 397 | ||
398 | gk20a_dbg(gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d", | 398 | nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d", |
399 | *ctx_wr_count, *ctx_rd_count); | 399 | *ctx_wr_count, *ctx_rd_count); |
400 | 400 | ||
401 | return ok; | 401 | return ok; |
diff --git a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c index de5d0f78..b08f3e0a 100644 --- a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GK20A Therm | 2 | * GK20A Therm |
3 | * | 3 | * |
4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -43,7 +43,7 @@ int gk20a_init_therm_support(struct gk20a *g) | |||
43 | { | 43 | { |
44 | u32 err; | 44 | u32 err; |
45 | 45 | ||
46 | gk20a_dbg_fn(""); | 46 | nvgpu_log_fn(g, " "); |
47 | 47 | ||
48 | err = gk20a_init_therm_reset_enable_hw(g); | 48 | err = gk20a_init_therm_reset_enable_hw(g); |
49 | if (err) | 49 | if (err) |
@@ -73,7 +73,7 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g) | |||
73 | u32 active_engine_id = 0; | 73 | u32 active_engine_id = 0; |
74 | struct fifo_gk20a *f = &g->fifo; | 74 | struct fifo_gk20a *f = &g->fifo; |
75 | 75 | ||
76 | gk20a_dbg_fn(""); | 76 | nvgpu_log_fn(g, " "); |
77 | 77 | ||
78 | for (engine_id = 0; engine_id < f->num_engines; engine_id++) { | 78 | for (engine_id = 0; engine_id < f->num_engines; engine_id++) { |
79 | active_engine_id = f->active_engines_list[engine_id]; | 79 | active_engine_id = f->active_engines_list[engine_id]; |
@@ -104,6 +104,6 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g) | |||
104 | idle_filter &= ~therm_hubmmu_idle_filter_value_m(); | 104 | idle_filter &= ~therm_hubmmu_idle_filter_value_m(); |
105 | gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); | 105 | gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); |
106 | 106 | ||
107 | gk20a_dbg_fn("done"); | 107 | nvgpu_log_fn(g, "done"); |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c index 05b8fc61..62763da3 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | |||
@@ -107,7 +107,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch) | |||
107 | int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, | 107 | int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, |
108 | struct channel_gk20a *ch) | 108 | struct channel_gk20a *ch) |
109 | { | 109 | { |
110 | gk20a_dbg_fn(""); | 110 | struct gk20a *g = ch->g; |
111 | |||
112 | nvgpu_log_fn(g, " "); | ||
111 | 113 | ||
112 | /* check if channel is already bound to some TSG */ | 114 | /* check if channel is already bound to some TSG */ |
113 | if (gk20a_is_channel_marked_as_tsg(ch)) { | 115 | if (gk20a_is_channel_marked_as_tsg(ch)) { |
@@ -137,10 +139,10 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, | |||
137 | 139 | ||
138 | nvgpu_ref_get(&tsg->refcount); | 140 | nvgpu_ref_get(&tsg->refcount); |
139 | 141 | ||
140 | gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n", | 142 | nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n", |
141 | tsg->tsgid, ch->chid); | 143 | tsg->tsgid, ch->chid); |
142 | 144 | ||
143 | gk20a_dbg_fn("done"); | 145 | nvgpu_log_fn(g, "done"); |
144 | return 0; | 146 | return 0; |
145 | } | 147 | } |
146 | 148 | ||
@@ -167,7 +169,7 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch) | |||
167 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); | 169 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
168 | ch->tsgid = NVGPU_INVALID_TSG_ID; | 170 | ch->tsgid = NVGPU_INVALID_TSG_ID; |
169 | 171 | ||
170 | gk20a_dbg(gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", | 172 | nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", |
171 | tsg->tsgid, ch->chid); | 173 | tsg->tsgid, ch->chid); |
172 | 174 | ||
173 | return 0; | 175 | return 0; |
@@ -204,7 +206,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level) | |||
204 | struct gk20a *g = tsg->g; | 206 | struct gk20a *g = tsg->g; |
205 | int ret; | 207 | int ret; |
206 | 208 | ||
207 | gk20a_dbg(gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); | 209 | nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); |
208 | 210 | ||
209 | switch (level) { | 211 | switch (level) { |
210 | case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: | 212 | case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: |
@@ -227,7 +229,7 @@ int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) | |||
227 | { | 229 | { |
228 | struct gk20a *g = tsg->g; | 230 | struct gk20a *g = tsg->g; |
229 | 231 | ||
230 | gk20a_dbg(gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); | 232 | nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); |
231 | 233 | ||
232 | return g->ops.fifo.tsg_set_timeslice(tsg, timeslice); | 234 | return g->ops.fifo.tsg_set_timeslice(tsg, timeslice); |
233 | } | 235 | } |
@@ -300,7 +302,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid) | |||
300 | } | 302 | } |
301 | } | 303 | } |
302 | 304 | ||
303 | gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); | 305 | nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); |
304 | 306 | ||
305 | return tsg; | 307 | return tsg; |
306 | 308 | ||
@@ -343,7 +345,7 @@ void gk20a_tsg_release(struct nvgpu_ref *ref) | |||
343 | 345 | ||
344 | tsg->runlist_id = ~0; | 346 | tsg->runlist_id = ~0; |
345 | 347 | ||
346 | gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); | 348 | nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); |
347 | } | 349 | } |
348 | 350 | ||
349 | struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) | 351 | struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) |
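The tsg_gk20a.c hunks follow the same recipe: functions that previously logged without a device pointer fetch it from the channel or TSG first (g = ch->g in gk20a_tsg_bind_channel(), g = tsg->g in gk20a_tsg_set_runlist_interleave()). Sketch, with a hypothetical function name:

	static int tsg_example_op(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
	{
		struct gk20a *g = ch->g;	/* tsg->g works equally on TSG-only paths */

		nvgpu_log_fn(g, " ");
		nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
			tsg->tsgid, ch->chid);
		return 0;
	}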