author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-04-18 22:39:46 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-05-09 21:26:04 -0400
commit	dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree	806ba8575d146367ad1be00086ca0cdae35a6b28	/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent	7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are intentionally left in place because they are still used from other repositories. Because the new functions do not work without a pointer to struct gk20a, and piping one through just for logging is excessive, some log messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
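For reference, a minimal sketch of what the conversion means at a call site. The stand-in definitions below are assumptions made for illustration — the struct gk20a log_mask field, the gpu_dbg_fn bit value, and the printf backend are invented here and do not reflect the real nvgpu headers — but they show why the new API takes a struct gk20a pointer: log filtering becomes per GPU instance rather than global.

#include <stdio.h>

/* Illustrative stand-ins only; the real nvgpu macros differ in detail. */
struct gk20a {
	unsigned long log_mask;	/* hypothetical field: enabled gpu_dbg_* bits */
};

#define gpu_dbg_fn	(1UL << 0)	/* hypothetical mask bit */

/* Old style: no device context, so filtering can only be global. */
#define gk20a_dbg_fn(fmt, ...) \
	printf("fn: " fmt "\n", ##__VA_ARGS__)

/* New style: the gk20a pointer lets each GPU filter its own messages. */
#define nvgpu_log_fn(g, fmt, ...) \
	do { \
		if ((g)->log_mask & gpu_dbg_fn) \
			printf("fn: " fmt "\n", ##__VA_ARGS__); \
	} while (0)

/* A converted call site, mirroring the hunks below. */
static void example(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");	/* was: gk20a_dbg_fn(""); */
}

int main(void)
{
	struct gk20a g = { .log_mask = gpu_dbg_fn };
	example(&g);
	return 0;
}

This also makes the commit's trade-off concrete: where no struct gk20a pointer is within reach, converting a gk20a_dbg* call would mean threading g through the call chain, so such messages are dropped instead.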
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index e65ed278..21abdf9a 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -116,7 +116,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
 {
 	struct gk20a *g = c->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->ops.mm.init_inst_block(&c->inst_block, c->vm,
 		c->vm->gmmu_page_sizes[gmmu_page_size_big]);
@@ -208,7 +208,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 
 void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(ch->g, " ");
 
 	if (gk20a_is_channel_marked_as_tsg(ch))
 		return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt);
@@ -291,7 +291,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	struct dbg_session_channel_data *ch_data, *tmp;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	WARN_ON(ch->g == NULL);
 
@@ -351,7 +351,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	/* if engine reset was deferred, perform it now */
 	nvgpu_mutex_acquire(&f->deferred_reset_mutex);
 	if (g->fifo.deferred_reset_pending) {
-		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
+		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
 			" deferred, running now");
 		/* if lock is already taken, a reset is taking place
 		   so no need to repeat */
@@ -365,7 +365,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	if (!gk20a_channel_as_bound(ch))
 		goto unbind;
 
-	gk20a_dbg_info("freeing bound channel context, timeout=%ld",
+	nvgpu_log_info(g, "freeing bound channel context, timeout=%ld",
 		timeout);
 
 #ifdef CONFIG_GK20A_CTXSW_TRACE
@@ -626,7 +626,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 		runlist_id = gk20a_fifo_get_gr_runlist_id(g);
 	}
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	ch = allocate_channel(f);
 	if (ch == NULL) {
@@ -765,7 +765,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	u32 free_count;
 	u32 size = orig_size;
 
-	gk20a_dbg_fn("size %d", orig_size);
+	nvgpu_log_fn(c->g, "size %d", orig_size);
 
 	if (!e) {
 		nvgpu_err(c->g,
@@ -779,7 +779,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	if (q->put + size > q->size)
 		size = orig_size + (q->size - q->put);
 
-	gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d",
+	nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d",
 		c->chid, q->get, q->put);
 
 	free_count = (q->size - (q->put - q->get) - 1) % q->size;
@@ -812,7 +812,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	nvgpu_smp_wmb();
 
 	e->valid = true;
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(c->g, "done");
 
 	return 0;
 }
@@ -1132,7 +1132,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	c->gpfifo.entry_num = gpfifo_size;
 	c->gpfifo.get = c->gpfifo.put = 0;
 
-	gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
+	nvgpu_log_info(g, "channel %d : gpfifo_base 0x%016llx, size %d",
 		c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
 
 	g->ops.fifo.setup_userd(c);
@@ -1184,7 +1184,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	g->ops.fifo.bind_channel(c);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up_priv_cmd:
@@ -1400,7 +1400,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 	u64 pb_get;
 	u64 new_pb_get;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* Get status and clear the timer */
 	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
@@ -1480,7 +1480,7 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g)
  */
 static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(ch->g, " ");
 
 	gk20a_channel_clean_up_jobs(ch, true);
 
@@ -1499,7 +1499,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
 {
 	int put;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/*
 	 * Currently, the only work type is associated with a lock, which deals
@@ -1596,7 +1596,7 @@ static int gk20a_channel_poll_worker(void *arg)
 	struct nvgpu_timeout timeout;
 	int get = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_timeout_init(g, &timeout, watchdog_interval,
 			NVGPU_TIMER_CPU_TIMER);
@@ -1699,7 +1699,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 {
 	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/*
 	 * Warn if worker thread cannot run
@@ -2142,12 +2142,12 @@ int gk20a_channel_suspend(struct gk20a *g)
 	bool channels_in_use = false;
 	u32 active_runlist_ids = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 		if (gk20a_channel_get(ch)) {
-			gk20a_dbg_info("suspend channel %d", chid);
+			nvgpu_log_info(g, "suspend channel %d", chid);
 			/* disable channel */
 			gk20a_disable_channel_tsg(g, ch);
 			/* preempt the channel */
@@ -2175,7 +2175,7 @@ int gk20a_channel_suspend(struct gk20a *g)
 		}
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -2186,11 +2186,11 @@ int gk20a_channel_resume(struct gk20a *g)
 	bool channels_in_use = false;
 	u32 active_runlist_ids = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		if (gk20a_channel_get(&f->channel[chid])) {
-			gk20a_dbg_info("resume channel %d", chid);
+			nvgpu_log_info(g, "resume channel %d", chid);
 			g->ops.fifo.bind_channel(&f->channel[chid]);
 			channels_in_use = true;
 			active_runlist_ids |= BIT(f->channel[chid].runlist_id);
@@ -2201,7 +2201,7 @@ int gk20a_channel_resume(struct gk20a *g)
 	if (channels_in_use)
 		gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -2210,7 +2210,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 	struct fifo_gk20a *f = &g->fifo;
 	u32 chid;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/*
 	 * Ensure that all pending writes are actually done before trying to