Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 54
 1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index ef9ad731..bca201cf 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -118,7 +118,7 @@ static void free_channel(struct fifo_gk20a *f,
 {
 	struct gk20a *g = f->g;
 
-	trace_gk20a_release_used_channel(ch->hw_chid);
+	trace_gk20a_release_used_channel(ch->chid);
 	/* refcount is zero here and channel is in a freed/dead state */
 	nvgpu_mutex_acquire(&f->free_chs_mutex);
 	/* add to head to increase visibility of timing-related bugs */
@@ -189,7 +189,7 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
 
 static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
 {
-	return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->hw_chid, add, true);
+	return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->chid, add, true);
 }
 
 int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
@@ -295,7 +295,7 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 	ch->g->ops.fifo.disable_channel(ch);
 
 	if (channel_preempt && ch->ch_ctx.gr_ctx)
-		ch->g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
+		ch->g->ops.fifo.preempt_channel(ch->g, ch->chid);
 
 	gk20a_channel_abort_clean_up(ch);
 }
@@ -320,7 +320,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 
 	if (!channel_idle) {
 		nvgpu_err(ch->g, "jobs not freed for channel %d",
-			ch->hw_chid);
+			ch->chid);
 		return -EBUSY;
 	}
 
@@ -348,7 +348,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 	case NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW:
 	case NVGPU_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
 	case NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH:
-		ret = g->ops.fifo.set_runlist_interleave(g, ch->hw_chid,
+		ret = g->ops.fifo.set_runlist_interleave(g, ch->chid,
 							false, 0, level);
 		break;
 	default:
@@ -356,7 +356,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 		break;
 	}
 
-	gk20a_dbg(gpu_dbg_sched, "chid=%u interleave=%u", ch->hw_chid, level);
+	gk20a_dbg(gpu_dbg_sched, "chid=%u interleave=%u", ch->chid, level);
 
 	return ret ? ret : g->ops.fifo.update_runlist(g, ch->runlist_id, ~0, true, true);
 }
@@ -381,7 +381,7 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
 		ch->error_notifier->status = 0xffff;
 
 		nvgpu_err(ch->g,
-		    "error notifier set to %d for ch %d", error, ch->hw_chid);
+		    "error notifier set to %d for ch %d", error, ch->chid);
 	}
 }
 
@@ -405,7 +405,7 @@ static void gk20a_wait_until_counter_is_N(
 
 		nvgpu_warn(ch->g,
 			"%s: channel %d, still waiting, %s left: %d, waiting for: %d",
-			caller, ch->hw_chid, counter_name,
+			caller, ch->chid, counter_name,
 			atomic_read(counter), wait_value);
 
 		gk20a_channel_dump_ref_actions(ch);
@@ -462,7 +462,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 	WARN_ON(ch->g == NULL);
 
-	trace_gk20a_free_channel(ch->hw_chid);
+	trace_gk20a_free_channel(ch->chid);
 
 	/* abort channel and remove from runlist */
 	gk20a_disable_channel(ch);
@@ -483,7 +483,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 		nvgpu_spinlock_release(&ch->ref_obtain_lock);
 		nvgpu_err(ch->g,
 			"Extra %s() called to channel %u",
-			__func__, ch->hw_chid);
+			__func__, ch->chid);
 		return;
 	}
 	ch->referenceable = false;
@@ -597,7 +597,7 @@ unbind:
 		nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 		list_for_each_entry_safe(ch_data, tmp,
 				&dbg_s->ch_list, ch_entry) {
-			if (ch_data->chid == ch->hw_chid)
+			if (ch_data->chid == ch->chid)
 				dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
 		}
 		nvgpu_mutex_release(&dbg_s->ch_list_lock);
@@ -634,7 +634,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
 	nvgpu_spinlock_acquire(&ch->ref_actions_lock);
 
 	dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n",
-		ch->hw_chid, atomic_read(&ch->ref_count));
+		ch->chid, atomic_read(&ch->ref_count));
 
 	/* start at the oldest possible entry. put is next insertion point */
 	get = ch->ref_actions_put;
@@ -695,7 +695,7 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
  * reference must be held to it - either by you or the caller, which should be
  * documented well or otherwise clearly seen. This usually boils down to the
  * file from ioctls directly, or an explicit get in exception handlers when the
- * channel is found by a hw_chid.
+ * channel is found by a chid.
  *
  * Most global functions in this file require a reference to be held by the
  * caller.
@@ -716,7 +716,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 	nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
 	if (ret)
-		trace_gk20a_channel_get(ch->hw_chid, caller);
+		trace_gk20a_channel_get(ch->chid, caller);
 
 	return ret;
 }
@@ -724,7 +724,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
 {
 	gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put);
-	trace_gk20a_channel_put(ch->hw_chid, caller);
+	trace_gk20a_channel_put(ch->chid, caller);
 	atomic_dec(&ch->ref_count);
 	nvgpu_cond_broadcast(&ch->ref_count_dec_wq);
 
@@ -809,7 +809,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 		return NULL;
 	}
 
-	trace_gk20a_open_new_channel(ch->hw_chid);
+	trace_gk20a_open_new_channel(ch->chid);
 
 	BUG_ON(ch->g);
 	ch->g = g;
@@ -951,7 +951,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	if (!e) {
 		nvgpu_err(c->g,
 			"ch %d: priv cmd entry is null",
-			c->hw_chid);
+			c->chid);
 		return -EINVAL;
 	}
 
@@ -961,7 +961,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 		size = orig_size + (q->size - q->put);
 
 	gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d",
-			c->hw_chid, q->get, q->put);
+			c->chid, q->get, q->put);
 
 	free_count = (q->size - (q->put - q->get) - 1) % q->size;
 
@@ -1268,7 +1268,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	if (c->gpfifo.mem.size) {
 		nvgpu_err(g, "channel %d :"
-			"gpfifo already allocated", c->hw_chid);
+			"gpfifo already allocated", c->chid);
 		err = -EEXIST;
 		goto clean_up_idle;
 	}
@@ -1294,7 +1294,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	c->gpfifo.get = c->gpfifo.put = 0;
 
 	gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
-		c->hw_chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
+		c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
 
 	g->ops.fifo.setup_userd(c);
 
@@ -1653,7 +1653,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 	}
 
 	nvgpu_err(g, "Job on channel %d timed out",
-		ch->hw_chid);
+		ch->chid);
 
 	gk20a_debug_dump(g);
 	gk20a_gr_debug_dump(g);
@@ -1934,7 +1934,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 		rmb();
 		if ((q->get != e->off) && e->off != 0)
 			nvgpu_err(g, "requests out-of-order, ch=%d",
-				c->hw_chid);
+				c->chid);
 		q->get = e->off + e->size;
 	}
 
@@ -2161,7 +2161,7 @@ void gk20a_channel_update(struct channel_gk20a *c)
 		return;
 	}
 
-	trace_gk20a_channel_update(c->hw_chid);
+	trace_gk20a_channel_update(c->chid);
 	/* A queued channel is always checked for job cleanup. */
 	gk20a_channel_worker_enqueue(c);
 }
@@ -2492,7 +2492,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	g->ops.ltc.sync_debugfs(g);
 #endif
 
-	gk20a_dbg_info("channel %d", c->hw_chid);
+	gk20a_dbg_info("channel %d", c->chid);
 
 	/*
 	 * Job tracking is necessary for any of the following conditions:
@@ -2585,7 +2585,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 		down_read(&g->deterministic_busy);
 
 	trace_gk20a_channel_submit_gpfifo(g->name,
-					  c->hw_chid,
+					  c->chid,
 					  num_entries,
 					  flags,
 					  fence ? fence->id : 0,
@@ -2661,7 +2661,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 		up_read(&g->deterministic_busy);
 
 	trace_gk20a_channel_submitted_gpfifo(g->name,
-					     c->hw_chid,
+					     c->chid,
 					     num_entries,
 					     flags,
 					     post_fence ? post_fence->syncpt_id : 0,
@@ -2771,7 +2771,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	int err;
 
 	c->g = NULL;
-	c->hw_chid = chid;
+	c->chid = chid;
 	atomic_set(&c->bound, false);
 	nvgpu_spinlock_init(&c->ref_obtain_lock);
 	atomic_set(&c->ref_count, 0);