author     Richard Zhao <rizhao@nvidia.com>                        2017-06-27 14:20:58 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>     2017-06-30 01:34:35 -0400
commit     7d584bf868e53638f5c05b588dcd307e71cf9c82 (patch)
tree       ebd3eafd0f71a018f51ac34ec10f55e8669c013d /drivers
parent     d32bd6605d37f576e186d05e0853120cd9782fd3 (diff)
gpu: nvgpu: rename hw_chid to chid
hw_chid is a relative (virtual) channel id on vgpu; on native it is the same as the hardware channel id. Rename it to chid to avoid confusion.

Jira VFND-3796

Change-Id: I1c7924da1757330ace715a7c52ac61ec9dc7065c
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master/r/1509530
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
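The change itself is mechanical: the hw_chid member of struct channel_gk20a becomes chid, and the u32 hw_chid parameters of the fifo helpers become u32 chid, with no behavioural difference. A minimal sketch of the renamed field and one representative helper, heavily simplified from channel_gk20a.h and fifo_gk20a.h (illustrative only, not the full definitions):

#include <linux/types.h>	/* u32 */

struct gk20a;			/* opaque device struct, declared elsewhere */

struct channel_gk20a {
	int chid;		/* was hw_chid: hw id on native, guest-relative id on vgpu */
	/* ... many other members elided ... */
};

/* Helpers keep the same signature apart from the parameter name: */
int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);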
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/debug_fifo.c        4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c    14
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c               2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c           54
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h            2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c      12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c        2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c            8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c         4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c             116
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.h              16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h                    6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c                22
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.h                 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h                 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sync_gk20a.c               2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c                4
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fifo_gm20b.c               8
-rw-r--r--  drivers/gpu/nvgpu/gp10b/fifo_gp10b.c               6
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c                16
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/semaphore.h        4
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c                32
-rw-r--r--  drivers/gpu/nvgpu/vgpu/tsg_vgpu.c                  2
23 files changed, 170 insertions(+), 170 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/debug_fifo.c b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
index 6a28b1a5..04937dbe 100644
--- a/drivers/gpu/nvgpu/common/linux/debug_fifo.c
+++ b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
@@ -77,7 +77,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
77 ret = 0; 77 ret = 0;
78 } 78 }
79 79
80 if (!test_bit(ch->hw_chid, runlist->active_channels)) 80 if (!test_bit(ch->chid, runlist->active_channels))
81 return ret; 81 return ret;
82 82
83 if (gk20a_channel_get(ch)) { 83 if (gk20a_channel_get(ch)) {
@@ -85,7 +85,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
85 tsg = &f->tsg[ch->tsgid]; 85 tsg = &f->tsg[ch->tsgid];
86 86
87 seq_printf(s, "%-8d %-8d %-8d %-9d %-8d %-10d %-8d %-8d\n", 87 seq_printf(s, "%-8d %-8d %-8d %-9d %-8d %-10d %-8d %-8d\n",
88 ch->hw_chid, 88 ch->chid,
89 ch->tsgid, 89 ch->tsgid,
90 ch->tgid, 90 ch->tgid,
91 tsg ? tsg->timeslice_us : ch->timeslice_us, 91 tsg ? tsg->timeslice_us : ch->timeslice_us,
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 2466db40..5905e5a6 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -42,7 +42,7 @@ static void gk20a_channel_trace_sched_param(
42 const char *compute_preempt_mode), 42 const char *compute_preempt_mode),
43 struct channel_gk20a *ch) 43 struct channel_gk20a *ch)
44{ 44{
45 (trace)(ch->hw_chid, ch->tsgid, ch->pid, 45 (trace)(ch->chid, ch->tsgid, ch->pid,
46 gk20a_is_channel_marked_as_tsg(ch) ? 46 gk20a_is_channel_marked_as_tsg(ch) ?
47 tsg_gk20a_from_ch(ch)->timeslice_us : ch->timeslice_us, 47 tsg_gk20a_from_ch(ch)->timeslice_us : ch->timeslice_us,
48 ch->timeout_ms_max, 48 ch->timeout_ms_max,
@@ -525,7 +525,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
525 notif->timestamp.nanoseconds[0] = tv.tv_nsec; 525 notif->timestamp.nanoseconds[0] = tv.tv_nsec;
526 notif->timestamp.nanoseconds[1] = tv.tv_sec; 526 notif->timestamp.nanoseconds[1] = tv.tv_sec;
527 notif->info32 = 0xDEADBEEF; /* should be object name */ 527 notif->info32 = 0xDEADBEEF; /* should be object name */
528 notif->info16 = ch->hw_chid; /* should be method offset */ 528 notif->info16 = ch->chid; /* should be method offset */
529 529
530notif_clean_up: 530notif_clean_up:
531 dma_buf_vunmap(dmabuf, notif); 531 dma_buf_vunmap(dmabuf, notif);
@@ -578,7 +578,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
578 if (event_id_data->event_posted) { 578 if (event_id_data->event_posted) {
579 gk20a_dbg_info( 579 gk20a_dbg_info(
580 "found pending event_id=%d on chid=%d\n", 580 "found pending event_id=%d on chid=%d\n",
581 event_id, ch->hw_chid); 581 event_id, ch->chid);
582 mask = (POLLPRI | POLLIN); 582 mask = (POLLPRI | POLLIN);
583 event_id_data->event_posted = false; 583 event_id_data->event_posted = false;
584 } 584 }
@@ -662,7 +662,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
662 662
663 gk20a_dbg_info( 663 gk20a_dbg_info(
664 "posting event for event_id=%d on ch=%d\n", 664 "posting event for event_id=%d on ch=%d\n",
665 event_id, ch->hw_chid); 665 event_id, ch->chid);
666 event_id_data->event_posted = true; 666 event_id_data->event_posted = true;
667 667
668 wake_up_interruptible_all(&event_id_data->event_id_wq); 668 wake_up_interruptible_all(&event_id_data->event_id_wq);
@@ -713,7 +713,7 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
713 goto clean_up_file; 713 goto clean_up_file;
714 } 714 }
715 event_id_data->g = g; 715 event_id_data->g = g;
716 event_id_data->id = ch->hw_chid; 716 event_id_data->id = ch->chid;
717 event_id_data->is_tsg = false; 717 event_id_data->is_tsg = false;
718 event_id_data->event_id = event_id; 718 event_id_data->event_id = event_id;
719 719
@@ -1006,7 +1006,7 @@ long gk20a_channel_ioctl(struct file *filp,
1006 u32 timeout = 1006 u32 timeout =
1007 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout; 1007 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
1008 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", 1008 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1009 timeout, ch->hw_chid); 1009 timeout, ch->chid);
1010 ch->timeout_ms_max = timeout; 1010 ch->timeout_ms_max = timeout;
1011 gk20a_channel_trace_sched_param( 1011 gk20a_channel_trace_sched_param(
1012 trace_gk20a_channel_set_timeout, ch); 1012 trace_gk20a_channel_set_timeout, ch);
@@ -1020,7 +1020,7 @@ long gk20a_channel_ioctl(struct file *filp,
1020 ((struct nvgpu_set_timeout_ex_args *)buf)->flags & 1020 ((struct nvgpu_set_timeout_ex_args *)buf)->flags &
1021 (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP)); 1021 (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
1022 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", 1022 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1023 timeout, ch->hw_chid); 1023 timeout, ch->chid);
1024 ch->timeout_ms_max = timeout; 1024 ch->timeout_ms_max = timeout;
1025 ch->timeout_debug_dump = timeout_debug_dump; 1025 ch->timeout_debug_dump = timeout_debug_dump;
1026 gk20a_channel_trace_sched_param( 1026 gk20a_channel_trace_sched_param(
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index a54ce831..3e916b9d 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -433,7 +433,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
433 */ 433 */
434 nvgpu_semaphore_pool_get(s->hw_sema->p); 434 nvgpu_semaphore_pool_get(s->hw_sema->p);
435 435
436 gpu_sema_dbg(ch->g, "Allocated semaphore (c=%d)", ch->hw_chid); 436 gpu_sema_dbg(ch->g, "Allocated semaphore (c=%d)", ch->chid);
437 437
438 return s; 438 return s;
439} 439}
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index ef9ad731..bca201cf 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -118,7 +118,7 @@ static void free_channel(struct fifo_gk20a *f,
118{ 118{
119 struct gk20a *g = f->g; 119 struct gk20a *g = f->g;
120 120
121 trace_gk20a_release_used_channel(ch->hw_chid); 121 trace_gk20a_release_used_channel(ch->chid);
122 /* refcount is zero here and channel is in a freed/dead state */ 122 /* refcount is zero here and channel is in a freed/dead state */
123 nvgpu_mutex_acquire(&f->free_chs_mutex); 123 nvgpu_mutex_acquire(&f->free_chs_mutex);
124 /* add to head to increase visibility of timing-related bugs */ 124 /* add to head to increase visibility of timing-related bugs */
@@ -189,7 +189,7 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
189 189
190static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add) 190static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
191{ 191{
192 return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->hw_chid, add, true); 192 return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->chid, add, true);
193} 193}
194 194
195int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch) 195int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
@@ -295,7 +295,7 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
295 ch->g->ops.fifo.disable_channel(ch); 295 ch->g->ops.fifo.disable_channel(ch);
296 296
297 if (channel_preempt && ch->ch_ctx.gr_ctx) 297 if (channel_preempt && ch->ch_ctx.gr_ctx)
298 ch->g->ops.fifo.preempt_channel(ch->g, ch->hw_chid); 298 ch->g->ops.fifo.preempt_channel(ch->g, ch->chid);
299 299
300 gk20a_channel_abort_clean_up(ch); 300 gk20a_channel_abort_clean_up(ch);
301} 301}
@@ -320,7 +320,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
320 320
321 if (!channel_idle) { 321 if (!channel_idle) {
322 nvgpu_err(ch->g, "jobs not freed for channel %d", 322 nvgpu_err(ch->g, "jobs not freed for channel %d",
323 ch->hw_chid); 323 ch->chid);
324 return -EBUSY; 324 return -EBUSY;
325 } 325 }
326 326
@@ -348,7 +348,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
348 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW: 348 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW:
349 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_MEDIUM: 349 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
350 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH: 350 case NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH:
351 ret = g->ops.fifo.set_runlist_interleave(g, ch->hw_chid, 351 ret = g->ops.fifo.set_runlist_interleave(g, ch->chid,
352 false, 0, level); 352 false, 0, level);
353 break; 353 break;
354 default: 354 default:
@@ -356,7 +356,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
356 break; 356 break;
357 } 357 }
358 358
359 gk20a_dbg(gpu_dbg_sched, "chid=%u interleave=%u", ch->hw_chid, level); 359 gk20a_dbg(gpu_dbg_sched, "chid=%u interleave=%u", ch->chid, level);
360 360
361 return ret ? ret : g->ops.fifo.update_runlist(g, ch->runlist_id, ~0, true, true); 361 return ret ? ret : g->ops.fifo.update_runlist(g, ch->runlist_id, ~0, true, true);
362} 362}
@@ -381,7 +381,7 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
381 ch->error_notifier->status = 0xffff; 381 ch->error_notifier->status = 0xffff;
382 382
383 nvgpu_err(ch->g, 383 nvgpu_err(ch->g,
384 "error notifier set to %d for ch %d", error, ch->hw_chid); 384 "error notifier set to %d for ch %d", error, ch->chid);
385 } 385 }
386} 386}
387 387
@@ -405,7 +405,7 @@ static void gk20a_wait_until_counter_is_N(
405 405
406 nvgpu_warn(ch->g, 406 nvgpu_warn(ch->g,
407 "%s: channel %d, still waiting, %s left: %d, waiting for: %d", 407 "%s: channel %d, still waiting, %s left: %d, waiting for: %d",
408 caller, ch->hw_chid, counter_name, 408 caller, ch->chid, counter_name,
409 atomic_read(counter), wait_value); 409 atomic_read(counter), wait_value);
410 410
411 gk20a_channel_dump_ref_actions(ch); 411 gk20a_channel_dump_ref_actions(ch);
@@ -462,7 +462,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
462 462
463 WARN_ON(ch->g == NULL); 463 WARN_ON(ch->g == NULL);
464 464
465 trace_gk20a_free_channel(ch->hw_chid); 465 trace_gk20a_free_channel(ch->chid);
466 466
467 /* abort channel and remove from runlist */ 467 /* abort channel and remove from runlist */
468 gk20a_disable_channel(ch); 468 gk20a_disable_channel(ch);
@@ -483,7 +483,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
483 nvgpu_spinlock_release(&ch->ref_obtain_lock); 483 nvgpu_spinlock_release(&ch->ref_obtain_lock);
484 nvgpu_err(ch->g, 484 nvgpu_err(ch->g,
485 "Extra %s() called to channel %u", 485 "Extra %s() called to channel %u",
486 __func__, ch->hw_chid); 486 __func__, ch->chid);
487 return; 487 return;
488 } 488 }
489 ch->referenceable = false; 489 ch->referenceable = false;
@@ -597,7 +597,7 @@ unbind:
597 nvgpu_mutex_acquire(&dbg_s->ch_list_lock); 597 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
598 list_for_each_entry_safe(ch_data, tmp, 598 list_for_each_entry_safe(ch_data, tmp,
599 &dbg_s->ch_list, ch_entry) { 599 &dbg_s->ch_list, ch_entry) {
600 if (ch_data->chid == ch->hw_chid) 600 if (ch_data->chid == ch->chid)
601 dbg_unbind_single_channel_gk20a(dbg_s, ch_data); 601 dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
602 } 602 }
603 nvgpu_mutex_release(&dbg_s->ch_list_lock); 603 nvgpu_mutex_release(&dbg_s->ch_list_lock);
@@ -634,7 +634,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
634 nvgpu_spinlock_acquire(&ch->ref_actions_lock); 634 nvgpu_spinlock_acquire(&ch->ref_actions_lock);
635 635
636 dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n", 636 dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n",
637 ch->hw_chid, atomic_read(&ch->ref_count)); 637 ch->chid, atomic_read(&ch->ref_count));
638 638
639 /* start at the oldest possible entry. put is next insertion point */ 639 /* start at the oldest possible entry. put is next insertion point */
640 get = ch->ref_actions_put; 640 get = ch->ref_actions_put;
@@ -695,7 +695,7 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
695 * reference must be held to it - either by you or the caller, which should be 695 * reference must be held to it - either by you or the caller, which should be
696 * documented well or otherwise clearly seen. This usually boils down to the 696 * documented well or otherwise clearly seen. This usually boils down to the
697 * file from ioctls directly, or an explicit get in exception handlers when the 697 * file from ioctls directly, or an explicit get in exception handlers when the
698 * channel is found by a hw_chid. 698 * channel is found by a chid.
699 * 699 *
700 * Most global functions in this file require a reference to be held by the 700 * Most global functions in this file require a reference to be held by the
701 * caller. 701 * caller.
@@ -716,7 +716,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
716 nvgpu_spinlock_release(&ch->ref_obtain_lock); 716 nvgpu_spinlock_release(&ch->ref_obtain_lock);
717 717
718 if (ret) 718 if (ret)
719 trace_gk20a_channel_get(ch->hw_chid, caller); 719 trace_gk20a_channel_get(ch->chid, caller);
720 720
721 return ret; 721 return ret;
722} 722}
@@ -724,7 +724,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
724void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller) 724void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
725{ 725{
726 gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put); 726 gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put);
727 trace_gk20a_channel_put(ch->hw_chid, caller); 727 trace_gk20a_channel_put(ch->chid, caller);
728 atomic_dec(&ch->ref_count); 728 atomic_dec(&ch->ref_count);
729 nvgpu_cond_broadcast(&ch->ref_count_dec_wq); 729 nvgpu_cond_broadcast(&ch->ref_count_dec_wq);
730 730
@@ -809,7 +809,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
809 return NULL; 809 return NULL;
810 } 810 }
811 811
812 trace_gk20a_open_new_channel(ch->hw_chid); 812 trace_gk20a_open_new_channel(ch->chid);
813 813
814 BUG_ON(ch->g); 814 BUG_ON(ch->g);
815 ch->g = g; 815 ch->g = g;
@@ -951,7 +951,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
951 if (!e) { 951 if (!e) {
952 nvgpu_err(c->g, 952 nvgpu_err(c->g,
953 "ch %d: priv cmd entry is null", 953 "ch %d: priv cmd entry is null",
954 c->hw_chid); 954 c->chid);
955 return -EINVAL; 955 return -EINVAL;
956 } 956 }
957 957
@@ -961,7 +961,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
961 size = orig_size + (q->size - q->put); 961 size = orig_size + (q->size - q->put);
962 962
963 gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d", 963 gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d",
964 c->hw_chid, q->get, q->put); 964 c->chid, q->get, q->put);
965 965
966 free_count = (q->size - (q->put - q->get) - 1) % q->size; 966 free_count = (q->size - (q->put - q->get) - 1) % q->size;
967 967
@@ -1268,7 +1268,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
1268 1268
1269 if (c->gpfifo.mem.size) { 1269 if (c->gpfifo.mem.size) {
1270 nvgpu_err(g, "channel %d :" 1270 nvgpu_err(g, "channel %d :"
1271 "gpfifo already allocated", c->hw_chid); 1271 "gpfifo already allocated", c->chid);
1272 err = -EEXIST; 1272 err = -EEXIST;
1273 goto clean_up_idle; 1273 goto clean_up_idle;
1274 } 1274 }
@@ -1294,7 +1294,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
1294 c->gpfifo.get = c->gpfifo.put = 0; 1294 c->gpfifo.get = c->gpfifo.put = 0;
1295 1295
1296 gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d", 1296 gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
1297 c->hw_chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num); 1297 c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
1298 1298
1299 g->ops.fifo.setup_userd(c); 1299 g->ops.fifo.setup_userd(c);
1300 1300
@@ -1653,7 +1653,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
1653 } 1653 }
1654 1654
1655 nvgpu_err(g, "Job on channel %d timed out", 1655 nvgpu_err(g, "Job on channel %d timed out",
1656 ch->hw_chid); 1656 ch->chid);
1657 1657
1658 gk20a_debug_dump(g); 1658 gk20a_debug_dump(g);
1659 gk20a_gr_debug_dump(g); 1659 gk20a_gr_debug_dump(g);
@@ -1934,7 +1934,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
1934 rmb(); 1934 rmb();
1935 if ((q->get != e->off) && e->off != 0) 1935 if ((q->get != e->off) && e->off != 0)
1936 nvgpu_err(g, "requests out-of-order, ch=%d", 1936 nvgpu_err(g, "requests out-of-order, ch=%d",
1937 c->hw_chid); 1937 c->chid);
1938 q->get = e->off + e->size; 1938 q->get = e->off + e->size;
1939 } 1939 }
1940 1940
@@ -2161,7 +2161,7 @@ void gk20a_channel_update(struct channel_gk20a *c)
2161 return; 2161 return;
2162 } 2162 }
2163 2163
2164 trace_gk20a_channel_update(c->hw_chid); 2164 trace_gk20a_channel_update(c->chid);
2165 /* A queued channel is always checked for job cleanup. */ 2165 /* A queued channel is always checked for job cleanup. */
2166 gk20a_channel_worker_enqueue(c); 2166 gk20a_channel_worker_enqueue(c);
2167} 2167}
@@ -2492,7 +2492,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
2492 g->ops.ltc.sync_debugfs(g); 2492 g->ops.ltc.sync_debugfs(g);
2493#endif 2493#endif
2494 2494
2495 gk20a_dbg_info("channel %d", c->hw_chid); 2495 gk20a_dbg_info("channel %d", c->chid);
2496 2496
2497 /* 2497 /*
2498 * Job tracking is necessary for any of the following conditions: 2498 * Job tracking is necessary for any of the following conditions:
@@ -2585,7 +2585,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
2585 down_read(&g->deterministic_busy); 2585 down_read(&g->deterministic_busy);
2586 2586
2587 trace_gk20a_channel_submit_gpfifo(g->name, 2587 trace_gk20a_channel_submit_gpfifo(g->name,
2588 c->hw_chid, 2588 c->chid,
2589 num_entries, 2589 num_entries,
2590 flags, 2590 flags,
2591 fence ? fence->id : 0, 2591 fence ? fence->id : 0,
@@ -2661,7 +2661,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
2661 up_read(&g->deterministic_busy); 2661 up_read(&g->deterministic_busy);
2662 2662
2663 trace_gk20a_channel_submitted_gpfifo(g->name, 2663 trace_gk20a_channel_submitted_gpfifo(g->name,
2664 c->hw_chid, 2664 c->chid,
2665 num_entries, 2665 num_entries,
2666 flags, 2666 flags,
2667 post_fence ? post_fence->syncpt_id : 0, 2667 post_fence ? post_fence->syncpt_id : 0,
@@ -2771,7 +2771,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
2771 int err; 2771 int err;
2772 2772
2773 c->g = NULL; 2773 c->g = NULL;
2774 c->hw_chid = chid; 2774 c->chid = chid;
2775 atomic_set(&c->bound, false); 2775 atomic_set(&c->bound, false);
2776 nvgpu_spinlock_init(&c->ref_obtain_lock); 2776 nvgpu_spinlock_init(&c->ref_obtain_lock);
2777 atomic_set(&c->ref_count, 0); 2777 atomic_set(&c->ref_count, 0);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 8cf00e7c..380440b9 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -185,7 +185,7 @@ struct channel_gk20a {
185 185
186 struct nvgpu_semaphore_int *hw_sema; 186 struct nvgpu_semaphore_int *hw_sema;
187 187
188 int hw_chid; 188 int chid;
189 bool wdt_enabled; 189 bool wdt_enabled;
190 atomic_t bound; 190 atomic_t bound;
191 bool first_init; 191 bool first_init;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 1b650cdd..c9c03d37 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -331,10 +331,10 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
331 sp->nvhost_dev = c->g->nvhost_dev; 331 sp->nvhost_dev = c->g->nvhost_dev;
332 332
333 snprintf(syncpt_name, sizeof(syncpt_name), 333 snprintf(syncpt_name, sizeof(syncpt_name),
334 "%s_%d", c->g->name, c->hw_chid); 334 "%s_%d", c->g->name, c->chid);
335 335
336 sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost_dev, 336 sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost_dev,
337 c->hw_chid, syncpt_name); 337 c->chid, syncpt_name);
338 if (!sp->id) { 338 if (!sp->id) {
339 nvgpu_kfree(c->g, sp); 339 nvgpu_kfree(c->g, sp);
340 nvgpu_err(c->g, "failed to get free syncpt"); 340 nvgpu_err(c->g, "failed to get free syncpt");
@@ -497,7 +497,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
497 struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd, 497 struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
498 int cmd_size, bool acquire, bool wfi) 498 int cmd_size, bool acquire, bool wfi)
499{ 499{
500 int ch = c->hw_chid; 500 int ch = c->chid;
501 u32 ob, off = cmd->off; 501 u32 ob, off = cmd->off;
502 u64 va; 502 u64 va;
503 503
@@ -557,7 +557,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
557 gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u owner=%-3d" 557 gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u owner=%-3d"
558 "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u", 558 "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
559 ch, nvgpu_semaphore_get_value(s), 559 ch, nvgpu_semaphore_get_value(s),
560 s->hw_sema->ch->hw_chid, va, cmd->gva, 560 s->hw_sema->ch->chid, va, cmd->gva,
561 cmd->mem->gpu_va, ob); 561 cmd->mem->gpu_va, ob);
562 else 562 else
563 gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) va=0x%llx " 563 gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) va=0x%llx "
@@ -911,12 +911,12 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c)
911 if (c->vm->as_share) 911 if (c->vm->as_share)
912 asid = c->vm->as_share->id; 912 asid = c->vm->as_share->id;
913 913
914 sprintf(pool_name, "semaphore_pool-%d", c->hw_chid); 914 sprintf(pool_name, "semaphore_pool-%d", c->chid);
915 sema->pool = c->vm->sema_pool; 915 sema->pool = c->vm->sema_pool;
916 916
917#ifdef CONFIG_SYNC 917#ifdef CONFIG_SYNC
918 sema->timeline = gk20a_sync_timeline_create( 918 sema->timeline = gk20a_sync_timeline_create(
919 "gk20a_ch%d_as%d", c->hw_chid, asid); 919 "gk20a_ch%d_as%d", c->chid, asid);
920 if (!sema->timeline) { 920 if (!sema->timeline) {
921 gk20a_channel_semaphore_destroy(&sema->ops); 921 gk20a_channel_semaphore_destroy(&sema->ops);
922 return NULL; 922 return NULL;
diff --git a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
index c189a00c..f3866d6a 100644
--- a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
@@ -693,7 +693,7 @@ void gk20a_ctxsw_trace_channel_reset(struct gk20a *g, struct channel_gk20a *ch)
693 gk20a_ctxsw_trace_write(g, &entry); 693 gk20a_ctxsw_trace_write(g, &entry);
694 gk20a_ctxsw_trace_wake_up(g, 0); 694 gk20a_ctxsw_trace_wake_up(g, 0);
695#endif 695#endif
696 trace_gk20a_channel_reset(ch->hw_chid, ch->tsgid); 696 trace_gk20a_channel_reset(ch->chid, ch->tsgid);
697} 697}
698 698
699void gk20a_ctxsw_trace_tsg_reset(struct gk20a *g, struct tsg_gk20a *tsg) 699void gk20a_ctxsw_trace_tsg_reset(struct gk20a *g, struct tsg_gk20a *tsg)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 9b0d9456..c7552f04 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -445,7 +445,7 @@ int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
445 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects, 445 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
446 dbg_profiler_object_data, prof_obj_entry) { 446 dbg_profiler_object_data, prof_obj_entry) {
447 if ((prof_obj->session_id == dbg_s->id) && 447 if ((prof_obj->session_id == dbg_s->id) &&
448 (prof_obj->ch->hw_chid == chid)) { 448 (prof_obj->ch->chid == chid)) {
449 if (prof_obj->has_reservation) { 449 if (prof_obj->has_reservation) {
450 g->ops.dbg_session_ops. 450 g->ops.dbg_session_ops.
451 release_profiler_reservation(dbg_s, prof_obj); 451 release_profiler_reservation(dbg_s, prof_obj);
@@ -504,7 +504,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
504 nvgpu_mutex_acquire(&dbg_s->ch_list_lock); 504 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
505 nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list, 505 nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
506 dbg_session_channel_data, ch_entry) { 506 dbg_session_channel_data, ch_entry) {
507 if (ch->hw_chid == ch_data->chid) { 507 if (ch->chid == ch_data->chid) {
508 channel_found = true; 508 channel_found = true;
509 break; 509 break;
510 } 510 }
@@ -601,7 +601,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
601 return -EINVAL; 601 return -EINVAL;
602 } 602 }
603 603
604 gk20a_dbg_fn("%s hwchid=%d", g->name, ch->hw_chid); 604 gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);
605 605
606 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 606 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
607 nvgpu_mutex_acquire(&ch->dbg_s_lock); 607 nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -613,7 +613,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
613 } 613 }
614 ch_data->ch_f = f; 614 ch_data->ch_f = f;
615 ch_data->channel_fd = args->channel_fd; 615 ch_data->channel_fd = args->channel_fd;
616 ch_data->chid = ch->hw_chid; 616 ch_data->chid = ch->chid;
617 nvgpu_init_list_node(&ch_data->ch_entry); 617 nvgpu_init_list_node(&ch_data->ch_entry);
618 618
619 session_data = nvgpu_kzalloc(g, sizeof(*session_data)); 619 session_data = nvgpu_kzalloc(g, sizeof(*session_data));
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 4589b83e..4235788b 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -628,8 +628,8 @@ static int gk20a_fecs_trace_bind_channel(struct gk20a *g,
628 u32 aperture; 628 u32 aperture;
629 629
630 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, 630 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw,
631 "hw_chid=%d context_ptr=%x inst_block=%llx", 631 "chid=%d context_ptr=%x inst_block=%llx",
632 ch->hw_chid, context_ptr, 632 ch->chid, context_ptr,
633 gk20a_mm_inst_block_addr(g, &ch->inst_block)); 633 gk20a_mm_inst_block_addr(g, &ch->inst_block));
634 634
635 if (!trace) 635 if (!trace)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 69a3e706..17f3743f 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -46,7 +46,7 @@
46#define FECS_METHOD_WFI_RESTORE 0x80000 46#define FECS_METHOD_WFI_RESTORE 0x80000
47 47
48static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, 48static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
49 u32 hw_chid, bool add, 49 u32 chid, bool add,
50 bool wait_for_finish); 50 bool wait_for_finish);
51static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg); 51static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg);
52 52
@@ -1395,7 +1395,7 @@ void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
1395 struct channel_gk20a *refch) 1395 struct channel_gk20a *refch)
1396{ 1396{
1397 nvgpu_err(g, 1397 nvgpu_err(g,
1398 "channel %d generated a mmu fault", refch->hw_chid); 1398 "channel %d generated a mmu fault", refch->chid);
1399 gk20a_set_error_notifier(refch, 1399 gk20a_set_error_notifier(refch,
1400 NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); 1400 NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
1401} 1401}
@@ -1455,7 +1455,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
1455 if (gk20a_is_channel_marked_as_tsg(ch)) 1455 if (gk20a_is_channel_marked_as_tsg(ch))
1456 engines = gk20a_fifo_engines_on_id(g, ch->tsgid, true); 1456 engines = gk20a_fifo_engines_on_id(g, ch->tsgid, true);
1457 else 1457 else
1458 engines = gk20a_fifo_engines_on_id(g, ch->hw_chid, false); 1458 engines = gk20a_fifo_engines_on_id(g, ch->chid, false);
1459 if (!engines) 1459 if (!engines)
1460 goto clean_up; 1460 goto clean_up;
1461 1461
@@ -1673,7 +1673,7 @@ static bool gk20a_fifo_handle_mmu_fault(
1673 } else { 1673 } else {
1674 nvgpu_err(g, 1674 nvgpu_err(g,
1675 "mmu error in freed channel %d", 1675 "mmu error in freed channel %d",
1676 ch->hw_chid); 1676 ch->chid);
1677 } 1677 }
1678 } else if (mmfault_info.inst_ptr == 1678 } else if (mmfault_info.inst_ptr ==
1679 gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) { 1679 gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) {
@@ -1794,7 +1794,7 @@ static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
1794 return engines; 1794 return engines;
1795} 1795}
1796 1796
1797void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose) 1797void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose)
1798{ 1798{
1799 u32 engines; 1799 u32 engines;
1800 1800
@@ -1803,12 +1803,12 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
1803 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1803 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1804 gr_gk20a_disable_ctxsw(g); 1804 gr_gk20a_disable_ctxsw(g);
1805 1805
1806 engines = gk20a_fifo_engines_on_id(g, hw_chid, false); 1806 engines = gk20a_fifo_engines_on_id(g, chid, false);
1807 1807
1808 if (engines) 1808 if (engines)
1809 gk20a_fifo_recover(g, engines, hw_chid, false, true, verbose); 1809 gk20a_fifo_recover(g, engines, chid, false, true, verbose);
1810 else { 1810 else {
1811 struct channel_gk20a *ch = &g->fifo.channel[hw_chid]; 1811 struct channel_gk20a *ch = &g->fifo.channel[chid];
1812 1812
1813 if (gk20a_channel_get(ch)) { 1813 if (gk20a_channel_get(ch)) {
1814 gk20a_channel_abort(ch, false); 1814 gk20a_channel_abort(ch, false);
@@ -1976,7 +1976,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
1976 gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); 1976 gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
1977 } else { 1977 } else {
1978 gk20a_set_error_notifier(ch, err_code); 1978 gk20a_set_error_notifier(ch, err_code);
1979 gk20a_fifo_recover_ch(g, ch->hw_chid, verbose); 1979 gk20a_fifo_recover_ch(g, ch->chid, verbose);
1980 } 1980 }
1981 1981
1982 return 0; 1982 return 0;
@@ -2102,7 +2102,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2102 */ 2102 */
2103 if (progress) { 2103 if (progress) {
2104 gk20a_dbg_info("progress on tsg=%d ch=%d", 2104 gk20a_dbg_info("progress on tsg=%d ch=%d",
2105 tsg->tsgid, ch->hw_chid); 2105 tsg->tsgid, ch->chid);
2106 gk20a_channel_put(ch); 2106 gk20a_channel_put(ch);
2107 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; 2107 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
2108 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2108 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2119,7 +2119,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2119 */ 2119 */
2120 if (recover) { 2120 if (recover) {
2121 gk20a_dbg_info("timeout on tsg=%d ch=%d", 2121 gk20a_dbg_info("timeout on tsg=%d ch=%d",
2122 tsg->tsgid, ch->hw_chid); 2122 tsg->tsgid, ch->chid);
2123 *ms = ch->timeout_accumulated_ms; 2123 *ms = ch->timeout_accumulated_ms;
2124 gk20a_channel_put(ch); 2124 gk20a_channel_put(ch);
2125 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2125 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2629,7 +2629,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2629 return ret; 2629 return ret;
2630} 2630}
2631 2631
2632int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) 2632int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
2633{ 2633{
2634 struct fifo_gk20a *f = &g->fifo; 2634 struct fifo_gk20a *f = &g->fifo;
2635 u32 ret = 0; 2635 u32 ret = 0;
@@ -2637,7 +2637,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
2637 u32 mutex_ret = 0; 2637 u32 mutex_ret = 0;
2638 u32 i; 2638 u32 i;
2639 2639
2640 gk20a_dbg_fn("%d", hw_chid); 2640 gk20a_dbg_fn("%d", chid);
2641 2641
2642 /* we have no idea which runlist we are using. lock all */ 2642 /* we have no idea which runlist we are using. lock all */
2643 for (i = 0; i < g->fifo.max_runlists; i++) 2643 for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2645,7 +2645,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
2645 2645
2646 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2646 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2647 2647
2648 ret = __locked_fifo_preempt(g, hw_chid, false); 2648 ret = __locked_fifo_preempt(g, chid, false);
2649 2649
2650 if (!mutex_ret) 2650 if (!mutex_ret)
2651 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2651 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2690,7 +2690,7 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
2690 if (gk20a_is_channel_marked_as_tsg(ch)) 2690 if (gk20a_is_channel_marked_as_tsg(ch))
2691 err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid); 2691 err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
2692 else 2692 else
2693 err = g->ops.fifo.preempt_channel(ch->g, ch->hw_chid); 2693 err = g->ops.fifo.preempt_channel(ch->g, ch->chid);
2694 2694
2695 return err; 2695 return err;
2696} 2696}
@@ -2973,7 +2973,7 @@ u32 gk20a_fifo_default_timeslice_us(struct gk20a *g)
2973 2973
2974void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist) 2974void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist)
2975{ 2975{
2976 runlist[0] = ram_rl_entry_chid_f(ch->hw_chid); 2976 runlist[0] = ram_rl_entry_chid_f(ch->chid);
2977 runlist[1] = 0; 2977 runlist[1] = 0;
2978} 2978}
2979 2979
@@ -3066,7 +3066,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
3066 down_read(&tsg->ch_list_lock); 3066 down_read(&tsg->ch_list_lock);
3067 /* add runnable channels bound to this TSG */ 3067 /* add runnable channels bound to this TSG */
3068 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 3068 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
3069 if (!test_bit(ch->hw_chid, 3069 if (!test_bit(ch->chid,
3070 runlist->active_channels)) 3070 runlist->active_channels))
3071 continue; 3071 continue;
3072 3072
@@ -3076,7 +3076,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
3076 } 3076 }
3077 3077
3078 gk20a_dbg_info("add channel %d to runlist", 3078 gk20a_dbg_info("add channel %d to runlist",
3079 ch->hw_chid); 3079 ch->chid);
3080 f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); 3080 f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
3081 gk20a_dbg_info( 3081 gk20a_dbg_info(
3082 "run list count %d runlist [0] %x [1] %x\n", 3082 "run list count %d runlist [0] %x [1] %x\n",
@@ -3148,7 +3148,7 @@ int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
3148} 3148}
3149 3149
3150static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, 3150static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
3151 u32 hw_chid, bool add, 3151 u32 chid, bool add,
3152 bool wait_for_finish) 3152 bool wait_for_finish)
3153{ 3153{
3154 int ret = 0; 3154 int ret = 0;
@@ -3166,24 +3166,24 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
3166 3166
3167 /* valid channel, add/remove it from active list. 3167 /* valid channel, add/remove it from active list.
3168 Otherwise, keep active list untouched for suspend/resume. */ 3168 Otherwise, keep active list untouched for suspend/resume. */
3169 if (hw_chid != FIFO_INVAL_CHANNEL_ID) { 3169 if (chid != FIFO_INVAL_CHANNEL_ID) {
3170 ch = &f->channel[hw_chid]; 3170 ch = &f->channel[chid];
3171 if (gk20a_is_channel_marked_as_tsg(ch)) 3171 if (gk20a_is_channel_marked_as_tsg(ch))
3172 tsg = &f->tsg[ch->tsgid]; 3172 tsg = &f->tsg[ch->tsgid];
3173 3173
3174 if (add) { 3174 if (add) {
3175 if (test_and_set_bit(hw_chid, 3175 if (test_and_set_bit(chid,
3176 runlist->active_channels) == 1) 3176 runlist->active_channels) == 1)
3177 return 0; 3177 return 0;
3178 if (tsg && ++tsg->num_active_channels) 3178 if (tsg && ++tsg->num_active_channels)
3179 set_bit(f->channel[hw_chid].tsgid, 3179 set_bit(f->channel[chid].tsgid,
3180 runlist->active_tsgs); 3180 runlist->active_tsgs);
3181 } else { 3181 } else {
3182 if (test_and_clear_bit(hw_chid, 3182 if (test_and_clear_bit(chid,
3183 runlist->active_channels) == 0) 3183 runlist->active_channels) == 0)
3184 return 0; 3184 return 0;
3185 if (tsg && --tsg->num_active_channels == 0) 3185 if (tsg && --tsg->num_active_channels == 0)
3186 clear_bit(f->channel[hw_chid].tsgid, 3186 clear_bit(f->channel[chid].tsgid,
3187 runlist->active_tsgs); 3187 runlist->active_tsgs);
3188 } 3188 }
3189 } 3189 }
@@ -3208,7 +3208,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
3208 goto clean_up; 3208 goto clean_up;
3209 } 3209 }
3210 3210
3211 if (hw_chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */ 3211 if (chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
3212 add /* resume to add all channels back */) { 3212 add /* resume to add all channels back */) {
3213 u32 max_entries = f->num_runlist_entries; 3213 u32 max_entries = f->num_runlist_entries;
3214 u32 *runlist_end; 3214 u32 *runlist_end;
@@ -3270,7 +3270,7 @@ clean_up:
3270 return ret; 3270 return ret;
3271} 3271}
3272 3272
3273int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid, 3273int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
3274 bool add, bool wait_for_finish) 3274 bool add, bool wait_for_finish)
3275{ 3275{
3276 u32 ret = -EINVAL; 3276 u32 ret = -EINVAL;
@@ -3284,7 +3284,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
3284 ret = 0; 3284 ret = 0;
3285 for_each_set_bit(runlist_id, &ulong_runlist_ids, 32) { 3285 for_each_set_bit(runlist_id, &ulong_runlist_ids, 32) {
3286 /* Capture the last failure error code */ 3286 /* Capture the last failure error code */
3287 errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish); 3287 errcode = g->ops.fifo.update_runlist(g, runlist_id, chid, add, wait_for_finish);
3288 if (errcode) { 3288 if (errcode) {
3289 nvgpu_err(g, 3289 nvgpu_err(g,
3290 "failed to update_runlist %d %d", runlist_id, errcode); 3290 "failed to update_runlist %d %d", runlist_id, errcode);
@@ -3297,9 +3297,9 @@ end:
3297 3297
3298/* add/remove a channel from runlist 3298/* add/remove a channel from runlist
3299 special cases below: runlist->active_channels will NOT be changed. 3299 special cases below: runlist->active_channels will NOT be changed.
3300 (hw_chid == ~0 && !add) means remove all active channels from runlist. 3300 (chid == ~0 && !add) means remove all active channels from runlist.
3301 (hw_chid == ~0 && add) means restore all active channels on runlist. */ 3301 (chid == ~0 && add) means restore all active channels on runlist. */
3302int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid, 3302int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
3303 bool add, bool wait_for_finish) 3303 bool add, bool wait_for_finish)
3304{ 3304{
3305 struct fifo_runlist_info_gk20a *runlist = NULL; 3305 struct fifo_runlist_info_gk20a *runlist = NULL;
@@ -3316,7 +3316,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
3316 3316
3317 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3317 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
3318 3318
3319 ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add, 3319 ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
3320 wait_for_finish); 3320 wait_for_finish);
3321 3321
3322 if (!mutex_ret) 3322 if (!mutex_ret)
@@ -3427,11 +3427,11 @@ u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g)
3427 return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f(); 3427 return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f();
3428} 3428}
3429 3429
3430struct channel_gk20a *gk20a_fifo_channel_from_hw_chid(struct gk20a *g, 3430struct channel_gk20a *gk20a_fifo_channel_from_chid(struct gk20a *g,
3431 u32 hw_chid) 3431 u32 chid)
3432{ 3432{
3433 if (hw_chid != FIFO_INVAL_CHANNEL_ID) 3433 if (chid != FIFO_INVAL_CHANNEL_ID)
3434 return g->fifo.channel + hw_chid; 3434 return g->fifo.channel + chid;
3435 else 3435 else
3436 return NULL; 3436 return NULL;
3437} 3437}
@@ -3487,14 +3487,14 @@ const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index)
3487 3487
3488void gk20a_dump_channel_status_ramfc(struct gk20a *g, 3488void gk20a_dump_channel_status_ramfc(struct gk20a *g,
3489 struct gk20a_debug_output *o, 3489 struct gk20a_debug_output *o,
3490 u32 hw_chid, 3490 u32 chid,
3491 struct ch_state *ch_state) 3491 struct ch_state *ch_state)
3492{ 3492{
3493 u32 channel = gk20a_readl(g, ccsr_channel_r(hw_chid)); 3493 u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
3494 u32 status = ccsr_channel_status_v(channel); 3494 u32 status = ccsr_channel_status_v(channel);
3495 u32 syncpointa, syncpointb; 3495 u32 syncpointa, syncpointb;
3496 u32 *inst_mem; 3496 u32 *inst_mem;
3497 struct channel_gk20a *c = g->fifo.channel + hw_chid; 3497 struct channel_gk20a *c = g->fifo.channel + chid;
3498 struct nvgpu_semaphore_int *hw_sema = NULL; 3498 struct nvgpu_semaphore_int *hw_sema = NULL;
3499 3499
3500 if (c->hw_sema) 3500 if (c->hw_sema)
@@ -3508,7 +3508,7 @@ void gk20a_dump_channel_status_ramfc(struct gk20a *g,
3508 syncpointa = inst_mem[ram_fc_syncpointa_w()]; 3508 syncpointa = inst_mem[ram_fc_syncpointa_w()];
3509 syncpointb = inst_mem[ram_fc_syncpointb_w()]; 3509 syncpointb = inst_mem[ram_fc_syncpointb_w()];
3510 3510
3511 gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", hw_chid, 3511 gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", chid,
3512 g->name, 3512 g->name,
3513 ch_state->pid, 3513 ch_state->pid,
3514 ch_state->refs, 3514 ch_state->refs,
@@ -3673,16 +3673,16 @@ void gk20a_dump_eng_status(struct gk20a *g,
3673 3673
3674void gk20a_fifo_enable_channel(struct channel_gk20a *ch) 3674void gk20a_fifo_enable_channel(struct channel_gk20a *ch)
3675{ 3675{
3676 gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid), 3676 gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
3677 gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid)) | 3677 gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) |
3678 ccsr_channel_enable_set_true_f()); 3678 ccsr_channel_enable_set_true_f());
3679} 3679}
3680 3680
3681void gk20a_fifo_disable_channel(struct channel_gk20a *ch) 3681void gk20a_fifo_disable_channel(struct channel_gk20a *ch)
3682{ 3682{
3683 gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid), 3683 gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
3684 gk20a_readl(ch->g, 3684 gk20a_readl(ch->g,
3685 ccsr_channel_r(ch->hw_chid)) | 3685 ccsr_channel_r(ch->chid)) |
3686 ccsr_channel_enable_clr_true_f()); 3686 ccsr_channel_enable_clr_true_f());
3687} 3687}
3688 3688
@@ -3693,23 +3693,23 @@ static void gk20a_fifo_channel_bind(struct channel_gk20a *c)
3693 ram_in_base_shift_v(); 3693 ram_in_base_shift_v();
3694 3694
3695 gk20a_dbg_info("bind channel %d inst ptr 0x%08x", 3695 gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
3696 c->hw_chid, inst_ptr); 3696 c->chid, inst_ptr);
3697 3697
3698 3698
3699 gk20a_writel(g, ccsr_channel_r(c->hw_chid), 3699 gk20a_writel(g, ccsr_channel_r(c->chid),
3700 (gk20a_readl(g, ccsr_channel_r(c->hw_chid)) & 3700 (gk20a_readl(g, ccsr_channel_r(c->chid)) &
3701 ~ccsr_channel_runlist_f(~0)) | 3701 ~ccsr_channel_runlist_f(~0)) |
3702 ccsr_channel_runlist_f(c->runlist_id)); 3702 ccsr_channel_runlist_f(c->runlist_id));
3703 3703
3704 gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid), 3704 gk20a_writel(g, ccsr_channel_inst_r(c->chid),
3705 ccsr_channel_inst_ptr_f(inst_ptr) | 3705 ccsr_channel_inst_ptr_f(inst_ptr) |
3706 nvgpu_aperture_mask(g, &c->inst_block, 3706 nvgpu_aperture_mask(g, &c->inst_block,
3707 ccsr_channel_inst_target_sys_mem_ncoh_f(), 3707 ccsr_channel_inst_target_sys_mem_ncoh_f(),
3708 ccsr_channel_inst_target_vid_mem_f()) | 3708 ccsr_channel_inst_target_vid_mem_f()) |
3709 ccsr_channel_inst_bind_true_f()); 3709 ccsr_channel_inst_bind_true_f());
3710 3710
3711 gk20a_writel(g, ccsr_channel_r(c->hw_chid), 3711 gk20a_writel(g, ccsr_channel_r(c->chid),
3712 (gk20a_readl(g, ccsr_channel_r(c->hw_chid)) & 3712 (gk20a_readl(g, ccsr_channel_r(c->chid)) &
3713 ~ccsr_channel_enable_set_f(~0)) | 3713 ~ccsr_channel_enable_set_f(~0)) |
3714 ccsr_channel_enable_set_true_f()); 3714 ccsr_channel_enable_set_true_f());
3715 3715
@@ -3725,7 +3725,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)
3725 gk20a_dbg_fn(""); 3725 gk20a_dbg_fn("");
3726 3726
3727 if (atomic_cmpxchg(&ch_gk20a->bound, true, false)) { 3727 if (atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
3728 gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->hw_chid), 3728 gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
3729 ccsr_channel_inst_ptr_f(0) | 3729 ccsr_channel_inst_ptr_f(0) |
3730 ccsr_channel_inst_bind_false_f()); 3730 ccsr_channel_inst_bind_false_f());
3731 } 3731 }
@@ -3743,7 +3743,7 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
3743 addr_hi = u64_hi32(c->userd_iova); 3743 addr_hi = u64_hi32(c->userd_iova);
3744 3744
3745 gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", 3745 gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
3746 c->hw_chid, (u64)c->userd_iova); 3746 c->chid, (u64)c->userd_iova);
3747 3747
3748 nvgpu_mem_wr32(g, &c->inst_block, 3748 nvgpu_mem_wr32(g, &c->inst_block,
3749 ram_in_ramfc_w() + ram_fc_userd_w(), 3749 ram_in_ramfc_w() + ram_fc_userd_w(),
@@ -3815,7 +3815,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
3815 fifo_pb_timeslice_timescale_0_f() | 3815 fifo_pb_timeslice_timescale_0_f() |
3816 fifo_pb_timeslice_enable_true_f()); 3816 fifo_pb_timeslice_enable_true_f());
3817 3817
3818 nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid)); 3818 nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));
3819 3819
3820 if (c->is_privileged_channel) 3820 if (c->is_privileged_channel)
3821 gk20a_fifo_setup_ramfc_for_privileged_channel(c); 3821 gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -3834,7 +3834,7 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c)
3834 c->g->ops.fifo.disable_channel(c); 3834 c->g->ops.fifo.disable_channel(c);
3835 3835
3836 /* preempt the channel */ 3836 /* preempt the channel */
3837 WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->hw_chid)); 3837 WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->chid));
3838 3838
3839 /* set new timeslice */ 3839 /* set new timeslice */
3840 nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(), 3840 nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
@@ -3863,7 +3863,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
3863 ch->timeslice_us = timeslice; 3863 ch->timeslice_us = timeslice;
3864 3864
3865 gk20a_dbg(gpu_dbg_sched, "chid=%u timeslice=%u us", 3865 gk20a_dbg(gpu_dbg_sched, "chid=%u timeslice=%u us",
3866 ch->hw_chid, timeslice); 3866 ch->chid, timeslice);
3867 3867
3868 return channel_gk20a_set_schedule_params(ch); 3868 return channel_gk20a_set_schedule_params(ch);
3869} 3869}
@@ -3899,7 +3899,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
3899 struct gk20a *g = c->g; 3899 struct gk20a *g = c->g;
3900 struct nvgpu_mem *mem = &c->inst_block; 3900 struct nvgpu_mem *mem = &c->inst_block;
3901 3901
3902 gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->hw_chid); 3902 gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid);
3903 3903
3904 /* Enable HCE priv mode for phys mode transfer */ 3904 /* Enable HCE priv mode for phys mode transfer */
3905 nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), 3905 nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
@@ -3910,7 +3910,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
3910{ 3910{
3911 struct gk20a *g = c->g; 3911 struct gk20a *g = c->g;
3912 struct nvgpu_mem *mem = &g->fifo.userd; 3912 struct nvgpu_mem *mem = &g->fifo.userd;
3913 u32 offset = c->hw_chid * g->fifo.userd_entry_size / sizeof(u32); 3913 u32 offset = c->chid * g->fifo.userd_entry_size / sizeof(u32);
3914 3914
3915 gk20a_dbg_fn(""); 3915 gk20a_dbg_fn("");
3916 3916
@@ -3939,7 +3939,7 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
3939 return err; 3939 return err;
3940 3940
3941 gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx", 3941 gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx",
3942 ch->hw_chid, gk20a_mm_inst_block_addr(g, &ch->inst_block)); 3942 ch->chid, gk20a_mm_inst_block_addr(g, &ch->inst_block));
3943 3943
3944 gk20a_dbg_fn("done"); 3944 gk20a_dbg_fn("done");
3945 return 0; 3945 return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 7e919346..6537b00f 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -227,7 +227,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g);
227void gk20a_fifo_isr(struct gk20a *g); 227void gk20a_fifo_isr(struct gk20a *g);
228int gk20a_fifo_nonstall_isr(struct gk20a *g); 228int gk20a_fifo_nonstall_isr(struct gk20a *g);
229 229
230int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid); 230int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
231int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid); 231int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
232int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch); 232int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
233 233
@@ -239,9 +239,9 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
239 bool wait_for_idle); 239 bool wait_for_idle);
240int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, 240int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
241 bool wait_for_idle); 241 bool wait_for_idle);
242u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 hw_chid); 242u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);
243 243
244int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 hw_chid, 244int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 chid,
245 bool add, bool wait_for_finish); 245 bool add, bool wait_for_finish);
246 246
247int gk20a_fifo_suspend(struct gk20a *g); 247int gk20a_fifo_suspend(struct gk20a *g);
@@ -253,7 +253,7 @@ void gk20a_fifo_recover(struct gk20a *g,
253 u32 hw_id, /* if ~0, will be queried from HW */ 253 u32 hw_id, /* if ~0, will be queried from HW */
254 bool hw_id_is_tsg, /* ignored if hw_id == ~0 */ 254 bool hw_id_is_tsg, /* ignored if hw_id == ~0 */
255 bool id_is_known, bool verbose); 255 bool id_is_known, bool verbose);
256void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose); 256void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose);
257void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose); 257void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose);
258int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, 258int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
259 u32 err_code, bool verbose); 259 u32 err_code, bool verbose);
@@ -277,8 +277,8 @@ void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
277bool gk20a_fifo_error_tsg(struct gk20a *g, struct tsg_gk20a *tsg); 277bool gk20a_fifo_error_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
278bool gk20a_fifo_error_ch(struct gk20a *g, struct channel_gk20a *refch); 278bool gk20a_fifo_error_ch(struct gk20a *g, struct channel_gk20a *refch);
279 279
280struct channel_gk20a *gk20a_fifo_channel_from_hw_chid(struct gk20a *g, 280struct channel_gk20a *gk20a_fifo_channel_from_chid(struct gk20a *g,
281 u32 hw_chid); 281 u32 chid);
282 282
283void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg); 283void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg);
284int gk20a_fifo_set_runlist_interleave(struct gk20a *g, 284int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
@@ -316,7 +316,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g);
316 316
317bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id); 317bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);
318 318
319int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid, 319int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
320 bool add, bool wait_for_finish); 320 bool add, bool wait_for_finish);
321 321
322int gk20a_fifo_init_engine_info(struct fifo_gk20a *f); 322int gk20a_fifo_init_engine_info(struct fifo_gk20a *f);
@@ -339,7 +339,7 @@ void gk20a_fifo_profile_release(struct gk20a *g,
339 339
340void gk20a_dump_channel_status_ramfc(struct gk20a *g, 340void gk20a_dump_channel_status_ramfc(struct gk20a *g,
341 struct gk20a_debug_output *o, 341 struct gk20a_debug_output *o,
342 u32 hw_chid, 342 u32 chid,
343 struct ch_state *ch_state); 343 struct ch_state *ch_state);
344void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g, 344void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
345 struct gk20a_debug_output *o); 345 struct gk20a_debug_output *o);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index ce84a93e..06db2b7f 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -426,10 +426,10 @@ struct gpu_ops {
426 unsigned long acquire_timeout, 426 unsigned long acquire_timeout,
427 u32 flags); 427 u32 flags);
428 int (*resetup_ramfc)(struct channel_gk20a *c); 428 int (*resetup_ramfc)(struct channel_gk20a *c);
429 int (*preempt_channel)(struct gk20a *g, u32 hw_chid); 429 int (*preempt_channel)(struct gk20a *g, u32 chid);
430 int (*preempt_tsg)(struct gk20a *g, u32 tsgid); 430 int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
431 int (*update_runlist)(struct gk20a *g, u32 runlist_id, 431 int (*update_runlist)(struct gk20a *g, u32 runlist_id,
432 u32 hw_chid, bool add, 432 u32 chid, bool add,
433 bool wait_for_finish); 433 bool wait_for_finish);
434 void (*trigger_mmu_fault)(struct gk20a *g, 434 void (*trigger_mmu_fault)(struct gk20a *g,
435 unsigned long engine_ids); 435 unsigned long engine_ids);
@@ -477,7 +477,7 @@ struct gpu_ops {
477 void (*dump_eng_status)(struct gk20a *g, 477 void (*dump_eng_status)(struct gk20a *g,
478 struct gk20a_debug_output *o); 478 struct gk20a_debug_output *o);
479 void (*dump_channel_status_ramfc)(struct gk20a *g, 479 void (*dump_channel_status_ramfc)(struct gk20a *g,
480 struct gk20a_debug_output *o, u32 hw_chid, 480 struct gk20a_debug_output *o, u32 chid,
481 struct ch_state *ch_state); 481 struct ch_state *ch_state);
482 u32 (*intr_0_error_mask)(struct gk20a *g); 482 u32 (*intr_0_error_mask)(struct gk20a *g);
483 int (*is_preempt_pending)(struct gk20a *g, u32 id, 483 int (*is_preempt_pending)(struct gk20a *g, u32 id,
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 2ee2048c..a9632eaa 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -725,7 +725,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
 	u32 ret;
 
 	gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
-		c->hw_chid, inst_base_ptr);
+		c->chid, inst_base_ptr);
 
 	ret = gr_gk20a_submit_fecs_method_op(g,
 		(struct fecs_method_op_gk20a) {
@@ -5933,7 +5933,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
 	/* check cache first */
 	for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
 		if (gr->chid_tlb[i].curr_ctx == curr_ctx) {
-			chid = gr->chid_tlb[i].hw_chid;
+			chid = gr->chid_tlb[i].chid;
 			tsgid = gr->chid_tlb[i].tsgid;
 			ret = gk20a_channel_get(&f->channel[chid]);
 			goto unlock;
@@ -5964,7 +5964,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
 	for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
 		if (gr->chid_tlb[i].curr_ctx == 0) {
 			gr->chid_tlb[i].curr_ctx = curr_ctx;
-			gr->chid_tlb[i].hw_chid = chid;
+			gr->chid_tlb[i].chid = chid;
 			gr->chid_tlb[i].tsgid = tsgid;
 			goto unlock;
 		}
@@ -5972,7 +5972,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
 
 	/* no free entry, flush one */
 	gr->chid_tlb[gr->channel_tlb_flush_index].curr_ctx = curr_ctx;
-	gr->chid_tlb[gr->channel_tlb_flush_index].hw_chid = chid;
+	gr->chid_tlb[gr->channel_tlb_flush_index].chid = chid;
 	gr->chid_tlb[gr->channel_tlb_flush_index].tsgid = tsgid;
 
 	gr->channel_tlb_flush_index =
@@ -6514,7 +6514,7 @@ int gk20a_gr_isr(struct gk20a *g)
 
 	ch = gk20a_gr_get_channel_from_ctx(g, isr_data.curr_ctx, &tsgid);
 	if (ch) {
-		isr_data.chid = ch->hw_chid;
+		isr_data.chid = ch->chid;
 	} else {
 		isr_data.chid = FIFO_INVAL_CHANNEL_ID;
 		nvgpu_err(g, "ch id is INVALID 0xffffffff");
@@ -6626,7 +6626,7 @@ int gk20a_gr_isr(struct gk20a *g)
 		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
 			"GPC exception pending");
 
-		fault_ch = gk20a_fifo_channel_from_hw_chid(g,
+		fault_ch = gk20a_fifo_channel_from_chid(g,
 				isr_data.chid);
 
 		/*isr_data.chid can be ~0 and fault_ch can be NULL */
@@ -6673,7 +6673,7 @@ int gk20a_gr_isr(struct gk20a *g)
 				tsgid, true, true, true);
 		else if (ch)
 			gk20a_fifo_recover(g, gr_engine_id,
-				ch->hw_chid, false, true, true);
+				ch->chid, false, true, true);
 		else
 			gk20a_fifo_recover(g, gr_engine_id,
 				0, false, false, true);
@@ -8337,16 +8337,16 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
 		  "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d"
-		  " ch->hw_chid=%d",
-		  curr_ch ? curr_ch->hw_chid : -1,
+		  " ch->chid=%d",
+		  curr_ch ? curr_ch->chid : -1,
 		  curr_gr_tsgid,
 		  ch->tsgid,
-		  ch->hw_chid);
+		  ch->chid);
 
 	if (!curr_ch)
 		return false;
 
-	if (ch->hw_chid == curr_ch->hw_chid)
+	if (ch->chid == curr_ch->chid)
 		ret = true;
 
 	if (gk20a_is_channel_marked_as_tsg(ch) && (ch->tsgid == curr_gr_tsgid))
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index 745848ab..109ae0a3 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -112,7 +112,7 @@ enum {
 
 struct gr_channel_map_tlb_entry {
 	u32 curr_ctx;
-	u32 hw_chid;
+	u32 chid;
 	u32 tsgid;
 };
 
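
Note: the TLB entry now carries the plain chid. For context, a minimal sketch of the curr_ctx-to-chid lookup that the gr_gk20a.c hunks above touch; the helper name and miss handling are illustrative only, not driver code.

/*
 * Sketch of the context -> chid TLB lookup; a miss is handled by the caller,
 * which decodes curr_ctx and caches the result (flushing the entry at
 * gr->channel_tlb_flush_index when the table is full).
 */
static bool example_chid_tlb_lookup(struct gr_gk20a *gr, u32 curr_ctx,
				    u32 *chid, u32 *tsgid)
{
	u32 i;

	for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
		if (gr->chid_tlb[i].curr_ctx == curr_ctx) {
			*chid = gr->chid_tlb[i].chid;	/* renamed field */
			*tsgid = gr->chid_tlb[i].tsgid;
			return true;
		}
	}

	return false;
}
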
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 2581bc0d..7c476526 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -171,7 +171,7 @@ struct mmu_fault_info {
 	u32 faulted_pbdma;
 	u32 faulted_engine;
 	u32 faulted_subid;
-	u32 hw_chid;
+	u32 chid;
 	struct channel_gk20a *refch;
 	const char *client_type_desc;
 	const char *fault_type_desc;
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
index 10c4f57b..247f3d63 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
@@ -360,7 +360,7 @@ static void gk20a_sync_pt_value_str_for_sema(struct gk20a_sync_pt *pt,
 	struct nvgpu_semaphore *s = pt->sema;
 
 	snprintf(str, size, "S: c=%d [v=%u,r_v=%u]",
-		 s->hw_sema->ch->hw_chid,
+		 s->hw_sema->ch->chid,
 		 nvgpu_semaphore_get_value(s),
 		 nvgpu_semaphore_read(s));
 }
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 46db3dd3..0d07c790 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -61,7 +61,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 
 	for (i = 0; i < f->max_runlists; ++i) {
 		runlist = &f->runlist_info[i];
-		if (test_bit(ch->hw_chid, runlist->active_channels))
+		if (test_bit(ch->chid, runlist->active_channels))
 			return true;
 	}
 
@@ -107,7 +107,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 	kref_get(&tsg->refcount);
 
 	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
-		  tsg->tsgid, ch->hw_chid);
+		  tsg->tsgid, ch->chid);
 
 	gk20a_dbg_fn("done");
 	return 0;
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index efa0c589..19782412 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -36,18 +36,18 @@ static void channel_gm20b_bind(struct channel_gk20a *c)
 		>> ram_in_base_shift_v();
 
 	gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
-		c->hw_chid, inst_ptr);
+		c->chid, inst_ptr);
 
 
-	gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
+	gk20a_writel(g, ccsr_channel_inst_r(c->chid),
 		ccsr_channel_inst_ptr_f(inst_ptr) |
 		nvgpu_aperture_mask(g, &c->inst_block,
 		 ccsr_channel_inst_target_sys_mem_ncoh_f(),
 		 ccsr_channel_inst_target_vid_mem_f()) |
 		ccsr_channel_inst_bind_true_f());
 
-	gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-		(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
+	gk20a_writel(g, ccsr_channel_r(c->chid),
+		(gk20a_readl(g, ccsr_channel_r(c->chid)) &
 		 ~ccsr_channel_enable_set_f(~0)) |
 		 ccsr_channel_enable_set_true_f());
 	wmb();
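
Note: on native chips the chid is also the hardware channel index, which is why the bind code above can index ccsr_channel_r() with c->chid where it used to use c->hw_chid. A small sketch restating that read-modify-write as a helper; the helper name is hypothetical and the code is not taken from the driver.

/*
 * Illustrative helper: chid selects this channel's per-channel CCSR
 * control register on native GPUs.
 */
static void example_ccsr_enable(struct gk20a *g, struct channel_gk20a *c)
{
	u32 v = gk20a_readl(g, ccsr_channel_r(c->chid));

	v &= ~ccsr_channel_enable_set_f(~0);	/* clear the enable_set field */
	v |= ccsr_channel_enable_set_true_f();	/* request channel enable */

	gk20a_writel(g, ccsr_channel_r(c->chid), v);
	wmb();
}
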
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index cfc2eb8d..633fbfb7 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -64,7 +64,7 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
 	addr_hi = u64_hi32(c->userd_iova);
 
 	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
-		c->hw_chid, (u64)c->userd_iova);
+		c->chid, (u64)c->userd_iova);
 
 	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_w(),
@@ -134,7 +134,7 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 	gp10b_set_pdb_fault_replay_flags(c->g, mem);
 
 
-	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));
 
 	if (c->is_privileged_channel) {
 		/* Set privilege level for channel */
@@ -176,7 +176,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 		v = pbdma_allowed_syncpoints_0_valid_f(1);
 
 		gk20a_dbg_info("Channel %d, syncpt id %d\n",
-			c->hw_chid, new_syncpt);
+			c->chid, new_syncpt);
 
 		v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);
 
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 2356f9f3..9ff34325 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1653,7 +1653,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP: preempted tsg");
 	} else {
-		gk20a_fifo_issue_preempt(g, fault_ch->hw_chid, false);
+		gk20a_fifo_issue_preempt(g, fault_ch->chid, false);
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP: preempted channel");
 	}
@@ -1675,7 +1675,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	if (gr_ctx->t18x.cilp_preempt_pending) {
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP is already pending for chid %d",
-				fault_ch->hw_chid);
+				fault_ch->chid);
 		return 0;
 	}
 
@@ -1718,7 +1718,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: disabling channel %d",
-			fault_ch->hw_chid);
+			fault_ch->chid);
 
 	ret = gr_gp10b_disable_channel_or_tsg(g, fault_ch);
 	if (ret) {
@@ -1728,7 +1728,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 
 	/* set cilp_preempt_pending = true and record the channel */
 	gr_ctx->t18x.cilp_preempt_pending = true;
-	g->gr.t18x.cilp_preempt_pending_chid = fault_ch->hw_chid;
+	g->gr.t18x.cilp_preempt_pending_chid = fault_ch->chid;
 
 	if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
 		struct tsg_gk20a *tsg = &g->fifo.tsg[fault_ch->tsgid];
@@ -1758,7 +1758,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
 	if (!gr_ctx->t18x.cilp_preempt_pending) {
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP is already cleared for chid %d\n",
-				fault_ch->hw_chid);
+				fault_ch->chid);
 		return 0;
 	}
 
@@ -1879,7 +1879,7 @@ static int gr_gp10b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
 
 	chid = g->gr.t18x.cilp_preempt_pending_chid;
 
-	ch = gk20a_channel_get(gk20a_fifo_channel_from_hw_chid(g, chid));
+	ch = gk20a_channel_get(gk20a_fifo_channel_from_chid(g, chid));
 	if (!ch)
 		return ret;
 
@@ -1923,7 +1923,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 			goto clean_up;
 
 		ch = gk20a_channel_get(
-			gk20a_fifo_channel_from_hw_chid(g, chid));
+			gk20a_fifo_channel_from_chid(g, chid));
 		if (!ch)
 			goto clean_up;
 
@@ -2171,7 +2171,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 
 	gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d "
 			"graphics_preempt=%d compute_preempt=%d",
-			ch->hw_chid,
+			ch->chid,
 			ch->tsgid,
 			ch->tgid,
 			graphics_preempt_mode,
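
Note: the CILP path records only the chid of the faulting channel and later resolves it back to a referenced channel, as gr_gp10b_get_cilp_preempt_pending_chid does above. A compact sketch of that round trip; the type of the stored chid, the sentinel check, and the helper name are assumptions, not driver code.

/*
 * Sketch of the pending-chid round trip used by the CILP code above.
 */
static struct channel_gk20a *example_resolve_pending_ch(struct gk20a *g)
{
	u32 chid = g->gr.t18x.cilp_preempt_pending_chid;

	if (chid == FIFO_INVAL_CHANNEL_ID)
		return NULL;

	/* takes a reference; the caller is expected to drop it again */
	return gk20a_channel_get(gk20a_fifo_channel_from_chid(g, chid));
}
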
diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
index 45a3af5a..faa8d945 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
@@ -299,7 +299,7 @@ static inline void __nvgpu_semaphore_release(struct nvgpu_semaphore *s,
 	nvgpu_mem_wr(hw_sema->ch->g, &hw_sema->p->rw_mem, hw_sema->offset, val);
 
 	gpu_sema_verbose_dbg(hw_sema->p->sema_sea->gk20a,
-			     "(c=%d) WRITE %u", hw_sema->ch->hw_chid, val);
+			     "(c=%d) WRITE %u", hw_sema->ch->chid, val);
 }
 
 static inline void nvgpu_semaphore_release(struct nvgpu_semaphore *s)
@@ -325,7 +325,7 @@ static inline void nvgpu_semaphore_incr(struct nvgpu_semaphore *s)
 
 	gpu_sema_verbose_dbg(s->hw_sema->p->sema_sea->gk20a,
 			     "INCR sema for c=%d (%u)",
-			     s->hw_sema->ch->hw_chid,
+			     s->hw_sema->ch->chid,
 			     nvgpu_semaphore_next_value(s));
 }
 #endif
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 8d12bb42..55b3de07 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -34,7 +34,7 @@ static void vgpu_channel_bind(struct channel_gk20a *ch)
 		&msg.params.channel_config;
 	int err;
 
-	gk20a_dbg_info("bind channel %d", ch->hw_chid);
+	gk20a_dbg_info("bind channel %d", ch->chid);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -76,7 +76,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
 	msg.handle = vgpu_get_handle(g);
-	p->id = ch->hw_chid;
+	p->id = ch->chid;
 	p->pid = (u64)current->tgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
@@ -407,10 +407,10 @@ int vgpu_init_fifo_support(struct gk20a *g)
 	return err;
 }
 
-static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
+static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
 {
 	struct fifo_gk20a *f = &g->fifo;
-	struct channel_gk20a *ch = &f->channel[hw_chid];
+	struct channel_gk20a *ch = &f->channel[chid];
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 		&msg.params.channel_config;
@@ -428,7 +428,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 
 	if (err || msg.ret) {
 		nvgpu_err(g,
-			"preempt channel %d failed", hw_chid);
+			"preempt channel %d failed", chid);
 		err = -ENOMEM;
 	}
 
@@ -497,7 +497,7 @@ done:
 }
 
 static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
-			u32 hw_chid, bool add,
+			u32 chid, bool add,
 			bool wait_for_finish)
 {
 	struct fifo_gk20a *f = &g->fifo;
@@ -511,19 +511,19 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 
 	/* valid channel, add/remove it from active list.
 	   Otherwise, keep active list untouched for suspend/resume. */
-	if (hw_chid != (u32)~0) {
+	if (chid != (u32)~0) {
 		if (add) {
-			if (test_and_set_bit(hw_chid,
+			if (test_and_set_bit(chid,
 				runlist->active_channels) == 1)
 				return 0;
 		} else {
-			if (test_and_clear_bit(hw_chid,
+			if (test_and_clear_bit(chid,
 				runlist->active_channels) == 0)
 				return 0;
 		}
 	}
 
-	if (hw_chid != (u32)~0 || /* add/remove a valid channel */
+	if (chid != (u32)~0 || /* add/remove a valid channel */
 	    add /* resume to add all channels back */) {
 		u32 chid;
 
@@ -544,10 +544,10 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 
 /* add/remove a channel from runlist
    special cases below: runlist->active_channels will NOT be changed.
-   (hw_chid == ~0 && !add) means remove all active channels from runlist.
-   (hw_chid == ~0 && add) means restore all active channels on runlist. */
+   (chid == ~0 && !add) means remove all active channels from runlist.
+   (chid == ~0 && add) means restore all active channels on runlist. */
 static int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
-			u32 hw_chid, bool add, bool wait_for_finish)
+			u32 chid, bool add, bool wait_for_finish)
 {
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	struct fifo_gk20a *f = &g->fifo;
@@ -559,7 +559,7 @@ static int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 
 	nvgpu_mutex_acquire(&runlist->mutex);
 
-	ret = vgpu_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
+	ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
 					wait_for_finish);
 
 	nvgpu_mutex_release(&runlist->mutex);
@@ -580,7 +580,7 @@ static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority)
 		&msg.params.channel_priority;
 	int err;
 
-	gk20a_dbg_info("channel %d set priority %u", ch->hw_chid, priority);
+	gk20a_dbg_info("channel %d set priority %u", ch->chid, priority);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_PRIORITY;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -739,7 +739,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 	nvgpu_err(g, "fifo intr (%d) on ch %u",
 		info->type, info->chid);
 
-	trace_gk20a_channel_reset(ch->hw_chid, ch->tsgid);
+	trace_gk20a_channel_reset(ch->chid, ch->tsgid);
 
 	switch (info->type) {
 	case TEGRA_VGPU_FIFO_INTR_PBDMA:
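
Note: the vgpu runlist code treats chid == ~0 as "all channels", with add selecting restore versus remove, per the comment above. A sketch of driving those special cases through the HAL pointer (vgpu_fifo_update_runlist itself is static); the suspend/resume framing and helper name are illustrative only.

/*
 * Sketch of the chid == ~0 special cases: active_channels is left
 * untouched, and add = true restores all active channels while
 * add = false removes them from the runlist.
 */
static int example_runlist_all_channels(struct gk20a *g, u32 runlist_id,
					bool restore)
{
	return g->ops.fifo.update_runlist(g, runlist_id, (u32)~0,
					  restore, true);
}
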
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index 3f6b6fd9..3ce4aa1f 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -69,7 +69,7 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 	if (err) {
 		nvgpu_err(tsg->g,
 			"vgpu_tsg_bind_channel failed, ch %d tsgid %d",
-			ch->hw_chid, tsg->tsgid);
+			ch->chid, tsg->tsgid);
 		gk20a_tsg_unbind_channel(ch);
 	}
 