Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 116 ++++++++++++++++----------------
 1 file changed, 58 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 69a3e706..17f3743f 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -46,7 +46,7 @@
 #define FECS_METHOD_WFI_RESTORE 0x80000
 
 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
-					    u32 hw_chid, bool add,
+					    u32 chid, bool add,
 					    bool wait_for_finish);
 static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg);
 
@@ -1395,7 +1395,7 @@ void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
 		struct channel_gk20a *refch)
 {
 	nvgpu_err(g,
-		"channel %d generated a mmu fault", refch->hw_chid);
+		"channel %d generated a mmu fault", refch->chid);
 	gk20a_set_error_notifier(refch,
 		NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
 }
@@ -1455,7 +1455,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 	if (gk20a_is_channel_marked_as_tsg(ch))
 		engines = gk20a_fifo_engines_on_id(g, ch->tsgid, true);
 	else
-		engines = gk20a_fifo_engines_on_id(g, ch->hw_chid, false);
+		engines = gk20a_fifo_engines_on_id(g, ch->chid, false);
 	if (!engines)
 		goto clean_up;
 
@@ -1673,7 +1673,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 		} else {
 			nvgpu_err(g,
 				"mmu error in freed channel %d",
-				ch->hw_chid);
+				ch->chid);
 		}
 	} else if (mmfault_info.inst_ptr ==
 			gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) {
@@ -1794,7 +1794,7 @@ static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
 	return engines;
 }
 
-void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
+void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose)
 {
 	u32 engines;
 
@@ -1803,12 +1803,12 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	gr_gk20a_disable_ctxsw(g);
 
-	engines = gk20a_fifo_engines_on_id(g, hw_chid, false);
+	engines = gk20a_fifo_engines_on_id(g, chid, false);
 
 	if (engines)
-		gk20a_fifo_recover(g, engines, hw_chid, false, true, verbose);
+		gk20a_fifo_recover(g, engines, chid, false, true, verbose);
 	else {
-		struct channel_gk20a *ch = &g->fifo.channel[hw_chid];
+		struct channel_gk20a *ch = &g->fifo.channel[chid];
 
 		if (gk20a_channel_get(ch)) {
 			gk20a_channel_abort(ch, false);
@@ -1976,7 +1976,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 		gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
 	} else {
 		gk20a_set_error_notifier(ch, err_code);
-		gk20a_fifo_recover_ch(g, ch->hw_chid, verbose);
+		gk20a_fifo_recover_ch(g, ch->chid, verbose);
 	}
 
 	return 0;
@@ -2102,7 +2102,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 */
 	if (progress) {
 		gk20a_dbg_info("progress on tsg=%d ch=%d",
-				tsg->tsgid, ch->hw_chid);
+				tsg->tsgid, ch->chid);
 		gk20a_channel_put(ch);
 		*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2119,7 +2119,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 */
 	if (recover) {
 		gk20a_dbg_info("timeout on tsg=%d ch=%d",
-				tsg->tsgid, ch->hw_chid);
+				tsg->tsgid, ch->chid);
 		*ms = ch->timeout_accumulated_ms;
 		gk20a_channel_put(ch);
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2629,7 +2629,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 	return ret;
 }
 
-int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
+int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u32 ret = 0;
@@ -2637,7 +2637,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	u32 mutex_ret = 0;
 	u32 i;
 
-	gk20a_dbg_fn("%d", hw_chid);
+	gk20a_dbg_fn("%d", chid);
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2645,7 +2645,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, hw_chid, false);
+	ret = __locked_fifo_preempt(g, chid, false);
 
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2690,7 +2690,7 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
 	if (gk20a_is_channel_marked_as_tsg(ch))
 		err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
 	else
-		err = g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
+		err = g->ops.fifo.preempt_channel(ch->g, ch->chid);
 
 	return err;
 }
@@ -2973,7 +2973,7 @@ u32 gk20a_fifo_default_timeslice_us(struct gk20a *g)
 
 void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist)
 {
-	runlist[0] = ram_rl_entry_chid_f(ch->hw_chid);
+	runlist[0] = ram_rl_entry_chid_f(ch->chid);
 	runlist[1] = 0;
 }
 
@@ -3066,7 +3066,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 		down_read(&tsg->ch_list_lock);
 		/* add runnable channels bound to this TSG */
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
-			if (!test_bit(ch->hw_chid,
+			if (!test_bit(ch->chid,
 				runlist->active_channels))
 				continue;
 
@@ -3076,7 +3076,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 			}
 
 			gk20a_dbg_info("add channel %d to runlist",
-				ch->hw_chid);
+				ch->chid);
 			f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
 			gk20a_dbg_info(
 				"run list count %d runlist [0] %x [1] %x\n",
@@ -3148,7 +3148,7 @@ int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 }
 
 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
-					    u32 hw_chid, bool add,
+					    u32 chid, bool add,
 					    bool wait_for_finish)
 {
 	int ret = 0;
@@ -3166,24 +3166,24 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 
 	/* valid channel, add/remove it from active list.
 	   Otherwise, keep active list untouched for suspend/resume. */
-	if (hw_chid != FIFO_INVAL_CHANNEL_ID) {
-		ch = &f->channel[hw_chid];
+	if (chid != FIFO_INVAL_CHANNEL_ID) {
+		ch = &f->channel[chid];
 		if (gk20a_is_channel_marked_as_tsg(ch))
 			tsg = &f->tsg[ch->tsgid];
 
 		if (add) {
-			if (test_and_set_bit(hw_chid,
+			if (test_and_set_bit(chid,
 				runlist->active_channels) == 1)
 				return 0;
 			if (tsg && ++tsg->num_active_channels)
-				set_bit(f->channel[hw_chid].tsgid,
+				set_bit(f->channel[chid].tsgid,
 					runlist->active_tsgs);
 		} else {
-			if (test_and_clear_bit(hw_chid,
+			if (test_and_clear_bit(chid,
 				runlist->active_channels) == 0)
 				return 0;
 			if (tsg && --tsg->num_active_channels == 0)
-				clear_bit(f->channel[hw_chid].tsgid,
+				clear_bit(f->channel[chid].tsgid,
 					runlist->active_tsgs);
 		}
 	}
@@ -3208,7 +3208,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 		goto clean_up;
 	}
 
-	if (hw_chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
+	if (chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
 	    add /* resume to add all channels back */) {
 		u32 max_entries = f->num_runlist_entries;
 		u32 *runlist_end;
@@ -3270,7 +3270,7 @@ clean_up:
 	return ret;
 }
 
-int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
+int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
 		bool add, bool wait_for_finish)
 {
 	u32 ret = -EINVAL;
@@ -3284,7 +3284,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
 	ret = 0;
 	for_each_set_bit(runlist_id, &ulong_runlist_ids, 32) {
 		/* Capture the last failure error code */
-		errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish);
+		errcode = g->ops.fifo.update_runlist(g, runlist_id, chid, add, wait_for_finish);
 		if (errcode) {
 			nvgpu_err(g,
 				"failed to update_runlist %d %d", runlist_id, errcode);
@@ -3297,9 +3297,9 @@ end:
 
 /* add/remove a channel from runlist
    special cases below: runlist->active_channels will NOT be changed.
-   (hw_chid == ~0 && !add) means remove all active channels from runlist.
-   (hw_chid == ~0 && add) means restore all active channels on runlist. */
-int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
+   (chid == ~0 && !add) means remove all active channels from runlist.
+   (chid == ~0 && add) means restore all active channels on runlist. */
+int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 			      bool add, bool wait_for_finish)
 {
 	struct fifo_runlist_info_gk20a *runlist = NULL;
@@ -3316,7 +3316,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
+	ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
 					       wait_for_finish);
 
 	if (!mutex_ret)
@@ -3427,11 +3427,11 @@ u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g)
 	return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f();
 }
 
-struct channel_gk20a *gk20a_fifo_channel_from_hw_chid(struct gk20a *g,
-		u32 hw_chid)
+struct channel_gk20a *gk20a_fifo_channel_from_chid(struct gk20a *g,
+		u32 chid)
 {
-	if (hw_chid != FIFO_INVAL_CHANNEL_ID)
-		return g->fifo.channel + hw_chid;
+	if (chid != FIFO_INVAL_CHANNEL_ID)
+		return g->fifo.channel + chid;
 	else
 		return NULL;
 }
@@ -3487,14 +3487,14 @@ const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index)
 
 void gk20a_dump_channel_status_ramfc(struct gk20a *g,
 				     struct gk20a_debug_output *o,
-				     u32 hw_chid,
+				     u32 chid,
 				     struct ch_state *ch_state)
 {
-	u32 channel = gk20a_readl(g, ccsr_channel_r(hw_chid));
+	u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
 	u32 status = ccsr_channel_status_v(channel);
 	u32 syncpointa, syncpointb;
 	u32 *inst_mem;
-	struct channel_gk20a *c = g->fifo.channel + hw_chid;
+	struct channel_gk20a *c = g->fifo.channel + chid;
 	struct nvgpu_semaphore_int *hw_sema = NULL;
 
 	if (c->hw_sema)
@@ -3508,7 +3508,7 @@ void gk20a_dump_channel_status_ramfc(struct gk20a *g,
 	syncpointa = inst_mem[ram_fc_syncpointa_w()];
 	syncpointb = inst_mem[ram_fc_syncpointb_w()];
 
-	gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", hw_chid,
+	gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", chid,
 			g->name,
 			ch_state->pid,
 			ch_state->refs,
@@ -3673,16 +3673,16 @@ void gk20a_dump_eng_status(struct gk20a *g,
 
 void gk20a_fifo_enable_channel(struct channel_gk20a *ch)
 {
-	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-		gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid)) |
+	gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
+		gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) |
 		ccsr_channel_enable_set_true_f());
 }
 
 void gk20a_fifo_disable_channel(struct channel_gk20a *ch)
 {
-	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
+	gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
 		gk20a_readl(ch->g,
-			ccsr_channel_r(ch->hw_chid)) |
+			ccsr_channel_r(ch->chid)) |
 		ccsr_channel_enable_clr_true_f());
 }
 
@@ -3693,23 +3693,23 @@ static void gk20a_fifo_channel_bind(struct channel_gk20a *c)
 		ram_in_base_shift_v();
 
 	gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
-		c->hw_chid, inst_ptr);
+		c->chid, inst_ptr);
 
 
-	gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-		(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
+	gk20a_writel(g, ccsr_channel_r(c->chid),
+		(gk20a_readl(g, ccsr_channel_r(c->chid)) &
 		 ~ccsr_channel_runlist_f(~0)) |
 		 ccsr_channel_runlist_f(c->runlist_id));
 
-	gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
+	gk20a_writel(g, ccsr_channel_inst_r(c->chid),
 		ccsr_channel_inst_ptr_f(inst_ptr) |
 		nvgpu_aperture_mask(g, &c->inst_block,
 		 ccsr_channel_inst_target_sys_mem_ncoh_f(),
 		 ccsr_channel_inst_target_vid_mem_f()) |
 		ccsr_channel_inst_bind_true_f());
 
-	gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-		(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
+	gk20a_writel(g, ccsr_channel_r(c->chid),
+		(gk20a_readl(g, ccsr_channel_r(c->chid)) &
 		 ~ccsr_channel_enable_set_f(~0)) |
 		 ccsr_channel_enable_set_true_f());
 
@@ -3725,7 +3725,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)
 	gk20a_dbg_fn("");
 
 	if (atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
-		gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->hw_chid),
+		gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
 			ccsr_channel_inst_ptr_f(0) |
 			ccsr_channel_inst_bind_false_f());
 	}
@@ -3743,7 +3743,7 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
 	addr_hi = u64_hi32(c->userd_iova);
 
 	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
-		c->hw_chid, (u64)c->userd_iova);
+		c->chid, (u64)c->userd_iova);
 
 	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_w(),
@@ -3815,7 +3815,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
 		fifo_pb_timeslice_timescale_0_f() |
 		fifo_pb_timeslice_enable_true_f());
 
-	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));
 
 	if (c->is_privileged_channel)
 		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -3834,7 +3834,7 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c)
 	c->g->ops.fifo.disable_channel(c);
 
 	/* preempt the channel */
-	WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->hw_chid));
+	WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->chid));
 
 	/* set new timeslice */
 	nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
@@ -3863,7 +3863,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 	ch->timeslice_us = timeslice;
 
 	gk20a_dbg(gpu_dbg_sched, "chid=%u timeslice=%u us",
-		ch->hw_chid, timeslice);
+		ch->chid, timeslice);
 
 	return channel_gk20a_set_schedule_params(ch);
 }
@@ -3899,7 +3899,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
 	struct gk20a *g = c->g;
 	struct nvgpu_mem *mem = &c->inst_block;
 
-	gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->hw_chid);
+	gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid);
 
 	/* Enable HCE priv mode for phys mode transfer */
 	nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
@@ -3910,7 +3910,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
 {
 	struct gk20a *g = c->g;
 	struct nvgpu_mem *mem = &g->fifo.userd;
-	u32 offset = c->hw_chid * g->fifo.userd_entry_size / sizeof(u32);
+	u32 offset = c->chid * g->fifo.userd_entry_size / sizeof(u32);
 
 	gk20a_dbg_fn("");
 
@@ -3939,7 +3939,7 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 		return err;
 
 	gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx",
-		ch->hw_chid, gk20a_mm_inst_block_addr(g, &ch->inst_block));
+		ch->chid, gk20a_mm_inst_block_addr(g, &ch->inst_block));
 
 	gk20a_dbg_fn("done");
 	return 0;