summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu
diff options
context:
space:
mode:
authorRichard Zhao <rizhao@nvidia.com>2017-06-27 14:15:41 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-06-30 01:34:37 -0400
commite1655282238b4632c2af4ac809d81b297d5183f2 (patch)
tree242b731867781ab8aabeef7b1bd906005a5bbbd0 /drivers/gpu/nvgpu
parent3e195bb02d62694728d80774cd03b486bb34ff14 (diff)
gpu: nvgpu: gv11b: rename hw_chid to chid
hw_chid is a relative id for vgpu. For native it's the same as the hw id. Renaming it to chid to avoid confusion. Jira VFND-3796 Change-Id: Ie94c1a15e9e45fc823d85790ce6a69da53a685bf Signed-off-by: Richard Zhao <rizhao@nvidia.com> Reviewed-on: https://git-master/r/1509531 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--drivers/gpu/nvgpu/gv11b/fifo_gv11b.c48
1 file changed, 24 insertions, 24 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 5425eaa0..dbde2fed 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -99,7 +99,7 @@ static void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
99 addr_hi = u64_hi32(gk20a_mm_inst_block_addr(g, &c->inst_block)); 99 addr_hi = u64_hi32(gk20a_mm_inst_block_addr(g, &c->inst_block));
100 100
101 runlist[2] = ram_rl_entry_chan_inst_ptr_lo_f(addr_lo) | 101 runlist[2] = ram_rl_entry_chan_inst_ptr_lo_f(addr_lo) |
102 ram_rl_entry_chid_f(c->hw_chid); 102 ram_rl_entry_chid_f(c->chid);
103 runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi); 103 runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);
104 104
105 gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", 105 gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
@@ -164,7 +164,7 @@ static int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
164 pbdma_runlist_timeslice_enable_true_f()); 164 pbdma_runlist_timeslice_enable_true_f());
165 165
166 166
167 nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid)); 167 nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));
168 168
169 /* Until full subcontext is supported, always use VEID0 */ 169 /* Until full subcontext is supported, always use VEID0 */
170 nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(), 170 nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
@@ -192,16 +192,16 @@ static int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
192 192
193static void gv11b_ring_channel_doorbell(struct channel_gk20a *c) 193static void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
194{ 194{
195 gk20a_dbg_info("channel ring door bell %d\n", c->hw_chid); 195 gk20a_dbg_info("channel ring door bell %d\n", c->chid);
196 196
197 gk20a_writel(c->g, usermode_notify_channel_pending_r(), 197 gk20a_writel(c->g, usermode_notify_channel_pending_r(),
198 usermode_notify_channel_pending_id_f(c->hw_chid)); 198 usermode_notify_channel_pending_id_f(c->chid));
199} 199}
200 200
201static u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c) 201static u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
202{ 202{
203 struct nvgpu_mem *userd_mem = &g->fifo.userd; 203 struct nvgpu_mem *userd_mem = &g->fifo.userd;
204 u32 offset = c->hw_chid * (g->fifo.userd_entry_size / sizeof(u32)); 204 u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));
205 205
206 return nvgpu_mem_rd32(g, userd_mem, 206 return nvgpu_mem_rd32(g, userd_mem,
207 offset + ram_userd_gp_get_w()); 207 offset + ram_userd_gp_get_w());
@@ -210,7 +210,7 @@ static u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
210static u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c) 210static u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c)
211{ 211{
212 struct nvgpu_mem *userd_mem = &g->fifo.userd; 212 struct nvgpu_mem *userd_mem = &g->fifo.userd;
213 u32 offset = c->hw_chid * (g->fifo.userd_entry_size / sizeof(u32)); 213 u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));
214 u32 lo = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_w()); 214 u32 lo = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_w());
215 u32 hi = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_hi_w()); 215 u32 hi = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_hi_w());
216 216
@@ -220,7 +220,7 @@ static u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c)
220static void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c) 220static void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c)
221{ 221{
222 struct nvgpu_mem *userd_mem = &g->fifo.userd; 222 struct nvgpu_mem *userd_mem = &g->fifo.userd;
223 u32 offset = c->hw_chid * (g->fifo.userd_entry_size / sizeof(u32)); 223 u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));
224 224
225 nvgpu_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(), 225 nvgpu_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(),
226 c->gpfifo.put); 226 c->gpfifo.put);
@@ -249,13 +249,13 @@ static bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
249 249
250static void gv11b_dump_channel_status_ramfc(struct gk20a *g, 250static void gv11b_dump_channel_status_ramfc(struct gk20a *g,
251 struct gk20a_debug_output *o, 251 struct gk20a_debug_output *o,
252 u32 hw_chid, 252 u32 chid,
253 struct ch_state *ch_state) 253 struct ch_state *ch_state)
254{ 254{
255 u32 channel = gk20a_readl(g, ccsr_channel_r(hw_chid)); 255 u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
256 u32 status = ccsr_channel_status_v(channel); 256 u32 status = ccsr_channel_status_v(channel);
257 u32 *inst_mem; 257 u32 *inst_mem;
258 struct channel_gk20a *c = g->fifo.channel + hw_chid; 258 struct channel_gk20a *c = g->fifo.channel + chid;
259 struct nvgpu_semaphore_int *hw_sema = NULL; 259 struct nvgpu_semaphore_int *hw_sema = NULL;
260 260
261 if (c->hw_sema) 261 if (c->hw_sema)
@@ -266,7 +266,7 @@ static void gv11b_dump_channel_status_ramfc(struct gk20a *g,
266 266
267 inst_mem = &ch_state->inst_block[0]; 267 inst_mem = &ch_state->inst_block[0];
268 268
269 gk20a_debug_output(o, "%d-%s, pid %d, refs: %d: ", hw_chid, 269 gk20a_debug_output(o, "%d-%s, pid %d, refs: %d: ", chid,
270 dev_name(g->dev), 270 dev_name(g->dev),
271 ch_state->pid, 271 ch_state->pid,
272 ch_state->refs); 272 ch_state->refs);
@@ -505,13 +505,13 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
505 return ret; 505 return ret;
506} 506}
507 507
508static void gv11b_reset_eng_faulted_ch(struct gk20a *g, u32 hw_chid) 508static void gv11b_reset_eng_faulted_ch(struct gk20a *g, u32 chid)
509{ 509{
510 u32 reg_val; 510 u32 reg_val;
511 511
512 reg_val = gk20a_readl(g, ccsr_channel_r(hw_chid)); 512 reg_val = gk20a_readl(g, ccsr_channel_r(chid));
513 reg_val |= ccsr_channel_eng_faulted_reset_f(); 513 reg_val |= ccsr_channel_eng_faulted_reset_f();
514 gk20a_writel(g, ccsr_channel_r(hw_chid), reg_val); 514 gk20a_writel(g, ccsr_channel_r(chid), reg_val);
515} 515}
516 516
517static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg) 517static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg)
@@ -521,18 +521,18 @@ static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg)
521 521
522 down_read(&tsg->ch_list_lock); 522 down_read(&tsg->ch_list_lock);
523 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 523 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
524 gv11b_reset_eng_faulted_ch(g, ch->hw_chid); 524 gv11b_reset_eng_faulted_ch(g, ch->chid);
525 } 525 }
526 up_read(&tsg->ch_list_lock); 526 up_read(&tsg->ch_list_lock);
527} 527}
528 528
529static void gv11b_reset_pbdma_faulted_ch(struct gk20a *g, u32 hw_chid) 529static void gv11b_reset_pbdma_faulted_ch(struct gk20a *g, u32 chid)
530{ 530{
531 u32 reg_val; 531 u32 reg_val;
532 532
533 reg_val = gk20a_readl(g, ccsr_channel_r(hw_chid)); 533 reg_val = gk20a_readl(g, ccsr_channel_r(chid));
534 reg_val |= ccsr_channel_pbdma_faulted_reset_f(); 534 reg_val |= ccsr_channel_pbdma_faulted_reset_f();
535 gk20a_writel(g, ccsr_channel_r(hw_chid), reg_val); 535 gk20a_writel(g, ccsr_channel_r(chid), reg_val);
536} 536}
537 537
538static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg) 538static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg)
@@ -542,7 +542,7 @@ static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg)
542 542
543 down_read(&tsg->ch_list_lock); 543 down_read(&tsg->ch_list_lock);
544 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 544 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
545 gv11b_reset_pbdma_faulted_ch(g, ch->hw_chid); 545 gv11b_reset_pbdma_faulted_ch(g, ch->chid);
546 } 546 }
547 up_read(&tsg->ch_list_lock); 547 up_read(&tsg->ch_list_lock);
548} 548}
@@ -703,13 +703,13 @@ static int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
703 return ret; 703 return ret;
704} 704}
705 705
706static int gv11b_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) 706static int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
707{ 707{
708 struct fifo_gk20a *f = &g->fifo; 708 struct fifo_gk20a *f = &g->fifo;
709 u32 tsgid; 709 u32 tsgid;
710 710
711 tsgid = f->channel[hw_chid].tsgid; 711 tsgid = f->channel[chid].tsgid;
712 nvgpu_log_info(g, "chid:%d tsgid:%d", hw_chid, tsgid); 712 nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
713 713
714 /* Preempt tsg. Channel preempt is NOOP */ 714 /* Preempt tsg. Channel preempt is NOOP */
715 return g->ops.fifo.preempt_tsg(g, tsgid); 715 return g->ops.fifo.preempt_tsg(g, tsgid);
@@ -919,9 +919,9 @@ static void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
919 gv11b_reset_eng_faulted_tsg(tsg); 919 gv11b_reset_eng_faulted_tsg(tsg);
920 } else { 920 } else {
921 if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID) 921 if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
922 gv11b_reset_pbdma_faulted_ch(g, refch->hw_chid); 922 gv11b_reset_pbdma_faulted_ch(g, refch->chid);
923 if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID) 923 if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
924 gv11b_reset_eng_faulted_ch(g, refch->hw_chid); 924 gv11b_reset_eng_faulted_ch(g, refch->chid);
925 } 925 }
926 } else { 926 } else {
927 if (id_type == ID_TYPE_TSG) 927 if (id_type == ID_TYPE_TSG)