path: root/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/fifo_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 61
1 file changed, 31 insertions(+), 30 deletions(-)
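
The changes below all follow one pattern: the legacy gk20a_dbg*() logging helpers are replaced with the nvgpu_log*() variants, which take an explicit struct gk20a pointer as their first argument, and a local `g` is derived from the TSG or channel where one was not already available (e.g. `struct gk20a *g = tsg->g;`). A minimal sketch of the conversion, using only calls that appear in this diff:

        /* before: debug helpers with no device argument */
        gk20a_dbg_fn("");
        gk20a_dbg_info("runlist_id = %d", runlist_id);
        gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d", pbdma_id);

        /* after: the struct gk20a pointer is passed explicitly */
        nvgpu_log_fn(g, " ");
        nvgpu_log_info(g, "runlist_id = %d", runlist_id);
        nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d", pbdma_id);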
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 11b393e5..932e7626 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -60,7 +60,7 @@
 
 void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
 {
-
+        struct gk20a *g = tsg->g;
         u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();
 
         if (tsg->timeslice_timeout)
@@ -79,7 +79,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
         runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid);
         runlist[3] = 0;
 
-        gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
+        nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
                 runlist[0], runlist[1], runlist[2], runlist[3]);
 
 }
@@ -119,7 +119,7 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
                         ram_rl_entry_chid_f(c->chid);
         runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);
 
-        gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
+        nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
                 runlist[0], runlist[1], runlist[2], runlist[3]);
 }
 
@@ -139,7 +139,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
         struct nvgpu_mem *mem = &c->inst_block;
         u32 data;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
@@ -211,10 +211,11 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 
 void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
 {
-        struct fifo_gk20a *f = &c->g->fifo;
+        struct gk20a *g = c->g;
+        struct fifo_gk20a *f = &g->fifo;
         u32 hw_chid = f->channel_base + c->chid;
 
-        gk20a_dbg_info("channel ring door bell %d\n", c->chid);
+        nvgpu_log_info(g, "channel ring door bell %d\n", c->chid);
 
         nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(),
                 usermode_notify_channel_pending_id_f(hw_chid));
@@ -256,7 +257,7 @@ void channel_gv11b_unbind(struct channel_gk20a *ch)
 {
         struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
                 gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
@@ -729,7 +730,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
                 func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id,
                                 timeout_rc_type);
                 if (func_ret != 0) {
-                        gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id);
+                        nvgpu_log_info(g, "preempt timeout pbdma %d", pbdma_id);
                         ret |= func_ret;
                 }
         }
@@ -743,7 +744,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
                                 timeout_rc_type);
 
                 if (func_ret != 0) {
-                        gk20a_dbg_info("preempt timeout engine %d", act_eng_id);
+                        nvgpu_log_info(g, "preempt timeout engine %d", act_eng_id);
                         ret |= func_ret;
                 }
         }
@@ -812,10 +813,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         u32 mutex_ret = 0;
         u32 runlist_id;
 
-        gk20a_dbg_fn("%d", tsgid);
+        nvgpu_log_fn(g, "%d", tsgid);
 
         runlist_id = f->tsg[tsgid].runlist_id;
-        gk20a_dbg_fn("runlist_id %d", runlist_id);
+        nvgpu_log_fn(g, "runlist_id %d", runlist_id);
 
         nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
@@ -839,7 +840,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
         u32 mutex_ret = 0;
         u32 runlist_id;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
                 if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
@@ -910,11 +911,11 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
                 return -EINVAL;
 
         if (runlist_id >= g->fifo.max_runlists) {
-                gk20a_dbg_info("runlist_id = %d", runlist_id);
+                nvgpu_log_info(g, "runlist_id = %d", runlist_id);
                 return -EINVAL;
         }
 
-        gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);
+        nvgpu_log_fn(g, "preempt id = %d, runlist_id = %d", id, runlist_id);
 
         nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
@@ -1155,7 +1156,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
         unsigned int i;
         u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         /* enable pmc pfifo */
         g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
@@ -1208,11 +1209,11 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
                 gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
 
                 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
-                gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
+                nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
                 gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
 
                 intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
-                gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
+                nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
                 gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
         }
 
@@ -1246,12 +1247,12 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
         /* clear and enable pfifo interrupt */
         gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
         mask = gv11b_fifo_intr_0_en_mask(g);
-        gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask);
+        nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
         gk20a_writel(g, fifo_intr_en_0_r(), mask);
-        gk20a_dbg_info("fifo_intr_en_1 = 0x80000000");
+        nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
         gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
 
         return 0;
 }
@@ -1350,7 +1351,7 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
 
                 tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
         }
-        gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid);
+        nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid);
 
         /*
          * STATUS indicates whether the context request ack was eventually
@@ -1391,14 +1392,14 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
         if (*info_status ==
                 fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {
 
-                gk20a_dbg_info("ctxsw timeout info : ack received");
+                nvgpu_log_info(g, "ctxsw timeout info : ack received");
                 /* no need to recover */
                 tsgid = FIFO_INVAL_TSG_ID;
 
         } else if (*info_status ==
                 fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {
 
-                gk20a_dbg_info("ctxsw timeout info : dropped timeout");
+                nvgpu_log_info(g, "ctxsw timeout info : dropped timeout");
                 /* no need to recover */
                 tsgid = FIFO_INVAL_TSG_ID;
 
@@ -1429,7 +1430,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
         timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
         timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val);
 
-        gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val);
+        nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val);
 
         for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
                 active_eng_id = g->fifo.active_engines_list[engine_id];
@@ -1469,7 +1470,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
                                         true, true, verbose,
                                         RC_TYPE_CTXSW_TIMEOUT);
                         } else {
-                                gk20a_dbg_info(
+                                nvgpu_log_info(g,
                                         "fifo is waiting for ctx switch: "
                                         "for %d ms, %s=%d", ms, "tsg", tsgid);
                         }
@@ -1490,7 +1491,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
                         pbdma_intr_0, handled, error_notifier);
 
         if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) {
-                gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d",
+                nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d",
                                 pbdma_id);
                 gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
                 *handled |= pbdma_intr_0_clear_faulted_error_pending_f();
@@ -1498,7 +1499,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
         }
 
         if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) {
-                gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d",
+                nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d",
                                 pbdma_id);
                 *handled |= pbdma_intr_0_eng_reset_pending_f();
                 rc_type = RC_TYPE_PBDMA_FAULT;
@@ -1545,7 +1546,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
                 return RC_TYPE_NO_RC;
 
         if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) {
-                gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
+                nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
                                 pbdma_id);
                 nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ",
                                 pbdma_id, pbdma_intr_1);
@@ -1753,7 +1754,7 @@ void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
         u64 gpu_va = gpu_va_base +
                 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         off = cmd->off + off;
 
@@ -1792,7 +1793,7 @@ void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
 {
         u32 off = cmd->off;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         /* semaphore_a */
         nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);