path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
author     Terje Bergstrom <tbergstrom@nvidia.com>  2017-03-30 10:44:03 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-10 22:04:19 -0400
commit     3ba374a5d94f8c2067731155afaf79f03e6c390c (patch)
tree       d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent     2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff)
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take a struct gk20a pointer. Convert code to use the
more portable macros.

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
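The conversion is mechanical: call sites stop looking up a Linux struct device
(via dev_from_gk20a() or g->dev) and pass the struct gk20a pointer straight
through, and the now-unused local struct device variables are dropped. A
minimal before/after sketch of the pattern, excerpted from one of the call
sites in this diff (the nvgpu_err() macro is presumably provided by the newly
included <nvgpu/log.h>):

	/* before: error logging needed a Linux struct device pointer */
	gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);

	/* after: the OS-agnostic struct gk20a pointer is enough */
	nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);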
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 100
1 file changed, 46 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index ca09c22a..48253e59 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -30,6 +30,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/log.h>
 
 #include "gk20a.h"
 #include "debug_gk20a.h"
@@ -105,7 +106,7 @@ struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 e
 	}
 
 	if (!info)
-		gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
+		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
 
 	return info;
 }
@@ -131,7 +132,7 @@ bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id)
 	}
 
 	if (!valid)
-		gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
+		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
 
 	return valid;
 }
@@ -146,7 +147,7 @@ u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
 			1, ENGINE_GR_GK20A);
 
 	if (!gr_engine_cnt) {
-		gk20a_err(dev_from_gk20a(g), "No GR engine available on this device!\n");
+		nvgpu_err(g, "No GR engine available on this device!\n");
 	}
 
 	return gr_engine_id;
@@ -218,7 +219,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
 			1, ENGINE_GR_GK20A);
 
 	if (!gr_engine_cnt) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"No GR engine available on this device!");
 		goto end;
 	}
@@ -228,7 +229,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
 	if (engine_info) {
 		gr_runlist_id = engine_info->runlist_id;
 	} else {
-		gk20a_err(g->dev,
+		nvgpu_err(g,
 			"gr_engine_id is not in active list/invalid %d", gr_engine_id);
 	}
 
@@ -273,7 +274,7 @@ static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
 	if (engine_info) {
 		fault_id = engine_info->fault_id;
 	} else {
-		gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
+		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
 	}
 	return fault_id;
 }
@@ -321,7 +322,6 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
 int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 {
 	struct gk20a *g = f->g;
-	struct device *d = dev_from_gk20a(g);
 	u32 i;
 	u32 max_info_entries = top_device_info__size_1_v();
 	u32 engine_enum = ENGINE_INVAL_GK20A;
@@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 		}
 
 		if (!found_pbdma_for_runlist) {
-			gk20a_err(d, "busted pbdma map");
+			nvgpu_err(g, "busted pbdma map");
 			return -EINVAL;
 		}
 	}
@@ -647,7 +647,6 @@ static void fifo_engine_exception_status(struct gk20a *g,
 static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
 	struct fifo_runlist_info_gk20a *runlist;
-	struct device *d = dev_from_gk20a(g);
 	unsigned int runlist_id;
 	u32 i;
 	size_t runlist_size;
@@ -689,7 +688,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 				&runlist->mem[i]);
 		if (err) {
-			dev_err(d, "memory allocation failed\n");
+			nvgpu_err(g, "memory allocation failed\n");
 			goto clean_up_runlist;
 		}
 	}
@@ -888,7 +887,6 @@ static void gk20a_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
 static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 {
 	struct fifo_gk20a *f = &g->fifo;
-	struct device *d = dev_from_gk20a(g);
 	unsigned int chid, i;
 	int err = 0;
 
@@ -948,7 +946,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 	err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
 				  f->num_channels, &f->userd);
 	if (err) {
-		dev_err(d, "userd memory allocation failed\n");
+		nvgpu_err(g, "userd memory allocation failed\n");
 		goto clean_up;
 	}
 	gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
@@ -1032,7 +1030,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
 	smp_mb();
 
 	if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
-		gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: CPU wrote 0x%x, \
+		nvgpu_err(g, "bar1 broken @ gk20a: CPU wrote 0x%x, \
 			GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr));
 		return -EINVAL;
 	}
@@ -1040,14 +1038,14 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
 	gk20a_bar1_writel(g, bar1_vaddr, v2);
 
 	if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
-		gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: GPU wrote 0x%x, \
+		nvgpu_err(g, "bar1 broken @ gk20a: GPU wrote 0x%x, \
 			CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr);
 		return -EINVAL;
 	}
 
 	/* is it visible to the cpu? */
 	if (*cpu_vaddr != v2) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"cpu didn't see bar1 write @ %p!",
 			cpu_vaddr);
 	}
@@ -1230,7 +1228,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 	}
 
 	if (engine_enum == ENGINE_INVAL_GK20A)
-		gk20a_err(dev_from_gk20a(g), "unsupported engine_id %d", engine_id);
+		nvgpu_err(g, "unsupported engine_id %d", engine_id);
 
 	if (engine_enum == ENGINE_GR_GK20A) {
 		if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
@@ -1242,7 +1240,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 			g->ops.fecs_trace.reset(g);
 		/*HALT_PIPELINE method, halt GR engine*/
 		if (gr_gk20a_halt_pipe(g))
-			gk20a_err(dev_from_gk20a(g), "failed to HALT gr pipe");
+			nvgpu_err(g, "failed to HALT gr pipe");
 		/* resetting engine using mc_enable_r() is not
 		   enough, we do full init sequence */
 		gk20a_gr_reset(g);
@@ -1260,16 +1258,15 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
 	u32 intr;
 
 	intr = gk20a_readl(g, fifo_intr_chsw_error_r());
-	gk20a_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
+	nvgpu_err(g, "chsw: %08x\n", intr);
 	gk20a_fecs_dump_falcon_stats(g);
 	gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
 }
 
 static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
 {
-	struct device *dev = dev_from_gk20a(g);
 	u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
-	gk20a_err(dev, "dropped mmu fault (0x%08x)", fault_id);
+	nvgpu_err(g, "dropped mmu fault (0x%08x)", fault_id);
 }
 
 bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
@@ -1381,7 +1378,7 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
 void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
 		struct channel_gk20a *refch)
 {
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"channel %d generated a mmu fault", refch->hw_chid);
 	gk20a_set_error_notifier(refch,
 				NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
@@ -1392,7 +1389,7 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
 {
 	struct channel_gk20a *ch = NULL;
 
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"TSG %d generated a mmu fault", tsg->tsgid);
 
 	down_read(&tsg->ch_list_lock);
@@ -1544,7 +1541,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 			f.engine_subid_desc,
 			f.client_desc,
 			f.fault_type_desc);
-		gk20a_err(dev_from_gk20a(g), "%s mmu fault on engine %d, "
+		nvgpu_err(g, "%s mmu fault on engine %d, "
 			"engine subid %d (%s), client %d (%s), "
 			"addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
 			"inst_ptr 0x%llx\n",
@@ -1558,7 +1555,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 		if (ctxsw) {
 			gk20a_fecs_dump_falcon_stats(g);
-			gk20a_err(dev_from_gk20a(g), "gr_status_r : 0x%x",
+			nvgpu_err(g, "gr_status_r : 0x%x",
 				gk20a_readl(g, gr_status_r()));
 		}
 
@@ -1654,18 +1651,18 @@ static bool gk20a_fifo_handle_mmu_fault(
 				gk20a_channel_abort(ch, false);
 				gk20a_channel_put(ch);
 			} else {
-				gk20a_err(dev_from_gk20a(g),
+				nvgpu_err(g,
 						"mmu error in freed channel %d",
 						ch->hw_chid);
 			}
 		} else if (f.inst_ptr ==
 				gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) {
-			gk20a_err(dev_from_gk20a(g), "mmu fault from bar1");
+			nvgpu_err(g, "mmu fault from bar1");
 		} else if (f.inst_ptr ==
 				gk20a_mm_inst_block_addr(g, &g->mm.pmu.inst_block)) {
-			gk20a_err(dev_from_gk20a(g), "mmu fault from pmu");
+			nvgpu_err(g, "mmu fault from pmu");
 		} else
-			gk20a_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
+			nvgpu_err(g, "couldn't locate channel for mmu fault");
 	}
 
 	/* clear interrupt */
@@ -2137,7 +2134,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 
 	/* could not find the engine - should never happen */
 	if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
-		gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, failed to find engine\n",
+		nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine\n",
 			sched_error);
 		ret = false;
 		goto err;
@@ -2158,7 +2155,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 		}
 
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"fifo sched ctxsw timeout error: "
 				"engine=%u, %s=%d, ms=%u",
 				engine_id, is_tsg ? "tsg" : "ch", id, ms);
@@ -2175,7 +2172,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 				"%s=%d", ms, is_tsg ? "tsg" : "ch", id);
 		}
 	} else {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"fifo sched error : 0x%08x, engine=%u, %s=%d",
 			sched_error, engine_id, is_tsg ? "tsg" : "ch", id);
 	}
@@ -2187,7 +2184,6 @@ err:
 static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 {
 	bool print_channel_reset_log = false;
-	struct device *dev = dev_from_gk20a(g);
 	u32 handled = 0;
 
 	gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr);
@@ -2195,13 +2191,13 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 	if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
 		/* pio mode is unused. this shouldn't happen, ever. */
 		/* should we clear it or just leave it pending? */
-		gk20a_err(dev, "fifo pio error!\n");
+		nvgpu_err(g, "fifo pio error!\n");
 		BUG_ON(1);
 	}
 
 	if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
 		u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
-		gk20a_err(dev, "fifo bind error: 0x%08x", bind_error);
+		nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
 		print_channel_reset_log = true;
 		handled |= fifo_intr_0_bind_error_pending_f();
 	}
@@ -2233,7 +2229,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 
 	if (print_channel_reset_log) {
 		unsigned int engine_id;
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"channel reset initiated from %s; intr=0x%08x",
 			__func__, fifo_intr);
 		for (engine_id = 0;
@@ -2301,8 +2297,7 @@ static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id,
 	return false;
 }
 
-static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
-					struct gk20a *g,
+static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
 					struct fifo_gk20a *f,
 					u32 pbdma_id)
 {
@@ -2323,7 +2318,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
 	if ((f->intr.pbdma.device_fatal_0 |
 	     f->intr.pbdma.channel_fatal_0 |
 	     f->intr.pbdma.restartable_0) & pbdma_intr_0) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x %08x %08x %08x",
 			pbdma_id, pbdma_intr_0,
 			gk20a_readl(g, pbdma_pb_header_r(pbdma_id)),
@@ -2346,7 +2341,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
 		gk20a_writel(g, pbdma_acquire_r(pbdma_id), val);
 		if (g->timeouts_enabled) {
 			reset = true;
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"semaphore acquire timeout!");
 		}
 		handled |= pbdma_intr_0_acquire_pending_f();
@@ -2387,7 +2382,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
 	/* all intrs in _intr_1 are "host copy engine" related,
 	 * which gk20a doesn't have. for now just make them channel fatal. */
 	if (pbdma_intr_1) {
-		dev_err(dev, "channel hce error: pbdma_intr_1(%d): 0x%08x",
+		nvgpu_err(g, "channel hce error: pbdma_intr_1(%d): 0x%08x",
 			pbdma_id, pbdma_intr_1);
 		reset = true;
 		gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
@@ -2428,7 +2423,6 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
 
 static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
 {
-	struct device *dev = dev_from_gk20a(g);
 	struct fifo_gk20a *f = &g->fifo;
 	u32 clear_intr = 0, i;
 	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
@@ -2438,7 +2432,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
 		if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
 			gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i);
 			clear_intr |=
-				gk20a_fifo_handle_pbdma_intr(dev, g, f, i);
+				gk20a_fifo_handle_pbdma_intr(g, f, i);
 		}
 	}
 	return fifo_intr_0_pbdma_intr_pending_f();
@@ -2534,7 +2528,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		struct tsg_gk20a *tsg = &g->fifo.tsg[id];
 		struct channel_gk20a *ch = NULL;
 
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"preempt TSG %d timeout\n", id);
 
 		down_read(&tsg->ch_list_lock);
@@ -2550,7 +2544,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 	} else {
 		struct channel_gk20a *ch = &g->fifo.channel[id];
 
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"preempt channel %d timeout\n", id);
 
 		if (gk20a_channel_get(ch)) {
@@ -2733,7 +2727,7 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
 		err = gk20a_fifo_enable_engine_activity(g,
 				&g->fifo.engine_info[active_engine_id]);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"failed to enable engine %d activity\n", active_engine_id);
 			ret = err;
 		}
@@ -2806,7 +2800,7 @@ clean_up:
 	if (err) {
 		gk20a_dbg_fn("failed");
 		if (gk20a_fifo_enable_engine_activity(g, eng_info))
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"failed to enable gr engine activity\n");
 	} else {
 		gk20a_dbg_fn("done");
@@ -3155,7 +3149,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 		ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
 
 		if (ret == -ETIMEDOUT) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				   "runlist update timeout");
 
 			gk20a_fifo_runlist_reset_engines(g, runlist_id);
@@ -3167,10 +3161,10 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 			 * should be fine */
 
 			if (ret)
-				gk20a_err(dev_from_gk20a(g),
+				nvgpu_err(g,
 					   "runlist update failed: %d", ret);
 		} else if (ret == -EINTR)
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				   "runlist update interrupted");
 	}
 
@@ -3196,7 +3190,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
 		/* Capture the last failure error code */
 		errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish);
 		if (errcode) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"failed to update_runlist %d %d", runlist_id, errcode);
 			ret = errcode;
 		}
@@ -4051,8 +4045,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 	struct gk20a *g = ch->g;
 
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		gk20a_err(dev_from_gk20a(ch->g),
-			"invalid operation for TSG!\n");
+		nvgpu_err(g, "invalid operation for TSG!\n");
 		return -EINVAL;
 	}
 
@@ -4071,8 +4064,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority)
 {
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		gk20a_err(dev_from_gk20a(ch->g),
-			"invalid operation for TSG!\n");
+		nvgpu_err(ch->g, "invalid operation for TSG!\n");
 		return -EINVAL;
 	}
 