-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c |   4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c    |   8
-rw-r--r--  drivers/gpu/nvgpu/common/linux/kmem.c          |   2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c            |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c        |  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c   |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c        |  16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c           |  32
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c                |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c             |  98
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ltc_gk20a.c            |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c             |   4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c            |  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c         |   4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sim_gk20a.c            |  14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c            |   2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c            |   2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/ltc_gm20b.c            |   2
-rw-r--r--  drivers/gpu/nvgpu/gp106/xve_gp106.c            |   2
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c          |   4
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c             |   4
-rw-r--r--  drivers/gpu/nvgpu/therm/thrmpmu.c              |   2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c             |   4
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c   |   4
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c               |   6
25 files changed, 123 insertions(+), 123 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index c7adb76c..2502ff30 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -203,7 +203,7 @@ static int gk20a_init_error_notifier(struct channel_gk20a *ch,
 
 	if (end > dmabuf->size || end < sizeof(struct nvgpu_notification)) {
 		dma_buf_put(dmabuf);
-		nvgpu_err(ch->g, "gk20a_init_error_notifier: invalid offset\n");
+		nvgpu_err(ch->g, "gk20a_init_error_notifier: invalid offset");
 		return -EINVAL;
 	}
 
@@ -462,7 +462,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
 
 	if (end > dmabuf->size || end < sizeof(struct notification)) {
 		dma_buf_put(dmabuf);
-		nvgpu_err(g, "invalid notifier offset\n");
+		nvgpu_err(g, "invalid notifier offset");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
index 8d95dd35..0f3d762d 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
@@ -357,7 +357,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
 		struct nvgpu_gpu_mmu_debug_mode_args *args)
 {
 	if (gk20a_busy(g)) {
-		nvgpu_err(g, "failed to power on gpu\n");
+		nvgpu_err(g, "failed to power on gpu");
 		return -EINVAL;
 	}
 
@@ -559,7 +559,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
 	unsigned int i = 0;
 
 	if (gk20a_busy(g)) {
-		nvgpu_err(g, "GPU not powered on\n");
+		nvgpu_err(g, "GPU not powered on");
 		err = -EINVAL;
 		goto end;
 	}
@@ -598,7 +598,7 @@ static int nvgpu_gpu_get_cpu_time_correlation_info(
 		get_cpu_timestamp = get_cpu_timestamp_timeofday;
 		break;
 	default:
-		nvgpu_err(g, "invalid cpu clock source id\n");
+		nvgpu_err(g, "invalid cpu clock source id");
 		return -EINVAL;
 	}
 
@@ -663,7 +663,7 @@ static int nvgpu_gpu_get_engine_info(
 			break;
 
 		default:
-			nvgpu_err(g, "Unmapped engine enum %u\n",
+			nvgpu_err(g, "Unmapped engine enum %u",
 				engine_enum);
 			continue;
 		}
diff --git a/drivers/gpu/nvgpu/common/linux/kmem.c b/drivers/gpu/nvgpu/common/linux/kmem.c
index 0d185e56..8422d761 100644
--- a/drivers/gpu/nvgpu/common/linux/kmem.c
+++ b/drivers/gpu/nvgpu/common/linux/kmem.c
@@ -819,7 +819,7 @@ void nvgpu_kmem_fini(struct gk20a *g, int flags)
 		if (flags & NVGPU_KMEM_FINI_WARN) {
 			WARN(1, "Letting %d allocs leak!!\n", count);
 		} else if (flags & NVGPU_KMEM_FINI_BUG) {
-			nvgpu_err(g, "Letting %d allocs leak!!\n", count);
+			nvgpu_err(g, "Letting %d allocs leak!!", count);
 			BUG();
 		}
 	}
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 9238a9df..c2d80e2d 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -399,7 +399,7 @@ clean_up:
 	gk20a_mm_unpin(g->dev, dmabuf, bfr.sgt);
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
-	nvgpu_log_info(g, "err=%d\n", err);
+	nvgpu_log_info(g, "err=%d", err);
 	return 0;
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index ab6b8ec1..6e80a109 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -315,7 +315,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 	} while (!nvgpu_timeout_expired(&timeout));
 
 	if (!channel_idle) {
-		nvgpu_err(ch->g, "jobs not freed for channel %d\n",
+		nvgpu_err(ch->g, "jobs not freed for channel %d",
 				ch->hw_chid);
 		return -EBUSY;
 	}
@@ -336,7 +336,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 	int ret;
 
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		nvgpu_err(g, "invalid operation for TSG!\n");
+		nvgpu_err(g, "invalid operation for TSG!");
 		return -EINVAL;
 	}
 
@@ -916,7 +916,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 
 	err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 	if (err) {
-		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed", __func__);
 		goto clean_up;
 	}
 
@@ -1032,7 +1032,7 @@ static int channel_gk20a_alloc_job(struct channel_gk20a *c,
 			*job_out = &c->joblist.pre_alloc.jobs[put];
 		else {
 			nvgpu_warn(c->g,
-				"out of job ringbuffer space\n");
+				"out of job ringbuffer space");
 			err = -EAGAIN;
 		}
 	} else {
@@ -1261,7 +1261,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 			gpfifo_size * sizeof(struct nvgpu_gpfifo),
 			&c->gpfifo.mem);
 	if (err) {
-		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed", __func__);
 		goto clean_up;
 	}
 
@@ -1906,7 +1906,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 		/* read the entry's valid flag before reading its contents */
 		rmb();
 		if ((q->get != e->off) && e->off != 0)
-			nvgpu_err(g, "requests out-of-order, ch=%d\n",
+			nvgpu_err(g, "requests out-of-order, ch=%d",
 					c->hw_chid);
 		q->get = e->off + e->size;
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index ed83663d..eac7dbc3 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -503,7 +503,7 @@ static void gk20a_channel_semaphore_launcher(
 			fence, fence->name);
 	err = sync_fence_wait(fence, -1);
 	if (err < 0)
-		nvgpu_err(g, "error waiting pre-fence: %d\n", err);
+		nvgpu_err(g, "error waiting pre-fence: %d", err);
 
 	gk20a_dbg_info(
 		"wait completed (%d) for fence %p '%s', triggering gpu work",
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 1f9b856d..09268b6b 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -241,7 +241,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
 	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch) {
 		nvgpu_err(dbg_s->g,
-			"no channel bound to dbg session\n");
+			"no channel bound to dbg session");
 		return -EINVAL;
 	}
 
@@ -759,7 +759,7 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
 			write_size);
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	if (err) {
-		nvgpu_err(g, "copy_to_user failed!\n");
+		nvgpu_err(g, "copy_to_user failed!");
 		return err;
 	}
 
@@ -1197,7 +1197,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 	/* be sure that ctx info is in place */
 	if (!g->is_virtual &&
 		!gr_context_info_available(dbg_s, &g->gr)) {
-		nvgpu_err(g, "gr context data not available\n");
+		nvgpu_err(g, "gr context data not available");
 		return -ENODEV;
 	}
 
@@ -1414,7 +1414,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch_gk20a) {
 		nvgpu_err(g,
-			"no bound channel for smpc ctxsw mode update\n");
+			"no bound channel for smpc ctxsw mode update");
 		err = -EINVAL;
 		goto clean_up;
 	}
@@ -1423,7 +1423,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 			args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
 	if (err) {
 		nvgpu_err(g,
-			"error (%d) during smpc ctxsw mode update\n", err);
+			"error (%d) during smpc ctxsw mode update", err);
 		goto clean_up;
 	}
 
@@ -1466,7 +1466,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch_gk20a) {
 		nvgpu_err(g,
-			"no bound channel for pm ctxsw mode update\n");
+			"no bound channel for pm ctxsw mode update");
 		err = -EINVAL;
 		goto clean_up;
 	}
@@ -1475,7 +1475,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 			args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
 	if (err)
 		nvgpu_err(g,
-			"error (%d) during pm ctxsw mode update\n", err);
+			"error (%d) during pm ctxsw mode update", err);
 
 	/* gk20a would require a WAR to set the core PM_ENABLE bit, not
 	 * added here with gk20a being deprecated
@@ -1528,7 +1528,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err)
-		nvgpu_err(g, "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!");
 
 clean_up:
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index f2fc6234..05c13374 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -152,7 +152,7 @@ u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
 			1, ENGINE_GR_GK20A);
 
 	if (!gr_engine_cnt) {
-		nvgpu_err(g, "No GR engine available on this device!\n");
+		nvgpu_err(g, "No GR engine available on this device!");
 	}
 
 	return gr_engine_id;
@@ -693,7 +693,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 				&runlist->mem[i]);
 		if (err) {
-			nvgpu_err(g, "memory allocation failed\n");
+			nvgpu_err(g, "memory allocation failed");
 			goto clean_up_runlist;
 		}
 	}
@@ -947,7 +947,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 	err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
 			f->num_channels, &f->userd);
 	if (err) {
-		nvgpu_err(g, "userd memory allocation failed\n");
+		nvgpu_err(g, "userd memory allocation failed");
 		goto clean_up;
 	}
 	gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
@@ -1001,7 +1001,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g)
 {
 	u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
 
-	gk20a_dbg(gpu_dbg_intr, "runlist event %08x\n",
+	gk20a_dbg(gpu_dbg_intr, "runlist event %08x",
 		runlist_event);
 
 	gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
@@ -1259,7 +1259,7 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
 	u32 intr;
 
 	intr = gk20a_readl(g, fifo_intr_chsw_error_r());
-	nvgpu_err(g, "chsw: %08x\n", intr);
+	nvgpu_err(g, "chsw: %08x", intr);
 	gk20a_fecs_dump_falcon_stats(g);
 	gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
 }
@@ -1545,7 +1545,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 		nvgpu_err(g, "%s mmu fault on engine %d, "
 			"engine subid %d (%s), client %d (%s), "
 			"addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
-			"inst_ptr 0x%llx\n",
+			"inst_ptr 0x%llx",
 			fake_fault ? "fake" : "",
 			engine_id,
 			f.engine_subid_v, f.engine_subid_desc,
@@ -2136,7 +2136,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 
 	/* could not find the engine - should never happen */
 	if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
-		nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine\n",
+		nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine",
 			sched_error);
 		ret = false;
 		goto err;
@@ -2193,7 +2193,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 	if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
 		/* pio mode is unused. this shouldn't happen, ever. */
 		/* should we clear it or just leave it pending? */
-		nvgpu_err(g, "fifo pio error!\n");
+		nvgpu_err(g, "fifo pio error!");
 		BUG_ON(1);
 	}
 
@@ -2547,7 +2547,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		struct channel_gk20a *ch = NULL;
 
 		nvgpu_err(g,
-			"preempt TSG %d timeout\n", id);
+			"preempt TSG %d timeout", id);
 
 		down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2563,7 +2563,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		struct channel_gk20a *ch = &g->fifo.channel[id];
 
 		nvgpu_err(g,
-			"preempt channel %d timeout\n", id);
+			"preempt channel %d timeout", id);
 
 		if (gk20a_channel_get(ch)) {
 			gk20a_set_error_notifier(ch,
@@ -2746,7 +2746,7 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
 			&g->fifo.engine_info[active_engine_id]);
 		if (err) {
 			nvgpu_err(g,
-				"failed to enable engine %d activity\n", active_engine_id);
+				"failed to enable engine %d activity", active_engine_id);
 			ret = err;
 		}
 	}
@@ -2819,7 +2819,7 @@ clean_up:
 		gk20a_dbg_fn("failed");
 		if (gk20a_fifo_enable_engine_activity(g, eng_info))
 			nvgpu_err(g,
-				"failed to enable gr engine activity\n");
+				"failed to enable gr engine activity");
 	} else {
 		gk20a_dbg_fn("done");
 	}
@@ -2839,7 +2839,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
 			&g->fifo.engine_info[active_engine_id],
 			wait_for_idle);
 		if (err) {
-			nvgpu_err(g, "failed to disable engine %d activity\n",
+			nvgpu_err(g, "failed to disable engine %d activity",
 				active_engine_id);
 			ret = err;
 			break;
@@ -2853,7 +2853,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
 				&g->fifo.engine_info[active_engine_id]);
 			if (err)
 				nvgpu_err(g,
-					"failed to re-enable engine %d activity\n",
+					"failed to re-enable engine %d activity",
 					active_engine_id);
 		}
 	}
@@ -4108,7 +4108,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 	struct gk20a *g = ch->g;
 
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		nvgpu_err(g, "invalid operation for TSG!\n");
+		nvgpu_err(g, "invalid operation for TSG!");
 		return -EINVAL;
 	}
 
@@ -4127,7 +4127,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority)
 {
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		nvgpu_err(ch->g, "invalid operation for TSG!\n");
+		nvgpu_err(ch->g, "invalid operation for TSG!");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 3d730022..f4e7fe45 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -346,7 +346,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		speed = 1 << (fls(speed) - 1);
 		err = g->ops.xve.set_speed(g, speed);
 		if (err) {
-			nvgpu_err(g, "Failed to set PCIe bus speed!\n");
+			nvgpu_err(g, "Failed to set PCIe bus speed!");
 			goto done;
 		}
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 2b1013a0..f9c76ae5 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -784,13 +784,13 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 
 	ret = gk20a_disable_channel_tsg(g, c);
 	if (ret) {
-		nvgpu_err(g, "failed to disable channel/TSG\n");
+		nvgpu_err(g, "failed to disable channel/TSG");
 		goto clean_up;
 	}
 	ret = gk20a_fifo_preempt(g, c);
 	if (ret) {
 		gk20a_enable_channel_tsg(g, c);
-		nvgpu_err(g, "failed to preempt channel/TSG\n");
+		nvgpu_err(g, "failed to preempt channel/TSG");
 		goto clean_up;
 	}
 
@@ -1857,13 +1857,13 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 
 	ret = gk20a_disable_channel_tsg(g, c);
 	if (ret) {
-		nvgpu_err(g, "failed to disable channel/TSG\n");
+		nvgpu_err(g, "failed to disable channel/TSG");
 		goto out;
 	}
 	ret = gk20a_fifo_preempt(g, c);
 	if (ret) {
 		gk20a_enable_channel_tsg(g, c);
-		nvgpu_err(g, "failed to preempt channel/TSG\n");
+		nvgpu_err(g, "failed to preempt channel/TSG");
 		goto out;
 	}
 
@@ -1925,14 +1925,14 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	ret = gk20a_disable_channel_tsg(g, c);
 	if (ret) {
-		nvgpu_err(g, "failed to disable channel/TSG\n");
+		nvgpu_err(g, "failed to disable channel/TSG");
 		return ret;
 	}
 
 	ret = gk20a_fifo_preempt(g, c);
 	if (ret) {
 		gk20a_enable_channel_tsg(g, c);
-		nvgpu_err(g, "failed to preempt channel/TSG\n");
+		nvgpu_err(g, "failed to preempt channel/TSG");
 		return ret;
 	}
 
@@ -2213,7 +2213,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 					false,
 					ucode_info->surface_desc.aperture);
 	if (!ucode_info->surface_desc.gpu_va) {
-		nvgpu_err(g, "failed to update gmmu ptes\n");
+		nvgpu_err(g, "failed to update gmmu ptes");
 		return -ENOMEM;
 	}
 
@@ -2977,7 +2977,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
 	int err;
 
 	if (!tsg->vm) {
-		nvgpu_err(tsg->g, "No address space bound\n");
+		nvgpu_err(tsg->g, "No address space bound");
 		return -ENOMEM;
 	}
 
@@ -3017,7 +3017,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
 {
 	if (!tsg->vm) {
-		nvgpu_err(tsg->g, "No address space bound\n");
+		nvgpu_err(tsg->g, "No address space bound");
 		return;
 	}
 	tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx);
@@ -3942,7 +3942,7 @@ static void gr_gk20a_detect_sm_arch(struct gk20a *g)
 	if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v())
 		version = 0x320; /* SM 3.2 */
 	else
-		nvgpu_err(g, "Unknown SM version 0x%x\n",
+		nvgpu_err(g, "Unknown SM version 0x%x",
 			raw_version);
 
 	/* on Kepler, SM version == SPA version */
@@ -4056,7 +4056,7 @@ clean_up:
 	ret = gk20a_fifo_enable_engine_activity(g, gr_info);
 	if (ret) {
 		nvgpu_err(g,
-			"failed to enable gr engine activity\n");
+			"failed to enable gr engine activity");
 	}
 }
 
@@ -4181,7 +4181,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	case GK20A_ZBC_TYPE_COLOR:
 		if (index >= GK20A_ZBC_TABLE_SIZE) {
 			nvgpu_err(g,
-				"invalid zbc color table index\n");
+				"invalid zbc color table index");
 			return -EINVAL;
 		}
 		for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
@@ -4196,7 +4196,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	case GK20A_ZBC_TYPE_DEPTH:
 		if (index >= GK20A_ZBC_TABLE_SIZE) {
 			nvgpu_err(g,
-				"invalid zbc depth table index\n");
+				"invalid zbc depth table index");
 			return -EINVAL;
 		}
 		query_params->depth = gr->zbc_dep_tbl[index].depth;
@@ -4209,13 +4209,13 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 					query_params);
 		} else {
 			nvgpu_err(g,
-				"invalid zbc table type\n");
+				"invalid zbc table type");
 			return -EINVAL;
 		}
 		break;
 	default:
 		nvgpu_err(g,
-			"invalid zbc table type\n");
+			"invalid zbc table type");
 		return -EINVAL;
 	}
 
@@ -4305,7 +4305,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
 		gr->max_default_color_index = 3;
 	else {
 		nvgpu_err(g,
-			"fail to load default zbc color table\n");
+			"fail to load default zbc color table");
 		return err;
 	}
 
@@ -4324,7 +4324,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
 		gr->max_default_depth_index = 2;
 	else {
 		nvgpu_err(g,
-			"fail to load default zbc depth table\n");
+			"fail to load default zbc depth table");
 		return err;
 	}
 
@@ -5212,7 +5212,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	if (!pmu->pg_buf.cpu_va) {
 		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
-			nvgpu_err(g, "failed to allocate memory\n");
+			nvgpu_err(g, "failed to allocate memory");
 			return -ENOMEM;
 		}
 	}
@@ -5589,7 +5589,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
 	gk20a_gr_set_error_notifier(g, isr_data,
 		NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
 	nvgpu_err(g,
-		"gr semaphore timeout\n");
+		"gr semaphore timeout");
 	return -EINVAL;
 }
 
@@ -5601,7 +5601,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
 		NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
 	/* This is an unrecoverable error, reset is needed */
 	nvgpu_err(g,
-		"gr semaphore timeout\n");
+		"gr semaphore timeout");
 	return -EINVAL;
 }
 
@@ -5615,7 +5615,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
 		gk20a_gr_set_error_notifier(g, isr_data,
 			NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
 		nvgpu_err(g, "invalid method class 0x%08x"
-			", offset 0x%08x address 0x%08x\n",
+			", offset 0x%08x address 0x%08x",
 			isr_data->class_num, isr_data->offset, isr_data->addr);
 	}
 	return ret;
@@ -5675,7 +5675,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
 			NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
 	nvgpu_err(g,
 		"class error 0x%08x, offset 0x%08x,"
-		" unhandled intr 0x%08x for channel %u\n",
+		" unhandled intr 0x%08x for channel %u",
 		isr_data->class_num, isr_data->offset,
 		gr_class_error, isr_data->chid);
 
@@ -5690,7 +5690,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
 	gk20a_gr_set_error_notifier(g, isr_data,
 			NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
 	nvgpu_err(g,
-		"firmware method 0x%08x, offset 0x%08x for channel %u\n",
+		"firmware method 0x%08x, offset 0x%08x for channel %u",
 		isr_data->class_num, isr_data->offset,
 		isr_data->chid);
 	return -EINVAL;
@@ -5768,7 +5768,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 		if (offset + sizeof(struct share_buffer_head) > buffer_size ||
 		    offset + sizeof(struct share_buffer_head) < offset) {
 			nvgpu_err(g,
-				"cyclestats buffer overrun at offset 0x%x\n",
+				"cyclestats buffer overrun at offset 0x%x",
 				offset);
 			break;
 		}
@@ -5786,7 +5786,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 		    offset + sh_hdr->size > buffer_size ||
 		    offset + sh_hdr->size < offset) {
 			nvgpu_err(g,
-				"bad cyclestate buffer header size at offset 0x%x\n",
+				"bad cyclestate buffer header size at offset 0x%x",
 				offset);
 			sh_hdr->failed = true;
 			break;
@@ -5810,7 +5810,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 
 			if (!valid) {
 				nvgpu_err(g,
-					"invalid cycletstats op offset: 0x%x\n",
+					"invalid cycletstats op offset: 0x%x",
 					op_elem->offset_bar0);
 
 				sh_hdr->failed = exit = true;
@@ -6065,7 +6065,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		nvgpu_err(g, "unable to stop gr ctxsw\n");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto fail;
 	}
 
@@ -6125,7 +6125,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		nvgpu_err(g, "unable to stop gr ctxsw\n");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto fail;
 	}
 
@@ -6179,7 +6179,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr);
 
 	if (!sm_debugger_attached) {
-		nvgpu_err(g, "sm hww global %08x warp %08x\n",
+		nvgpu_err(g, "sm hww global %08x warp %08x",
 			global_esr, warp_esr);
 		return -EFAULT;
 	}
@@ -6199,7 +6199,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				&early_exit,
 				&ignore_debugger);
 		if (ret) {
-			nvgpu_err(g, "could not pre-process sm error!\n");
+			nvgpu_err(g, "could not pre-process sm error!");
 			return ret;
 		}
 	}
@@ -6241,7 +6241,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	if (do_warp_sync) {
 		ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, true);
 		if (ret) {
-			nvgpu_err(g, "sm did not lock down!\n");
+			nvgpu_err(g, "sm did not lock down!");
 			return ret;
 		}
 	}
@@ -7357,7 +7357,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o());
 	if (gpc_num >= num_gpcs) {
 		nvgpu_err(g,
-			"GPC 0x%08x is greater than total count 0x%08x!\n",
+			"GPC 0x%08x is greater than total count 0x%08x!",
 			gpc_num, num_gpcs);
 		return -EINVAL;
 	}
@@ -7378,7 +7378,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	context += ctxsw_prog_ucode_header_size_in_bytes();
 	if (!check_local_header_magic(context)) {
 		nvgpu_err(g,
-			"Invalid local header: magic value\n");
+			"Invalid local header: magic value");
 		return -EINVAL;
 	}
 
@@ -7409,7 +7409,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 
 		if (chk_addr != addr) {
 			nvgpu_err(g,
-				"Oops addr miss-match! : 0x%08x != 0x%08x\n",
+				"Oops addr miss-match! : 0x%08x != 0x%08x",
 				addr, chk_addr);
 			return -EINVAL;
 		}
@@ -7440,7 +7440,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 
 		if (chk_addr != addr) {
 			nvgpu_err(g,
-				"Oops addr miss-match! : 0x%08x != 0x%08x\n",
+				"Oops addr miss-match! : 0x%08x != 0x%08x",
 				addr, chk_addr);
 			return -EINVAL;
 
@@ -7509,7 +7509,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	 * extended buffer? */
 	if (offset_to_segment > offset_to_segment_end) {
 		nvgpu_err(g,
-			"Overflow ctxsw buffer! 0x%08x > 0x%08x\n",
+			"Overflow ctxsw buffer! 0x%08x > 0x%08x",
 			offset_to_segment, offset_to_segment_end);
 		return -EINVAL;
 	}
@@ -7710,7 +7710,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 	context += ctxsw_prog_ucode_header_size_in_bytes();
 	if (!check_local_header_magic(context)) {
 		nvgpu_err(g,
-			"Invalid FECS local header: magic value\n");
+			"Invalid FECS local header: magic value");
 		return -EINVAL;
 	}
 	data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o());
@@ -7745,7 +7745,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 
 	if ((gpc_num + 1) > num_gpcs) {
 		nvgpu_err(g,
-			"GPC %d not in this context buffer.\n",
+			"GPC %d not in this context buffer.",
 			gpc_num);
 		return -EINVAL;
 	}
@@ -7755,7 +7755,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 		context += ctxsw_prog_ucode_header_size_in_bytes();
 		if (!check_local_header_magic(context)) {
 			nvgpu_err(g,
-				"Invalid GPCCS local header: magic value\n");
+				"Invalid GPCCS local header: magic value");
 			return -EINVAL;
 
 		}
@@ -7772,7 +7772,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 
 		if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) {
 			nvgpu_err(g,
-				"GPC %d TPC %d not in this context buffer.\n",
+				"GPC %d TPC %d not in this context buffer.",
 				gpc_num, tpc_num);
 			return -EINVAL;
 		}
@@ -8547,7 +8547,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 	tmp_err = gr_gk20a_enable_ctxsw(g);
 	if (tmp_err) {
-		nvgpu_err(g, "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!");
 		err = tmp_err;
 	}
 
@@ -8718,7 +8718,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 		nvgpu_err(g,
 			"GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc);
 		nvgpu_err(g,
-			"STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
+			"STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx",
 			gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0,
 			warps_valid, warps_paused, warps_trapped);
 
@@ -8739,7 +8739,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
 	/* if an SM debugger isn't attached, skip suspend */
 	if (!gk20a_gr_sm_debugger_attached(g)) {
 		nvgpu_err(g,
-			"SM debugger not attached, skipping suspend!\n");
+			"SM debugger not attached, skipping suspend!");
 		return;
 	}
 
@@ -8754,7 +8754,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
 			global_esr_mask, check_errors);
 	if (err) {
 		nvgpu_err(g,
-			"SuspendSm failed\n");
+			"SuspendSm failed");
 		return;
 	}
 }
@@ -8770,7 +8770,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
 	/* if an SM debugger isn't attached, skip suspend */
 	if (!gk20a_gr_sm_debugger_attached(g)) {
 		nvgpu_err(g,
-			"SM debugger not attached, skipping suspend!\n");
+			"SM debugger not attached, skipping suspend!");
 		return;
 	}
 
@@ -8791,7 +8791,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
 				global_esr_mask, check_errors);
 		if (err) {
 			nvgpu_err(g,
-				"SuspendAllSms failed\n");
+				"SuspendAllSms failed");
 			return;
 		}
 	}
@@ -9099,7 +9099,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
 
 	err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
 	if (err)
-		nvgpu_err(g, "Failed to access register\n");
+		nvgpu_err(g, "Failed to access register");
 	nvgpu_kfree(g, ops);
 	return err;
 }
@@ -9237,7 +9237,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err)
-		nvgpu_err(g, "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!");
 
 	*ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 
@@ -9275,7 +9275,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err)
-		nvgpu_err(g, "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!");
 
 	*ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index 70b688f6..23576ce0 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -185,7 +185,7 @@ static void gk20a_ltc_isr(struct gk20a *g)
 	u32 intr;
 
 	intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r());
-	nvgpu_err(g, "ltc: %08x\n", intr);
+	nvgpu_err(g, "ltc: %08x", intr);
 	gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 183a540a..72f9eeab 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1171,7 +1171,7 @@ fail_validate:
 	if (allocated)
 		__nvgpu_vm_free_va(vm, map_offset, pgsz_idx);
 fail_alloc:
-	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d", __func__, err);
 	return 0;
 }
 
@@ -2670,7 +2670,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 
 	err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
 	if (err) {
-		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed", __func__);
 		return err;
 	}
 
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 8b28a71a..a9e03943 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2193,7 +2193,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
 			get_pmu_sequence_out_alloc_ptr_v0;
 		break;
 	default:
-		nvgpu_err(g, "PMU code version not supported version: %d\n",
+		nvgpu_err(g, "PMU code version not supported version: %d",
 			pmu->desc->app_version);
 		err = -EINVAL;
 		goto fail_pmu_seq;
@@ -3227,7 +3227,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
 			&pmu->seq_buf);
 	if (err) {
-		nvgpu_err(g, "failed to allocate memory\n");
+		nvgpu_err(g, "failed to allocate memory");
 		goto err_free_seq;
 	}
 
@@ -3244,7 +3244,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 	err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
 			&pmu->trace_buf);
 	if (err) {
-		nvgpu_err(g, "failed to allocate pmu trace buffer\n");
+		nvgpu_err(g, "failed to allocate pmu trace buffer");
 		goto err_free_seq_buf;
 	}
 
@@ -4542,7 +4542,7 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
 		nvgpu_err(g, "PMU_FALCON_REG_SP : 0x%x",
 			gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 	}
-	nvgpu_err(g, "elpg stat: %d\n",
+	nvgpu_err(g, "elpg stat: %d",
 		pmu->elpg_stat);
 
 	/* PMU may crash due to FECS crash. Dump FECS status */
@@ -4671,7 +4671,7 @@ static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
 	return true;
 
 invalid_cmd:
-	nvgpu_err(g, "invalid pmu cmd :\n"
+	nvgpu_err(g, "invalid pmu cmd :"
 		"queue_id=%d,\n"
 		"cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
 		"payload in=%p, in_size=%d, in_offset=%d,\n"
@@ -4756,7 +4756,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
 	err = nvgpu_dma_alloc_map_sys(vm, size, mem);
 	if (err) {
-		nvgpu_err(g, "failed to allocate memory\n");
+		nvgpu_err(g, "failed to allocate memory");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index c4b357b1..9919fc3d 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -636,7 +636,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
 
 	/* support only 24-bit 4-byte aligned offsets */
 	if (offset & 0xFF000003) {
-		nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
+		nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset);
 		op->status |= REGOP(STATUS_INVALID_OFFSET);
 		return -EINVAL;
 	}
@@ -674,7 +674,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
 	}
 
 	if (!valid) {
-		nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
+		nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset);
 		op->status |= REGOP(STATUS_INVALID_OFFSET);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
index 122e66f9..ecf829b7 100644
--- a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
@@ -78,14 +78,14 @@ static int alloc_and_kmap_iopage(struct gk20a *g,
 
 	if (!*page) {
 		err = -ENOMEM;
-		nvgpu_err(g, "couldn't allocate io page\n");
+		nvgpu_err(g, "couldn't allocate io page");
 		goto fail;
 	}
 
 	*kvaddr = kmap(*page);
 	if (!*kvaddr) {
 		err = -ENOMEM;
-		nvgpu_err(g, "couldn't kmap io page\n");
+		nvgpu_err(g, "couldn't kmap io page");
 		goto fail;
 	}
 	*phys = page_to_phys(*page);
@@ -119,7 +119,7 @@ int gk20a_init_sim_support(struct platform_device *pdev)
 
 	if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr &&
 	      g->sim.msg_bfr.kvaddr)) {
-		nvgpu_err(g, "couldn't allocate all sim buffers\n");
+		nvgpu_err(g, "couldn't allocate all sim buffers");
 		goto fail;
 	}
 
@@ -269,7 +269,7 @@ static int rpc_recv_poll(struct gk20a *g)
 		(u64)recv_phys_addr_lo << PAGE_SHIFT;
 
 	if (recv_phys_addr != g->sim.msg_bfr.phys) {
-		nvgpu_err(g, "%s Error in RPC reply\n",
+		nvgpu_err(g, "%s Error in RPC reply",
 			__func__);
 		return -1;
 	}
@@ -296,21 +296,21 @@ static int issue_rpc_and_wait(struct gk20a *g)
 
 	err = rpc_send_message(g);
 	if (err) {
-		nvgpu_err(g, "%s failed rpc_send_message\n",
+		nvgpu_err(g, "%s failed rpc_send_message",
 			__func__);
 		return err;
 	}
 
 	err = rpc_recv_poll(g);
 	if (err) {
-		nvgpu_err(g, "%s failed rpc_recv_poll\n",
+		nvgpu_err(g, "%s failed rpc_recv_poll",
 			__func__);
 		return err;
 	}
 
 	/* Now check if RPC really succeeded */
 	if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
-		nvgpu_err(g, "%s received failed status!\n",
+		nvgpu_err(g, "%s received failed status!",
 			__func__);
 		return -(*sim_msg_hdr(g, sim_msg_result_r()));
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 9cb5b262..1488fbf9 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -95,7 +95,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 		tsg->runlist_id = ch->runlist_id;
 	else if (tsg->runlist_id != ch->runlist_id) {
 		nvgpu_err(tsg->g,
-			"Error: TSG channel should be share same runlist ch[%d] tsg[%d]\n",
+			"Error: TSG channel should be share same runlist ch[%d] tsg[%d]",
 			ch->runlist_id, tsg->runlist_id);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 98d1a34d..815ae638 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -1418,7 +1418,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		err = nvgpu_dma_alloc_flags_sys(g,
 			NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode);
 		if (err) {
-			nvgpu_err(g, "failed to allocate memory\n");
+			nvgpu_err(g, "failed to allocate memory");
 			goto err_done;
 		}
 
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index 2127badb..84c3dfcd 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -225,7 +225,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
 	if (val == 2) {
 		return base * 2;
 	} else if (val != 1) {
-		nvgpu_err(g, "Invalid number of active ltcs: %08x\n", val);
+		nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
 	}
 
 	return base;
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.c b/drivers/gpu/nvgpu/gp106/xve_gp106.c
index fb0e8a8f..4eb96614 100644
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/xve_gp106.c
@@ -529,7 +529,7 @@ static ssize_t xve_link_speed_write(struct file *filp,
 	else if (strncmp(kbuff, "Gen3", check_len) == 0)
 		link_speed = GPU_XVE_SPEED_8P0;
 	else
-		nvgpu_err(g, "%s: Unknown PCIe speed: %s\n",
+		nvgpu_err(g, "%s: Unknown PCIe speed: %s",
 			__func__, kbuff);
 
 	if (!link_speed)
diff --git a/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c b/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c
index 1bd95b40..5a242bb5 100644
--- a/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c
+++ b/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c
@@ -35,7 +35,7 @@ static ssize_t ecc_enable_store(struct device *dev,
 		err = g->ops.pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd
 			(g, ecc_mask);
 		if (err)
-			nvgpu_err(g, "ECC override did not happen\n");
+			nvgpu_err(g, "ECC override did not happen");
 	} else
 		return -EINVAL;
 	return count;
@@ -90,7 +90,7 @@ void gp10b_create_sysfs(struct device *dev)
 	error |= device_create_file(dev, &dev_attr_ecc_enable);
 	error |= device_create_file(dev, &dev_attr_czf_bypass);
 	if (error)
-		nvgpu_err(g, "Failed to create sysfs attributes!\n");
+		nvgpu_err(g, "Failed to create sysfs attributes!");
 }
 
 void gp10b_remove_sysfs(struct device *dev)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 2c85a667..98a8be2f 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1631,7 +1631,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 	ret = gk20a_disable_channel_tsg(g, fault_ch);
 	if (ret) {
 		nvgpu_err(g,
-			"CILP: failed to disable channel/TSG!\n");
+			"CILP: failed to disable channel/TSG!");
 		return ret;
 	}
 
@@ -1833,7 +1833,7 @@ static int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
 		ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
 		if (ret) {
-			nvgpu_err(g, "CILP: error while setting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
 			return ret;
 		}
 
diff --git a/drivers/gpu/nvgpu/therm/thrmpmu.c b/drivers/gpu/nvgpu/therm/thrmpmu.c
index 84e9871a..918d4ad8 100644
--- a/drivers/gpu/nvgpu/therm/thrmpmu.c
+++ b/drivers/gpu/nvgpu/therm/thrmpmu.c
@@ -102,7 +102,7 @@ static u32 therm_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 			&handlerparams->success, 1);
 
 		if (handlerparams->success == 0) {
-			nvgpu_err(g, "could not process cmd\n");
+			nvgpu_err(g, "could not process cmd");
 			status = -ETIMEDOUT;
 			goto exit;
 		}
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 28514386..5fbe7227 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -428,7 +428,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 
 	if (err || msg.ret) {
 		nvgpu_err(g,
-			"preempt channel %d failed\n", hw_chid);
+			"preempt channel %d failed", hw_chid);
 		err = -ENOMEM;
 	}
 
@@ -452,7 +452,7 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	if (err) {
 		nvgpu_err(g,
-			"preempt tsg %u failed\n", tsgid);
+			"preempt tsg %u failed", tsgid);
 	}
 
 	return err;
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index f425b7e5..347f27a5 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -141,7 +141,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 			vm->gmmu_page_sizes[gmmu_page_size_big]) {
 			pgsz_idx = gmmu_page_size_big;
 		} else {
-			nvgpu_err(g, "invalid kernel page size %d\n",
+			nvgpu_err(g, "invalid kernel page size %d",
 				page_size);
 			goto fail;
 		}
@@ -172,7 +172,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 fail:
 	if (handle)
 		tegra_gr_comm_oob_put_ptr(handle);
-	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d", __func__, err);
 	return 0;
 }
 
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index db120d76..287567d6 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -110,7 +110,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 		map_offset = __nvgpu_vm_alloc_va(vm, size,
 					pgsz_idx);
 		if (!map_offset) {
-			nvgpu_err(g, "failed to allocate va space\n");
+			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -138,7 +138,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 			vm->gmmu_page_sizes[gmmu_page_size_big]) {
 			pgsz_idx = gmmu_page_size_big;
 		} else {
-			nvgpu_err(g, "invalid kernel page size %d\n",
+			nvgpu_err(g, "invalid kernel page size %d",
 				page_size);
 			goto fail;
 		}
@@ -160,7 +160,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 
 	return map_offset;
 fail:
-	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d", __func__, err);
 	return 0;
 }
 