-rw-r--r--  drivers/gpu/nvgpu/vgpu/css_vgpu.c              4
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c            15
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c   7
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c   7
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c              41
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c              11
-rw-r--r--  drivers/gpu/nvgpu/vgpu/tsg_vgpu.c              4
-rw-r--r--  drivers/gpu/nvgpu/vgpu/vgpu.c                 26
8 files changed, 51 insertions, 64 deletions
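
The whole patch is a mechanical conversion: error prints that previously needed a struct device pointer (gk20a_err(dev_from_gk20a(g), ...), gk20a_err(g->dev, ...), gk20a_err(dev, ...)) now call nvgpu_err() with the struct gk20a pointer directly, and several hand-written "%s"/__func__ prefixes are dropped, which suggests the new macro tags messages with the calling function on its own. A minimal user-space sketch of that idea follows; the stand-in macro and demo function are illustrative only, not the real nvgpu header:

#include <stdio.h>

struct gk20a { const char *name; };

/* Stand-in for nvgpu_err(): prefixes the message with the device name and
 * the calling function, so call sites no longer pass __func__ by hand. */
#define nvgpu_err(g, fmt, ...) \
	fprintf(stderr, "%s: %s: " fmt "\n", (g)->name, __func__, ##__VA_ARGS__)

static int vgpu_demo_op(struct gk20a *g, int err)
{
	if (err)
		nvgpu_err(g, "failed, err=%d", err); /* no explicit __func__ needed */
	return err;
}

int main(void)
{
	struct gk20a g = { .name = "gpu.0" };

	return vgpu_demo_op(&g, -12) ? 1 : 0;
}
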
diff --git a/drivers/gpu/nvgpu/vgpu/css_vgpu.c b/drivers/gpu/nvgpu/vgpu/css_vgpu.c
index 5a80f24d..142d9ce1 100644
--- a/drivers/gpu/nvgpu/vgpu/css_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/css_vgpu.c
@@ -157,7 +157,7 @@ static int vgpu_css_attach(struct channel_gk20a *ch,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "%s failed", __func__);
+		nvgpu_err(g, "failed");
 	else
 		cs_client->perfmon_start = p->perfmon_start;
 
@@ -185,7 +185,7 @@ static int vgpu_css_detach(struct channel_gk20a *ch,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "%s failed", __func__);
+		nvgpu_err(g, "failed");
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index e2883f7c..e775abbb 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -78,7 +78,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	p->pid = (u64)current->tgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
-		gk20a_err(dev_from_gk20a(g), "fail");
+		nvgpu_err(g, "fail");
 		return -ENOMEM;
 	}
 
@@ -365,21 +365,20 @@ static int vgpu_init_fifo_setup_hw(struct gk20a *g)
 	smp_mb();
 
 	if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
-		gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+		nvgpu_err(g, "bar1 broken @ gk20a!");
 		return -EINVAL;
 	}
 
 	gk20a_bar1_writel(g, bar1_vaddr, v2);
 
 	if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
-		gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+		nvgpu_err(g, "bar1 broken @ gk20a!");
 		return -EINVAL;
 	}
 
 	/* is it visible to the cpu? */
 	if (*cpu_vaddr != v2) {
-		gk20a_err(dev_from_gk20a(g),
-			"cpu didn't see bar1 write @ %p!",
-			cpu_vaddr);
+		nvgpu_err(g, "cpu didn't see bar1 write @ %p!",
+			cpu_vaddr);
 	}
 
@@ -426,7 +425,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
 	if (err || msg.ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"preempt channel %d failed\n", hw_chid);
 		err = -ENOMEM;
 	}
@@ -450,7 +449,7 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	err = err ? err : msg.ret;
 
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"preempt tsg %u failed\n", tsgid);
 	}
 
@@ -722,7 +721,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 	if (!ch)
 		return 0;
 
-	gk20a_err(dev_from_gk20a(g), "fifo intr (%d) on ch %u",
+	nvgpu_err(g, "fifo intr (%d) on ch %u",
 		info->type, info->chid);
 
 	trace_gk20a_channel_reset(ch->hw_chid, ch->tsgid);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 1a5811fe..cc9c46bf 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -88,7 +88,7 @@ static int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
 			class, graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"set_ctxsw_preemption_mode failed");
 			goto fail;
 		}
@@ -254,7 +254,7 @@ static int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 	return err;
 
 fail:
-	gk20a_err(dev_from_gk20a(g), "%s failed %d", __func__, err);
+	nvgpu_err(g, "%s failed %d", __func__, err);
 	return err;
 }
 
@@ -297,8 +297,7 @@ static int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 				graphics_preempt_mode,
 				compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 			return err;
 		}
 	} else {
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 2da18fb8..cfda867c 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -54,7 +54,6 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 		enum nvgpu_aperture aperture)
 {
 	int err = 0;
-	struct device *d = dev_from_vm(vm);
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
@@ -82,7 +81,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	if (!map_offset) {
 		map_offset = gk20a_vm_alloc_va(vm, size, pgsz_idx);
 		if (!map_offset) {
-			gk20a_err(d, "failed to allocate va space");
+			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -140,7 +139,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 			vm->gmmu_page_sizes[gmmu_page_size_big]) {
 			pgsz_idx = gmmu_page_size_big;
 		} else {
-			gk20a_err(d, "invalid kernel page size %d\n",
+			nvgpu_err(g, "invalid kernel page size %d\n",
 				page_size);
 			goto fail;
 		}
@@ -171,7 +170,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 fail:
 	if (handle)
 		tegra_gr_comm_oob_put_ptr(handle);
-	gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
 }
 
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 612e50e7..102adae3 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -294,7 +294,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	err = err ? err : msg.ret;
 
 	if (unlikely(err)) {
-		gk20a_err(dev_from_gk20a(g), "fail to alloc gr_ctx");
+		nvgpu_err(g, "fail to alloc gr_ctx");
 		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
 			gr_ctx->mem.size, gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
@@ -485,15 +485,13 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 
 	/* an address space needs to have been bound at this point.*/
 	if (!gk20a_channel_as_bound(c)) {
-		gk20a_err(dev_from_gk20a(g),
-			"not bound to address space at time"
-			" of grctx allocation");
+		nvgpu_err(g, "not bound to address space at time"
+			" of grctx allocation");
 		return -EINVAL;
 	}
 
 	if (!g->ops.gr.is_valid_class(g, args->class_num)) {
-		gk20a_err(dev_from_gk20a(g),
-			"invalid obj class 0x%x", args->class_num);
+		nvgpu_err(g, "invalid obj class 0x%x", args->class_num);
 		err = -EINVAL;
 		goto out;
 	}
@@ -512,15 +510,14 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 		if (!err)
 			err = vgpu_gr_ch_bind_gr_ctx(c);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to allocate gr ctx buffer");
+			nvgpu_err(g, "fail to allocate gr ctx buffer");
 			goto out;
 		}
 	} else {
 		/*TBD: needs to be more subtle about which is
 		 * being allocated as some are allowed to be
 		 * allocated along same channel */
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"too many classes alloc'd on same channel");
 		err = -EINVAL;
 		goto out;
@@ -536,7 +533,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 			if (!err)
 				err = vgpu_gr_tsg_bind_gr_ctx(tsg);
 			if (err) {
-				gk20a_err(dev_from_gk20a(g),
+				nvgpu_err(g,
 					"fail to allocate TSG gr ctx buffer, err=%d", err);
 				gk20a_vm_put(tsg->vm);
 				tsg->vm = NULL;
@@ -547,8 +544,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 		ch_ctx->gr_ctx = tsg->tsg_gr_ctx;
 		err = vgpu_gr_ch_bind_gr_ctx(c);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to bind gr ctx buffer");
+			nvgpu_err(g, "fail to bind gr ctx buffer");
 			goto out;
 		}
 	}
@@ -556,8 +552,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	/* commit gr ctx buffer */
 	err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to commit gr ctx buffer");
+		nvgpu_err(g, "fail to commit gr ctx buffer");
 		goto out;
 	}
 
@@ -565,8 +560,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	if (ch_ctx->patch_ctx.mem.pages == NULL) {
 		err = vgpu_gr_alloc_channel_patch_ctx(g, c);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to allocate patch buffer");
+			nvgpu_err(g, "fail to allocate patch buffer");
 			goto out;
 		}
 	}
@@ -575,8 +569,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	if (!ch_ctx->global_ctx_buffer_mapped) {
 		err = vgpu_gr_map_global_ctx_buffers(g, c);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to map global ctx buffer");
+			nvgpu_err(g, "fail to map global ctx buffer");
 			goto out;
 		}
 		gr_gk20a_elpg_protected_call(g,
@@ -588,8 +581,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 		err = gr_gk20a_elpg_protected_call(g,
 				vgpu_gr_load_golden_ctx_image(g, c));
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to load golden ctx image");
+			nvgpu_err(g, "fail to load golden ctx image");
 			goto out;
 		}
 		c->first_init = true;
@@ -602,7 +594,7 @@ out:
 	   can be reused so no need to release them.
 	   2. golden image load is a one time thing so if
 	   they pass, no need to undo. */
-	gk20a_err(dev_from_gk20a(g), "fail");
+	nvgpu_err(g, "fail");
 	return err;
 }
 
@@ -651,7 +643,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	g->ops.gr.init_fs_state(g);
 	return 0;
 cleanup:
-	gk20a_err(dev_from_gk20a(g), "%s: out of memory", __func__);
+	nvgpu_err(g, "out of memory");
 
 	nvgpu_kfree(g, gr->gpc_tpc_count);
 	gr->gpc_tpc_count = NULL;
@@ -905,7 +897,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	return 0;
 
 clean_up:
-	gk20a_err(dev_from_gk20a(g), "fail");
+	nvgpu_err(g, "fail");
 	vgpu_remove_gr_support(gr);
 	return err;
 }
@@ -928,8 +920,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 
 	if (info->type != TEGRA_VGPU_GR_INTR_NOTIFY &&
 		info->type != TEGRA_VGPU_GR_INTR_SEMAPHORE)
-		gk20a_err(dev_from_gk20a(g), "gr intr (%d) on ch %u",
-			info->type, info->chid);
+		nvgpu_err(g, "gr intr (%d) on ch %u", info->type, info->chid);
 
 	switch (info->type) {
 	case TEGRA_VGPU_GR_INTR_NOTIFY:
@@ -1186,7 +1177,7 @@ void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
 	struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_states;
 
 	if (info->sm_id >= g->gr.no_of_sm) {
-		gk20a_err(g->dev, "invalid smd_id %d / %d",
+		nvgpu_err(g, "invalid smd_id %d / %d",
 			info->sm_id, g->gr.no_of_sm);
 		return;
 	}
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index b12f8a53..3c139df5 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -105,7 +105,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 		map_offset = gk20a_vm_alloc_va(vm, size,
 					pgsz_idx);
 		if (!map_offset) {
-			gk20a_err(d, "failed to allocate va space\n");
+			nvgpu_err(g, "failed to allocate va space\n");
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -133,7 +133,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 			vm->gmmu_page_sizes[gmmu_page_size_big]) {
 			pgsz_idx = gmmu_page_size_big;
 		} else {
-			gk20a_err(d, "invalid kernel page size %d\n",
+			nvgpu_err(g, "invalid kernel page size %d\n",
 				page_size);
 			goto fail;
 		}
@@ -155,7 +155,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 
 	return map_offset;
 fail:
-	gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
 }
 
@@ -294,7 +294,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	gk20a_dbg_fn("");
 
 	if (userspace_managed) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"userspace-managed address spaces not yet supported");
 		return -ENOSYS;
 	}
@@ -506,8 +506,7 @@ static void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 {
 	gk20a_dbg_fn("");
 
-	gk20a_err(g->dev, "%s: call to RM server not supported",
-		__func__);
+	nvgpu_err(g, "call to RM server not supported");
 }
 
 static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index e668d1ed..8a0276f7 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -38,7 +38,7 @@ static int vgpu_tsg_open(struct tsg_gk20a *tsg)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err) {
-		gk20a_err(dev_from_gk20a(tsg->g),
+		nvgpu_err(tsg->g,
 			"vgpu_tsg_open failed, tsgid %d", tsg->tsgid);
 	}
 
@@ -66,7 +66,7 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err) {
-		gk20a_err(dev_from_gk20a(tsg->g),
+		nvgpu_err(tsg->g,
 			"vgpu_tsg_bind_channel failed, ch %d tsgid %d",
 			ch->hw_chid, tsg->tsgid);
 		gk20a_tsg_unbind_channel(ch);
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index b32df08d..4cb7c52e 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -106,7 +106,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 {
 	if (info->id >= g->fifo.num_channels ||
 		info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
-		gk20a_err(g->dev, "invalid channel event");
+		nvgpu_err(g, "invalid channel event");
 		return;
 	}
 
@@ -118,7 +118,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 		struct channel_gk20a *ch = &g->fifo.channel[info->id];
 
 		if (!gk20a_channel_get(ch)) {
-			gk20a_err(g->dev, "invalid channel %d for event %d",
+			nvgpu_err(g, "invalid channel %d for event %d",
 				(int)info->id, (int)info->event_id);
 			return;
 		}
@@ -179,7 +179,7 @@ static int vgpu_intr_thread(void *dev_id)
 			vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
 			break;
 		default:
-			gk20a_err(g->dev, "unknown event %u", msg->event);
+			nvgpu_err(g, "unknown event %u", msg->event);
 			break;
 		}
 
@@ -349,8 +349,7 @@ static int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 	if (!err)
 		*value = p->time;
 	else
-		gk20a_err(dev_from_gk20a(g),
-			"vgpu read ptimer failed, err=%d", err);
+		nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);
 
 	return err;
 }
@@ -393,7 +392,7 @@ static int vgpu_init_hal(struct gk20a *g)
 		err = vgpu_gp10b_init_hal(g);
 		break;
 	default:
-		gk20a_err(g->dev, "no support for %x", ver);
+		nvgpu_err(g, "no support for %x", ver);
 		err = -ENODEV;
 		break;
 	}
@@ -423,25 +422,25 @@ int vgpu_pm_finalize_poweron(struct device *dev)
 
 	err = vgpu_init_mm_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a mm");
+		nvgpu_err(g, "failed to init gk20a mm");
 		goto done;
 	}
 
 	err = vgpu_init_fifo_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a fifo");
+		nvgpu_err(g, "failed to init gk20a fifo");
 		goto done;
 	}
 
 	err = vgpu_init_gr_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a gr");
+		nvgpu_err(g, "failed to init gk20a gr");
 		goto done;
 	}
 
 	err = g->ops.chip_init_gpu_characteristics(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a gpu characteristics");
+		nvgpu_err(g, "failed to init gk20a gpu characteristics");
 		goto done;
 	}
 
@@ -459,6 +458,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	struct gk20a_scale_profile *profile =
 			container_of(nb, struct gk20a_scale_profile,
 					qos_notify_block);
+	struct gk20a *g = get_gk20a(profile->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
 	u32 max_freq;
@@ -474,7 +474,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err)
-		gk20a_err(profile->dev, "%s failed, err=%d", __func__, err);
+		nvgpu_err(g, "%s failed, err=%d", __func__, err);
 
 	return NOTIFY_OK; /* need notify call further */
 }
@@ -536,13 +536,13 @@ static int vgpu_get_constants(struct gk20a *g)
 	err = err ? err : msg.ret;
 
 	if (unlikely(err)) {
-		gk20a_err(g->dev, "%s failed, err=%d", __func__, err);
+		nvgpu_err(g, "%s failed, err=%d", __func__, err);
 		return err;
 	}
 
 	if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
 		p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
-		gk20a_err(g->dev, "gpc_count %d max_tpc_per_gpc %d overflow",
+		nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
 			(int)p->gpc_count, (int)p->max_tpc_per_gpc_count);
 		return -EINVAL;
 	}