path: root/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
author     Terje Bergstrom <tbergstrom@nvidia.com>    2017-04-06 16:10:30 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-04-10 15:24:27 -0400
commit     5405070ecd27ce462babc1dff231fec5cd8bd6b7 (patch)
tree       903461959633aec359b5bb3f4f660c5dcb6bdbcf /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent     3a1104c3699b05201abf48ed9283bb8ccbe42732 (diff)
gpu: nvgpu: vgpu: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take a struct gk20a pointer instead. Convert the
code to use the more portable macros.

JIRA NVGPU-16

Change-Id: I071e8c50959bfa81730ca964d912bc69f9c7e6ad
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457355
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
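As a minimal sketch of the conversion pattern (the error call site below is taken from this diff; the warning variant is shown only for illustration, since this file does not touch gk20a_warn()):

    /* Before: the old helper requires an OS-specific struct device pointer. */
    gk20a_err(dev_from_gk20a(g), "fail to alloc gr_ctx");

    /* After: the new macro takes the struct gk20a pointer directly. */
    nvgpu_err(g, "fail to alloc gr_ctx");

    /* The warning helpers follow the same pattern:
     * gk20a_warn(dev_from_gk20a(g), ...) becomes nvgpu_warn(g, ...).
     */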
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c  41
1 file changed, 16 insertions, 25 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 612e50e7..102adae3 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -294,7 +294,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
         err = err ? err : msg.ret;
 
         if (unlikely(err)) {
-                gk20a_err(dev_from_gk20a(g), "fail to alloc gr_ctx");
+                nvgpu_err(g, "fail to alloc gr_ctx");
                 gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
                         gr_ctx->mem.size, gmmu_page_size_kernel);
                 nvgpu_kfree(g, gr_ctx);
@@ -485,15 +485,13 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 
         /* an address space needs to have been bound at this point.*/
         if (!gk20a_channel_as_bound(c)) {
-                gk20a_err(dev_from_gk20a(g),
-                        "not bound to address space at time"
+                nvgpu_err(g, "not bound to address space at time"
                         " of grctx allocation");
                 return -EINVAL;
         }
 
         if (!g->ops.gr.is_valid_class(g, args->class_num)) {
-                gk20a_err(dev_from_gk20a(g),
-                        "invalid obj class 0x%x", args->class_num);
+                nvgpu_err(g, "invalid obj class 0x%x", args->class_num);
                 err = -EINVAL;
                 goto out;
         }
@@ -512,15 +510,14 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
                 if (!err)
                         err = vgpu_gr_ch_bind_gr_ctx(c);
                 if (err) {
-                        gk20a_err(dev_from_gk20a(g),
-                                "fail to allocate gr ctx buffer");
+                        nvgpu_err(g, "fail to allocate gr ctx buffer");
                         goto out;
                 }
         } else {
                 /*TBD: needs to be more subtle about which is
                  * being allocated as some are allowed to be
                  * allocated along same channel */
-                gk20a_err(dev_from_gk20a(g),
+                nvgpu_err(g,
                         "too many classes alloc'd on same channel");
                 err = -EINVAL;
                 goto out;
@@ -536,7 +533,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
                 if (!err)
                         err = vgpu_gr_tsg_bind_gr_ctx(tsg);
                 if (err) {
-                        gk20a_err(dev_from_gk20a(g),
+                        nvgpu_err(g,
                                 "fail to allocate TSG gr ctx buffer, err=%d", err);
                         gk20a_vm_put(tsg->vm);
                         tsg->vm = NULL;
@@ -547,8 +544,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
                 ch_ctx->gr_ctx = tsg->tsg_gr_ctx;
                 err = vgpu_gr_ch_bind_gr_ctx(c);
                 if (err) {
-                        gk20a_err(dev_from_gk20a(g),
-                                "fail to bind gr ctx buffer");
+                        nvgpu_err(g, "fail to bind gr ctx buffer");
                         goto out;
                 }
         }
@@ -556,8 +552,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
         /* commit gr ctx buffer */
         err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
         if (err) {
-                gk20a_err(dev_from_gk20a(g),
-                        "fail to commit gr ctx buffer");
+                nvgpu_err(g, "fail to commit gr ctx buffer");
                 goto out;
         }
 
@@ -565,8 +560,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
         if (ch_ctx->patch_ctx.mem.pages == NULL) {
                 err = vgpu_gr_alloc_channel_patch_ctx(g, c);
                 if (err) {
-                        gk20a_err(dev_from_gk20a(g),
-                                "fail to allocate patch buffer");
+                        nvgpu_err(g, "fail to allocate patch buffer");
                         goto out;
                 }
         }
@@ -575,8 +569,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
         if (!ch_ctx->global_ctx_buffer_mapped) {
                 err = vgpu_gr_map_global_ctx_buffers(g, c);
                 if (err) {
-                        gk20a_err(dev_from_gk20a(g),
-                                "fail to map global ctx buffer");
+                        nvgpu_err(g, "fail to map global ctx buffer");
                         goto out;
                 }
                 gr_gk20a_elpg_protected_call(g,
@@ -588,8 +581,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
                 err = gr_gk20a_elpg_protected_call(g,
                         vgpu_gr_load_golden_ctx_image(g, c));
                 if (err) {
-                        gk20a_err(dev_from_gk20a(g),
-                                "fail to load golden ctx image");
+                        nvgpu_err(g, "fail to load golden ctx image");
                         goto out;
                 }
                 c->first_init = true;
@@ -602,7 +594,7 @@ out:
            can be reused so no need to release them.
            2. golden image load is a one time thing so if
            they pass, no need to undo. */
-        gk20a_err(dev_from_gk20a(g), "fail");
+        nvgpu_err(g, "fail");
         return err;
 }
 
@@ -651,7 +643,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
         g->ops.gr.init_fs_state(g);
         return 0;
 cleanup:
-        gk20a_err(dev_from_gk20a(g), "%s: out of memory", __func__);
+        nvgpu_err(g, "out of memory");
 
         nvgpu_kfree(g, gr->gpc_tpc_count);
         gr->gpc_tpc_count = NULL;
@@ -905,7 +897,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
         return 0;
 
 clean_up:
-        gk20a_err(dev_from_gk20a(g), "fail");
+        nvgpu_err(g, "fail");
         vgpu_remove_gr_support(gr);
         return err;
 }
@@ -928,8 +920,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 
         if (info->type != TEGRA_VGPU_GR_INTR_NOTIFY &&
                 info->type != TEGRA_VGPU_GR_INTR_SEMAPHORE)
-                gk20a_err(dev_from_gk20a(g), "gr intr (%d) on ch %u",
-                        info->type, info->chid);
+                nvgpu_err(g, "gr intr (%d) on ch %u", info->type, info->chid);
 
         switch (info->type) {
         case TEGRA_VGPU_GR_INTR_NOTIFY:
@@ -1186,7 +1177,7 @@ void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
         struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_states;
 
         if (info->sm_id >= g->gr.no_of_sm) {
-                gk20a_err(g->dev, "invalid smd_id %d / %d",
+                nvgpu_err(g, "invalid smd_id %d / %d",
                         info->sm_id, g->gr.no_of_sm);
                 return;
         }