path: root/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-04-10 14:09:13 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-10 23:24:35 -0400
commit	7fe4b6572ba80dda58d513969b69e22437901077 (patch)
tree	9c1f81710e4e159648fb54beed0f1c98b5811716 /drivers/gpu/nvgpu/gv11b/gr_gv11b.c
parent	1a426c981c4fa2816d969b27163ab2dbc2fa4e89 (diff)
gpu: nvgpu: gv11b: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer, which is not
portable across operating systems. The new nvgpu_err() and nvgpu_warn()
macros take a struct gk20a pointer. Convert code to use the more portable
macros.

JIRA NVGPU-16

Change-Id: I8c0d8944f625e3c5b16a9f5a2a59d95a680f4e55
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1459822
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
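To illustrate the conversion applied throughout this file, here is a minimal sketch of the call-site pattern (the wrapper function below is hypothetical and only for illustration; gk20a_err(), dev_from_gk20a() and nvgpu_err() are the existing nvgpu helpers this change swaps):

static void report_alloc_failure(struct gk20a *g)
{
	/* Before: gk20a_err() needs a struct device *, obtained via
	 * dev_from_gk20a(), which ties the call site to Linux. */
	gk20a_err(dev_from_gk20a(g), "cannot allocate preempt buffer");

	/* After: nvgpu_err() takes the struct gk20a * directly; the
	 * trailing "\n" is also dropped, matching the converted call
	 * sites in the diff below. */
	nvgpu_err(g, "cannot allocate preempt buffer");
}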
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/gr_gv11b.c')
-rw-r--r--	drivers/gpu/nvgpu/gv11b/gr_gv11b.c	63
1 file changed, 25 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index f4e31ec0..5b0526b0 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -22,6 +22,7 @@
 
 #include <nvgpu/timers.h>
 #include <nvgpu/dma.h>
+#include <nvgpu/log.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/gr_gk20a.h"
@@ -234,8 +235,7 @@ static int gr_gv11b_zbc_s_query_table(struct gk20a *g, struct gr_gk20a *gr,
 	u32 index = query_params->index_size;
 
 	if (index >= GK20A_ZBC_TABLE_SIZE) {
-		gk20a_err(dev_from_gk20a(g),
-			"invalid zbc stencil table index\n");
+		nvgpu_err(g, "invalid zbc stencil table index");
 		return -EINVAL;
 	}
 	query_params->depth = gr->zbc_s_tbl[index].stencil;
@@ -332,8 +332,7 @@ static int gr_gv11b_load_stencil_default_tbl(struct gk20a *g,
 	if (!err) {
 		gr->max_default_s_index = 3;
 	} else {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to load default zbc stencil table\n");
+		nvgpu_err(g, "fail to load default zbc stencil table");
 		return err;
 	}
 
@@ -628,8 +627,7 @@ static int gr_gv11b_init_ctx_state(struct gk20a *g)
 		op.mailbox.ret = &g->gr.t18x.ctx_vars.preempt_image_size;
 		err = gr_gk20a_submit_fecs_method_op(g, op, false);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"query preempt image size failed");
+			nvgpu_err(g, "query preempt image size failed");
 			return err;
 		}
 	}
@@ -717,8 +715,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			g->gr.t18x.ctx_vars.preempt_image_size,
 			&(*gr_ctx)->t18x.preempt_ctxsw_buffer);
 	if (err) {
-		gk20a_err(dev_from_gk20a(vm->mm->g),
-			"cannot allocate preempt buffer");
+		nvgpu_err(vm->mm->g, "cannot allocate preempt buffer");
 		goto fail_free_gk20a_ctx;
 	}
 
@@ -726,8 +723,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			spill_size,
 			&(*gr_ctx)->t18x.spill_ctxsw_buffer);
 	if (err) {
-		gk20a_err(dev_from_gk20a(vm->mm->g),
-			"cannot allocate spill buffer");
+		nvgpu_err(vm->mm->g, "cannot allocate spill buffer");
 		goto fail_free_preempt;
 	}
 
@@ -735,8 +731,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			attrib_cb_size,
 			&(*gr_ctx)->t18x.betacb_ctxsw_buffer);
 	if (err) {
-		gk20a_err(dev_from_gk20a(vm->mm->g),
-			"cannot allocate beta buffer");
+		nvgpu_err(vm->mm->g, "cannot allocate beta buffer");
 		goto fail_free_spill;
 	}
 
@@ -744,8 +739,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			pagepool_size,
 			&(*gr_ctx)->t18x.pagepool_ctxsw_buffer);
 	if (err) {
-		gk20a_err(dev_from_gk20a(vm->mm->g),
-			"cannot allocate page pool");
+		nvgpu_err(vm->mm->g, "cannot allocate page pool");
 		goto fail_free_betacb;
 	}
 
@@ -785,29 +779,28 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
 		WARN_ON("Cannot map context");
 		return;
 	}
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_magic_value_o()),
 		ctxsw_prog_main_image_magic_value_v_value_v());
 
 
-	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_wfi_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_cta_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_gfxp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_cilp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image gfx preemption option (GFXP is 1) %x\n",
+	nvgpu_err(g, "image gfx preemption option (GFXP is 1) %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o()));
 	nvgpu_mem_end(g, mem);
@@ -868,8 +861,7 @@ static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"can't map patch context");
+		nvgpu_err(g, "can't map patch context");
 		goto out;
 	}
 
@@ -1118,7 +1110,7 @@ static int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
 		ctxsw_active, gr_busy, activity0, activity1, activity2, activity4);
 
@@ -1272,15 +1264,13 @@ static int gr_gv11b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 
 	ret = gk20a_disable_channel_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to disable channel/TSG!\n");
+		nvgpu_err(g, "CILP: failed to disable channel/TSG!");
 		return ret;
 	}
 
 	ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to restart runlist 0!");
+		nvgpu_err(g, "CILP: failed to restart runlist 0!");
 		return ret;
 	}
 
@@ -1319,7 +1309,7 @@ static int gr_gv11b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
1319 "CILP: looking up ctx id"); 1309 "CILP: looking up ctx id");
1320 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id); 1310 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id);
1321 if (ret) { 1311 if (ret) {
1322 gk20a_err(dev_from_gk20a(g), "CILP: error looking up ctx id!\n"); 1312 nvgpu_err(g, "CILP: error looking up ctx id!");
1323 return ret; 1313 return ret;
1324 } 1314 }
1325 gr_ctx->t18x.ctx_id_valid = true; 1315 gr_ctx->t18x.ctx_id_valid = true;
@@ -1343,8 +1333,7 @@ static int gr_gv11b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 				.cond.fail = GR_IS_UCODE_OP_SKIP});
 
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to enable ctxsw interrupt!");
+		nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!");
 		return ret;
 	}
 
@@ -1357,8 +1346,7 @@ static int gr_gv11b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 
 	ret = gr_gv11b_disable_channel_or_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to disable channel!!");
+		nvgpu_err(g, "CILP: failed to disable channel!!");
 		return ret;
 	}
 
@@ -1472,7 +1460,7 @@ static int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
 		ret = gr_gv11b_set_cilp_preempt_pending(g, fault_ch);
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while setting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
 			return ret;
 		}
 
@@ -1562,7 +1550,7 @@ static int gr_gv11b_handle_fecs_error(struct gk20a *g,
 		/* set preempt_pending to false */
 		ret = gr_gv11b_clear_cilp_preempt_pending(g, ch);
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while unsetting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
 			gk20a_channel_put(ch);
 			goto clean_up;
 		}
@@ -2002,8 +1990,7 @@ void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
 				therm_gate_ctrl_eng_clk_auto_f());
 		break;
 	default:
-		gk20a_err(dev_from_gk20a(g),
-			"invalid elcg mode %d", mode);
+		nvgpu_err(g, "invalid elcg mode %d", mode);
 	}
 
 	gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);