author    Terje Bergstrom <tbergstrom@nvidia.com>  2017-04-06 13:55:48 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-10 22:04:20 -0400
commit    57d624f900896a257e2e918e93e99a14f734aea5 (patch)
tree      7c2f76516baad4d1d9acec583817fe4beee63bb2 /drivers/gpu/nvgpu/gp10b/gr_gp10b.c
parent    3ba374a5d94f8c2067731155afaf79f03e6c390c (diff)
gpu: nvgpu: gp10b: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer, which is not
portable across operating systems. The new nvgpu_err() and nvgpu_warn()
macros take a struct gk20a pointer instead. Convert the code to use the
more portable macros.

JIRA NVGPU-16

Change-Id: I8dc0ddf3b6ea38af6300c27558b60786c163da6d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457344
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
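For illustration, a minimal sketch of the conversion pattern this change applies
throughout the file; the message text is taken from one of the hunks below, and
the behavior notes are inferred from the diff rather than from the macro
definitions themselves:

    /* Before: gk20a_err() needs a struct device pointer, so every call
     * site depends on the Linux device model via dev_from_gk20a(). */
    gk20a_err(dev_from_gk20a(g), "cannot allocate preempt buffer");

    /* After: nvgpu_err() takes the OS-agnostic struct gk20a pointer
     * directly, removing the struct device dependency at the call site. */
    nvgpu_err(g, "cannot allocate preempt buffer");

Most converted format strings also drop their trailing "\n" (see the
dump_ctx_switch_stats() hunk), which suggests nvgpu_err() terminates the
line itself.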
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/gr_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c | 79
1 file changed, 33 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index c1cb1376..708d25d0 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -818,8 +818,7 @@ static int gr_gp10b_init_ctx_state(struct gk20a *g)
 		op.mailbox.ret = &g->gr.t18x.ctx_vars.preempt_image_size;
 		err = gr_gk20a_submit_fecs_method_op(g, op, false);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-					"query preempt image size failed");
+			nvgpu_err(g, "query preempt image size failed");
 			return err;
 		}
 	}
@@ -921,8 +920,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 				g->gr.t18x.ctx_vars.preempt_image_size,
 				&gr_ctx->t18x.preempt_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate preempt buffer");
+			nvgpu_err(g, "cannot allocate preempt buffer");
 			goto fail;
 		}
 
@@ -930,8 +928,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 				spill_size,
 				&gr_ctx->t18x.spill_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate spill buffer");
+			nvgpu_err(g, "cannot allocate spill buffer");
 			goto fail_free_preempt;
 		}
 
@@ -939,8 +936,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 				attrib_cb_size,
 				&gr_ctx->t18x.betacb_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate beta buffer");
+			nvgpu_err(g, "cannot allocate beta buffer");
 			goto fail_free_spill;
 		}
 
@@ -948,8 +944,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 				pagepool_size,
 				&gr_ctx->t18x.pagepool_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate page pool");
+			nvgpu_err(g, "cannot allocate page pool");
 			goto fail_free_betacb;
 		}
 
@@ -1016,8 +1011,7 @@ static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		err = g->ops.gr.set_ctxsw_preemption_mode(g, *gr_ctx, vm,
 				class, graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 			goto fail_free_gk20a_ctx;
 		}
 	} else
@@ -1044,44 +1038,44 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
 		WARN_ON("Cannot map context");
 		return;
 	}
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_magic_value_o()),
 		ctxsw_prog_main_image_magic_value_v_value_v());
 
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
 
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
 
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_control : %x",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
 
-	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_wfi_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_cta_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_gfxp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_num_cilp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image gfx preemption option (GFXP is 1) %x\n",
+	nvgpu_err(g,
+		"image gfx preemption option (GFXP is 1) %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image compute preemption option (CTA is 1) %x\n",
+	nvgpu_err(g,
+		"image compute preemption option (CTA is 1) %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o()));
 	nvgpu_mem_end(g, mem);
@@ -1154,8 +1148,7 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-				"can't map patch context");
+		nvgpu_err(g, "can't map patch context");
 		goto out;
 	}
 
@@ -1403,7 +1396,7 @@ static int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
 		ctxsw_active, gr_busy, activity0, activity1, activity2, activity4);
 
@@ -1617,14 +1610,14 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 
 	ret = gk20a_disable_channel_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"CILP: failed to disable channel/TSG!\n");
 		return ret;
 	}
 
 	ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"CILP: failed to restart runlist 0!");
 		return ret;
 	}
@@ -1664,7 +1657,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
1664 "CILP: looking up ctx id"); 1657 "CILP: looking up ctx id");
1665 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id); 1658 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id);
1666 if (ret) { 1659 if (ret) {
1667 gk20a_err(dev_from_gk20a(g), "CILP: error looking up ctx id!\n"); 1660 nvgpu_err(g, "CILP: error looking up ctx id!");
1668 return ret; 1661 return ret;
1669 } 1662 }
1670 gr_ctx->t18x.ctx_id_valid = true; 1663 gr_ctx->t18x.ctx_id_valid = true;
@@ -1688,8 +1681,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 				.cond.fail = GR_IS_UCODE_OP_SKIP});
 
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to enable ctxsw interrupt!");
+		nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!");
 		return ret;
 	}
 
@@ -1702,8 +1694,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 
 	ret = gr_gp10b_disable_channel_or_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to disable channel!!");
+		nvgpu_err(g, "CILP: failed to disable channel!!");
 		return ret;
 	}
 
@@ -1822,7 +1813,7 @@ static int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
 		ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while setting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while setting CILP preempt pending!\n");
 			return ret;
 		}
 
@@ -1912,7 +1903,7 @@ static int gr_gp10b_handle_fecs_error(struct gk20a *g,
 			/* set preempt_pending to false */
 			ret = gr_gp10b_clear_cilp_preempt_pending(g, ch);
 			if (ret) {
-				gk20a_err(dev_from_gk20a(g), "CILP: error while unsetting CILP preempt pending!\n");
+				nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
 				gk20a_channel_put(ch);
 				goto clean_up;
 			}
@@ -1976,8 +1967,7 @@ static bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
 	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
 		err = gr_gp10b_set_cilp_preempt_pending(g, ch);
 		if (err)
-			gk20a_err(dev_from_gk20a(g),
-				"unable to set CILP preempt pending\n");
+			nvgpu_err(g, "unable to set CILP preempt pending");
 		else
 			*cilp_preempt_pending = true;
 
@@ -2009,7 +1999,7 @@ static int gr_gp10b_suspend_contexts(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		nvgpu_mutex_release(&g->dbg_sessions_lock);
 		goto clean_up;
 	}
@@ -2159,8 +2149,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
 				graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 			return err;
 		}
 	}
@@ -2181,8 +2170,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 
 	err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"can't map patch context");
+		nvgpu_err(g, "can't map patch context");
 		goto enable_ch;
 	}
 	g->ops.gr.commit_global_cb_manager(g, ch, true);
@@ -2245,8 +2233,7 @@ static int gp10b_gr_fuse_override(struct gk20a *g)
 			g->gr.t18x.fecs_feature_override_ecc_val = value;
 			break;
 		default:
-			gk20a_err(dev_from_gk20a(g),
-				"ignore unknown fuse override %08x", fuse);
+			nvgpu_err(g, "ignore unknown fuse override %08x", fuse);
 			break;
 		}
 	}