author    Terje Bergstrom <tbergstrom@nvidia.com>  2017-04-06 13:55:48 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-10 22:04:20 -0400
commit    57d624f900896a257e2e918e93e99a14f734aea5 (patch)
tree      7c2f76516baad4d1d9acec583817fe4beee63bb2 /drivers/gpu
parent    3ba374a5d94f8c2067731155afaf79f03e6c390c (diff)
gpu: nvgpu: gp10b: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take struct gk20a pointer. Convert code to use the
more portable macros.

JIRA NVGPU-16

Change-Id: I8dc0ddf3b6ea38af6300c27558b60786c163da6d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457344
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
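For reference, a minimal sketch of the calling-convention change this patch
applies throughout gp10b. Illustrative only: logging_example() is a made-up
function, not nvgpu code; the macro signatures shown are the ones exercised
by the diff below.

	#include "gk20a/gk20a.h"
	#include <nvgpu/log.h>

	static void logging_example(struct gk20a *g)
	{
		/* Old style: needed a Linux struct device pointer. */
		/* gk20a_err(dev_from_gk20a(g), "operation failed"); */

		/* New style: takes the OS-agnostic struct gk20a directly. */
		nvgpu_err(g, "operation failed");
		nvgpu_warn(g, "non-fatal condition");
	}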
Diffstat (limited to 'drivers/gpu')
 drivers/gpu/nvgpu/gp10b/cde_gp10b.c        |  4
 drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c |  2
 drivers/gpu/nvgpu/gp10b/fifo_gp10b.c       |  2
 drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c      |  5
 drivers/gpu/nvgpu/gp10b/gr_gp10b.c         | 79
 drivers/gpu/nvgpu/gp10b/hal_gp10b.c        |  2
 drivers/gpu/nvgpu/gp10b/ltc_gp10b.c        | 11
 drivers/gpu/nvgpu/gp10b/pmu_gp10b.c        | 13
 drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c       | 11
 9 files changed, 60 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/cde_gp10b.c b/drivers/gpu/nvgpu/gp10b/cde_gp10b.c
index 5f68de5a..1af5b01c 100644
--- a/drivers/gpu/nvgpu/gp10b/cde_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/cde_gp10b.c
@@ -16,6 +16,8 @@
 #include "gk20a/gk20a.h"
 #include "cde_gp10b.h"
 
+#include <nvgpu/log.h>
+
 enum gp10b_programs {
 	GP10B_PROG_HPASS = 0,
 	GP10B_PROG_HPASS_4K = 1,
@@ -46,7 +48,7 @@ static void gp10b_cde_get_program_numbers(struct gk20a *g,
 	}
 	if (g->mm.bypass_smmu) {
 		if (!g->mm.disable_bigpage) {
-			gk20a_warn(g->dev,
+			nvgpu_warn(g,
 				"when bypass_smmu is 1, disable_bigpage must be 1 too");
 		}
 		hprog |= 1;
diff --git a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c
index d9e7e7bf..af06b8a6 100644
--- a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c
@@ -37,7 +37,7 @@ static int gp10b_fecs_trace_flush(struct gk20a *g)
 	err = gr_gk20a_elpg_protected_call(g,
 			gr_gk20a_submit_fecs_method_op(g, op, false));
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "write timestamp record failed");
+		nvgpu_err(g, "write timestamp record failed");
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index b305b895..f7f62599 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -228,7 +228,7 @@ static void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
 			gk20a_dbg_info("device info: fault_id: %d", *fault_id);
 		}
 	} else
-		gk20a_err(g->dev, "unknown device_info_data %d",
+		nvgpu_err(g, "unknown device_info_data %d",
 			top_device_info_data_type_v(table_entry));
 }
 
diff --git a/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c b/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c
index 5035bb99..d42afb4c 100644
--- a/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c
+++ b/drivers/gpu/nvgpu/gp10b/gp10b_sysfs.c
@@ -32,7 +32,7 @@ static ssize_t ecc_enable_store(struct device *dev,
 		err = g->ops.pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd
 			(g, ecc_mask);
 		if (err)
-			dev_err(dev, "ECC override did not happen\n");
+			nvgpu_err(g, "ECC override did not happen\n");
 	} else
 		return -EINVAL;
 	return count;
@@ -51,11 +51,12 @@ static DEVICE_ATTR(ecc_enable, ROOTRW, ecc_enable_read, ecc_enable_store);
 
 void gp10b_create_sysfs(struct device *dev)
 {
+	struct gk20a *g = get_gk20a(dev);
 	int error = 0;
 
 	error |= device_create_file(dev, &dev_attr_ecc_enable);
 	if (error)
-		dev_err(dev, "Failed to create sysfs attributes!\n");
+		nvgpu_err(g, "Failed to create sysfs attributes!\n");
 }
 
 void gp10b_remove_sysfs(struct device *dev)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index c1cb1376..708d25d0 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -818,8 +818,7 @@ static int gr_gp10b_init_ctx_state(struct gk20a *g)
 		op.mailbox.ret = &g->gr.t18x.ctx_vars.preempt_image_size;
 		err = gr_gk20a_submit_fecs_method_op(g, op, false);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"query preempt image size failed");
+			nvgpu_err(g, "query preempt image size failed");
 			return err;
 		}
 	}
@@ -921,8 +920,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 			g->gr.t18x.ctx_vars.preempt_image_size,
 			&gr_ctx->t18x.preempt_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate preempt buffer");
+			nvgpu_err(g, "cannot allocate preempt buffer");
 			goto fail;
 		}
 
@@ -930,8 +928,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 			spill_size,
 			&gr_ctx->t18x.spill_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate spill buffer");
+			nvgpu_err(g, "cannot allocate spill buffer");
 			goto fail_free_preempt;
 		}
 
@@ -939,8 +936,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 			attrib_cb_size,
 			&gr_ctx->t18x.betacb_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate beta buffer");
+			nvgpu_err(g, "cannot allocate beta buffer");
 			goto fail_free_spill;
 		}
 
@@ -948,8 +944,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 			pagepool_size,
 			&gr_ctx->t18x.pagepool_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate page pool");
+			nvgpu_err(g, "cannot allocate page pool");
 			goto fail_free_betacb;
 		}
 
@@ -1016,8 +1011,7 @@ static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		err = g->ops.gr.set_ctxsw_preemption_mode(g, *gr_ctx, vm,
 			class, graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 			goto fail_free_gk20a_ctx;
 		}
 	} else
@@ -1044,44 +1038,44 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
 		WARN_ON("Cannot map context");
 		return;
 	}
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_magic_value_o()),
 		ctxsw_prog_main_image_magic_value_v_value_v());
 
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
 
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
 
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_control : %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
 
-	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_wfi_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cta_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_gfxp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cilp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image gfx preemption option (GFXP is 1) %x\n",
+	nvgpu_err(g,
+		"image gfx preemption option (GFXP is 1) %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image compute preemption option (CTA is 1) %x\n",
+	nvgpu_err(g,
+		"image compute preemption option (CTA is 1) %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o()));
 	nvgpu_mem_end(g, mem);
@@ -1154,8 +1148,7 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"can't map patch context");
+		nvgpu_err(g, "can't map patch context");
 		goto out;
 	}
 
@@ -1403,7 +1396,7 @@ static int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
 		ctxsw_active, gr_busy, activity0, activity1, activity2, activity4);
 
@@ -1617,14 +1610,14 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 
 	ret = gk20a_disable_channel_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"CILP: failed to disable channel/TSG!\n");
 		return ret;
 	}
 
 	ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"CILP: failed to restart runlist 0!");
 		return ret;
 	}
@@ -1664,7 +1657,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
1664 "CILP: looking up ctx id"); 1657 "CILP: looking up ctx id");
1665 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id); 1658 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id);
1666 if (ret) { 1659 if (ret) {
1667 gk20a_err(dev_from_gk20a(g), "CILP: error looking up ctx id!\n"); 1660 nvgpu_err(g, "CILP: error looking up ctx id!");
1668 return ret; 1661 return ret;
1669 } 1662 }
1670 gr_ctx->t18x.ctx_id_valid = true; 1663 gr_ctx->t18x.ctx_id_valid = true;
@@ -1688,8 +1681,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 			.cond.fail = GR_IS_UCODE_OP_SKIP});
 
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to enable ctxsw interrupt!");
+		nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!");
 		return ret;
 	}
 
@@ -1702,8 +1694,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 
 	ret = gr_gp10b_disable_channel_or_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to disable channel!!");
+		nvgpu_err(g, "CILP: failed to disable channel!!");
 		return ret;
 	}
 
@@ -1822,7 +1813,7 @@ static int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
 		ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while setting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while setting CILP preempt pending!\n");
 			return ret;
 		}
 
@@ -1912,7 +1903,7 @@ static int gr_gp10b_handle_fecs_error(struct gk20a *g,
 		/* set preempt_pending to false */
 		ret = gr_gp10b_clear_cilp_preempt_pending(g, ch);
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while unsetting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
 			gk20a_channel_put(ch);
 			goto clean_up;
 		}
@@ -1976,8 +1967,7 @@ static bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
 	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
 		err = gr_gp10b_set_cilp_preempt_pending(g, ch);
 		if (err)
-			gk20a_err(dev_from_gk20a(g),
-				"unable to set CILP preempt pending\n");
+			nvgpu_err(g, "unable to set CILP preempt pending");
 		else
 			*cilp_preempt_pending = true;
 
@@ -2009,7 +1999,7 @@ static int gr_gp10b_suspend_contexts(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		nvgpu_mutex_release(&g->dbg_sessions_lock);
 		goto clean_up;
 	}
@@ -2159,8 +2149,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
 			graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 			return err;
 		}
 	}
@@ -2181,8 +2170,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 
 	err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"can't map patch context");
+		nvgpu_err(g, "can't map patch context");
 		goto enable_ch;
 	}
 	g->ops.gr.commit_global_cb_manager(g, ch, true);
@@ -2245,8 +2233,7 @@ static int gp10b_gr_fuse_override(struct gk20a *g)
 			g->gr.t18x.fecs_feature_override_ecc_val = value;
 			break;
 		default:
-			gk20a_err(dev_from_gk20a(g),
-				"ignore unknown fuse override %08x", fuse);
+			nvgpu_err(g, "ignore unknown fuse override %08x", fuse);
 			break;
 		}
 	}
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index 95fdccea..ef68c6de 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -178,7 +178,7 @@ static int gp10b_get_litter_value(struct gk20a *g, int value)
 		ret = 0;
 		break;
 	default:
-		gk20a_err(dev_from_gk20a(g), "Missing definition %d", value);
+		nvgpu_err(g, "Missing definition %d", value);
 		BUG();
 		break;
 	}
diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
index e1aa34a9..42bfbf29 100644
--- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
@@ -18,6 +18,8 @@
 #include "gk20a/gk20a.h"
 #include "gm20b/ltc_gm20b.h"
 
+#include <nvgpu/log.h>
+
 #include <nvgpu/hw/gp10b/hw_mc_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_ltc_gp10b.h>
 
@@ -128,8 +130,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
 	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
 
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
-	gk20a_err(dev_from_gk20a(g), "mc_ltc_intr: %08x",
-		mc_intr);
+	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
 		if ((mc_intr & 1 << ltc) == 0)
 			continue;
@@ -142,7 +143,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
 			ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) {
 			u32 ecc_stats_reg_val;
 
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"Single bit error detected in GPU L2!");
 
 			ecc_stats_reg_val =
@@ -162,7 +163,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
 			ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) {
 			u32 ecc_stats_reg_val;
 
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"Double bit error detected in GPU L2!");
 
 			ecc_stats_reg_val =
@@ -177,7 +178,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
 				ecc_stats_reg_val);
 		}
 
-		gk20a_err(dev_from_gk20a(g), "ltc%d, slice %d: %08x",
+		nvgpu_err(g, "ltc%d, slice %d: %08x",
 			ltc, slice, ltc_intr);
 		gk20a_writel(g, ltc_ltc0_lts0_intr_r() +
 			ltc_stride * ltc + lts_stride * slice,
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index 15b12c74..2d9882c9 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -24,6 +24,8 @@
 #include "pmu_gp10b.h"
 #include "gp10b_sysfs.h"
 
+#include <nvgpu/log.h>
+
 #include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>
 
@@ -192,8 +194,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 			&g->ops.pmu.lspmuwprinitdone, 1);
 		/* check again if it still not ready indicate an error */
 		if (!g->ops.pmu.lspmuwprinitdone) {
-			gk20a_err(dev_from_gk20a(g),
-				"PMU not ready to load LSF");
+			nvgpu_err(g, "PMU not ready to load LSF");
 			return -ETIMEDOUT;
 		}
 	}
@@ -213,7 +214,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
 	gk20a_dbg_fn("");
 
 	if (status != 0) {
-		gk20a_err(dev_from_gk20a(g), "GR PARAM cmd aborted");
+		nvgpu_err(g, "GR PARAM cmd aborted");
 		/* TBD: disable ELPG */
 		return;
 	}
@@ -378,12 +379,12 @@ static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
 {
 	u32 val;
 
-	gk20a_err(dev_from_gk20a(g), "FUSE_OPT_SEC_DEBUG_EN_0 : 0x%x",
+	nvgpu_err(g, "FUSE_OPT_SEC_DEBUG_EN_0 : 0x%x",
 		gk20a_readl(g, fuse_opt_sec_debug_en_r()));
-	gk20a_err(dev_from_gk20a(g), "FUSE_OPT_PRIV_SEC_EN_0 : 0x%x",
+	nvgpu_err(g, "FUSE_OPT_PRIV_SEC_EN_0 : 0x%x",
 		gk20a_readl(g, fuse_opt_priv_sec_en_r()));
 	tegra_fuse_readl(FUSE_GCPLEX_CONFIG_FUSE_0, &val);
-	gk20a_err(dev_from_gk20a(g), "FUSE_GCPLEX_CONFIG_FUSE_0 : 0x%x",
+	nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0 : 0x%x",
 		val);
 }
 
diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
index e8f3d930..77c6853c 100644
--- a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
@@ -42,8 +42,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
 		err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
 				&g->mm.bar2_desc);
 		if (err) {
-			dev_err(dev_from_gk20a(g),
-				"%s Error in replayable fault buffer\n", __func__);
+			nvgpu_err(g, "Error in replayable fault buffer");
 			return err;
 		}
 	}
@@ -75,8 +74,8 @@ u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
 	get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r());
 
 	if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
-		dev_err(dev_from_gk20a(g), "%s Error in replayable fault buffer\n",
-			__func__);
+		nvgpu_err(g, "Error in replayable fault buffer");
+
 	gk20a_dbg_fn("done");
 	return get_idx;
 }
@@ -89,8 +88,8 @@ u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g)
 	put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r());
 
 	if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
-		dev_err(dev_from_gk20a(g), "%s Error in UVM\n",
-			__func__);
+		nvgpu_err(g, "Error in UVM");
+
 	gk20a_dbg_fn("done");
 	return put_idx;
 }