path: root/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-04-18 22:39:46 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-05-09 21:26:04 -0400
commit	dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree	806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/gp10b/gr_gp10b.c
parent	7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are intentionally left in place because they are still used from other repositories.

Because the new functions do not work without a pointer to struct gk20a, and piping it through just for logging is excessive, some log messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
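The conversion in this file follows a single mechanical pattern, sketched below. The wrapper functions example_before()/example_after() are hypothetical and exist only to frame the real calls taken from the hunks that follow; nvgpu_log*() takes the struct gk20a pointer explicitly, and where no such pointer is already in scope (see gr_gp10b_alloc_buffer further down) it is derived first, e.g. via gk20a_from_vm().

/*
 * Illustrative sketch of the logging conversion (not part of the patch).
 * example_before()/example_after() are hypothetical; the logging calls
 * themselves are taken verbatim from this diff.
 */
static void example_before(struct gk20a *g, u32 class_num, bool valid)
{
	gk20a_dbg_fn("");		/* old: no gk20a pointer is passed */
	gk20a_dbg_info("class=0x%x valid=%d", class_num, valid);
}

static void example_after(struct gk20a *g, u32 class_num, bool valid)
{
	nvgpu_log_fn(g, " ");		/* new: g is passed explicitly */
	nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
}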
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/gr_gp10b.c')
-rw-r--r--	drivers/gpu/nvgpu/gp10b/gr_gp10b.c | 123
1 file changed, 62 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 0178abbf..bc982d30 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -69,7 +69,7 @@ bool gr_gp10b_is_valid_class(struct gk20a *g, u32 class_num)
 	default:
 		break;
 	}
-	gk20a_dbg_info("class=0x%x valid=%d", class_num, valid);
+	nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
 	return valid;
 }
 
@@ -169,7 +169,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
 			gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset,
 			0);
 	if (lrf_ecc_sed_status) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Single bit error detected in SM LRF!");
 
 		gr_gp10b_sm_lrf_ecc_overcount_war(1,
@@ -181,7 +181,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
 						lrf_single_count_delta;
 	}
 	if (lrf_ecc_ded_status) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Double bit error detected in SM LRF!");
 
 		gr_gp10b_sm_lrf_ecc_overcount_war(0,
@@ -208,7 +208,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
 			gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) {
 		u32 ecc_stats_reg_val;
 
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Single bit error detected in SM SHM!");
 
 		ecc_stats_reg_val =
@@ -230,7 +230,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
 			gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) {
 		u32 ecc_stats_reg_val;
 
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Double bit error detected in SM SHM!");
 
 		ecc_stats_reg_val =
@@ -260,14 +260,14 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	u32 esr;
 	u32 ecc_stats_reg_val;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	esr = gk20a_readl(g,
 			gr_gpc0_tpc0_tex_m_hww_esr_r() + offset);
-	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr);
+	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr);
 
 	if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Single bit error detected in TEX!");
 
 		/* Pipe 0 counters */
@@ -323,7 +323,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f());
 	}
 	if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Double bit error detected in TEX!");
 
 		/* Pipe 0 counters */
@@ -403,7 +403,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
 	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 	u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -660,21 +660,21 @@ static void gr_gp10b_set_coalesce_buffer_size(struct gk20a *g, u32 data)
 {
 	u32 val;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	val = gk20a_readl(g, gr_gpcs_tc_debug0_r());
 	val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(),
 			gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data));
 	gk20a_writel(g, gr_gpcs_tc_debug0_r(), val);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data)
 {
 	u32 val;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	val = gk20a_readl(g, gr_bes_crop_debug3_r());
 	if ((data & 1)) {
@@ -722,7 +722,7 @@ void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data)
 int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr,
 				  u32 class_num, u32 offset, u32 data)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (class_num == PASCAL_COMPUTE_A) {
 		switch (offset << 2) {
@@ -800,7 +800,7 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (alpha_cb_size > gr->alpha_cb_size)
 		alpha_cb_size = gr->alpha_cb_size;
@@ -853,7 +853,7 @@ void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (cb_size_steady > gr->attrib_cb_size)
 		cb_size_steady = gr->attrib_cb_size;
@@ -923,7 +923,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
 	};
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_init_ctx_state(g);
 	if (err)
@@ -940,10 +940,10 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
 		}
 	}
 
-	gk20a_dbg_info("preempt image size: %u",
+	nvgpu_log_info(g, "preempt image size: %u",
 		g->gr.ctx_vars.preempt_image_size);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -952,8 +952,9 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 			  struct nvgpu_mem *mem)
 {
 	int err;
+	struct gk20a *g = gk20a_from_vm(vm);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
 	if (err)
@@ -1029,9 +1030,9 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 				g->gr.max_tpc_count;
 		attrib_cb_size = ALIGN(attrib_cb_size, 128);
 
-		gk20a_dbg_info("gfxp context spill_size=%d", spill_size);
-		gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size);
-		gk20a_dbg_info("gfxp context attrib_cb_size=%d",
+		nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
+		nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
+		nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
 				attrib_cb_size);
 
 		err = gr_gp10b_alloc_buffer(vm,
@@ -1112,7 +1113,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 	u32 graphics_preempt_mode = 0;
 	u32 compute_preempt_mode = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
 	if (err)
@@ -1137,7 +1138,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		goto fail_free_gk20a_ctx;
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 
@@ -1215,7 +1216,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		ctxsw_prog_main_image_compute_preemption_options_control_cta_f();
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -1224,21 +1225,21 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 	gr_ctx = &tsg->gr_ctx;
 
 	if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
-		gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
+		nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
 		nvgpu_mem_wr(g, mem,
 				ctxsw_prog_main_image_graphics_preemption_options_o(),
 				gfxp_preempt_option);
 	}
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
-		gk20a_dbg_info("CILP: %x", cilp_preempt_option);
+		nvgpu_log_info(g, "CILP: %x", cilp_preempt_option);
 		nvgpu_mem_wr(g, mem,
 				ctxsw_prog_main_image_compute_preemption_options_o(),
 				cilp_preempt_option);
 	}
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
-		gk20a_dbg_info("CTA: %x", cta_preempt_option);
+		nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
 		nvgpu_mem_wr(g, mem,
 				ctxsw_prog_main_image_compute_preemption_options_o(),
 				cta_preempt_option);
@@ -1269,7 +1270,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		(u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) <<
 		 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
 
-	gk20a_dbg_info("attrib cb addr : 0x%016x", addr);
+	nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
 	g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
 
 	addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
@@ -1315,7 +1316,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 	}
 
 out:
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
@@ -1475,7 +1476,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 	u32 activity0, activity1, activity2, activity4;
 	struct nvgpu_timeout timeout;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
 
@@ -1500,7 +1501,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 			gr_activity_empty_or_preempted(activity4));
 
 		if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
-			gk20a_dbg_fn("done");
+			nvgpu_log_fn(g, "done");
 			return 0;
 		}
 
@@ -1569,7 +1570,7 @@ void gr_gp10b_commit_global_bundle_cb(struct gk20a *g,
 
 	data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
 
-	gk20a_dbg_info("bundle cb token limit : %d, state limit : %d",
+	nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d",
 		g->gr.bundle_cb_token_limit, data);
 
 	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -1626,7 +1627,7 @@ int gr_gp10b_init_fs_state(struct gk20a *g)
 {
 	u32 data;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
 	data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
@@ -1705,7 +1706,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 {
 	int ret = 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
 	ret = gk20a_disable_channel_tsg(g, fault_ch);
 	if (ret) {
@@ -1721,18 +1722,18 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 		return ret;
 	}
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist");
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: tsgid: 0x%x", fault_ch->tsgid);
 
 	if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
 		gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true);
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: preempted tsg");
 	} else {
 		gk20a_fifo_issue_preempt(g, fault_ch->chid, false);
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: preempted channel");
 	}
 
@@ -1746,7 +1747,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	struct tsg_gk20a *tsg;
 	struct nvgpu_gr_ctx *gr_ctx;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
 	tsg = tsg_gk20a_from_ch(fault_ch);
 	if (!tsg)
@@ -1755,7 +1756,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	gr_ctx = &tsg->gr_ctx;
 
 	if (gr_ctx->cilp_preempt_pending) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP is already pending for chid %d",
 			fault_ch->chid);
 		return 0;
@@ -1763,7 +1764,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 
 	/* get ctx_id from the ucode image */
 	if (!gr_ctx->ctx_id_valid) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: looking up ctx id");
 		ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id);
 		if (ret) {
@@ -1773,7 +1774,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 		gr_ctx->ctx_id_valid = true;
 	}
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 		"CILP: ctx id is 0x%x", gr_ctx->ctx_id);
 
 	/* send ucode method to set ctxsw interrupt */
@@ -1795,10 +1796,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 		return ret;
 	}
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: enabled ctxsw completion interrupt");
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: disabling channel %d",
 			fault_ch->chid);
 
@@ -1826,7 +1827,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
 	struct tsg_gk20a *tsg;
 	struct nvgpu_gr_ctx *gr_ctx;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
 	tsg = tsg_gk20a_from_ch(fault_ch);
 	if (!tsg)
@@ -1837,7 +1838,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
 	/* The ucode is self-clearing, so all we need to do here is
 	   to clear cilp_preempt_pending. */
 	if (!gr_ctx->cilp_preempt_pending) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP is already cleared for chid %d\n",
 			fault_ch->chid);
 		return 0;
@@ -1878,7 +1879,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 				NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
 	}
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n",
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n",
 			gpc, tpc, global_esr);
 
 	if (cilp_enabled && sm_debugger_attached) {
@@ -1900,19 +1901,19 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 		if (warp_esr != 0 || (global_esr & global_mask) != 0) {
 			*ignore_debugger = true;
 
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 				"CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n",
 				gpc, tpc);
 
 			if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) {
-				gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+				nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 					"CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n",
 					gpc, tpc);
 				g->ops.gr.suspend_all_sms(g, global_mask, false);
 
 				gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch);
 			} else {
-				gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+				nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 					"CILP: STOP_TRIGGER from gpc %d tpc %d\n",
 					gpc, tpc);
 				g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true);
@@ -1923,11 +1924,11 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 					gpc, tpc, sm);
 			g->ops.gr.clear_sm_hww(g,
 					gpc, tpc, sm, global_esr_copy);
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 				"CILP: HWWs cleared for gpc %d tpc %d\n",
 				gpc, tpc);
 
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
 			ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
 			if (ret) {
 				nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
@@ -1936,7 +1937,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 
 			dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);
 			if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) {
-				gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+				nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 					"CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n",
 					gpc, tpc);
 				dbgr_control0 = set_field(dbgr_control0,
@@ -1945,13 +1946,13 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 				gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0);
 			}
 
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 				"CILP: resume for gpc %d tpc %d\n",
 				gpc, tpc);
 			g->ops.gr.resume_single_sm(g, gpc, tpc, sm);
 
 			*ignore_debugger = true;
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc);
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc);
 		}
 
 		*early_exit = true;
@@ -1999,14 +2000,14 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 	int ret = 0;
 	struct tsg_gk20a *tsg;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
 	/*
 	 * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR)
 	 * indicates that a CILP ctxsw save has finished
 	 */
 	if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: ctxsw save completed!\n");
 
 		/* now clear the interrupt */
@@ -2162,7 +2163,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx;
 	struct nvgpu_timeout timeout;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP preempt pending, waiting %lu msecs for preemption",
 			gk20a_get_gr_idle_timeout(g));
 
@@ -2285,7 +2286,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 
 	if (g->ops.gr.set_ctxsw_preemption_mode) {
 
-		gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d "
+		nvgpu_log(g, gpu_dbg_sched, "chid=%d tsgid=%d pid=%d "
 				"graphics_preempt=%d compute_preempt=%d",
 				ch->chid,
 				ch->tsgid,