Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/gr_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/gr_gv11b.c  132
1 file changed, 67 insertions, 65 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index 52e442f3..536d9dcb 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -96,7 +96,7 @@ bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num)
 	default:
 		break;
 	}
-	gk20a_dbg_info("class=0x%x valid=%d", class_num, valid);
+	nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
 	return valid;
 }
 
@@ -190,7 +190,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status);
 
 	if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
 			l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow);
 
@@ -205,7 +205,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				0);
 	}
 	if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
 			l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow);
 
@@ -282,7 +282,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status);
 
 	if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
 			lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow);
 
@@ -297,7 +297,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				0);
 	}
 	if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
 			lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow);
 
@@ -441,7 +441,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status);
 
 	if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
 			cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow);
 
@@ -456,7 +456,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				0);
 	}
 	if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
 			cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow);
 
@@ -521,7 +521,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status);
 
 	if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"corrected error (SBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
 			l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow);
 
@@ -536,7 +536,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				0);
 	}
 	if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
 			l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow);
 
@@ -605,7 +605,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status);
 
 	if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
 			icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow);
 
@@ -620,7 +620,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				0);
 	}
 	if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) {
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
 			"Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
 			icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow);
 
@@ -1129,14 +1129,14 @@ static void gr_gv11b_set_coalesce_buffer_size(struct gk20a *g, u32 data)
 {
 	u32 val;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	val = gk20a_readl(g, gr_gpcs_tc_debug0_r());
 	val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(),
 			gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data));
 	gk20a_writel(g, gr_gpcs_tc_debug0_r(), val);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
@@ -1144,7 +1144,7 @@ static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
 	u32 val;
 	bool flag;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r());
 	flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 1 : 0;
@@ -1190,7 +1190,7 @@ static void gr_gv11b_set_skedcheck(struct gk20a *g, u32 data)
 
 static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
 		gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(),
@@ -1224,7 +1224,7 @@ static void gr_gv11b_set_shader_cut_collector(struct gk20a *g, u32 data)
 int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
 			u32 class_num, u32 offset, u32 data)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (class_num == VOLTA_COMPUTE_A) {
 		switch (offset << 2) {
@@ -1315,7 +1315,7 @@ void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 pd_ab_max_output;
 	u32 alpha_cb_size = data * 4;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (alpha_cb_size > gr->alpha_cb_size)
 		alpha_cb_size = gr->alpha_cb_size;
@@ -1360,7 +1360,7 @@ void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 gpc_index, ppc_index, stride, val;
 	u32 cb_size_steady = data * 4, cb_size;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (cb_size_steady > gr->attrib_cb_size)
 		cb_size_steady = gr->attrib_cb_size;
@@ -1423,8 +1423,9 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 			struct nvgpu_mem *mem)
 {
 	int err;
+	struct gk20a *g = gk20a_from_vm(vm);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
 	if (err)
@@ -1500,9 +1501,9 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
 			g->gr.max_tpc_count;
 	attrib_cb_size = ALIGN(attrib_cb_size, 128);
 
-	gk20a_dbg_info("gfxp context spill_size=%d", spill_size);
-	gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size);
-	gk20a_dbg_info("gfxp context attrib_cb_size=%d",
+	nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
+	nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
+	nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
 		attrib_cb_size);
 
 	err = gr_gp10b_alloc_buffer(vm,
@@ -1590,7 +1591,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 		ctxsw_prog_main_image_compute_preemption_options_control_cta_f();
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -1600,7 +1601,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	if (gr_ctx->graphics_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
-		gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
+		nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
 		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o(),
 			gfxp_preempt_option);
@@ -1608,7 +1609,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	if (gr_ctx->compute_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
-		gk20a_dbg_info("CILP: %x", cilp_preempt_option);
+		nvgpu_log_info(g, "CILP: %x", cilp_preempt_option);
 		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o(),
 			cilp_preempt_option);
@@ -1616,7 +1617,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	if (gr_ctx->compute_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
-		gk20a_dbg_info("CTA: %x", cta_preempt_option);
+		nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
 		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o(),
 			cta_preempt_option);
@@ -1647,7 +1648,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 			(u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) <<
 			 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
 
-		gk20a_dbg_info("attrib cb addr : 0x%016x", addr);
+		nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
 		g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
 
 		addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
@@ -1698,7 +1699,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 	}
 
 out:
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g,
 			struct gk20a_debug_output *o,
@@ -1949,7 +1950,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 	u32 activity0, activity1, activity2, activity4;
 	struct nvgpu_timeout timeout;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
 
@@ -1974,7 +1975,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 			gr_activity_empty_or_preempted(activity4));
 
 		if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
-			gk20a_dbg_fn("done");
+			nvgpu_log_fn(g, "done");
 			return 0;
 		}
 
@@ -2191,7 +2192,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 				NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
 	}
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 			"SM Exception received on gpc %d tpc %d sm %d = 0x%08x",
 			gpc, tpc, sm, global_esr);
 
@@ -2210,13 +2211,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 	if (warp_esr != 0 || (global_esr & global_mask) != 0) {
 		*ignore_debugger = true;
 
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 			"CILP: starting wait for LOCKED_DOWN on "
 			"gpc %d tpc %d sm %d",
 			gpc, tpc, sm);
 
 		if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) {
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 				"CILP: Broadcasting STOP_TRIGGER from "
 				"gpc %d tpc %d sm %d",
 				gpc, tpc, sm);
@@ -2225,7 +2226,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 
 			gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch);
 		} else {
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 				"CILP: STOP_TRIGGER from "
 				"gpc %d tpc %d sm %d",
 				gpc, tpc, sm);
@@ -2238,12 +2239,12 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 				gpc, tpc, sm);
 			g->ops.gr.clear_sm_hww(g,
 					gpc, tpc, sm, global_esr_copy);
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 				"CILP: HWWs cleared for "
 				"gpc %d tpc %d sm %d",
 				gpc, tpc, sm);
 
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
 		ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
 		if (ret) {
 			nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
@@ -2252,7 +2253,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 
 		dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
 		if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) {
-			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 				"CILP: clearing SINGLE_STEP_MODE "
 				"before resume for gpc %d tpc %d sm %d",
 				gpc, tpc, sm);
@@ -2262,13 +2263,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 			gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0);
 		}
 
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 			"CILP: resume for gpc %d tpc %d sm %d",
 			gpc, tpc, sm);
 		g->ops.gr.resume_single_sm(g, gpc, tpc, sm);
 
 		*ignore_debugger = true;
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 			"CILP: All done on gpc %d, tpc %d sm %d",
 			gpc, tpc, sm);
 	}
@@ -2388,7 +2389,7 @@ int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
 					GPU_LIT_NUM_TPC_PER_GPC);
 	u32 num_tpcs = num_gpcs * num_tpc_per_gpc;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!gr->map_tiles)
 		return -1;
@@ -2535,7 +2536,7 @@ void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
 {
 	u32 val, i, j;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) {
 		val =
@@ -2666,8 +2667,9 @@ int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	u32 addr_hi;
 	struct ctx_header_desc *ctx;
 	int err;
+	struct gk20a *g = c->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gv11b_alloc_subctx_header(c);
 	if (err)
@@ -2704,7 +2706,7 @@ int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c)
 	u32 pe_vaf;
 	u32 pe_vsc_vpc;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
 	ds_debug = gk20a_readl(g, gr_ds_debug_r());
@@ -2814,7 +2816,7 @@ void gr_gv11b_load_tpc_mask(struct gk20a *g)
 		}
 	}
 
-	gk20a_dbg_info("pes_tpc_mask %u\n", pes_tpc_mask);
+	nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask);
 	fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc);
 	if (g->tpc_fs_mask_user &&
 			g->tpc_fs_mask_user != fuse_tpc_mask &&
@@ -2860,7 +2862,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g)
 	u32 ver = g->params.gpu_arch + g->params.gpu_impl;
 	u32 rev = g->params.gpu_rev;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
 	data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
@@ -2928,14 +2930,14 @@ void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
 	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
 
 	reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset);
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 		"sm tpc esr sm sel reg val: 0x%x", reg_val);
 	*esr_sm_sel = 0;
 	if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val))
 		*esr_sm_sel = 1;
 	if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val))
 		*esr_sm_sel |= 1 << 1;
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 		"esr_sm_sel bitmask: 0x%x", *esr_sm_sel);
 }
 
@@ -2954,7 +2956,7 @@ int gv11b_gr_sm_trigger_suspend(struct gk20a *g)
 	gk20a_writel(g,
 		gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);
 
-	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 		"stop trigger enable: broadcast dbgr_control0: 0x%x ",
 		dbgr_control0);
 
@@ -3012,19 +3014,19 @@ void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
 
 	/* Only for debug purpose */
 	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
-		gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].valid_warps[0]);
-		gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].valid_warps[1]);
 
-		gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].trapped_warps[0]);
-		gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].trapped_warps[1]);
 
-		gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].paused_warps[0]);
-		gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].paused_warps[1]);
 	}
 }
@@ -3257,7 +3259,7 @@ bool gv11b_gr_sm_debugger_attached(struct gk20a *g)
 	 */
 	debugger_mode =
 		gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0);
-	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 		"SM Debugger Mode: %d", debugger_mode);
 	if (debugger_mode ==
 			gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v())
@@ -3576,7 +3578,7 @@ static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g,
 			dbgr_status0, dbgr_control0, warps_valid,
 			warps_paused, warps_trapped);
 	else
-		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 			"STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx "
 			"PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
 			dbgr_status0, dbgr_control0, warps_valid,
@@ -3598,7 +3600,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
 		     gk20a_gr_tpc_offset(g, tpc) +
 		     gv11b_gr_sm_offset(g, sm);
 
-	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 		"GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);
 
 	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
@@ -3642,7 +3644,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
 		}
 
 		if (locked_down || no_error_pending) {
-			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 				"GPC%d TPC%d: locked down SM%d", gpc, tpc, sm);
 			return 0;
 		}
@@ -3677,7 +3679,7 @@ int gv11b_gr_lock_down_sm(struct gk20a *g,
 	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
 			gv11b_gr_sm_offset(g, sm);
 
-	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 		"GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm);
 
 	/* assert stop trigger */
@@ -3699,13 +3701,13 @@ void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 
 	gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
 			global_esr);
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 			"Cleared HWW global esr, current reg val: 0x%x",
 			gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() +
 				offset));
 
 	gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0);
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
 			"Cleared HWW warp esr, current reg val: 0x%x",
 			gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() +
 				offset));
@@ -4440,7 +4442,7 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr,
 {
 	u32 gpc_addr;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
 	/* setup defaults */
 	*addr_type = CTXSW_ADDR_TYPE_SYS;
@@ -4591,12 +4593,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
 	t = 0;
 	*num_registers = 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
 	err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
 			&gpc_num, &tpc_num, &ppc_num, &be_num,
 			&broadcast_flags);
-	gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
 	if (err)
 		return err;
 
@@ -4690,7 +4692,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
 	} else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
 			(addr_type == CTXSW_ADDR_TYPE_ETPC)) &&
 			g->ops.gr.egpc_etpc_priv_addr_table) {
-		gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
+		nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
 		g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num,
 			broadcast_flags, priv_addr_table, &t);
 	} else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) {