Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  364
1 file changed, 181 insertions(+), 183 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 86111321..00f26650 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -122,7 +122,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
122 122
123 *ctx_id = nvgpu_mem_rd(g, mem, 123 *ctx_id = nvgpu_mem_rd(g, mem,
124 ctxsw_prog_main_image_context_id_o()); 124 ctxsw_prog_main_image_context_id_o());
125 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); 125 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id);
126 126
127 nvgpu_mem_end(g, mem); 127 nvgpu_mem_end(g, mem);
128 128
@@ -220,7 +220,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
220 const u32 *ucode_u32_data; 220 const u32 *ucode_u32_data;
221 u32 checksum; 221 u32 checksum;
222 222
223 gk20a_dbg_fn(""); 223 nvgpu_log_fn(g, " ");
224 224
225 gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) | 225 gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) |
226 gr_gpccs_dmemc_blk_f(0) | 226 gr_gpccs_dmemc_blk_f(0) |
@@ -245,7 +245,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
245 gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); 245 gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]);
246 checksum += ucode_u32_data[i]; 246 checksum += ucode_u32_data[i];
247 } 247 }
248 gk20a_dbg_fn("done"); 248 nvgpu_log_fn(g, "done");
249} 249}
250 250
251static void gr_gk20a_load_falcon_imem(struct gk20a *g) 251static void gr_gk20a_load_falcon_imem(struct gk20a *g)
@@ -255,7 +255,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
255 u32 tag, i, pad_start, pad_end; 255 u32 tag, i, pad_start, pad_end;
256 u32 checksum; 256 u32 checksum;
257 257
258 gk20a_dbg_fn(""); 258 nvgpu_log_fn(g, " ");
259 259
260 cfg = gk20a_readl(g, gr_fecs_cfg_r()); 260 cfg = gk20a_readl(g, gr_fecs_cfg_r());
261 fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); 261 fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg);
@@ -343,7 +343,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
343 bool ctx_status_invalid; 343 bool ctx_status_invalid;
344 struct nvgpu_timeout timeout; 344 struct nvgpu_timeout timeout;
345 345
346 gk20a_dbg_fn(""); 346 nvgpu_log_fn(g, " ");
347 347
348 gr_engine_id = gk20a_fifo_get_gr_engine_id(g); 348 gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
349 349
@@ -372,7 +372,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
372 372
373 if (!gr_enabled || ctx_status_invalid 373 if (!gr_enabled || ctx_status_invalid
374 || (!gr_busy && !ctxsw_active)) { 374 || (!gr_busy && !ctxsw_active)) {
375 gk20a_dbg_fn("done"); 375 nvgpu_log_fn(g, "done");
376 return 0; 376 return 0;
377 } 377 }
378 378
@@ -398,7 +398,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
398 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) 398 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
399 return 0; 399 return 0;
400 400
401 gk20a_dbg_fn(""); 401 nvgpu_log_fn(g, " ");
402 402
403 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); 403 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
404 404
@@ -406,7 +406,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
406 val = gk20a_readl(g, gr_status_r()); 406 val = gk20a_readl(g, gr_status_r());
407 407
408 if (!gr_status_fe_method_lower_v(val)) { 408 if (!gr_status_fe_method_lower_v(val)) {
409 gk20a_dbg_fn("done"); 409 nvgpu_log_fn(g, "done");
410 return 0; 410 return 0;
411 } 411 }
412 412
@@ -430,7 +430,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
430 u32 check = WAIT_UCODE_LOOP; 430 u32 check = WAIT_UCODE_LOOP;
431 u32 reg; 431 u32 reg;
432 432
433 gk20a_dbg_fn(""); 433 nvgpu_log_fn(g, " ");
434 434
435 if (sleepduringwait) 435 if (sleepduringwait)
436 delay = GR_IDLE_CHECK_DEFAULT; 436 delay = GR_IDLE_CHECK_DEFAULT;
@@ -532,7 +532,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
532 return -1; 532 return -1;
533 } 533 }
534 534
535 gk20a_dbg_fn("done"); 535 nvgpu_log_fn(g, "done");
536 return 0; 536 return 0;
537} 537}
538 538
@@ -618,7 +618,7 @@ int gr_gk20a_disable_ctxsw(struct gk20a *g)
618{ 618{
619 int err = 0; 619 int err = 0;
620 620
621 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 621 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
622 622
623 nvgpu_mutex_acquire(&g->ctxsw_disable_lock); 623 nvgpu_mutex_acquire(&g->ctxsw_disable_lock);
624 g->ctxsw_disable_count++; 624 g->ctxsw_disable_count++;
@@ -635,7 +635,7 @@ int gr_gk20a_enable_ctxsw(struct gk20a *g)
635{ 635{
636 int err = 0; 636 int err = 0;
637 637
638 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 638 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
639 639
640 nvgpu_mutex_acquire(&g->ctxsw_disable_lock); 640 nvgpu_mutex_acquire(&g->ctxsw_disable_lock);
641 g->ctxsw_disable_count--; 641 g->ctxsw_disable_count--;
@@ -669,7 +669,7 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
669 u32 addr_lo; 669 u32 addr_lo;
670 u32 addr_hi; 670 u32 addr_hi;
671 671
672 gk20a_dbg_fn(""); 672 nvgpu_log_fn(c->g, " ");
673 673
674 addr_lo = u64_lo32(gpu_va) >> 12; 674 addr_lo = u64_lo32(gpu_va) >> 12;
675 addr_hi = u64_hi32(gpu_va); 675 addr_hi = u64_hi32(gpu_va);
@@ -775,7 +775,7 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
775 u32 data = fecs_current_ctx_data(g, &c->inst_block); 775 u32 data = fecs_current_ctx_data(g, &c->inst_block);
776 u32 ret; 776 u32 ret;
777 777
778 gk20a_dbg_info("bind channel %d inst ptr 0x%08x", 778 nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x",
779 c->chid, inst_base_ptr); 779 c->chid, inst_base_ptr);
780 780
781 ret = gr_gk20a_submit_fecs_method_op(g, 781 ret = gr_gk20a_submit_fecs_method_op(g,
@@ -823,7 +823,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
823 struct nvgpu_mem *ctxheader = &ctx->mem; 823 struct nvgpu_mem *ctxheader = &ctx->mem;
824 int ret = 0; 824 int ret = 0;
825 825
826 gk20a_dbg_fn(""); 826 nvgpu_log_fn(g, " ");
827 827
828 tsg = tsg_gk20a_from_ch(c); 828 tsg = tsg_gk20a_from_ch(c);
829 if (!tsg) 829 if (!tsg)
@@ -905,7 +905,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
905 u64 addr; 905 u64 addr;
906 u32 size; 906 u32 size;
907 907
908 gk20a_dbg_fn(""); 908 nvgpu_log_fn(g, " ");
909 909
910 tsg = tsg_gk20a_from_ch(c); 910 tsg = tsg_gk20a_from_ch(c);
911 if (!tsg) 911 if (!tsg)
@@ -931,7 +931,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
931 if (size == g->ops.gr.pagepool_default_size(g)) 931 if (size == g->ops.gr.pagepool_default_size(g))
932 size = gr_scc_pagepool_total_pages_hwmax_v(); 932 size = gr_scc_pagepool_total_pages_hwmax_v();
933 933
934 gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d", 934 nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d",
935 addr, size); 935 addr, size);
936 936
937 g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch); 937 g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch);
@@ -944,7 +944,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
944 944
945 size = gr->bundle_cb_default_size; 945 size = gr->bundle_cb_default_size;
946 946
947 gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d", 947 nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %d",
948 addr, size); 948 addr, size);
949 949
950 g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch); 950 g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);
@@ -955,7 +955,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
955 (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) << 955 (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) <<
956 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); 956 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
957 957
958 gk20a_dbg_info("attrib cb addr : 0x%016llx", addr); 958 nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr);
959 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch); 959 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch);
960 g->ops.gr.commit_global_cb_manager(g, c, patch); 960 g->ops.gr.commit_global_cb_manager(g, c, patch);
961 961
@@ -976,7 +976,7 @@ int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c)
976 u32 pe_vaf; 976 u32 pe_vaf;
977 u32 pe_vsc_vpc; 977 u32 pe_vsc_vpc;
978 978
979 gk20a_dbg_fn(""); 979 nvgpu_log_fn(g, " ");
980 980
981 gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r()); 981 gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r());
982 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); 982 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
@@ -1036,7 +1036,7 @@ int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
1036 if (!gr->map_tiles) 1036 if (!gr->map_tiles)
1037 return -1; 1037 return -1;
1038 1038
1039 gk20a_dbg_fn(""); 1039 nvgpu_log_fn(g, " ");
1040 1040
1041 gk20a_writel(g, gr_crstr_map_table_cfg_r(), 1041 gk20a_writel(g, gr_crstr_map_table_cfg_r(),
1042 gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) | 1042 gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) |
@@ -1219,7 +1219,7 @@ int gr_gk20a_init_fs_state(struct gk20a *g)
1219 u32 reg_index; 1219 u32 reg_index;
1220 int err; 1220 int err;
1221 1221
1222 gk20a_dbg_fn(""); 1222 nvgpu_log_fn(g, " ");
1223 1223
1224 if (g->ops.gr.init_sm_id_table) { 1224 if (g->ops.gr.init_sm_id_table) {
1225 err = g->ops.gr.init_sm_id_table(g); 1225 err = g->ops.gr.init_sm_id_table(g);
@@ -1302,7 +1302,7 @@ int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
1302 struct gk20a *g = c->g; 1302 struct gk20a *g = c->g;
1303 int ret; 1303 int ret;
1304 1304
1305 gk20a_dbg_fn(""); 1305 nvgpu_log_fn(g, " ");
1306 1306
1307 ret = gr_gk20a_submit_fecs_method_op(g, 1307 ret = gr_gk20a_submit_fecs_method_op(g,
1308 (struct fecs_method_op_gk20a) { 1308 (struct fecs_method_op_gk20a) {
@@ -1411,7 +1411,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
1411 struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init; 1411 struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
1412 u32 last_method_data = 0; 1412 u32 last_method_data = 0;
1413 1413
1414 gk20a_dbg_fn(""); 1414 nvgpu_log_fn(g, " ");
1415 1415
1416 tsg = tsg_gk20a_from_ch(c); 1416 tsg = tsg_gk20a_from_ch(c);
1417 if (!tsg) 1417 if (!tsg)
@@ -1647,7 +1647,7 @@ clean_up:
1647 if (err) 1647 if (err)
1648 nvgpu_err(g, "fail"); 1648 nvgpu_err(g, "fail");
1649 else 1649 else
1650 gk20a_dbg_fn("done"); 1650 nvgpu_log_fn(g, "done");
1651 1651
1652 nvgpu_mem_end(g, gold_mem); 1652 nvgpu_mem_end(g, gold_mem);
1653 nvgpu_mem_end(g, gr_mem); 1653 nvgpu_mem_end(g, gr_mem);
@@ -1666,7 +1666,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
1666 u32 data; 1666 u32 data;
1667 int ret; 1667 int ret;
1668 1668
1669 gk20a_dbg_fn(""); 1669 nvgpu_log_fn(g, " ");
1670 1670
1671 tsg = tsg_gk20a_from_ch(c); 1671 tsg = tsg_gk20a_from_ch(c);
1672 if (!tsg) 1672 if (!tsg)
@@ -1732,7 +1732,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1732 struct nvgpu_mem *ctxheader = &ctx->mem; 1732 struct nvgpu_mem *ctxheader = &ctx->mem;
1733 int ret; 1733 int ret;
1734 1734
1735 gk20a_dbg_fn(""); 1735 nvgpu_log_fn(g, " ");
1736 1736
1737 tsg = tsg_gk20a_from_ch(c); 1737 tsg = tsg_gk20a_from_ch(c);
1738 if (!tsg) 1738 if (!tsg)
@@ -1884,7 +1884,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
1884 int ret = 0; 1884 int ret = 0;
1885 struct nvgpu_mem *mem; 1885 struct nvgpu_mem *mem;
1886 1886
1887 gk20a_dbg_fn(""); 1887 nvgpu_log_fn(g, " ");
1888 1888
1889 tsg = tsg_gk20a_from_ch(c); 1889 tsg = tsg_gk20a_from_ch(c);
1890 if (!tsg) 1890 if (!tsg)
@@ -1991,7 +1991,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
1991 1991
1992static void gr_gk20a_start_falcon_ucode(struct gk20a *g) 1992static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
1993{ 1993{
1994 gk20a_dbg_fn(""); 1994 nvgpu_log_fn(g, " ");
1995 1995
1996 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), 1996 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0),
1997 gr_fecs_ctxsw_mailbox_clear_value_f(~0)); 1997 gr_fecs_ctxsw_mailbox_clear_value_f(~0));
@@ -2002,7 +2002,7 @@ static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
2002 gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1)); 2002 gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1));
2003 gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1)); 2003 gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1));
2004 2004
2005 gk20a_dbg_fn("done"); 2005 nvgpu_log_fn(g, "done");
2006} 2006}
2007 2007
2008static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) 2008static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
@@ -2392,7 +2392,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
2392{ 2392{
2393 int err; 2393 int err;
2394 2394
2395 gk20a_dbg_fn(""); 2395 nvgpu_log_fn(g, " ");
2396 2396
2397 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { 2397 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
2398 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), 2398 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
@@ -2419,7 +2419,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
2419 gr_gk20a_load_falcon_with_bootloader(g); 2419 gr_gk20a_load_falcon_with_bootloader(g);
2420 g->gr.skip_ucode_init = true; 2420 g->gr.skip_ucode_init = true;
2421 } 2421 }
2422 gk20a_dbg_fn("done"); 2422 nvgpu_log_fn(g, "done");
2423 return 0; 2423 return 0;
2424} 2424}
2425 2425
@@ -2427,7 +2427,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2427{ 2427{
2428 u32 ret; 2428 u32 ret;
2429 2429
2430 gk20a_dbg_fn(""); 2430 nvgpu_log_fn(g, " ");
2431 2431
2432 ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL, 2432 ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL,
2433 GR_IS_UCODE_OP_EQUAL, 2433 GR_IS_UCODE_OP_EQUAL,
@@ -2448,7 +2448,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2448 gk20a_writel(g, gr_fecs_method_push_r(), 2448 gk20a_writel(g, gr_fecs_method_push_r(),
2449 gr_fecs_method_push_adr_set_watchdog_timeout_f()); 2449 gr_fecs_method_push_adr_set_watchdog_timeout_f());
2450 2450
2451 gk20a_dbg_fn("done"); 2451 nvgpu_log_fn(g, "done");
2452 return 0; 2452 return 0;
2453} 2453}
2454 2454
@@ -2463,7 +2463,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2463 .cond.fail = GR_IS_UCODE_OP_SKIP, 2463 .cond.fail = GR_IS_UCODE_OP_SKIP,
2464 }; 2464 };
2465 2465
2466 gk20a_dbg_fn(""); 2466 nvgpu_log_fn(g, " ");
2467 /* query ctxsw image sizes, if golden context is not created */ 2467 /* query ctxsw image sizes, if golden context is not created */
2468 if (!g->gr.ctx_vars.golden_image_initialized) { 2468 if (!g->gr.ctx_vars.golden_image_initialized) {
2469 op.method.addr = 2469 op.method.addr =
@@ -2496,7 +2496,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2496 g->gr.ctx_vars.priv_access_map_size = 512 * 1024; 2496 g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
2497 } 2497 }
2498 2498
2499 gk20a_dbg_fn("done"); 2499 nvgpu_log_fn(g, "done");
2500 return 0; 2500 return 0;
2501} 2501}
2502 2502
@@ -2543,7 +2543,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
2543 } 2543 }
2544 } 2544 }
2545 2545
2546 gk20a_dbg_fn("done"); 2546 nvgpu_log_fn(g, "done");
2547} 2547}
2548 2548
2549static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) 2549static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
@@ -2557,11 +2557,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2557 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * 2557 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
2558 gr_scc_pagepool_total_pages_byte_granularity_v(); 2558 gr_scc_pagepool_total_pages_byte_granularity_v();
2559 2559
2560 gk20a_dbg_fn(""); 2560 nvgpu_log_fn(g, " ");
2561 2561
2562 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); 2562 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
2563 2563
2564 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); 2564 nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size);
2565 2565
2566 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR], 2566 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR],
2567 cb_buffer_size); 2567 cb_buffer_size);
@@ -2576,7 +2576,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2576 goto clean_up; 2576 goto clean_up;
2577 } 2577 }
2578 2578
2579 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); 2579 nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
2580 2580
2581 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL], 2581 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL],
2582 pagepool_buffer_size); 2582 pagepool_buffer_size);
@@ -2591,7 +2591,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2591 goto clean_up; 2591 goto clean_up;
2592 } 2592 }
2593 2593
2594 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); 2594 nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
2595 2595
2596 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE], 2596 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
2597 attr_buffer_size); 2597 attr_buffer_size);
@@ -2606,7 +2606,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2606 goto clean_up; 2606 goto clean_up;
2607 } 2607 }
2608 2608
2609 gk20a_dbg_info("golden_image_size : %d", 2609 nvgpu_log_info(g, "golden_image_size : %d",
2610 gr->ctx_vars.golden_image_size); 2610 gr->ctx_vars.golden_image_size);
2611 2611
2612 err = gk20a_gr_alloc_ctx_buffer(g, 2612 err = gk20a_gr_alloc_ctx_buffer(g,
@@ -2615,7 +2615,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2615 if (err) 2615 if (err)
2616 goto clean_up; 2616 goto clean_up;
2617 2617
2618 gk20a_dbg_info("priv_access_map_size : %d", 2618 nvgpu_log_info(g, "priv_access_map_size : %d",
2619 gr->ctx_vars.priv_access_map_size); 2619 gr->ctx_vars.priv_access_map_size);
2620 2620
2621 err = gk20a_gr_alloc_ctx_buffer(g, 2621 err = gk20a_gr_alloc_ctx_buffer(g,
@@ -2625,7 +2625,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2625 if (err) 2625 if (err)
2626 goto clean_up; 2626 goto clean_up;
2627 2627
2628 gk20a_dbg_fn("done"); 2628 nvgpu_log_fn(g, "done");
2629 return 0; 2629 return 0;
2630 2630
2631 clean_up: 2631 clean_up:
@@ -2643,7 +2643,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct gk20a *g,
2643 int *g_bfr_index = gr_ctx->global_ctx_buffer_index; 2643 int *g_bfr_index = gr_ctx->global_ctx_buffer_index;
2644 u32 i; 2644 u32 i;
2645 2645
2646 gk20a_dbg_fn(""); 2646 nvgpu_log_fn(g, " ");
2647 2647
2648 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { 2648 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
2649 if (g_bfr_index[i]) { 2649 if (g_bfr_index[i]) {
@@ -2679,7 +2679,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2679 struct nvgpu_mem *mem; 2679 struct nvgpu_mem *mem;
2680 u64 gpu_va; 2680 u64 gpu_va;
2681 2681
2682 gk20a_dbg_fn(""); 2682 nvgpu_log_fn(g, " ");
2683 2683
2684 tsg = tsg_gk20a_from_ch(c); 2684 tsg = tsg_gk20a_from_ch(c);
2685 if (!tsg) 2685 if (!tsg)
@@ -2780,7 +2780,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2780 struct gr_gk20a *gr = &g->gr; 2780 struct gr_gk20a *gr = &g->gr;
2781 int err = 0; 2781 int err = 0;
2782 2782
2783 gk20a_dbg_fn(""); 2783 nvgpu_log_fn(g, " ");
2784 2784
2785 if (gr->ctx_vars.buffer_size == 0) 2785 if (gr->ctx_vars.buffer_size == 0)
2786 return 0; 2786 return 0;
@@ -2835,7 +2835,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
2835void gr_gk20a_free_gr_ctx(struct gk20a *g, 2835void gr_gk20a_free_gr_ctx(struct gk20a *g,
2836 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) 2836 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx)
2837{ 2837{
2838 gk20a_dbg_fn(""); 2838 nvgpu_log_fn(g, " ");
2839 2839
2840 if (gr_ctx->mem.gpu_va) { 2840 if (gr_ctx->mem.gpu_va) {
2841 gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx); 2841 gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx);
@@ -2881,7 +2881,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
2881 u32 alloc_size; 2881 u32 alloc_size;
2882 int err = 0; 2882 int err = 0;
2883 2883
2884 gk20a_dbg_fn(""); 2884 nvgpu_log_fn(g, " ");
2885 2885
2886 tsg = tsg_gk20a_from_ch(c); 2886 tsg = tsg_gk20a_from_ch(c);
2887 if (!tsg) 2887 if (!tsg)
@@ -2899,7 +2899,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
2899 if (err) 2899 if (err)
2900 return err; 2900 return err;
2901 2901
2902 gk20a_dbg_fn("done"); 2902 nvgpu_log_fn(g, "done");
2903 return 0; 2903 return 0;
2904} 2904}
2905 2905
@@ -2909,7 +2909,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g,
2909{ 2909{
2910 struct patch_desc *patch_ctx = &gr_ctx->patch_ctx; 2910 struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;
2911 2911
2912 gk20a_dbg_fn(""); 2912 nvgpu_log_fn(g, " ");
2913 2913
2914 if (patch_ctx->mem.gpu_va) 2914 if (patch_ctx->mem.gpu_va)
2915 nvgpu_gmmu_unmap(vm, &patch_ctx->mem, 2915 nvgpu_gmmu_unmap(vm, &patch_ctx->mem,
@@ -2925,7 +2925,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g,
2925{ 2925{
2926 struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx; 2926 struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
2927 2927
2928 gk20a_dbg_fn(""); 2928 nvgpu_log_fn(g, " ");
2929 2929
2930 if (pm_ctx->mem.gpu_va) { 2930 if (pm_ctx->mem.gpu_va) {
2931 nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va); 2931 nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va);
@@ -2942,7 +2942,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
2942 struct tsg_gk20a *tsg = NULL; 2942 struct tsg_gk20a *tsg = NULL;
2943 int err = 0; 2943 int err = 0;
2944 2944
2945 gk20a_dbg_fn(""); 2945 nvgpu_log_fn(g, " ");
2946 2946
2947 /* an address space needs to have been bound at this point.*/ 2947 /* an address space needs to have been bound at this point.*/
2948 if (!gk20a_channel_as_bound(c) && !c->vm) { 2948 if (!gk20a_channel_as_bound(c) && !c->vm) {
@@ -3047,7 +3047,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
3047 } 3047 }
3048 } 3048 }
3049 3049
3050 gk20a_dbg_fn("done"); 3050 nvgpu_log_fn(g, "done");
3051 return 0; 3051 return 0;
3052out: 3052out:
3053 /* 1. gr_ctx, patch_ctx and global ctx buffer mapping 3053 /* 1. gr_ctx, patch_ctx and global ctx buffer mapping
@@ -3062,7 +3062,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
3062{ 3062{
3063 struct gk20a *g = gr->g; 3063 struct gk20a *g = gr->g;
3064 3064
3065 gk20a_dbg_fn(""); 3065 nvgpu_log_fn(g, " ");
3066 3066
3067 gr_gk20a_free_cyclestats_snapshot_data(g); 3067 gr_gk20a_free_cyclestats_snapshot_data(g);
3068 3068
@@ -3322,35 +3322,35 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3322 sm_per_tpc * sizeof(struct sm_info)); 3322 sm_per_tpc * sizeof(struct sm_info));
3323 gr->no_of_sm = 0; 3323 gr->no_of_sm = 0;
3324 3324
3325 gk20a_dbg_info("fbps: %d", gr->num_fbps); 3325 nvgpu_log_info(g, "fbps: %d", gr->num_fbps);
3326 gk20a_dbg_info("max_gpc_count: %d", gr->max_gpc_count); 3326 nvgpu_log_info(g, "max_gpc_count: %d", gr->max_gpc_count);
3327 gk20a_dbg_info("max_fbps_count: %d", gr->max_fbps_count); 3327 nvgpu_log_info(g, "max_fbps_count: %d", gr->max_fbps_count);
3328 gk20a_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); 3328 nvgpu_log_info(g, "max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count);
3329 gk20a_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); 3329 nvgpu_log_info(g, "max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count);
3330 gk20a_dbg_info("max_tpc_count: %d", gr->max_tpc_count); 3330 nvgpu_log_info(g, "max_tpc_count: %d", gr->max_tpc_count);
3331 gk20a_dbg_info("sys_count: %d", gr->sys_count); 3331 nvgpu_log_info(g, "sys_count: %d", gr->sys_count);
3332 gk20a_dbg_info("gpc_count: %d", gr->gpc_count); 3332 nvgpu_log_info(g, "gpc_count: %d", gr->gpc_count);
3333 gk20a_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc); 3333 nvgpu_log_info(g, "pe_count_per_gpc: %d", gr->pe_count_per_gpc);
3334 gk20a_dbg_info("tpc_count: %d", gr->tpc_count); 3334 nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count);
3335 gk20a_dbg_info("ppc_count: %d", gr->ppc_count); 3335 nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count);
3336 3336
3337 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3337 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3338 gk20a_dbg_info("gpc_tpc_count[%d] : %d", 3338 nvgpu_log_info(g, "gpc_tpc_count[%d] : %d",
3339 gpc_index, gr->gpc_tpc_count[gpc_index]); 3339 gpc_index, gr->gpc_tpc_count[gpc_index]);
3340 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3340 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3341 gk20a_dbg_info("gpc_zcb_count[%d] : %d", 3341 nvgpu_log_info(g, "gpc_zcb_count[%d] : %d",
3342 gpc_index, gr->gpc_zcb_count[gpc_index]); 3342 gpc_index, gr->gpc_zcb_count[gpc_index]);
3343 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3343 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3344 gk20a_dbg_info("gpc_ppc_count[%d] : %d", 3344 nvgpu_log_info(g, "gpc_ppc_count[%d] : %d",
3345 gpc_index, gr->gpc_ppc_count[gpc_index]); 3345 gpc_index, gr->gpc_ppc_count[gpc_index]);
3346 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3346 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3347 gk20a_dbg_info("gpc_skip_mask[%d] : %d", 3347 nvgpu_log_info(g, "gpc_skip_mask[%d] : %d",
3348 gpc_index, gr->gpc_skip_mask[gpc_index]); 3348 gpc_index, gr->gpc_skip_mask[gpc_index]);
3349 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3349 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3350 for (pes_index = 0; 3350 for (pes_index = 0;
3351 pes_index < gr->pe_count_per_gpc; 3351 pes_index < gr->pe_count_per_gpc;
3352 pes_index++) 3352 pes_index++)
3353 gk20a_dbg_info("pes_tpc_count[%d][%d] : %d", 3353 nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d",
3354 pes_index, gpc_index, 3354 pes_index, gpc_index,
3355 gr->pes_tpc_count[pes_index][gpc_index]); 3355 gr->pes_tpc_count[pes_index][gpc_index]);
3356 3356
@@ -3358,7 +3358,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3358 for (pes_index = 0; 3358 for (pes_index = 0;
3359 pes_index < gr->pe_count_per_gpc; 3359 pes_index < gr->pe_count_per_gpc;
3360 pes_index++) 3360 pes_index++)
3361 gk20a_dbg_info("pes_tpc_mask[%d][%d] : %d", 3361 nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d",
3362 pes_index, gpc_index, 3362 pes_index, gpc_index,
3363 gr->pes_tpc_mask[pes_index][gpc_index]); 3363 gr->pes_tpc_mask[pes_index][gpc_index]);
3364 3364
@@ -3367,16 +3367,16 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3367 g->ops.gr.calc_global_ctx_buffer_size(g); 3367 g->ops.gr.calc_global_ctx_buffer_size(g);
3368 gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(); 3368 gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v();
3369 3369
3370 gk20a_dbg_info("bundle_cb_default_size: %d", 3370 nvgpu_log_info(g, "bundle_cb_default_size: %d",
3371 gr->bundle_cb_default_size); 3371 gr->bundle_cb_default_size);
3372 gk20a_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); 3372 nvgpu_log_info(g, "min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth);
3373 gk20a_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); 3373 nvgpu_log_info(g, "bundle_cb_token_limit: %d", gr->bundle_cb_token_limit);
3374 gk20a_dbg_info("attrib_cb_default_size: %d", 3374 nvgpu_log_info(g, "attrib_cb_default_size: %d",
3375 gr->attrib_cb_default_size); 3375 gr->attrib_cb_default_size);
3376 gk20a_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size); 3376 nvgpu_log_info(g, "attrib_cb_size: %d", gr->attrib_cb_size);
3377 gk20a_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size); 3377 nvgpu_log_info(g, "alpha_cb_default_size: %d", gr->alpha_cb_default_size);
3378 gk20a_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size); 3378 nvgpu_log_info(g, "alpha_cb_size: %d", gr->alpha_cb_size);
3379 gk20a_dbg_info("timeslice_mode: %d", gr->timeslice_mode); 3379 nvgpu_log_info(g, "timeslice_mode: %d", gr->timeslice_mode);
3380 3380
3381 return 0; 3381 return 0;
3382 3382
@@ -3582,7 +3582,7 @@ clean_up:
3582 if (ret) 3582 if (ret)
3583 nvgpu_err(g, "fail"); 3583 nvgpu_err(g, "fail");
3584 else 3584 else
3585 gk20a_dbg_fn("done"); 3585 nvgpu_log_fn(g, "done");
3586 3586
3587 return ret; 3587 return ret;
3588} 3588}
@@ -4094,7 +4094,7 @@ clean_up:
4094int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, 4094int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4095 struct zbc_entry *zbc_val) 4095 struct zbc_entry *zbc_val)
4096{ 4096{
4097 gk20a_dbg_fn(""); 4097 nvgpu_log_fn(g, " ");
4098 4098
4099 return gr_gk20a_elpg_protected_call(g, 4099 return gr_gk20a_elpg_protected_call(g,
4100 gr_gk20a_add_zbc(g, gr, zbc_val)); 4100 gr_gk20a_add_zbc(g, gr, zbc_val));
@@ -4197,10 +4197,10 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4197{ 4197{
4198 u32 val; 4198 u32 val;
4199 4199
4200 gk20a_dbg_fn(""); 4200 nvgpu_log_fn(g, " ");
4201 4201
4202 if (zcull_num_entries >= 8) { 4202 if (zcull_num_entries >= 8) {
4203 gk20a_dbg_fn("map0"); 4203 nvgpu_log_fn(g, "map0");
4204 val = 4204 val =
4205 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f( 4205 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f(
4206 zcull_map_tiles[0]) | 4206 zcull_map_tiles[0]) |
@@ -4223,7 +4223,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4223 } 4223 }
4224 4224
4225 if (zcull_num_entries >= 16) { 4225 if (zcull_num_entries >= 16) {
4226 gk20a_dbg_fn("map1"); 4226 nvgpu_log_fn(g, "map1");
4227 val = 4227 val =
4228 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f( 4228 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f(
4229 zcull_map_tiles[8]) | 4229 zcull_map_tiles[8]) |
@@ -4246,7 +4246,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4246 } 4246 }
4247 4247
4248 if (zcull_num_entries >= 24) { 4248 if (zcull_num_entries >= 24) {
4249 gk20a_dbg_fn("map2"); 4249 nvgpu_log_fn(g, "map2");
4250 val = 4250 val =
4251 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f( 4251 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f(
4252 zcull_map_tiles[16]) | 4252 zcull_map_tiles[16]) |
@@ -4269,7 +4269,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4269 } 4269 }
4270 4270
4271 if (zcull_num_entries >= 32) { 4271 if (zcull_num_entries >= 32) {
4272 gk20a_dbg_fn("map3"); 4272 nvgpu_log_fn(g, "map3");
4273 val = 4273 val =
4274 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f( 4274 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f(
4275 zcull_map_tiles[24]) | 4275 zcull_map_tiles[24]) |
@@ -4452,7 +4452,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
4452 u32 last_method_data = 0; 4452 u32 last_method_data = 0;
4453 u32 i, err; 4453 u32 i, err;
4454 4454
4455 gk20a_dbg_fn(""); 4455 nvgpu_log_fn(g, " ");
4456 4456
4457 /* init mmu debug buffer */ 4457 /* init mmu debug buffer */
4458 addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem); 4458 addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem);
@@ -4613,13 +4613,13 @@ restore_fe_go_idle:
4613 } 4613 }
4614 4614
4615out: 4615out:
4616 gk20a_dbg_fn("done"); 4616 nvgpu_log_fn(g, "done");
4617 return err; 4617 return err;
4618} 4618}
4619 4619
4620static void gr_gk20a_load_gating_prod(struct gk20a *g) 4620static void gr_gk20a_load_gating_prod(struct gk20a *g)
4621{ 4621{
4622 gk20a_dbg_fn(""); 4622 nvgpu_log_fn(g, " ");
4623 4623
4624 /* slcg prod values */ 4624 /* slcg prod values */
4625 if (g->ops.clock_gating.slcg_bus_load_gating_prod) 4625 if (g->ops.clock_gating.slcg_bus_load_gating_prod)
@@ -4657,7 +4657,7 @@ static void gr_gk20a_load_gating_prod(struct gk20a *g)
4657 if (g->ops.clock_gating.pg_gr_load_gating_prod) 4657 if (g->ops.clock_gating.pg_gr_load_gating_prod)
4658 g->ops.clock_gating.pg_gr_load_gating_prod(g, true); 4658 g->ops.clock_gating.pg_gr_load_gating_prod(g, true);
4659 4659
4660 gk20a_dbg_fn("done"); 4660 nvgpu_log_fn(g, "done");
4661} 4661}
4662 4662
4663static int gk20a_init_gr_prepare(struct gk20a *g) 4663static int gk20a_init_gr_prepare(struct gk20a *g)
@@ -4703,7 +4703,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
4703 bool fecs_scrubbing; 4703 bool fecs_scrubbing;
4704 bool gpccs_scrubbing; 4704 bool gpccs_scrubbing;
4705 4705
4706 gk20a_dbg_fn(""); 4706 nvgpu_log_fn(g, " ");
4707 4707
4708 nvgpu_timeout_init(g, &timeout, 4708 nvgpu_timeout_init(g, &timeout,
4709 CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / 4709 CTXSW_MEM_SCRUBBING_TIMEOUT_MAX /
@@ -4719,7 +4719,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
4719 gr_gpccs_dmactl_imem_scrubbing_m()); 4719 gr_gpccs_dmactl_imem_scrubbing_m());
4720 4720
4721 if (!fecs_scrubbing && !gpccs_scrubbing) { 4721 if (!fecs_scrubbing && !gpccs_scrubbing) {
4722 gk20a_dbg_fn("done"); 4722 nvgpu_log_fn(g, "done");
4723 return 0; 4723 return 0;
4724 } 4724 }
4725 4725
@@ -4746,7 +4746,7 @@ out:
4746 if (err) 4746 if (err)
4747 nvgpu_err(g, "fail"); 4747 nvgpu_err(g, "fail");
4748 else 4748 else
4749 gk20a_dbg_fn("done"); 4749 nvgpu_log_fn(g, "done");
4750 4750
4751 return err; 4751 return err;
4752} 4752}
@@ -4756,7 +4756,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
4756 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; 4756 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load;
4757 u32 i, err = 0; 4757 u32 i, err = 0;
4758 4758
4759 gk20a_dbg_fn(""); 4759 nvgpu_log_fn(g, " ");
4760 4760
4761 /* enable interrupts */ 4761 /* enable interrupts */
4762 gk20a_writel(g, gr_intr_r(), ~0); 4762 gk20a_writel(g, gr_intr_r(), ~0);
@@ -4780,7 +4780,7 @@ out:
4780 if (err) 4780 if (err)
4781 nvgpu_err(g, "fail"); 4781 nvgpu_err(g, "fail");
4782 else 4782 else
4783 gk20a_dbg_fn("done"); 4783 nvgpu_log_fn(g, "done");
4784 4784
4785 return 0; 4785 return 0;
4786} 4786}
@@ -4810,7 +4810,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
4810 map_bit = whitelist[w] >> 2; 4810 map_bit = whitelist[w] >> 2;
4811 map_byte = map_bit >> 3; 4811 map_byte = map_bit >> 3;
4812 map_shift = map_bit & 0x7; /* i.e. 0-7 */ 4812 map_shift = map_bit & 0x7; /* i.e. 0-7 */
4813 gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d", 4813 nvgpu_log_info(g, "access map addr:0x%x byte:0x%x bit:%d",
4814 whitelist[w], map_byte, map_shift); 4814 whitelist[w], map_byte, map_shift);
4815 x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32)); 4815 x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32));
4816 x |= 1 << ( 4816 x |= 1 << (
@@ -4828,10 +4828,10 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
4828 struct gr_gk20a *gr = &g->gr; 4828 struct gr_gk20a *gr = &g->gr;
4829 int err; 4829 int err;
4830 4830
4831 gk20a_dbg_fn(""); 4831 nvgpu_log_fn(g, " ");
4832 4832
4833 if (gr->sw_ready) { 4833 if (gr->sw_ready) {
4834 gk20a_dbg_fn("skip init"); 4834 nvgpu_log_fn(g, "skip init");
4835 return 0; 4835 return 0;
4836 } 4836 }
4837 4837
@@ -4888,7 +4888,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
4888 if (g->ops.gr.create_gr_sysfs) 4888 if (g->ops.gr.create_gr_sysfs)
4889 g->ops.gr.create_gr_sysfs(g); 4889 g->ops.gr.create_gr_sysfs(g);
4890 4890
4891 gk20a_dbg_fn("done"); 4891 nvgpu_log_fn(g, "done");
4892 return 0; 4892 return 0;
4893 4893
4894clean_up: 4894clean_up:
@@ -4906,7 +4906,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
4906 4906
4907 u32 size; 4907 u32 size;
4908 4908
4909 gk20a_dbg_fn(""); 4909 nvgpu_log_fn(g, " ");
4910 4910
4911 size = 0; 4911 size = 0;
4912 4912
@@ -4947,7 +4947,7 @@ int gk20a_init_gr_support(struct gk20a *g)
4947{ 4947{
4948 u32 err; 4948 u32 err;
4949 4949
4950 gk20a_dbg_fn(""); 4950 nvgpu_log_fn(g, " ");
4951 4951
4952 /* this is required before gr_gk20a_init_ctx_state */ 4952 /* this is required before gr_gk20a_init_ctx_state */
4953 nvgpu_mutex_init(&g->gr.fecs_mutex); 4953 nvgpu_mutex_init(&g->gr.fecs_mutex);
@@ -4999,7 +4999,7 @@ void gk20a_gr_wait_initialized(struct gk20a *g)
4999 4999
5000void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data) 5000void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data)
5001{ 5001{
5002 gk20a_dbg_fn(""); 5002 nvgpu_log_fn(g, " ");
5003 5003
5004 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { 5004 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
5005 gk20a_writel(g, 5005 gk20a_writel(g,
@@ -5046,7 +5046,7 @@ int gk20a_enable_gr_hw(struct gk20a *g)
5046{ 5046{
5047 int err; 5047 int err;
5048 5048
5049 gk20a_dbg_fn(""); 5049 nvgpu_log_fn(g, " ");
5050 5050
5051 err = gk20a_init_gr_prepare(g); 5051 err = gk20a_init_gr_prepare(g);
5052 if (err) 5052 if (err)
@@ -5056,7 +5056,7 @@ int gk20a_enable_gr_hw(struct gk20a *g)
5056 if (err) 5056 if (err)
5057 return err; 5057 return err;
5058 5058
5059 gk20a_dbg_fn("done"); 5059 nvgpu_log_fn(g, "done");
5060 5060
5061 return 0; 5061 return 0;
5062} 5062}
@@ -5163,7 +5163,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
5163static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, 5163static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
5164 struct gr_gk20a_isr_data *isr_data) 5164 struct gr_gk20a_isr_data *isr_data)
5165{ 5165{
5166 gk20a_dbg_fn(""); 5166 nvgpu_log_fn(g, " ");
5167 gk20a_gr_set_error_notifier(g, isr_data, 5167 gk20a_gr_set_error_notifier(g, isr_data,
5168 NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); 5168 NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT);
5169 nvgpu_err(g, 5169 nvgpu_err(g,
@@ -5174,7 +5174,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
5174static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, 5174static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
5175 struct gr_gk20a_isr_data *isr_data) 5175 struct gr_gk20a_isr_data *isr_data)
5176{ 5176{
5177 gk20a_dbg_fn(""); 5177 nvgpu_log_fn(g, " ");
5178 gk20a_gr_set_error_notifier(g, isr_data, 5178 gk20a_gr_set_error_notifier(g, isr_data,
5179 NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); 5179 NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY);
5180 /* This is an unrecoverable error, reset is needed */ 5180 /* This is an unrecoverable error, reset is needed */
@@ -5202,7 +5202,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
5202static int gk20a_gr_handle_illegal_class(struct gk20a *g, 5202static int gk20a_gr_handle_illegal_class(struct gk20a *g,
5203 struct gr_gk20a_isr_data *isr_data) 5203 struct gr_gk20a_isr_data *isr_data)
5204{ 5204{
5205 gk20a_dbg_fn(""); 5205 nvgpu_log_fn(g, " ");
5206 gk20a_gr_set_error_notifier(g, isr_data, 5206 gk20a_gr_set_error_notifier(g, isr_data,
5207 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); 5207 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
5208 nvgpu_err(g, 5208 nvgpu_err(g,
@@ -5243,7 +5243,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
5243{ 5243{
5244 u32 gr_class_error; 5244 u32 gr_class_error;
5245 5245
5246 gk20a_dbg_fn(""); 5246 nvgpu_log_fn(g, " ");
5247 5247
5248 gr_class_error = 5248 gr_class_error =
5249 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); 5249 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r()));
@@ -5274,7 +5274,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
5274static int gk20a_gr_handle_firmware_method(struct gk20a *g, 5274static int gk20a_gr_handle_firmware_method(struct gk20a *g,
5275 struct gr_gk20a_isr_data *isr_data) 5275 struct gr_gk20a_isr_data *isr_data)
5276{ 5276{
5277 gk20a_dbg_fn(""); 5277 nvgpu_log_fn(g, " ");
5278 5278
5279 gk20a_gr_set_error_notifier(g, isr_data, 5279 gk20a_gr_set_error_notifier(g, isr_data,
5280 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); 5280 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
@@ -5450,7 +5450,7 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g,
5450 } 5450 }
5451 nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); 5451 nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
5452#endif 5452#endif
5453 gk20a_dbg_fn(""); 5453 nvgpu_log_fn(g, " ");
5454 nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); 5454 nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
5455 return 0; 5455 return 0;
5456} 5456}
@@ -5543,7 +5543,7 @@ int gk20a_gr_lock_down_sm(struct gk20a *g,
5543 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 5543 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
5544 u32 dbgr_control0; 5544 u32 dbgr_control0;
5545 5545
5546 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5546 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5547 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); 5547 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm);
5548 5548
5549 /* assert stop trigger */ 5549 /* assert stop trigger */
@@ -5582,7 +5582,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5582 bool sm_debugger_attached; 5582 bool sm_debugger_attached;
5583 u32 global_esr, warp_esr, global_mask; 5583 u32 global_esr, warp_esr, global_mask;
5584 5584
5585 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 5585 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
5586 5586
5587 sm_debugger_attached = g->ops.gr.sm_debugger_attached(g); 5587 sm_debugger_attached = g->ops.gr.sm_debugger_attached(g);
5588 5588
@@ -5597,7 +5597,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5597 return -EFAULT; 5597 return -EFAULT;
5598 } 5598 }
5599 5599
5600 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5600 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5601 "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr); 5601 "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr);
5602 5602
5603 gr_gk20a_elpg_protected_call(g, 5603 gr_gk20a_elpg_protected_call(g,
@@ -5617,7 +5617,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5617 } 5617 }
5618 5618
5619 if (early_exit) { 5619 if (early_exit) {
5620 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5620 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5621 "returning early"); 5621 "returning early");
5622 return ret; 5622 return ret;
5623 } 5623 }
@@ -5640,13 +5640,13 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5640 gk20a_writel(g, 5640 gk20a_writel(g,
5641 gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset, 5641 gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset,
5642 tpc_exception_en); 5642 tpc_exception_en);
5643 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); 5643 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled");
5644 } 5644 }
5645 5645
5646 /* if a debugger is present and an error has occurred, do a warp sync */ 5646 /* if a debugger is present and an error has occurred, do a warp sync */
5647 if (!ignore_debugger && 5647 if (!ignore_debugger &&
5648 ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) { 5648 ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) {
5649 gk20a_dbg(gpu_dbg_intr, "warp sync needed"); 5649 nvgpu_log(g, gpu_dbg_intr, "warp sync needed");
5650 do_warp_sync = true; 5650 do_warp_sync = true;
5651 } 5651 }
5652 5652
@@ -5660,7 +5660,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5660 } 5660 }
5661 5661
5662 if (ignore_debugger) 5662 if (ignore_debugger)
5663 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5663 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5664 "ignore_debugger set, skipping event posting"); 5664 "ignore_debugger set, skipping event posting");
5665 else 5665 else
5666 *post_event |= true; 5666 *post_event |= true;
@@ -5677,11 +5677,11 @@ int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
5677 u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; 5677 u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
5678 u32 esr; 5678 u32 esr;
5679 5679
5680 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 5680 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
5681 5681
5682 esr = gk20a_readl(g, 5682 esr = gk20a_readl(g,
5683 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); 5683 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset);
5684 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); 5684 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr);
5685 5685
5686 gk20a_writel(g, 5686 gk20a_writel(g,
5687 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset, 5687 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset,
@@ -5706,7 +5706,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5706 + offset); 5706 + offset);
5707 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 5707 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
5708 5708
5709 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5709 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5710 "GPC%d TPC%d: pending exception 0x%x", 5710 "GPC%d TPC%d: pending exception 0x%x",
5711 gpc, tpc, tpc_exception); 5711 gpc, tpc, tpc_exception);
5712 5712
@@ -5715,7 +5715,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5715 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) { 5715 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) {
5716 u32 esr_sm_sel, sm; 5716 u32 esr_sm_sel, sm;
5717 5717
5718 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5718 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5719 "GPC%d TPC%d: SM exception pending", gpc, tpc); 5719 "GPC%d TPC%d: SM exception pending", gpc, tpc);
5720 5720
5721 if (g->ops.gr.handle_tpc_sm_ecc_exception) 5721 if (g->ops.gr.handle_tpc_sm_ecc_exception)
@@ -5729,7 +5729,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5729 if (!(esr_sm_sel & (1 << sm))) 5729 if (!(esr_sm_sel & (1 << sm)))
5730 continue; 5730 continue;
5731 5731
5732 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5732 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5733 "GPC%d TPC%d: SM%d exception pending", 5733 "GPC%d TPC%d: SM%d exception pending",
5734 gpc, tpc, sm); 5734 gpc, tpc, sm);
5735 5735
@@ -5750,7 +5750,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5750 /* check if a tex exeption is pending */ 5750 /* check if a tex exeption is pending */
5751 if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) == 5751 if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) ==
5752 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) { 5752 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) {
5753 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5753 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5754 "GPC%d TPC%d: TEX exception pending", gpc, tpc); 5754 "GPC%d TPC%d: TEX exception pending", gpc, tpc);
5755 ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event); 5755 ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event);
5756 } 5756 }
@@ -5771,13 +5771,13 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event,
5771 u32 exception1 = gk20a_readl(g, gr_exception1_r()); 5771 u32 exception1 = gk20a_readl(g, gr_exception1_r());
5772 u32 gpc_exception; 5772 u32 gpc_exception;
5773 5773
5774 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, ""); 5774 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, " ");
5775 5775
5776 for (gpc = 0; gpc < gr->gpc_count; gpc++) { 5776 for (gpc = 0; gpc < gr->gpc_count; gpc++) {
5777 if ((exception1 & (1 << gpc)) == 0) 5777 if ((exception1 & (1 << gpc)) == 0)
5778 continue; 5778 continue;
5779 5779
5780 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5780 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5781 "GPC%d exception pending", gpc); 5781 "GPC%d exception pending", gpc);
5782 5782
5783 gpc_offset = gk20a_gr_gpc_offset(g, gpc); 5783 gpc_offset = gk20a_gr_gpc_offset(g, gpc);
@@ -5791,7 +5791,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event,
5791 (1 << tpc)) == 0) 5791 (1 << tpc)) == 0)
5792 continue; 5792 continue;
5793 5793
5794 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5794 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5795 "GPC%d: TPC%d exception pending", gpc, tpc); 5795 "GPC%d: TPC%d exception pending", gpc, tpc);
5796 5796
5797 ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc, 5797 ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc,
@@ -5860,8 +5860,8 @@ int gk20a_gr_isr(struct gk20a *g)
5860 u32 gr_engine_id; 5860 u32 gr_engine_id;
5861 u32 global_esr = 0; 5861 u32 global_esr = 0;
5862 5862
5863 gk20a_dbg_fn(""); 5863 nvgpu_log_fn(g, " ");
5864 gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr); 5864 nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr);
5865 5865
5866 if (!gr_intr) 5866 if (!gr_intr)
5867 return 0; 5867 return 0;
@@ -5896,7 +5896,7 @@ int gk20a_gr_isr(struct gk20a *g)
5896 nvgpu_err(g, "ch id is INVALID 0xffffffff"); 5896 nvgpu_err(g, "ch id is INVALID 0xffffffff");
5897 } 5897 }
5898 5898
5899 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5899 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5900 "channel %d: addr 0x%08x, " 5900 "channel %d: addr 0x%08x, "
5901 "data 0x%08x 0x%08x," 5901 "data 0x%08x 0x%08x,"
5902 "ctx 0x%08x, offset 0x%08x, " 5902 "ctx 0x%08x, offset 0x%08x, "
@@ -5968,7 +5968,7 @@ int gk20a_gr_isr(struct gk20a *g)
5968 * register using set_falcon[4] */ 5968 * register using set_falcon[4] */
5969 if (gr_intr & gr_intr_firmware_method_pending_f()) { 5969 if (gr_intr & gr_intr_firmware_method_pending_f()) {
5970 need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data); 5970 need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data);
5971 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); 5971 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n");
5972 gk20a_writel(g, gr_intr_r(), 5972 gk20a_writel(g, gr_intr_r(),
5973 gr_intr_firmware_method_reset_f()); 5973 gr_intr_firmware_method_reset_f());
5974 gr_intr &= ~gr_intr_firmware_method_pending_f(); 5974 gr_intr &= ~gr_intr_firmware_method_pending_f();
@@ -5977,7 +5977,7 @@ int gk20a_gr_isr(struct gk20a *g)
5977 if (gr_intr & gr_intr_exception_pending_f()) { 5977 if (gr_intr & gr_intr_exception_pending_f()) {
5978 u32 exception = gk20a_readl(g, gr_exception_r()); 5978 u32 exception = gk20a_readl(g, gr_exception_r());
5979 5979
5980 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); 5980 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception);
5981 5981
5982 if (exception & gr_exception_fe_m()) { 5982 if (exception & gr_exception_fe_m()) {
5983 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); 5983 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r());
@@ -6057,7 +6057,7 @@ int gk20a_gr_isr(struct gk20a *g)
6057 if (exception & gr_exception_gpc_m() && need_reset == 0) { 6057 if (exception & gr_exception_gpc_m() && need_reset == 0) {
6058 bool post_event = false; 6058 bool post_event = false;
6059 6059
6060 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 6060 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
6061 "GPC exception pending"); 6061 "GPC exception pending");
6062 6062
6063 fault_ch = gk20a_fifo_channel_from_chid(g, 6063 fault_ch = gk20a_fifo_channel_from_chid(g,
@@ -6133,7 +6133,7 @@ int gk20a_gr_nonstall_isr(struct gk20a *g)
6133 int ops = 0; 6133 int ops = 0;
6134 u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r()); 6134 u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
6135 6135
6136 gk20a_dbg(gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); 6136 nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
6137 6137
6138 if (gr_intr & gr_intr_nonstall_trap_pending_f()) { 6138 if (gr_intr & gr_intr_nonstall_trap_pending_f()) {
6139 /* Clear the interrupt */ 6139 /* Clear the interrupt */
@@ -6201,7 +6201,7 @@ int gk20a_gr_suspend(struct gk20a *g)
6201{ 6201{
6202 u32 ret = 0; 6202 u32 ret = 0;
6203 6203
6204 gk20a_dbg_fn(""); 6204 nvgpu_log_fn(g, " ");
6205 6205
6206 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 6206 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
6207 GR_IDLE_CHECK_DEFAULT); 6207 GR_IDLE_CHECK_DEFAULT);
@@ -6227,7 +6227,7 @@ int gk20a_gr_suspend(struct gk20a *g)
6227 6227
6228 g->gr.initialized = false; 6228 g->gr.initialized = false;
6229 6229
6230 gk20a_dbg_fn("done"); 6230 nvgpu_log_fn(g, "done");
6231 return ret; 6231 return ret;
6232} 6232}
6233 6233
@@ -6250,7 +6250,7 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
6250{ 6250{
6251 u32 gpc_addr; 6251 u32 gpc_addr;
6252 6252
6253 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6253 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6254 6254
6255 /* setup defaults */ 6255 /* setup defaults */
6256 *addr_type = CTXSW_ADDR_TYPE_SYS; 6256 *addr_type = CTXSW_ADDR_TYPE_SYS;
@@ -6338,7 +6338,7 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
6338{ 6338{
6339 u32 ppc_num; 6339 u32 ppc_num;
6340 6340
6341 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6341 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6342 6342
6343 for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++) 6343 for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++)
6344 priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), 6344 priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr),
@@ -6369,12 +6369,12 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
6369 t = 0; 6369 t = 0;
6370 *num_registers = 0; 6370 *num_registers = 0;
6371 6371
6372 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6372 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6373 6373
6374 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, 6374 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
6375 &gpc_num, &tpc_num, &ppc_num, &be_num, 6375 &gpc_num, &tpc_num, &ppc_num, &be_num,
6376 &broadcast_flags); 6376 &broadcast_flags);
6377 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); 6377 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
6378 if (err) 6378 if (err)
6379 return err; 6379 return err;
6380 6380
@@ -6428,7 +6428,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
6428 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || 6428 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
6429 (addr_type == CTXSW_ADDR_TYPE_ETPC)) && 6429 (addr_type == CTXSW_ADDR_TYPE_ETPC)) &&
6430 g->ops.gr.egpc_etpc_priv_addr_table) { 6430 g->ops.gr.egpc_etpc_priv_addr_table) {
6431 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); 6431 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
6432 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, 6432 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num,
6433 broadcast_flags, priv_addr_table, &t); 6433 broadcast_flags, priv_addr_table, &t);
6434 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { 6434 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) {
@@ -6477,11 +6477,11 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6477 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * 6477 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count *
6478 sm_per_tpc; 6478 sm_per_tpc;
6479 6479
6480 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6480 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6481 6481
6482 /* implementation is crossed-up if either of these happen */ 6482 /* implementation is crossed-up if either of these happen */
6483 if (max_offsets > potential_offsets) { 6483 if (max_offsets > potential_offsets) {
6484 gk20a_dbg_fn("max_offsets > potential_offsets"); 6484 nvgpu_log_fn(g, "max_offsets > potential_offsets");
6485 return -EINVAL; 6485 return -EINVAL;
6486 } 6486 }
6487 6487
@@ -6490,7 +6490,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6490 6490
6491 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); 6491 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
6492 if (!priv_registers) { 6492 if (!priv_registers) {
6493 gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); 6493 nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets);
6494 err = PTR_ERR(priv_registers); 6494 err = PTR_ERR(priv_registers);
6495 goto cleanup; 6495 goto cleanup;
6496 } 6496 }
@@ -6502,7 +6502,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6502 &num_registers); 6502 &num_registers);
6503 6503
6504 if ((max_offsets > 1) && (num_registers > max_offsets)) { 6504 if ((max_offsets > 1) && (num_registers > max_offsets)) {
6505 gk20a_dbg_fn("max_offsets = %d, num_registers = %d", 6505 nvgpu_log_fn(g, "max_offsets = %d, num_registers = %d",
6506 max_offsets, num_registers); 6506 max_offsets, num_registers);
6507 err = -EINVAL; 6507 err = -EINVAL;
6508 goto cleanup; 6508 goto cleanup;
@@ -6512,7 +6512,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6512 num_registers = 1; 6512 num_registers = 1;
6513 6513
6514 if (!g->gr.ctx_vars.local_golden_image) { 6514 if (!g->gr.ctx_vars.local_golden_image) {
6515 gk20a_dbg_fn("no context switch header info to work with"); 6515 nvgpu_log_fn(g, "no context switch header info to work with");
6516 err = -EINVAL; 6516 err = -EINVAL;
6517 goto cleanup; 6517 goto cleanup;
6518 } 6518 }
@@ -6525,7 +6525,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6525 g->gr.ctx_vars.golden_image_size, 6525 g->gr.ctx_vars.golden_image_size,
6526 &priv_offset); 6526 &priv_offset);
6527 if (err) { 6527 if (err) {
6528 gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", 6528 nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x",
6529 addr); /*, grPriRegStr(addr)));*/ 6529 addr); /*, grPriRegStr(addr)));*/
6530 goto cleanup; 6530 goto cleanup;
6531 } 6531 }
@@ -6558,7 +6558,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6558 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * 6558 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count *
6559 sm_per_tpc; 6559 sm_per_tpc;
6560 6560
6561 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6561 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6562 6562
6563 /* implementation is crossed-up if either of these happen */ 6563 /* implementation is crossed-up if either of these happen */
6564 if (max_offsets > potential_offsets) 6564 if (max_offsets > potential_offsets)
@@ -6569,7 +6569,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6569 6569
6570 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); 6570 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
6571 if (!priv_registers) { 6571 if (!priv_registers) {
6572 gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); 6572 nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets);
6573 return -ENOMEM; 6573 return -ENOMEM;
6574 } 6574 }
6575 memset(offsets, 0, sizeof(u32) * max_offsets); 6575 memset(offsets, 0, sizeof(u32) * max_offsets);
@@ -6588,7 +6588,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6588 num_registers = 1; 6588 num_registers = 1;
6589 6589
6590 if (!g->gr.ctx_vars.local_golden_image) { 6590 if (!g->gr.ctx_vars.local_golden_image) {
6591 gk20a_dbg_fn("no context switch header info to work with"); 6591 nvgpu_log_fn(g, "no context switch header info to work with");
6592 err = -EINVAL; 6592 err = -EINVAL;
6593 goto cleanup; 6593 goto cleanup;
6594 } 6594 }
@@ -6598,7 +6598,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6598 priv_registers[i], 6598 priv_registers[i],
6599 &priv_offset); 6599 &priv_offset);
6600 if (err) { 6600 if (err) {
6601 gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", 6601 nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x",
6602 addr); /*, grPriRegStr(addr)));*/ 6602 addr); /*, grPriRegStr(addr)));*/
6603 goto cleanup; 6603 goto cleanup;
6604 } 6604 }
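The gr_gk20a_get_pm_ctx_buffer_offsets() hunks above share one flow: allocate a scratch list of unicast addresses, expand the (possibly broadcast) input address into it, then resolve each entry to an offset in the PM context image. A condensed sketch; the lookup call's first argument and the per-entry store into offsets[] are assumptions based on the fragments visible above:

        priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
        if (!priv_registers)
                return -ENOMEM;

        /* ... gr_gk20a_create_priv_addr_table() fills priv_registers[]
         * and num_registers from the broadcast address ... */

        for (i = 0; i < num_registers; i++) {
                err = gr_gk20a_find_priv_offset_in_pm_buffer(g,
                                priv_registers[i], &priv_offset);
                if (err)
                        goto cleanup;
                offsets[i] = priv_offset;
        }
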
@@ -6684,7 +6684,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
6684 g->ops.gr.init_sm_dsm_reg_info(); 6684 g->ops.gr.init_sm_dsm_reg_info();
6685 g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs); 6685 g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs);
6686 6686
6687 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6687 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6688 6688
6689 for (reg = 0; reg < num_ovr_perf_regs; reg++) { 6689 for (reg = 0; reg < num_ovr_perf_regs; reg++) {
6690 for (gpc = 0; gpc < num_gpc; gpc++) { 6690 for (gpc = 0; gpc < num_gpc; gpc++) {
@@ -6754,13 +6754,11 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
6754static inline bool check_main_image_header_magic(u8 *context) 6754static inline bool check_main_image_header_magic(u8 *context)
6755{ 6755{
6756 u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o()); 6756 u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o());
6757 gk20a_dbg(gpu_dbg_gpu_dbg, "main image magic=0x%x", magic);
6758 return magic == ctxsw_prog_main_image_magic_value_v_value_v(); 6757 return magic == ctxsw_prog_main_image_magic_value_v_value_v();
6759} 6758}
6760static inline bool check_local_header_magic(u8 *context) 6759static inline bool check_local_header_magic(u8 *context)
6761{ 6760{
6762 u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o()); 6761 u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o());
6763 gk20a_dbg(gpu_dbg_gpu_dbg, "local magic=0x%x", magic);
6764 return magic == ctxsw_prog_local_magic_value_v_value_v(); 6762 return magic == ctxsw_prog_local_magic_value_v_value_v();
6765 6763
6766} 6764}
@@ -6823,14 +6821,14 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6823 else 6821 else
6824 return -EINVAL; 6822 return -EINVAL;
6825 6823
6826 gk20a_dbg_info(" gpc = %d tpc = %d", 6824 nvgpu_log_info(g, " gpc = %d tpc = %d",
6827 gpc_num, tpc_num); 6825 gpc_num, tpc_num);
6828 } else if ((g->ops.gr.is_etpc_addr) && 6826 } else if ((g->ops.gr.is_etpc_addr) &&
6829 g->ops.gr.is_etpc_addr(g, addr)) { 6827 g->ops.gr.is_etpc_addr(g, addr)) {
6830 g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); 6828 g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num);
6831 gpc_base = g->ops.gr.get_egpc_base(g); 6829 gpc_base = g->ops.gr.get_egpc_base(g);
6832 } else { 6830 } else {
6833 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 6831 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
6834 "does not exist in extended region"); 6832 "does not exist in extended region");
6835 return -EINVAL; 6833 return -EINVAL;
6836 } 6834 }
@@ -6857,7 +6855,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6857 data32 = *(u32 *)(context + ctxsw_prog_main_extended_buffer_ctl_o()); 6855 data32 = *(u32 *)(context + ctxsw_prog_main_extended_buffer_ctl_o());
6858 ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32); 6856 ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32);
6859 if (0 == ext_priv_size) { 6857 if (0 == ext_priv_size) {
6860 gk20a_dbg_info(" No extended memory in context buffer"); 6858 nvgpu_log_info(g, " No extended memory in context buffer");
6861 return -EINVAL; 6859 return -EINVAL;
6862 } 6860 }
6863 ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32); 6861 ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32);
@@ -6891,7 +6889,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6891 if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) { 6889 if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) {
6892 sm_dsm_perf_reg_id = i; 6890 sm_dsm_perf_reg_id = i;
6893 6891
6894 gk20a_dbg_info("register match: 0x%08x", 6892 nvgpu_log_info(g, "register match: 0x%08x",
6895 sm_dsm_perf_regs[i]); 6893 sm_dsm_perf_regs[i]);
6896 6894
6897 chk_addr = (gpc_base + gpc_stride * gpc_num) + 6895 chk_addr = (gpc_base + gpc_stride * gpc_num) +
@@ -6921,7 +6919,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6921 (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) { 6919 (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) {
6922 sm_dsm_perf_ctrl_reg_id = i; 6920 sm_dsm_perf_ctrl_reg_id = i;
6923 6921
6924 gk20a_dbg_info("register match: 0x%08x", 6922 nvgpu_log_info(g, "register match: 0x%08x",
6925 sm_dsm_perf_ctrl_regs[i]); 6923 sm_dsm_perf_ctrl_regs[i]);
6926 6924
6927 chk_addr = (gpc_base + gpc_stride * gpc_num) + 6925 chk_addr = (gpc_base + gpc_stride * gpc_num) +
@@ -7032,7 +7030,7 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g,
7032 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE); 7030 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE);
7033 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); 7031 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
7034 7032
7035 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); 7033 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
7036 7034
7037 if (!g->gr.ctx_vars.valid) 7035 if (!g->gr.ctx_vars.valid)
7038 return -EINVAL; 7036 return -EINVAL;
@@ -7215,12 +7213,12 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7215 u8 *context; 7213 u8 *context;
7216 u32 offset_to_segment; 7214 u32 offset_to_segment;
7217 7215
7218 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 7216 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
7219 7217
7220 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, 7218 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
7221 &gpc_num, &tpc_num, &ppc_num, &be_num, 7219 &gpc_num, &tpc_num, &ppc_num, &be_num,
7222 &broadcast_flags); 7220 &broadcast_flags);
7223 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7221 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7224 "addr_type = %d, broadcast_flags: %08x", 7222 "addr_type = %d, broadcast_flags: %08x",
7225 addr_type, broadcast_flags); 7223 addr_type, broadcast_flags);
7226 if (err) 7224 if (err)
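Both gr_gk20a_create_priv_addr_table() and gr_gk20a_find_priv_offset_in_buffer() open the same way: decode the priv address into unit coordinates and broadcast flags before choosing a context-buffer segment. The call shape, reassembled from the flattened lines above (variable types are assumed for illustration):

        u32 addr_type, gpc_num, tpc_num, ppc_num, be_num, broadcast_flags;
        int err;

        err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
                                         &gpc_num, &tpc_num, &ppc_num,
                                         &be_num, &broadcast_flags);
        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
                  "addr_type = %d, broadcast_flags: %08x",
                  addr_type, broadcast_flags);
        if (err)
                return err;
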
@@ -7243,7 +7241,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7243 } 7241 }
7244 data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); 7242 data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o());
7245 sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); 7243 sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32);
7246 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); 7244 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset);
7247 7245
7248 /* If found in Ext buffer, ok. 7246 /* If found in Ext buffer, ok.
7249 * If it failed and we expected to find it there (quad offset) 7247 * If it failed and we expected to find it there (quad offset)
@@ -7253,7 +7251,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7253 addr, is_quad, quad, context_buffer, 7251 addr, is_quad, quad, context_buffer,
7254 context_buffer_size, priv_offset); 7252 context_buffer_size, priv_offset);
7255 if (!err || (err && is_quad)) { 7253 if (!err || (err && is_quad)) {
7256 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7254 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7257 "err = %d, is_quad = %s", 7255 "err = %d, is_quad = %s",
7258 			err, is_quad ? "true" : "false"); 7256 			err, is_quad ? "true" : "false");
7259 return err; 7257 return err;
@@ -7357,7 +7355,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7357 num_tpcs) << 2); 7355 num_tpcs) << 2);
7358 } 7356 }
7359 } else { 7357 } else {
7360 gk20a_dbg_fn("Unknown address type."); 7358 nvgpu_log_fn(g, "Unknown address type.");
7361 return -EINVAL; 7359 return -EINVAL;
7362 } 7360 }
7363 err = gr_gk20a_process_context_buffer_priv_segment(g, 7361 err = gr_gk20a_process_context_buffer_priv_segment(g,
@@ -7668,7 +7666,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
7668 u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps; 7666 u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps;
7669 7667
7670 if (hwpm_ctxsw_buffer_size == 0) { 7668 if (hwpm_ctxsw_buffer_size == 0) {
7671 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7669 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7672 "no PM Ctxsw buffer memory in context buffer"); 7670 "no PM Ctxsw buffer memory in context buffer");
7673 return -EINVAL; 7671 return -EINVAL;
7674 } 7672 }
@@ -7760,10 +7758,10 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
7760 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map; 7758 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map;
7761 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count; 7759 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count;
7762 7760
7763 gk20a_dbg_info("Reg Addr => HWPM Ctxt switch buffer offset"); 7761 nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset");
7764 7762
7765 for (i = 0; i < count; i++) 7763 for (i = 0; i < count; i++)
7766 gk20a_dbg_info("%08x => %08x", map[i].addr, map[i].offset); 7764 nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset);
7767 7765
7768 return 0; 7766 return 0;
7769cleanup: 7767cleanup:
@@ -7785,7 +7783,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g,
7785 u32 count; 7783 u32 count;
7786 struct ctxsw_buf_offset_map_entry *map, *result, map_key; 7784 struct ctxsw_buf_offset_map_entry *map, *result, map_key;
7787 7785
7788 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 7786 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
7789 7787
7790 /* Create map of pri address and pm offset if necessary */ 7788 /* Create map of pri address and pm offset if necessary */
7791 if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { 7789 if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) {
@@ -7831,7 +7829,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
7831 curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx, 7829 curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx,
7832 &curr_gr_tsgid); 7830 &curr_gr_tsgid);
7833 7831
7834 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7832 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7835 "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" 7833 "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d"
7836 " ch->chid=%d", 7834 " ch->chid=%d",
7837 curr_ch ? curr_ch->chid : -1, 7835 curr_ch ? curr_ch->chid : -1,
@@ -7873,7 +7871,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7873 u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops}; 7871 u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops};
7874 int err = 0, pass; 7872 int err = 0, pass;
7875 7873
7876 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", 7874 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
7877 num_ctx_wr_ops, num_ctx_rd_ops); 7875 num_ctx_wr_ops, num_ctx_rd_ops);
7878 7876
7879 tsg = tsg_gk20a_from_ch(ch); 7877 tsg = tsg_gk20a_from_ch(ch);
@@ -7906,7 +7904,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7906 v |= ctx_ops[i].value_lo; 7904 v |= ctx_ops[i].value_lo;
7907 gk20a_writel(g, offset, v); 7905 gk20a_writel(g, offset, v);
7908 7906
7909 gk20a_dbg(gpu_dbg_gpu_dbg, 7907 nvgpu_log(g, gpu_dbg_gpu_dbg,
7910 "direct wr: offset=0x%x v=0x%x", 7908 "direct wr: offset=0x%x v=0x%x",
7911 offset, v); 7909 offset, v);
7912 7910
@@ -7916,7 +7914,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7916 v |= ctx_ops[i].value_hi; 7914 v |= ctx_ops[i].value_hi;
7917 gk20a_writel(g, offset + 4, v); 7915 gk20a_writel(g, offset + 4, v);
7918 7916
7919 gk20a_dbg(gpu_dbg_gpu_dbg, 7917 nvgpu_log(g, gpu_dbg_gpu_dbg,
7920 "direct wr: offset=0x%x v=0x%x", 7918 "direct wr: offset=0x%x v=0x%x",
7921 offset + 4, v); 7919 offset + 4, v);
7922 } 7920 }
@@ -7925,7 +7923,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7925 ctx_ops[i].value_lo = 7923 ctx_ops[i].value_lo =
7926 gk20a_readl(g, offset); 7924 gk20a_readl(g, offset);
7927 7925
7928 gk20a_dbg(gpu_dbg_gpu_dbg, 7926 nvgpu_log(g, gpu_dbg_gpu_dbg,
7929 "direct rd: offset=0x%x v=0x%x", 7927 "direct rd: offset=0x%x v=0x%x",
7930 offset, ctx_ops[i].value_lo); 7928 offset, ctx_ops[i].value_lo);
7931 7929
@@ -7933,7 +7931,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7933 ctx_ops[i].value_hi = 7931 ctx_ops[i].value_hi =
7934 gk20a_readl(g, offset + 4); 7932 gk20a_readl(g, offset + 4);
7935 7933
7936 gk20a_dbg(gpu_dbg_gpu_dbg, 7934 nvgpu_log(g, gpu_dbg_gpu_dbg,
7937 "direct rd: offset=0x%x v=0x%x", 7935 "direct rd: offset=0x%x v=0x%x",
7938 					offset + 4, ctx_ops[i].value_hi); 7936 					offset + 4, ctx_ops[i].value_hi);
7939 } else 7937 } else
@@ -8001,7 +7999,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8001 offsets, offset_addrs, 7999 offsets, offset_addrs,
8002 &num_offsets); 8000 &num_offsets);
8003 if (err) { 8001 if (err) {
8004 gk20a_dbg(gpu_dbg_gpu_dbg, 8002 nvgpu_log(g, gpu_dbg_gpu_dbg,
8005 "ctx op invalid offset: offset=0x%x", 8003 "ctx op invalid offset: offset=0x%x",
8006 ctx_ops[i].offset); 8004 ctx_ops[i].offset);
8007 ctx_ops[i].status = 8005 ctx_ops[i].status =
@@ -8044,7 +8042,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8044 v |= ctx_ops[i].value_lo; 8042 v |= ctx_ops[i].value_lo;
8045 nvgpu_mem_wr(g, current_mem, offsets[j], v); 8043 nvgpu_mem_wr(g, current_mem, offsets[j], v);
8046 8044
8047 gk20a_dbg(gpu_dbg_gpu_dbg, 8045 nvgpu_log(g, gpu_dbg_gpu_dbg,
8048 "context wr: offset=0x%x v=0x%x", 8046 "context wr: offset=0x%x v=0x%x",
8049 offsets[j], v); 8047 offsets[j], v);
8050 8048
@@ -8054,7 +8052,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8054 v |= ctx_ops[i].value_hi; 8052 v |= ctx_ops[i].value_hi;
8055 nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v); 8053 nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v);
8056 8054
8057 gk20a_dbg(gpu_dbg_gpu_dbg, 8055 nvgpu_log(g, gpu_dbg_gpu_dbg,
8058 "context wr: offset=0x%x v=0x%x", 8056 "context wr: offset=0x%x v=0x%x",
8059 offsets[j] + 4, v); 8057 offsets[j] + 4, v);
8060 } 8058 }
@@ -8068,14 +8066,14 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8068 ctx_ops[i].value_lo = 8066 ctx_ops[i].value_lo =
8069 nvgpu_mem_rd(g, current_mem, offsets[0]); 8067 nvgpu_mem_rd(g, current_mem, offsets[0]);
8070 8068
8071 gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", 8069 nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
8072 offsets[0], ctx_ops[i].value_lo); 8070 offsets[0], ctx_ops[i].value_lo);
8073 8071
8074 if (ctx_ops[i].op == REGOP(READ_64)) { 8072 if (ctx_ops[i].op == REGOP(READ_64)) {
8075 ctx_ops[i].value_hi = 8073 ctx_ops[i].value_hi =
8076 nvgpu_mem_rd(g, current_mem, offsets[0] + 4); 8074 nvgpu_mem_rd(g, current_mem, offsets[0] + 4);
8077 8075
8078 gk20a_dbg(gpu_dbg_gpu_dbg, 8076 nvgpu_log(g, gpu_dbg_gpu_dbg,
8079 "context rd: offset=0x%x v=0x%x", 8077 "context rd: offset=0x%x v=0x%x",
8080 offsets[0] + 4, ctx_ops[i].value_hi); 8078 offsets[0] + 4, ctx_ops[i].value_hi);
8081 } else 8079 } else
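In both the direct-access and in-memory paths above, a 64-bit regop travels as two 32-bit halves: the low word at the register or buffer offset, the high word at offset + 4, stored in value_lo/value_hi. A hypothetical helper showing how a caller could reassemble a READ_64 result; the element type name nvgpu_dbg_reg_op is an assumption, only the value_lo/value_hi fields are taken from the code above:

        /* Hypothetical helper -- struct name assumed, not verified. */
        static inline u64 regop_read_64_result(const struct nvgpu_dbg_reg_op *op)
        {
                /* low word was read from 'offset', high word from 'offset + 4' */
                return ((u64)op->value_hi << 32) | op->value_lo;
        }
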
@@ -8121,7 +8119,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8121 8119
8122 ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch); 8120 ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch);
8123 8121
8124 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", 8122 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d",
8125 ch_is_curr_ctx); 8123 ch_is_curr_ctx);
8126 8124
8127 err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops, 8125 err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops,
@@ -8176,7 +8174,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
8176 struct nvgpu_timeout timeout; 8174 struct nvgpu_timeout timeout;
8177 u32 warp_esr; 8175 u32 warp_esr;
8178 8176
8179 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 8177 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
8180 "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm); 8178 "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm);
8181 8179
8182 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), 8180 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
@@ -8201,7 +8199,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
8201 ((global_esr & ~global_esr_mask) == 0); 8199 ((global_esr & ~global_esr_mask) == 0);
8202 8200
8203 if (locked_down || no_error_pending) { 8201 if (locked_down || no_error_pending) {
8204 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 8202 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
8205 "GPC%d TPC%d SM%d: locked down SM", 8203 "GPC%d TPC%d SM%d: locked down SM",
8206 gpc, tpc, sm); 8204 gpc, tpc, sm);
8207 return 0; 8205 return 0;
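gk20a_gr_wait_for_sm_lock_down() is a bounded poll: it re-reads the SM state until the SM reports locked down (or no unmasked error is pending) or the CPU timer expires. A minimal sketch of that loop shape, assuming the usual nvgpu_timeout_expired()/nvgpu_usleep_range() helpers and a fixed 100-200 us back-off; the register reads that produce locked_down and no_error_pending are elided:

        nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
                           NVGPU_TIMER_CPU_TIMER);
        do {
                /* ... re-read SM dbgr status / warp ESR for (gpc, tpc, sm)
                 * and recompute locked_down / no_error_pending ... */
                if (locked_down || no_error_pending) {
                        nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
                                  "GPC%d TPC%d SM%d: locked down SM",
                                  gpc, tpc, sm);
                        return 0;
                }

                nvgpu_usleep_range(100, 200);
        } while (!nvgpu_timeout_expired(&timeout));
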