Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 150
1 file changed, 76 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 75d66968..cc63c3b8 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -94,7 +94,7 @@ u32 gk20a_fifo_get_engine_ids(struct gk20a *g,
 			engine_id[instance_cnt] = active_engine_id;
 			++instance_cnt;
 		} else {
-			gk20a_dbg_info("warning engine_id table sz is small %d",
+			nvgpu_log_info(g, "warning engine_id table sz is small %d",
 					engine_id_sz);
 		}
 	}
@@ -320,7 +320,7 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
 {
 	int ret = ENGINE_INVAL_GK20A;
 
-	gk20a_dbg_info("engine type %d", engine_type);
+	nvgpu_log_info(g, "engine type %d", engine_type);
 	if (engine_type == top_device_info_type_enum_graphics_v())
 		ret = ENGINE_GR_GK20A;
 	else if ((engine_type >= top_device_info_type_enum_copy0_v()) &&
@@ -354,7 +354,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 	u32 gr_runlist_id = ~0;
 	bool found_pbdma_for_runlist = false;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	f->num_engines = 0;
 
@@ -367,7 +367,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 		if (top_device_info_engine_v(table_entry)) {
 			engine_id =
 				top_device_info_engine_enum_v(table_entry);
-			gk20a_dbg_info("info: engine_id %d",
+			nvgpu_log_info(g, "info: engine_id %d",
 				top_device_info_engine_enum_v(table_entry));
 		}
 
@@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 		if (top_device_info_runlist_v(table_entry)) {
 			runlist_id =
 				top_device_info_runlist_enum_v(table_entry);
-			gk20a_dbg_info("gr info: runlist_id %d", runlist_id);
+			nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id);
 
 			runlist_bit = BIT(runlist_id);
 
@@ -384,7 +384,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 					pbdma_id++) {
 				if (f->pbdma_map[pbdma_id] &
 						runlist_bit) {
-					gk20a_dbg_info(
+					nvgpu_log_info(g,
 						"gr info: pbdma_map[%d]=%d",
 						pbdma_id,
 						f->pbdma_map[pbdma_id]);
@@ -402,13 +402,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 		if (top_device_info_intr_v(table_entry)) {
 			intr_id =
 				top_device_info_intr_enum_v(table_entry);
-			gk20a_dbg_info("gr info: intr_id %d", intr_id);
+			nvgpu_log_info(g, "gr info: intr_id %d", intr_id);
 		}
 
 		if (top_device_info_reset_v(table_entry)) {
 			reset_id =
 				top_device_info_reset_enum_v(table_entry);
-			gk20a_dbg_info("gr info: reset_id %d",
+			nvgpu_log_info(g, "gr info: reset_id %d",
 					reset_id);
 		}
 	} else if (entry == top_device_info_entry_engine_type_v()) {
@@ -538,7 +538,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 	struct gk20a *g = f->g;
 	unsigned int i = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_channel_worker_deinit(g);
 	/*
@@ -616,7 +616,7 @@ static void fifo_pbdma_exception_status(struct gk20a *g,
 	get_exception_pbdma_info(g, eng_info);
 	e = &eng_info->pbdma_exception_info;
 
-	gk20a_dbg_fn("pbdma_id %d, "
+	nvgpu_log_fn(g, "pbdma_id %d, "
 		"id_type %s, id %d, chan_status %d, "
 		"next_id_type %s, next_id %d, "
 		"chsw_in_progress %d",
@@ -657,7 +657,7 @@ static void fifo_engine_exception_status(struct gk20a *g,
 	get_exception_engine_info(g, eng_info);
 	e = &eng_info->engine_exception_info;
 
-	gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
+	nvgpu_log_fn(g, "engine_id %d, id_type %s, id %d, ctx_status %d, "
 		"faulted %d, idle %d, ctxsw_in_progress %d, ",
 		eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
 		e->id, e->ctx_status_v,
@@ -745,7 +745,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 
 clean_up_runlist:
 	gk20a_fifo_delete_runlist(f);
-	gk20a_dbg_fn("fail");
+	nvgpu_log_fn(g, "fail");
 	return -ENOMEM;
 }
 
@@ -784,7 +784,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 	unsigned int i;
 	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* enable pmc pfifo */
 	g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
@@ -805,7 +805,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 	timeout = gk20a_readl(g, fifo_fb_timeout_r());
 	timeout = set_field(timeout, fifo_fb_timeout_period_m(),
 			fifo_fb_timeout_period_max_f());
-	gk20a_dbg_info("fifo_fb_timeout reg val = 0x%08x", timeout);
+	nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
 	gk20a_writel(g, fifo_fb_timeout_r(), timeout);
 
 	/* write pbdma timeout value */
@@ -813,7 +813,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 		timeout = gk20a_readl(g, pbdma_timeout_r(i));
 		timeout = set_field(timeout, pbdma_timeout_period_m(),
 				pbdma_timeout_period_max_f());
-		gk20a_dbg_info("pbdma_timeout reg val = 0x%08x", timeout);
+		nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", timeout);
 		gk20a_writel(g, pbdma_timeout_r(i), timeout);
 	}
 	if (g->ops.fifo.apply_pb_timeout)
@@ -837,10 +837,10 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 		intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
 		intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
 		gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
-		gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
 		gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
 
-		gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i,
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i,
 			~pbdma_intr_en_0_lbreq_enabled_f());
 		gk20a_writel(g, pbdma_intr_en_1_r(i),
 			~pbdma_intr_en_0_lbreq_enabled_f());
@@ -852,12 +852,12 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 	/* clear and enable pfifo interrupt */
 	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
 	mask = gk20a_fifo_intr_0_en_mask(g);
-	gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask);
+	nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
 	gk20a_writel(g, fifo_intr_en_0_r(), mask);
-	gk20a_dbg_info("fifo_intr_en_1 = 0x80000000");
+	nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
 	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -868,7 +868,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 	unsigned int chid, i;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	f->g = g;
 
@@ -945,7 +945,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 		goto clean_up;
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up:
@@ -972,10 +972,10 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
 	u64 userd_base;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (f->sw_ready) {
-		gk20a_dbg_fn("skip init");
+		nvgpu_log_fn(g, "skip init");
 		return 0;
 	}
 
@@ -997,7 +997,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
 		nvgpu_err(g, "userd memory allocation failed");
 		goto clean_up;
 	}
-	gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
+	nvgpu_log(g, gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
 
 	userd_base = nvgpu_mem_get_addr(g, &f->userd);
 	for (chid = 0; chid < f->num_channels; chid++) {
@@ -1013,11 +1013,11 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
 	f->sw_ready = true;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up:
-	gk20a_dbg_fn("fail");
+	nvgpu_log_fn(g, "fail");
 	if (nvgpu_mem_is_valid(&f->userd)) {
 		if (g->ops.mm.is_bar1_supported(g))
 			nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd);
@@ -1032,7 +1032,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g)
 {
 	u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
 
-	gk20a_dbg(gpu_dbg_intr, "runlist event %08x",
+	nvgpu_log(g, gpu_dbg_intr, "runlist event %08x",
 		runlist_event);
 
 	gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
@@ -1042,7 +1042,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
 {
 	struct fifo_gk20a *f = &g->fifo;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* test write, read through bar1 @ userd region before
 	 * turning on the snooping */
@@ -1053,7 +1053,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
 		u32 bar1_vaddr = f->userd.gpu_va;
 		volatile u32 *cpu_vaddr = f->userd.cpu_va;
 
-		gk20a_dbg_info("test bar1 @ vaddr 0x%x",
+		nvgpu_log_info(g, "test bar1 @ vaddr 0x%x",
 			bar1_vaddr);
 
 		v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -1093,7 +1093,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
 			fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
 			fifo_bar1_base_valid_true_f());
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -1261,7 +1261,7 @@ void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
 	u32 fault_info;
 	u32 addr_lo, addr_hi;
 
-	gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id);
+	nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id);
 
 	memset(mmfault, 0, sizeof(*mmfault));
 
@@ -1291,7 +1291,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 	u32 engine_enum = ENGINE_INVAL_GK20A;
 	struct fifo_engine_info_gk20a *engine_info;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!g)
 		return;
@@ -1489,7 +1489,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 	struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid];
 	struct channel_gk20a *ch;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->ops.fifo.disable_tsg(tsg);
 
@@ -1556,7 +1556,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 	bool verbose = true;
 	u32 grfifo_ctl;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->fifo.deferred_reset_pending = false;
 
@@ -1693,7 +1693,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 			/* handled during channel free */
 			g->fifo.deferred_reset_pending = true;
-			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+			nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 				"sm debugger attached,"
 				" deferring channel recovery to channel free");
 		} else {
@@ -2196,6 +2196,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	struct channel_gk20a *ch;
 	bool recover = false;
 	bool progress = false;
+	struct gk20a *g = tsg->g;
 
 	*verbose = false;
 	*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
@@ -2221,7 +2222,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 * this resets timeout for channels that already completed their work
 	 */
 	if (progress) {
-		gk20a_dbg_info("progress on tsg=%d ch=%d",
+		nvgpu_log_info(g, "progress on tsg=%d ch=%d",
 				tsg->tsgid, ch->chid);
 		gk20a_channel_put(ch);
 		*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
@@ -2239,7 +2240,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 * caused the problem, so set timeout error notifier for all channels.
 	 */
 	if (recover) {
-		gk20a_dbg_info("timeout on tsg=%d ch=%d",
+		nvgpu_log_info(g, "timeout on tsg=%d ch=%d",
 				tsg->tsgid, ch->chid);
 		*ms = ch->timeout_accumulated_ms;
 		gk20a_channel_put(ch);
@@ -2311,7 +2312,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 					is_tsg, true, verbose,
 					RC_TYPE_CTXSW_TIMEOUT);
 		} else {
-			gk20a_dbg_info(
+			nvgpu_log_info(g,
 				"fifo is waiting for ctx switch for %d ms, "
 				"%s=%d", ms, is_tsg ? "tsg" : "ch", id);
 		}
@@ -2330,7 +2331,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 	bool print_channel_reset_log = false;
 	u32 handled = 0;
 
-	gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr);
+	nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);
 
 	if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
 		/* pio mode is unused. this shouldn't happen, ever. */
@@ -2381,7 +2382,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 			engine_id++) {
 		u32 active_engine_id = g->fifo.active_engines_list[engine_id];
 		u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
-		gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum,
+		nvgpu_log_fn(g, "enum:%d -> engine_id:%d", engine_enum,
 			active_engine_id);
 		fifo_pbdma_exception_status(g,
 			&g->fifo.engine_info[active_engine_id]);
@@ -2632,7 +2633,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
 
 	for (i = 0; i < host_num_pbdma; i++) {
 		if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
-			gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i);
+			nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i);
 			clear_intr |=
 				gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES);
 		}
@@ -2653,7 +2654,7 @@ void gk20a_fifo_isr(struct gk20a *g)
 	 * in a threaded interrupt context... */
 	nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);
 
-	gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
+	nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
 
 	/* handle runlist update */
 	if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
@@ -2681,7 +2682,7 @@ int gk20a_fifo_nonstall_isr(struct gk20a *g)
 	u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
 	u32 clear_intr = 0;
 
-	gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
+	nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
 
 	if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
 		clear_intr = fifo_intr_0_channel_intr_pending_f();
@@ -2769,7 +2770,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 	int ret;
 	unsigned int id_type;
 
-	gk20a_dbg_fn("%d", id);
+	nvgpu_log_fn(g, "%d", id);
 
 	/* issue preempt */
 	gk20a_fifo_issue_preempt(g, id, is_tsg);
@@ -2794,7 +2795,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	u32 mutex_ret = 0;
 	u32 i;
 
-	gk20a_dbg_fn("%d", chid);
+	nvgpu_log_fn(g, "%d", chid);
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2821,7 +2822,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 i;
 
-	gk20a_dbg_fn("%d", tsgid);
+	nvgpu_log_fn(g, "%d", tsgid);
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2938,7 +2939,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 	u32 mutex_ret;
 	u32 err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	gr_stat =
 		gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
@@ -2988,12 +2989,12 @@ clean_up:
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	if (err) {
-		gk20a_dbg_fn("failed");
+		nvgpu_log_fn(g, "failed");
 		if (gk20a_fifo_enable_engine_activity(g, eng_info))
 			nvgpu_err(g,
 				"failed to enable gr engine activity");
 	} else {
-		gk20a_dbg_fn("done");
+		nvgpu_log_fn(g, "done");
 	}
 	return err;
 }
@@ -3129,8 +3130,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 	bool skip_next = false;
 	u32 tsgid, count = 0;
 	u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32);
+	struct gk20a *g = f->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* for each TSG, T, on this level, insert all higher-level channels
 	   and TSGs before inserting T. */
@@ -3156,9 +3158,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 			return NULL;
 
 		/* add TSG entry */
-		gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid);
+		nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid);
 		f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry);
-		gk20a_dbg_info("tsg runlist count %d runlist [0] %x [1] %x\n",
+		nvgpu_log_info(g, "tsg runlist count %d runlist [0] %x [1] %x\n",
 			count, runlist_entry[0], runlist_entry[1]);
 		runlist_entry += runlist_entry_words;
 		count++;
@@ -3177,10 +3179,10 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 				return NULL;
 			}
 
-			gk20a_dbg_info("add channel %d to runlist",
+			nvgpu_log_info(g, "add channel %d to runlist",
 				ch->chid);
 			f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
-			gk20a_dbg_info(
+			nvgpu_log_info(g,
 				"run list count %d runlist [0] %x [1] %x\n",
 				count, runlist_entry[0], runlist_entry[1]);
 			count++;
@@ -3222,7 +3224,7 @@ int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
 		u32 runlist_id,
 		u32 new_level)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->fifo.tsg[id].interleave_level = new_level;
 
@@ -3313,7 +3315,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 
 	runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]);
 
-	gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx",
+	nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
 		runlist_id, (u64)runlist_iova);
 
 	if (!runlist_iova) {
@@ -3445,7 +3447,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 	u32 mutex_ret;
 	u32 ret = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	runlist = &f->runlist_info[runlist_id];
 
@@ -3465,7 +3467,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 
 int gk20a_fifo_suspend(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* stop bar1 snooping */
 	if (g->ops.mm.is_bar1_supported(g))
@@ -3476,7 +3478,7 @@ int gk20a_fifo_suspend(struct gk20a *g)
 	gk20a_writel(g, fifo_intr_en_0_r(), 0);
 	gk20a_writel(g, fifo_intr_en_1_r(), 0);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -3511,7 +3513,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
 	int ret = -ETIMEDOUT;
 	u32 i, host_num_engines;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	host_num_engines =
 		nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
@@ -3533,12 +3535,12 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
 		} while (!nvgpu_timeout_expired(&timeout));
 
 		if (ret) {
-			gk20a_dbg_info("cannot idle engine %u", i);
+			nvgpu_log_info(g, "cannot idle engine %u", i);
 			break;
 		}
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return ret;
 }
@@ -3839,7 +3841,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)
 {
 	struct gk20a *g = ch_gk20a->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
 		gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
@@ -3854,12 +3856,12 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
 	u32 addr_hi;
 	struct gk20a *g = c->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
 	addr_hi = u64_hi32(c->userd_iova);
 
-	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
+	nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx",
 		c->chid, (u64)c->userd_iova);
 
 	nvgpu_mem_wr32(g, &c->inst_block,
@@ -3885,7 +3887,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
 	struct gk20a *g = c->g;
 	struct nvgpu_mem *mem = &c->inst_block;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
@@ -3946,7 +3948,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
 	struct gk20a *g = c->g;
 	struct nvgpu_mem *mem = &c->inst_block;
 
-	gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid);
+	nvgpu_log_info(g, "channel %d : set ramfc privileged_channel", c->chid);
 
 	/* Enable HCE priv mode for phys mode transfer */
 	nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
@@ -3959,7 +3961,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
 	struct nvgpu_mem *mem;
 	u32 offset;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_mem_is_valid(&c->usermode_userd)) {
 		mem = &c->usermode_userd;
@@ -3987,16 +3989,16 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 {
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = g->ops.mm.alloc_inst_block(g, &ch->inst_block);
 	if (err)
 		return err;
 
-	gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx",
+	nvgpu_log_info(g, "channel %d inst block physical addr: 0x%16llx",
 		ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block));
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -4086,7 +4088,7 @@ void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
 		struct priv_cmd_entry *cmd, u32 off,
 		u32 id, u32 thresh, u64 gpu_va)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	off = cmd->off + off;
 	/* syncpoint_a */
@@ -4115,7 +4117,7 @@ void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
 {
 	u32 off = cmd->off;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	if (wfi_cmd) {
 		/* wfi */
 		nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E);
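For reference, the conversion applied throughout this diff swaps the legacy gk20a_dbg* macros for the nvgpu_log* family, which takes the struct gk20a pointer explicitly so messages can be attributed to a specific GPU instance. Below is a minimal sketch of the call-site pattern only; example_dump_engine() is a hypothetical helper and the header path is assumed from the nvgpu include layout, not taken from this change.

/* Sketch of the logging call shapes used in this diff (not driver code). */
#include <nvgpu/log.h>	/* assumed location of the nvgpu_log* helpers */

static void example_dump_engine(struct gk20a *g, u32 engine_id)
{
	/* was: gk20a_dbg_fn(""); -- implicit/global debug context */
	nvgpu_log_fn(g, " ");

	/* was: gk20a_dbg_info("info: engine_id %d", engine_id); */
	nvgpu_log_info(g, "info: engine_id %d", engine_id);

	/* nvgpu_log() additionally takes an explicit log-mask argument */
	nvgpu_log(g, gpu_dbg_intr, "engine %u state dumped", engine_id);
}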