Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/as.c                          |  19
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde.c                   |  54
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde_gp10b.c             |   8
-rw-r--r--  drivers/gpu/nvgpu/common/linux/channel.c               |  10
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ctxsw_trace.c           |  54
-rw-r--r--  drivers/gpu/nvgpu/common/linux/debug.c                 |   4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/debug_fifo.c            |   7
-rw-r--r--  drivers/gpu/nvgpu/common/linux/driver_common.c         |   2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/intr.c                  |   4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/io.c                    |  22
-rw-r--r--  drivers/gpu/nvgpu/common/linux/io_usermode.c           |   2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl.c                 |   5
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c              |  33
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c         |  22
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c         |   7
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c            |  42
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_dbg.c             | 100
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_tsg.c             |  21
-rw-r--r--  drivers/gpu/nvgpu/common/linux/log.c                   |   2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/module.c                |  17
-rw-r--r--  drivers/gpu/nvgpu/common/linux/nvgpu_mem.c             |  10
-rw-r--r--  drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c  |  10
-rw-r--r--  drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c  |  11
-rw-r--r--  drivers/gpu/nvgpu/common/linux/sched.c                 |  57
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c         |   8
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c         |  11
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c  |   2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c       |  17
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c                    |   3
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vidmem.c                   |   2
-rw-r--r--  drivers/gpu/nvgpu/common/vbios/bios.c                  |  54

31 files changed, 340 insertions(+), 280 deletions(-)
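
Every hunk below makes the same mechanical substitution: the legacy gk20a_dbg*() calls, which filter against a single global mask (the "dbg_mask" debugfs node removed in debug.c), are replaced by the nvgpu_log*() API, which takes an explicit struct gk20a pointer so each GPU instance is filtered against its own g->log_mask. Note that the empty format string "" also becomes a single space " ". A minimal before/after sketch of the pattern, taken from the as.c hunks; the _old/_new suffixes are added only for side-by-side comparison, and the snippet assumes the nvgpu tree for its types and helpers:

    /* Before: the log mask is implicit global state. */
    static int generate_as_share_id_old(struct gk20a_as *as)
    {
            gk20a_dbg_fn("");
            return ++as->last_share_id;
    }

    /* After: a struct gk20a pointer is recovered from whatever
     * context the function has and passed explicitly, so the log
     * mask is evaluated per device instance. */
    static int generate_as_share_id_new(struct gk20a_as *as)
    {
            struct gk20a *g = gk20a_from_as(as);

            nvgpu_log_fn(g, " ");
            return ++as->last_share_id;
    }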
diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 5b76cf0e..77f088b7 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Address Spaces
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,13 +34,17 @@
 /* dumb allocator... */
 static int generate_as_share_id(struct gk20a_as *as)
 {
-        gk20a_dbg_fn("");
+        struct gk20a *g = gk20a_from_as(as);
+
+        nvgpu_log_fn(g, " ");
         return ++as->last_share_id;
 }
 /* still dumb */
 static void release_as_share_id(struct gk20a_as *as, int id)
 {
-        gk20a_dbg_fn("");
+        struct gk20a *g = gk20a_from_as(as);
+
+        nvgpu_log_fn(g, " ");
         return;
 }
 
@@ -56,7 +60,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
         const bool userspace_managed =
                 (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (big_page_size == 0) {
                 big_page_size = g->ops.mm.get_default_big_page_size();
@@ -92,7 +96,7 @@ int gk20a_as_alloc_share(struct gk20a *g,
         struct gk20a_as_share *as_share;
         int err = 0;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
         g = gk20a_get(g);
         if (!g)
                 return -ENODEV;
@@ -126,8 +130,9 @@ failed:
 int gk20a_vm_release_share(struct gk20a_as_share *as_share)
 {
         struct vm_gk20a *vm = as_share->vm;
+        struct gk20a *g = gk20a_from_vm(vm);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         vm->as_share = NULL;
         as_share->vm = NULL;
@@ -146,7 +151,7 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
         struct gk20a *g = as_share->vm->mm->g;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = gk20a_busy(g);
 
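
Where a converted call site does not already have the device pointer in scope, the patch derives it from the nearest available object, here via gk20a_from_as() and gk20a_from_vm(). Those helpers are defined outside this diff; a plausible shape, assuming the usual container_of() idiom (the embedded member name is a guess, only the helper name appears in the diff):

    /* Hypothetical reconstruction for illustration: recover the
     * enclosing gk20a device from its embedded gk20a_as member.
     * The member name "as" is an assumption. */
    static inline struct gk20a *gk20a_from_as(struct gk20a_as *as)
    {
            return container_of(as, struct gk20a, as);
    }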
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 7c92246c..511d564f 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -464,7 +464,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
                         new_data = cde_ctx->user_param_values[user_id];
                 }
 
-                gk20a_dbg(gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx",
+                nvgpu_log(g, gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx",
                           i, param->id, param->target_buf,
                           param->target_byte_offset, new_data,
                           param->data_offset, param->type, param->shift,
@@ -790,8 +790,9 @@ __acquires(&cde_app->mutex)
 __releases(&cde_app->mutex)
 {
         struct gk20a_cde_app *cde_app = &cde_ctx->l->cde_app;
+        struct gk20a *g = &cde_ctx->l->g;
 
-        gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
+        nvgpu_log(g, gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
         trace_gk20a_cde_release(cde_ctx);
 
         nvgpu_mutex_acquire(&cde_app->mutex);
@@ -801,7 +802,7 @@ __releases(&cde_app->mutex)
                 nvgpu_list_move(&cde_ctx->list, &cde_app->free_contexts);
                 cde_app->ctx_usecount--;
         } else {
-                gk20a_dbg_info("double release cde context %p", cde_ctx);
+                nvgpu_log_info(g, "double release cde context %p", cde_ctx);
         }
 
         nvgpu_mutex_release(&cde_app->mutex);
@@ -823,7 +824,7 @@ __releases(&cde_app->mutex)
         if (cde_ctx->in_use || !cde_app->initialised)
                 return;
 
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
                         "cde: attempting to delete temporary %p", cde_ctx);
 
         err = gk20a_busy(g);
@@ -837,7 +838,7 @@ __releases(&cde_app->mutex)
 
         nvgpu_mutex_acquire(&cde_app->mutex);
         if (cde_ctx->in_use || !cde_app->initialised) {
-                gk20a_dbg(gpu_dbg_cde_ctx,
+                nvgpu_log(g, gpu_dbg_cde_ctx,
                                 "cde: context use raced, not deleting %p",
                                 cde_ctx);
                 goto out;
@@ -847,7 +848,7 @@ __releases(&cde_app->mutex)
                    "double pending %p", cde_ctx);
 
         gk20a_cde_remove_ctx(cde_ctx);
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
                         "cde: destroyed %p count=%d use=%d max=%d",
                         cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount,
                         cde_app->ctx_count_top);
@@ -874,7 +875,7 @@ __must_hold(&cde_app->mutex)
         if (!nvgpu_list_empty(&cde_app->free_contexts)) {
                 cde_ctx = nvgpu_list_first_entry(&cde_app->free_contexts,
                                 gk20a_cde_ctx, list);
-                gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+                nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
                                 "cde: got free %p count=%d use=%d max=%d",
                                 cde_ctx, cde_app->ctx_count,
                                 cde_app->ctx_usecount,
@@ -893,7 +894,7 @@ __must_hold(&cde_app->mutex)
 
         /* no free contexts, get a temporary one */
 
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
                         "cde: no free contexts, count=%d",
                         cde_app->ctx_count);
 
@@ -967,7 +968,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l
         INIT_DELAYED_WORK(&cde_ctx->ctx_deleter_work,
                           gk20a_cde_ctx_deleter_fn);
 
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
+        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
         trace_gk20a_cde_allocate_context(cde_ctx);
         return cde_ctx;
 }
@@ -1005,7 +1006,7 @@ __releases(&l->cde_app->mutex)
         u32 submit_op;
         struct dma_buf_attachment *attachment;
 
-        gk20a_dbg(gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
+        nvgpu_log(g, gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
                   compbits_byte_offset, scatterbuffer_byte_offset);
 
         /* scatter buffer must be after compbits buffer */
@@ -1055,11 +1056,11 @@ __releases(&l->cde_app->mutex)
                         compbits_byte_offset;
         }
 
-        gk20a_dbg(gpu_dbg_cde, "map_offset=%llu map_size=%llu",
+        nvgpu_log(g, gpu_dbg_cde, "map_offset=%llu map_size=%llu",
                   map_offset, map_size);
-        gk20a_dbg(gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu",
+        nvgpu_log(g, gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu",
                   mapped_compbits_offset, compbits_size);
-        gk20a_dbg(gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu",
+        nvgpu_log(g, gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu",
                   mapped_scatterbuffer_offset, scatterbuffer_size);
 
 
@@ -1096,7 +1097,7 @@ __releases(&l->cde_app->mutex)
 
         scatter_buffer = surface + scatterbuffer_byte_offset;
 
-        gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
+        nvgpu_log(g, gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
                   surface, scatter_buffer);
         sgt = gk20a_mm_pin(dev_from_gk20a(g), compbits_scatter_buf,
                            &attachment);
@@ -1163,11 +1164,11 @@ __releases(&l->cde_app->mutex)
                 goto exit_unmap_surface;
         }
 
-        gk20a_dbg(gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n",
+        nvgpu_log(g, gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n",
                  g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr);
-        gk20a_dbg(gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n",
+        nvgpu_log(g, gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n",
                  cde_ctx->compbit_size, cde_ctx->compbit_vaddr);
-        gk20a_dbg(gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n",
+        nvgpu_log(g, gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n",
                  cde_ctx->scatterbuffer_size, cde_ctx->scatterbuffer_vaddr);
 
         /* take always the postfence as it is needed for protecting the
@@ -1234,9 +1235,9 @@ __releases(&cde_app->mutex)
                 return;
 
         trace_gk20a_cde_finished_ctx_cb(cde_ctx);
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx);
+        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx);
         if (!cde_ctx->in_use)
-                gk20a_dbg_info("double finish cde context %p on channel %p",
+                nvgpu_log_info(g, "double finish cde context %p on channel %p",
                                 cde_ctx, ch);
 
         if (ch->has_timedout) {
@@ -1406,12 +1407,13 @@ __acquires(&cde_app->mutex)
 __releases(&cde_app->mutex)
 {
         struct gk20a_cde_app *cde_app = &l->cde_app;
+        struct gk20a *g = &l->g;
         int err;
 
         if (cde_app->initialised)
                 return 0;
 
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
+        nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
 
         err = nvgpu_mutex_init(&cde_app->mutex);
         if (err)
@@ -1430,7 +1432,7 @@ __releases(&cde_app->mutex)
         cde_app->initialised = true;
 
         nvgpu_mutex_release(&cde_app->mutex);
-        gk20a_dbg(gpu_dbg_cde_ctx, "cde: init finished: %d", err);
+        nvgpu_log(g, gpu_dbg_cde_ctx, "cde: init finished: %d", err);
 
         if (err)
                 nvgpu_mutex_destroy(&cde_app->mutex);
@@ -1528,14 +1530,14 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
                 nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
                            xtiles, ytiles);
 
-        gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
+        nvgpu_log(g, gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
                   width, height, block_height_log2,
                   compbits_hoffset, compbits_voffset, scatterbuffer_offset);
-        gk20a_dbg(gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)",
+        nvgpu_log(g, gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)",
                   width, height, xtiles, ytiles);
-        gk20a_dbg(gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)",
+        nvgpu_log(g, gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)",
                   wgx, wgy, gridw_h, gridh_h, gridw_v, gridh_v);
-        gk20a_dbg(gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d",
+        nvgpu_log(g, gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d",
                   hprog,
                   l->cde_app.arrays[ARRAY_PROGRAM_OFFSET][hprog],
                   l->cde_app.arrays[ARRAY_REGISTER_COUNT][hprog],
@@ -1634,7 +1636,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
         if (!l->cde_app.initialised)
                 return -ENOSYS;
 
-        gk20a_dbg(gpu_dbg_cde, "firmware version = %d\n",
+        nvgpu_log(g, gpu_dbg_cde, "firmware version = %d\n",
                   l->cde_app.firmware_version);
 
         if (l->cde_app.firmware_version == 1) {
diff --git a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c b/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
index 483a3ee7..5c0e79a7 100644
--- a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
+++ b/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GP10B CDE 2 * GP10B CDE
3 * 3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -117,7 +117,7 @@ int gp10b_populate_scatter_buffer(struct gk20a *g,
117 u64 surf_pa = sg_phys(sg); 117 u64 surf_pa = sg_phys(sg);
118 unsigned int n = (int)(sg->length >> page_size_log2); 118 unsigned int n = (int)(sg->length >> page_size_log2);
119 119
120 gk20a_dbg(gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n); 120 nvgpu_log(g, gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n);
121 121
122 for (j=0; j < n && pages_left > 0; j++, surf_pa += page_size) { 122 for (j=0; j < n && pages_left > 0; j++, surf_pa += page_size) {
123 u32 addr = (((u32)(surf_pa>>7)) & getSliceMaskGP10B) >> page_size_shift; 123 u32 addr = (((u32)(surf_pa>>7)) & getSliceMaskGP10B) >> page_size_shift;
@@ -143,9 +143,9 @@ int gp10b_populate_scatter_buffer(struct gk20a *g,
143 scatter_buffer[page >> 3] = d; 143 scatter_buffer[page >> 3] = d;
144 144
145 if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) { 145 if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) {
146 gk20a_dbg(gpu_dbg_cde, "scatterBuffer content:"); 146 nvgpu_log(g, gpu_dbg_cde, "scatterBuffer content:");
147 for (i = 0; i < page >> 3; i++) { 147 for (i = 0; i < page >> 3; i++) {
148 gk20a_dbg(gpu_dbg_cde, " %x", scatter_buffer[i]); 148 nvgpu_log(g, gpu_dbg_cde, " %x", scatter_buffer[i]);
149 } 149 }
150 } 150 }
151 151
diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c
index 8f2adc3a..d767374b 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.c
+++ b/drivers/gpu/nvgpu/common/linux/channel.c
@@ -834,7 +834,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
834 /* update debug settings */ 834 /* update debug settings */
835 nvgpu_ltc_sync_enabled(g); 835 nvgpu_ltc_sync_enabled(g);
836 836
837 gk20a_dbg_info("channel %d", c->chid); 837 nvgpu_log_info(g, "channel %d", c->chid);
838 838
839 /* 839 /*
840 * Job tracking is necessary for any of the following conditions: 840 * Job tracking is necessary for any of the following conditions:
@@ -943,7 +943,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
943 fence ? fence->id : 0, 943 fence ? fence->id : 0,
944 fence ? fence->value : 0); 944 fence ? fence->value : 0);
945 945
946 gk20a_dbg_info("pre-submit put %d, get %d, size %d", 946 nvgpu_log_info(g, "pre-submit put %d, get %d, size %d",
947 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); 947 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
948 948
949 /* 949 /*
@@ -1023,18 +1023,18 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
1023 post_fence ? post_fence->syncpt_id : 0, 1023 post_fence ? post_fence->syncpt_id : 0,
1024 post_fence ? post_fence->syncpt_value : 0); 1024 post_fence ? post_fence->syncpt_value : 0);
1025 1025
1026 gk20a_dbg_info("post-submit put %d, get %d, size %d", 1026 nvgpu_log_info(g, "post-submit put %d, get %d, size %d",
1027 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); 1027 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
1028 1028
1029 if (profile) 1029 if (profile)
1030 profile->timestamp[PROFILE_END] = sched_clock(); 1030 profile->timestamp[PROFILE_END] = sched_clock();
1031 gk20a_dbg_fn("done"); 1031 nvgpu_log_fn(g, "done");
1032 return err; 1032 return err;
1033 1033
1034clean_up_job: 1034clean_up_job:
1035 channel_gk20a_free_job(c, job); 1035 channel_gk20a_free_job(c, job);
1036clean_up: 1036clean_up:
1037 gk20a_dbg_fn("fail"); 1037 nvgpu_log_fn(g, "fail");
1038 gk20a_fence_put(post_fence); 1038 gk20a_fence_put(post_fence);
1039 if (c->deterministic) 1039 if (c->deterministic)
1040 nvgpu_rwsem_up_read(&g->deterministic_busy); 1040 nvgpu_rwsem_up_read(&g->deterministic_busy);
diff --git a/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c b/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c
index 8268bf60..2f0c3e89 100644
--- a/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c
+++ b/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -79,13 +79,14 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
79 loff_t *off) 79 loff_t *off)
80{ 80{
81 struct gk20a_ctxsw_dev *dev = filp->private_data; 81 struct gk20a_ctxsw_dev *dev = filp->private_data;
82 struct gk20a *g = dev->g;
82 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; 83 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr;
83 struct nvgpu_ctxsw_trace_entry __user *entry = 84 struct nvgpu_ctxsw_trace_entry __user *entry =
84 (struct nvgpu_ctxsw_trace_entry *) buf; 85 (struct nvgpu_ctxsw_trace_entry *) buf;
85 size_t copied = 0; 86 size_t copied = 0;
86 int err; 87 int err;
87 88
88 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, 89 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
89 "filp=%p buf=%p size=%zu", filp, buf, size); 90 "filp=%p buf=%p size=%zu", filp, buf, size);
90 91
91 nvgpu_mutex_acquire(&dev->write_lock); 92 nvgpu_mutex_acquire(&dev->write_lock);
@@ -119,7 +120,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
119 size -= sizeof(*entry); 120 size -= sizeof(*entry);
120 } 121 }
121 122
122 gk20a_dbg(gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied, 123 nvgpu_log(g, gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied,
123 hdr->read_idx); 124 hdr->read_idx);
124 125
125 *off = hdr->read_idx; 126 *off = hdr->read_idx;
@@ -130,7 +131,9 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
130 131
131static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev) 132static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev)
132{ 133{
133 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled"); 134 struct gk20a *g = dev->g;
135
136 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled");
134 nvgpu_mutex_acquire(&dev->write_lock); 137 nvgpu_mutex_acquire(&dev->write_lock);
135 dev->write_enabled = true; 138 dev->write_enabled = true;
136 nvgpu_mutex_release(&dev->write_lock); 139 nvgpu_mutex_release(&dev->write_lock);
@@ -140,7 +143,9 @@ static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev)
140 143
141static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev) 144static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev)
142{ 145{
143 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled"); 146 struct gk20a *g = dev->g;
147
148 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled");
144 dev->g->ops.fecs_trace.disable(dev->g); 149 dev->g->ops.fecs_trace.disable(dev->g);
145 nvgpu_mutex_acquire(&dev->write_lock); 150 nvgpu_mutex_acquire(&dev->write_lock);
146 dev->write_enabled = false; 151 dev->write_enabled = false;
@@ -168,7 +173,7 @@ static int gk20a_ctxsw_dev_alloc_buffer(struct gk20a_ctxsw_dev *dev,
168 dev->size = size; 173 dev->size = size;
169 dev->num_ents = dev->hdr->num_ents; 174 dev->num_ents = dev->hdr->num_ents;
170 175
171 gk20a_dbg(gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d", 176 nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d",
172 dev->size, dev->hdr, dev->ents, dev->hdr->num_ents); 177 dev->size, dev->hdr, dev->ents, dev->hdr->num_ents);
173 return 0; 178 return 0;
174} 179}
@@ -208,10 +213,11 @@ int gk20a_ctxsw_dev_ring_free(struct gk20a *g)
208static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev, 213static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev,
209 struct nvgpu_ctxsw_ring_setup_args *args) 214 struct nvgpu_ctxsw_ring_setup_args *args)
210{ 215{
216 struct gk20a *g = dev->g;
211 size_t size = args->size; 217 size_t size = args->size;
212 int ret; 218 int ret;
213 219
214 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size); 220 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size);
215 221
216 if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE) 222 if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE)
217 return -EINVAL; 223 return -EINVAL;
@@ -252,7 +258,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev)
252 struct gk20a *g = dev->g; 258 struct gk20a *g = dev->g;
253 int err; 259 int err;
254 260
255 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); 261 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
256 262
257 err = gk20a_busy(g); 263 err = gk20a_busy(g);
258 if (err) 264 if (err)
@@ -286,7 +292,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
286 if (!g) 292 if (!g)
287 return -ENODEV; 293 return -ENODEV;
288 294
289 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g); 295 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g);
290 296
291 if (!capable(CAP_SYS_ADMIN)) { 297 if (!capable(CAP_SYS_ADMIN)) {
292 err = -EPERM; 298 err = -EPERM;
@@ -322,13 +328,13 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
322 328
323 size = sizeof(struct nvgpu_ctxsw_ring_header) + 329 size = sizeof(struct nvgpu_ctxsw_ring_header) +
324 n * sizeof(struct nvgpu_ctxsw_trace_entry); 330 n * sizeof(struct nvgpu_ctxsw_trace_entry);
325 gk20a_dbg(gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu", 331 nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu",
326 size, n, sizeof(struct nvgpu_ctxsw_trace_entry)); 332 size, n, sizeof(struct nvgpu_ctxsw_trace_entry));
327 333
328 err = gk20a_ctxsw_dev_alloc_buffer(dev, size); 334 err = gk20a_ctxsw_dev_alloc_buffer(dev, size);
329 if (!err) { 335 if (!err) {
330 filp->private_data = dev; 336 filp->private_data = dev;
331 gk20a_dbg(gpu_dbg_ctxsw, "filp=%p dev=%p size=%zu", 337 nvgpu_log(g, gpu_dbg_ctxsw, "filp=%p dev=%p size=%zu",
332 filp, dev, size); 338 filp, dev, size);
333 } 339 }
334 340
@@ -348,7 +354,7 @@ int gk20a_ctxsw_dev_release(struct inode *inode, struct file *filp)
348 struct gk20a_ctxsw_dev *dev = filp->private_data; 354 struct gk20a_ctxsw_dev *dev = filp->private_data;
349 struct gk20a *g = dev->g; 355 struct gk20a *g = dev->g;
350 356
351 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev); 357 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev);
352 358
353 g->ops.fecs_trace.disable(g); 359 g->ops.fecs_trace.disable(g);
354 360
@@ -372,7 +378,7 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd,
372 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; 378 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
373 int err = 0; 379 int err = 0;
374 380
375 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd)); 381 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd));
376 382
377 if ((_IOC_TYPE(cmd) != NVGPU_CTXSW_IOCTL_MAGIC) || 383 if ((_IOC_TYPE(cmd) != NVGPU_CTXSW_IOCTL_MAGIC) ||
378 (_IOC_NR(cmd) == 0) || 384 (_IOC_NR(cmd) == 0) ||
@@ -423,10 +429,11 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd,
423unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait) 429unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
424{ 430{
425 struct gk20a_ctxsw_dev *dev = filp->private_data; 431 struct gk20a_ctxsw_dev *dev = filp->private_data;
432 struct gk20a *g = dev->g;
426 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; 433 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr;
427 unsigned int mask = 0; 434 unsigned int mask = 0;
428 435
429 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); 436 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
430 437
431 nvgpu_mutex_acquire(&dev->write_lock); 438 nvgpu_mutex_acquire(&dev->write_lock);
432 poll_wait(filp, &dev->readout_wq.wq, wait); 439 poll_wait(filp, &dev->readout_wq.wq, wait);
@@ -440,18 +447,20 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
440static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma) 447static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma)
441{ 448{
442 struct gk20a_ctxsw_dev *dev = vma->vm_private_data; 449 struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
450 struct gk20a *g = dev->g;
443 451
444 nvgpu_atomic_inc(&dev->vma_ref); 452 nvgpu_atomic_inc(&dev->vma_ref);
445 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", 453 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
446 nvgpu_atomic_read(&dev->vma_ref)); 454 nvgpu_atomic_read(&dev->vma_ref));
447} 455}
448 456
449static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma) 457static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma)
450{ 458{
451 struct gk20a_ctxsw_dev *dev = vma->vm_private_data; 459 struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
460 struct gk20a *g = dev->g;
452 461
453 nvgpu_atomic_dec(&dev->vma_ref); 462 nvgpu_atomic_dec(&dev->vma_ref);
454 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", 463 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
455 nvgpu_atomic_read(&dev->vma_ref)); 464 nvgpu_atomic_read(&dev->vma_ref));
456} 465}
457 466
@@ -469,9 +478,10 @@ int gk20a_ctxsw_dev_mmap_buffer(struct gk20a *g,
469int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma) 478int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma)
470{ 479{
471 struct gk20a_ctxsw_dev *dev = filp->private_data; 480 struct gk20a_ctxsw_dev *dev = filp->private_data;
481 struct gk20a *g = dev->g;
472 int ret; 482 int ret;
473 483
474 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx", 484 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx",
475 vma->vm_start, vma->vm_end); 485 vma->vm_start, vma->vm_end);
476 486
477 ret = dev->g->ops.fecs_trace.mmap_user_buffer(dev->g, vma); 487 ret = dev->g->ops.fecs_trace.mmap_user_buffer(dev->g, vma);
@@ -513,7 +523,7 @@ int gk20a_ctxsw_trace_init(struct gk20a *g)
513 struct gk20a_ctxsw_trace *trace = g->ctxsw_trace; 523 struct gk20a_ctxsw_trace *trace = g->ctxsw_trace;
514 int err; 524 int err;
515 525
516 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace); 526 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace);
517 527
518 /* if tracing is not supported, skip this */ 528 /* if tracing is not supported, skip this */
519 if (!g->ops.fecs_trace.init) 529 if (!g->ops.fecs_trace.init)
@@ -590,7 +600,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
590 dev = &g->ctxsw_trace->devs[entry->vmid]; 600 dev = &g->ctxsw_trace->devs[entry->vmid];
591 hdr = dev->hdr; 601 hdr = dev->hdr;
592 602
593 gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, 603 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
594 "dev=%p hdr=%p", dev, hdr); 604 "dev=%p hdr=%p", dev, hdr);
595 605
596 nvgpu_mutex_acquire(&dev->write_lock); 606 nvgpu_mutex_acquire(&dev->write_lock);
@@ -630,7 +640,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
630 goto filter; 640 goto filter;
631 } 641 }
632 642
633 gk20a_dbg(gpu_dbg_ctxsw, 643 nvgpu_log(g, gpu_dbg_ctxsw,
634 "seqno=%d context_id=%08x pid=%lld tag=%x timestamp=%llx", 644 "seqno=%d context_id=%08x pid=%lld tag=%x timestamp=%llx",
635 entry->seqno, entry->context_id, entry->pid, 645 entry->seqno, entry->context_id, entry->pid,
636 entry->tag, entry->timestamp); 646 entry->tag, entry->timestamp);
@@ -644,7 +654,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
644 if (unlikely(write_idx >= hdr->num_ents)) 654 if (unlikely(write_idx >= hdr->num_ents))
645 write_idx = 0; 655 write_idx = 0;
646 hdr->write_idx = write_idx; 656 hdr->write_idx = write_idx;
647 gk20a_dbg(gpu_dbg_ctxsw, "added: read=%d write=%d len=%d", 657 nvgpu_log(g, gpu_dbg_ctxsw, "added: read=%d write=%d len=%d",
648 hdr->read_idx, hdr->write_idx, ring_len(hdr)); 658 hdr->read_idx, hdr->write_idx, ring_len(hdr));
649 659
650 nvgpu_mutex_release(&dev->write_lock); 660 nvgpu_mutex_release(&dev->write_lock);
@@ -657,7 +667,7 @@ drop:
657 hdr->drop_count++; 667 hdr->drop_count++;
658 668
659filter: 669filter:
660 gk20a_dbg(gpu_dbg_ctxsw, 670 nvgpu_log(g, gpu_dbg_ctxsw,
661 "dropping seqno=%d context_id=%08x pid=%lld " 671 "dropping seqno=%d context_id=%08x pid=%lld "
662 "tag=%x time=%llx (%s)", 672 "tag=%x time=%llx (%s)",
663 entry->seqno, entry->context_id, entry->pid, 673 entry->seqno, entry->context_id, entry->pid,
diff --git a/drivers/gpu/nvgpu/common/linux/debug.c b/drivers/gpu/nvgpu/common/linux/debug.c
index a458a3d4..e8c0417a 100644
--- a/drivers/gpu/nvgpu/common/linux/debug.c
+++ b/drivers/gpu/nvgpu/common/linux/debug.c
@@ -307,10 +307,6 @@ void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
 	debugfs_create_u32("disable_syncpoints", S_IRUGO,
 			l->debugfs, &g->disable_syncpoints);
 
-	/* Legacy debugging API. */
-	debugfs_create_u64("dbg_mask", S_IRUGO|S_IWUSR,
-			l->debugfs, &nvgpu_dbg_mask);
-
 	/* New debug logging API. */
 	debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR,
 			l->debugfs, &g->log_mask);
diff --git a/drivers/gpu/nvgpu/common/linux/debug_fifo.c b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
index aeab0c92..b2a87e0d 100644
--- a/drivers/gpu/nvgpu/common/linux/debug_fifo.c
+++ b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2017 NVIDIA Corporation. All rights reserved. 2 * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
3 * 3 *
4 * This software is licensed under the terms of the GNU General Public 4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and 5 * License version 2, as published by the Free Software Foundation, and
@@ -108,6 +108,7 @@ static const struct seq_operations gk20a_fifo_sched_debugfs_seq_ops = {
108static int gk20a_fifo_sched_debugfs_open(struct inode *inode, 108static int gk20a_fifo_sched_debugfs_open(struct inode *inode,
109 struct file *file) 109 struct file *file)
110{ 110{
111 struct gk20a *g = inode->i_private;
111 int err; 112 int err;
112 113
113 if (!capable(CAP_SYS_ADMIN)) 114 if (!capable(CAP_SYS_ADMIN))
@@ -117,7 +118,7 @@ static int gk20a_fifo_sched_debugfs_open(struct inode *inode,
117 if (err) 118 if (err)
118 return err; 119 return err;
119 120
120 gk20a_dbg(gpu_dbg_info, "i_private=%p", inode->i_private); 121 nvgpu_log(g, gpu_dbg_info, "i_private=%p", inode->i_private);
121 122
122 ((struct seq_file *)file->private_data)->private = inode->i_private; 123 ((struct seq_file *)file->private_data)->private = inode->i_private;
123 return 0; 124 return 0;
@@ -301,7 +302,7 @@ void gk20a_fifo_debugfs_init(struct gk20a *g)
301 if (IS_ERR_OR_NULL(fifo_root)) 302 if (IS_ERR_OR_NULL(fifo_root))
302 return; 303 return;
303 304
304 gk20a_dbg(gpu_dbg_info, "g=%p", g); 305 nvgpu_log(g, gpu_dbg_info, "g=%p", g);
305 306
306 debugfs_create_file("sched", 0600, fifo_root, g, 307 debugfs_create_file("sched", 0600, fifo_root, g,
307 &gk20a_fifo_sched_debugfs_fops); 308 &gk20a_fifo_sched_debugfs_fops);
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
index 53789423..769f7e03 100644
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/common/linux/driver_common.c
@@ -87,7 +87,7 @@ static void nvgpu_init_gr_vars(struct gk20a *g)
87{ 87{
88 gk20a_init_gr(g); 88 gk20a_init_gr(g);
89 89
90 gk20a_dbg_info("total ram pages : %lu", totalram_pages); 90 nvgpu_log_info(g, "total ram pages : %lu", totalram_pages);
91 g->gr.max_comptag_mem = totalram_pages 91 g->gr.max_comptag_mem = totalram_pages
92 >> (10 - (PAGE_SHIFT - 10)); 92 >> (10 - (PAGE_SHIFT - 10));
93} 93}
diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c
index 05dd3f2a..7ffc7e87 100644
--- a/drivers/gpu/nvgpu/common/linux/intr.c
+++ b/drivers/gpu/nvgpu/common/linux/intr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
50 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 50 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
51 int hw_irq_count; 51 int hw_irq_count;
52 52
53 gk20a_dbg(gpu_dbg_intr, "interrupt thread launched"); 53 nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");
54 54
55 trace_mc_gk20a_intr_thread_stall(g->name); 55 trace_mc_gk20a_intr_thread_stall(g->name);
56 56
diff --git a/drivers/gpu/nvgpu/common/linux/io.c b/drivers/gpu/nvgpu/common/linux/io.c
index cde90ddd..c06512a5 100644
--- a/drivers/gpu/nvgpu/common/linux/io.c
+++ b/drivers/gpu/nvgpu/common/linux/io.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -23,11 +23,11 @@ void nvgpu_writel(struct gk20a *g, u32 r, u32 v)
23 23
24 if (unlikely(!l->regs)) { 24 if (unlikely(!l->regs)) {
25 __gk20a_warn_on_no_regs(); 25 __gk20a_warn_on_no_regs();
26 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); 26 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
27 } else { 27 } else {
28 writel_relaxed(v, l->regs + r); 28 writel_relaxed(v, l->regs + r);
29 nvgpu_wmb(); 29 nvgpu_wmb();
30 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); 30 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
31 } 31 }
32} 32}
33 33
@@ -48,10 +48,10 @@ u32 __nvgpu_readl(struct gk20a *g, u32 r)
48 48
49 if (unlikely(!l->regs)) { 49 if (unlikely(!l->regs)) {
50 __gk20a_warn_on_no_regs(); 50 __gk20a_warn_on_no_regs();
51 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); 51 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
52 } else { 52 } else {
53 v = readl(l->regs + r); 53 v = readl(l->regs + r);
54 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); 54 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
55 } 55 }
56 56
57 return v; 57 return v;
@@ -63,13 +63,13 @@ void nvgpu_writel_check(struct gk20a *g, u32 r, u32 v)
63 63
64 if (unlikely(!l->regs)) { 64 if (unlikely(!l->regs)) {
65 __gk20a_warn_on_no_regs(); 65 __gk20a_warn_on_no_regs();
66 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); 66 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
67 } else { 67 } else {
68 nvgpu_wmb(); 68 nvgpu_wmb();
69 do { 69 do {
70 writel_relaxed(v, l->regs + r); 70 writel_relaxed(v, l->regs + r);
71 } while (readl(l->regs + r) != v); 71 } while (readl(l->regs + r) != v);
72 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); 72 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
73 } 73 }
74} 74}
75 75
@@ -79,11 +79,11 @@ void nvgpu_bar1_writel(struct gk20a *g, u32 b, u32 v)
79 79
80 if (unlikely(!l->bar1)) { 80 if (unlikely(!l->bar1)) {
81 __gk20a_warn_on_no_regs(); 81 __gk20a_warn_on_no_regs();
82 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); 82 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v);
83 } else { 83 } else {
84 nvgpu_wmb(); 84 nvgpu_wmb();
85 writel_relaxed(v, l->bar1 + b); 85 writel_relaxed(v, l->bar1 + b);
86 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); 86 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v);
87 } 87 }
88} 88}
89 89
@@ -94,10 +94,10 @@ u32 nvgpu_bar1_readl(struct gk20a *g, u32 b)
94 94
95 if (unlikely(!l->bar1)) { 95 if (unlikely(!l->bar1)) {
96 __gk20a_warn_on_no_regs(); 96 __gk20a_warn_on_no_regs();
97 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); 97 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v);
98 } else { 98 } else {
99 v = readl(l->bar1 + b); 99 v = readl(l->bar1 + b);
100 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); 100 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v);
101 } 101 }
102 102
103 return v; 103 return v;
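
The io.c accessors get the same substitution, and each now traces through the per-device gpu_dbg_reg mask after the access completes; nvgpu_writel_check() in particular only reaches its trace line once a readback confirms the written value, per the loop in the hunk above. A generic sketch of that write-then-verify MMIO idiom (the function name here is illustrative, not from the diff):

    /* Write-then-verify idiom as in nvgpu_writel_check(): issue a
     * write barrier, then repeat the posted write until a readback
     * observes the expected value. */
    static void writel_verified(void __iomem *addr, u32 v)
    {
            nvgpu_wmb();
            do {
                    writel_relaxed(v, addr);
            } while (readl(addr) != v);
    }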
diff --git a/drivers/gpu/nvgpu/common/linux/io_usermode.c b/drivers/gpu/nvgpu/common/linux/io_usermode.c
index 888be318..a7b728dd 100644
--- a/drivers/gpu/nvgpu/common/linux/io_usermode.c
+++ b/drivers/gpu/nvgpu/common/linux/io_usermode.c
@@ -25,5 +25,5 @@ void nvgpu_usermode_writel(struct gk20a *g, u32 r, u32 v)
25 void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r()); 25 void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r());
26 26
27 writel_relaxed(v, reg); 27 writel_relaxed(v, reg);
28 gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v); 28 nvgpu_log(g, gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v);
29} 29}
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl.c b/drivers/gpu/nvgpu/common/linux/ioctl.c
index 04974786..359e5103 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * NVGPU IOCTLs 2 * NVGPU IOCTLs
3 * 3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -139,8 +139,9 @@ static int gk20a_create_device(
139{ 139{
140 struct device *subdev; 140 struct device *subdev;
141 int err; 141 int err;
142 struct gk20a *g = gk20a_from_dev(dev);
142 143
143 gk20a_dbg_fn(""); 144 nvgpu_log_fn(g, " ");
144 145
145 cdev_init(cdev, ops); 146 cdev_init(cdev, ops);
146 cdev->owner = THIS_MODULE; 147 cdev->owner = THIS_MODULE;
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index e09e099b..41bbdfcb 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -50,8 +50,9 @@ static int gk20a_as_ioctl_bind_channel(
 {
 	int err = 0;
 	struct channel_gk20a *ch;
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	ch = gk20a_get_channel_from_file(args->channel_fd);
 	if (!ch)
@@ -76,7 +77,7 @@ static int gk20a_as_ioctl_alloc_space(
 {
 	struct gk20a *g = gk20a_from_vm(as_share->vm);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
 				   &args->o_a.offset,
 				   gk20a_as_translate_linux_flags(g,
@@ -87,7 +88,9 @@ static int gk20a_as_ioctl_free_space(
 	struct gk20a_as_share *as_share,
 	struct nvgpu_as_free_space_args *args)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
+	nvgpu_log_fn(g, " ");
 	return nvgpu_vm_area_free(as_share->vm, args->offset);
 }
 
@@ -95,7 +98,9 @@ static int gk20a_as_ioctl_map_buffer_ex(
 	struct gk20a_as_share *as_share,
 	struct nvgpu_as_map_buffer_ex_args *args)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
+	nvgpu_log_fn(g, " ");
 
 	/* unsupported, direct kind control must be used */
 	if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
@@ -117,7 +122,9 @@ static int gk20a_as_ioctl_unmap_buffer(
 	struct gk20a_as_share *as_share,
 	struct nvgpu_as_unmap_buffer_args *args)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_vm_unmap(as_share->vm, args->offset, NULL);
 
@@ -128,6 +135,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
 	struct gk20a_as_share *as_share,
 	struct nvgpu_as_map_buffer_batch_args *args)
 {
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
 	u32 i;
 	int err = 0;
 
@@ -140,7 +148,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
 
 	struct vm_gk20a_mapping_batch batch;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT ||
 	    args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT)
@@ -220,9 +228,10 @@ static int gk20a_as_ioctl_get_va_regions(
 	unsigned int write_entries;
 	struct nvgpu_as_va_region __user *user_region_ptr;
 	struct vm_gk20a *vm = as_share->vm;
+	struct gk20a *g = gk20a_from_vm(vm);
 	unsigned int page_sizes = gmmu_page_size_kernel;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!vm->big_pages)
 		page_sizes--;
@@ -293,14 +302,14 @@ int gk20a_as_dev_open(struct inode *inode, struct file *filp)
 	struct gk20a *g;
 	int err;
 
-	gk20a_dbg_fn("");
-
 	l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev);
 	g = &l->g;
 
+	nvgpu_log_fn(g, " ");
+
 	err = gk20a_as_alloc_share(g, 0, 0, &as_share);
 	if (err) {
-		gk20a_dbg_fn("failed to alloc share");
+		nvgpu_log_fn(g, "failed to alloc share");
 		return err;
 	}
 
@@ -312,8 +321,6 @@ int gk20a_as_dev_release(struct inode *inode, struct file *filp)
 {
 	struct gk20a_as_share *as_share = filp->private_data;
 
-	gk20a_dbg_fn("");
-
 	if (!as_share)
 		return 0;
 
@@ -328,7 +335,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];
 
-	gk20a_dbg_fn("start %d", _IOC_NR(cmd));
+	nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
 
 	if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
 		(_IOC_NR(cmd) == 0) ||
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 06dfb180..606c5251 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -476,7 +476,7 @@ static int __gk20a_channel_open(struct gk20a *g,
 	struct channel_gk20a *ch;
 	struct channel_priv *priv;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g = gk20a_get(g);
 	if (!g)
@@ -529,10 +529,10 @@ int gk20a_channel_open(struct inode *inode, struct file *filp)
 	struct gk20a *g = &l->g;
 	int ret;
 
-	gk20a_dbg_fn("start");
+	nvgpu_log_fn(g, "start");
 	ret = __gk20a_channel_open(g, filp, -1);
 
-	gk20a_dbg_fn("end");
+	nvgpu_log_fn(g, "end");
 	return ret;
 }
 
@@ -676,7 +676,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
 	int remain, ret = 0;
 	u64 end;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (ch->has_timedout)
 		return -ETIMEDOUT;
@@ -760,7 +760,7 @@ static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
 	struct gk20a *g = ch->g;
 	struct gr_gk20a *gr = &g->gr;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(gr->g, " ");
 
 	return g->ops.gr.bind_ctxsw_zcull(g, gr, ch,
 				args->gpu_va, args->mode);
@@ -775,9 +775,10 @@ static int gk20a_ioctl_channel_submit_gpfifo(
 	struct fifo_profile_gk20a *profile = NULL;
 	u32 submit_flags = 0;
 	int fd = -1;
+	struct gk20a *g = ch->g;
 
 	int ret = 0;
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 #ifdef CONFIG_DEBUG_FS
 	profile = gk20a_fifo_profile_acquire(ch->g);
@@ -1064,8 +1065,9 @@ long gk20a_channel_ioctl(struct file *filp,
 	struct device *dev = dev_from_gk20a(ch->g);
 	u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
 	int err = 0;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("start %d", _IOC_NR(cmd));
+	nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
 
 	if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
 		(_IOC_NR(cmd) == 0) ||
@@ -1224,7 +1226,7 @@ long gk20a_channel_ioctl(struct file *filp,
 	{
 		u32 timeout =
			(u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
-		gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
+		nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
			   timeout, ch->chid);
 		ch->timeout_ms_max = timeout;
 		gk20a_channel_trace_sched_param(
@@ -1238,7 +1240,7 @@ long gk20a_channel_ioctl(struct file *filp,
 		bool timeout_debug_dump = !((u32)
			((struct nvgpu_set_timeout_ex_args *)buf)->flags &
			(1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
-		gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
+		nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
			   timeout, ch->chid);
 		ch->timeout_ms_max = timeout;
 		ch->timeout_debug_dump = timeout_debug_dump;
@@ -1367,7 +1369,7 @@ long gk20a_channel_ioctl(struct file *filp,
 
 	gk20a_channel_put(ch);
 
-	gk20a_dbg_fn("end");
+	nvgpu_log_fn(g, "end");
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c b/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c
index 039f65f8..3ab8cf9e 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c
@@ -209,9 +209,10 @@ static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
 static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev,
 		struct nvgpu_gpu_set_event_filter_args *args)
 {
+	struct gk20a *g = dev->session->g;
 	u32 mask;
 
-	gk20a_dbg(gpu_dbg_fn, "");
+	nvgpu_log(g, gpu_dbg_fn, " ");
 
 	if (args->flags)
 		return -EINVAL;
@@ -237,7 +238,7 @@ static long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd,
 	u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE];
 	int err = 0;
 
-	gk20a_dbg(gpu_dbg_fn, "nr=%d", _IOC_NR(cmd));
+	nvgpu_log(g, gpu_dbg_fn, "nr=%d", _IOC_NR(cmd));
 
 	if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0)
 		|| (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST))
@@ -681,7 +682,7 @@ int nvgpu_clk_arb_debugfs_init(struct gk20a *g)
 	struct dentry *gpu_root = l->debugfs;
 	struct dentry *d;
 
-	gk20a_dbg(gpu_dbg_info, "g=%p", g);
+	nvgpu_log(g, gpu_dbg_info, "g=%p", g);
 
 	d = debugfs_create_file(
		"arb_stats",
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
index 70707a5c..7bb97369 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
@@ -62,14 +62,14 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
 	struct gk20a_ctrl_priv *priv;
 	int err = 0;
 
-	gk20a_dbg_fn("");
-
 	l = container_of(inode->i_cdev,
 			 struct nvgpu_os_linux, ctrl.cdev);
 	g = gk20a_get(&l->g);
 	if (!g)
 		return -ENODEV;
 
+	nvgpu_log_fn(g, " ");
+
 	priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv));
 	if (!priv) {
 		err = -ENOMEM;
@@ -102,7 +102,7 @@ int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
 	struct gk20a_ctrl_priv *priv = filp->private_data;
 	struct gk20a *g = priv->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (priv->clk_session)
 		nvgpu_clk_arb_release_session(g, priv->clk_session);
@@ -684,7 +684,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
 	/* Copy to user space - pointed by "args->pwarpstate" */
 	if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate,
	    w_state, ioctl_size)) {
-		gk20a_dbg_fn("copy_to_user failed!");
+		nvgpu_log_fn(g, "copy_to_user failed!");
 		err = -EFAULT;
 	}
 
@@ -901,7 +901,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
 	u32 align = args->in.alignment ? args->in.alignment : SZ_4K;
 	int fd;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* not yet supported */
 	if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK))
@@ -933,7 +933,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
 
 	args->out.dmabuf_fd = fd;
 
-	gk20a_dbg_fn("done, fd=%d", fd);
+	nvgpu_log_fn(g, "done, fd=%d", fd);
 
 	return 0;
 }
@@ -943,7 +943,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g,
 {
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (args->reserved[0] || args->reserved[1] ||
	    args->reserved[2] || args->reserved[3])
@@ -951,7 +951,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g,
 
 	err = nvgpu_vidmem_get_space(g, &args->total_free_bytes);
 
-	gk20a_dbg_fn("done, err=%d, bytes=%lld", err, args->total_free_bytes);
+	nvgpu_log_fn(g, "done, err=%d, bytes=%lld", err, args->total_free_bytes);
 
 	return err;
 }
@@ -973,7 +973,7 @@ static int nvgpu_gpu_clk_get_vf_points(struct gk20a *g,
 	u16 min_mhz;
 	u16 max_mhz;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!session || args->flags)
 		return -EINVAL;
@@ -1059,7 +1059,7 @@ static int nvgpu_gpu_clk_get_range(struct gk20a *g,
 	int err;
 	u16 min_mhz, max_mhz;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!session)
 		return -EINVAL;
@@ -1138,7 +1138,7 @@ static int nvgpu_gpu_clk_set_info(struct gk20a *g,
 	int i;
 	int ret;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!session || args->flags)
 		return -EINVAL;
@@ -1201,7 +1201,7 @@ static int nvgpu_gpu_clk_get_info(struct gk20a *g,
 	int err;
 	int bit;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!session)
 		return -EINVAL;
@@ -1287,7 +1287,7 @@ static int nvgpu_gpu_get_event_fd(struct gk20a *g,
 {
 	struct nvgpu_clk_session *session = priv->clk_session;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!session)
1293 return -EINVAL; 1293 return -EINVAL;
@@ -1301,7 +1301,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g,
1301{ 1301{
1302 int err = -EINVAL; 1302 int err = -EINVAL;
1303 1303
1304 gk20a_dbg_fn(""); 1304 nvgpu_log_fn(g, " ");
1305 1305
1306 if (args->reserved) 1306 if (args->reserved)
1307 return -EINVAL; 1307 return -EINVAL;
@@ -1337,7 +1337,7 @@ static int nvgpu_gpu_get_current(struct gk20a *g,
1337{ 1337{
1338 int err; 1338 int err;
1339 1339
1340 gk20a_dbg_fn(""); 1340 nvgpu_log_fn(g, " ");
1341 1341
1342 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1342 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1343 return -EINVAL; 1343 return -EINVAL;
@@ -1361,7 +1361,7 @@ static int nvgpu_gpu_get_power(struct gk20a *g,
1361{ 1361{
1362 int err; 1362 int err;
1363 1363
1364 gk20a_dbg_fn(""); 1364 nvgpu_log_fn(g, " ");
1365 1365
1366 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1366 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1367 return -EINVAL; 1367 return -EINVAL;
@@ -1386,7 +1386,7 @@ static int nvgpu_gpu_get_temperature(struct gk20a *g,
1386 int err; 1386 int err;
1387 u32 temp_f24_8; 1387 u32 temp_f24_8;
1388 1388
1389 gk20a_dbg_fn(""); 1389 nvgpu_log_fn(g, " ");
1390 1390
1391 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1391 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1392 return -EINVAL; 1392 return -EINVAL;
@@ -1415,7 +1415,7 @@ static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
1415{ 1415{
1416 int err; 1416 int err;
1417 1417
1418 gk20a_dbg_fn(""); 1418 nvgpu_log_fn(g, " ");
1419 1419
1420 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1420 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1421 return -EINVAL; 1421 return -EINVAL;
@@ -1491,7 +1491,7 @@ static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g,
1491 u32 i = 0; 1491 u32 i = 0;
1492 int err = 0; 1492 int err = 0;
1493 1493
1494 gk20a_dbg_fn(""); 1494 nvgpu_log_fn(g, " ");
1495 1495
1496 user_channels = (int __user *)(uintptr_t)args->channels; 1496 user_channels = (int __user *)(uintptr_t)args->channels;
1497 1497
@@ -1556,7 +1556,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
1556 struct zbc_query_params *zbc_tbl; 1556 struct zbc_query_params *zbc_tbl;
1557 int i, err = 0; 1557 int i, err = 0;
1558 1558
1559 gk20a_dbg_fn("start %d", _IOC_NR(cmd)); 1559 nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
1560 1560
1561 if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) || 1561 if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) ||
1562 (_IOC_NR(cmd) == 0) || 1562 (_IOC_NR(cmd) == 0) ||
@@ -1855,7 +1855,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
1855 break; 1855 break;
1856 1856
1857 default: 1857 default:
1858 gk20a_dbg_info("unrecognized gpu ioctl cmd: 0x%x", cmd); 1858 nvgpu_log_info(g, "unrecognized gpu ioctl cmd: 0x%x", cmd);
1859 err = -ENOTTY; 1859 err = -ENOTTY;
1860 break; 1860 break;
1861 } 1861 }
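Note the reordering in gk20a_ctrl_dev_open() above: under the old API the function-entry trace could be the first statement, but the new logger dereferences g, so the call moves below the point where g is known valid. A standalone sketch of the hazard, with get_device() standing in for the driver's gk20a_get():

#include <errno.h>
#include <stdio.h>

struct gk20a { const char *name; };

/* Hypothetical lookup that can fail, like gk20a_get() in the driver. */
static struct gk20a *get_device(int available)
{
	static struct gk20a g = { .name = "gv11b" };
	return available ? &g : NULL;
}

static int dev_open(int available)
{
	struct gk20a *g = get_device(available);

	/* With the old global-state gk20a_dbg_fn("") the trace could be
	 * the first statement; a per-device logger placed there would
	 * dereference a NULL g, so it must follow this check. */
	if (!g)
		return -ENODEV;

	printf("nvgpu: %s %s\n", g->name, __func__);
	return 0;
}

int main(void)
{
	dev_open(1);
	printf("failed open: %d\n", dev_open(0));
	return 0;
}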
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
index a53d1cfb..2aba2664 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -56,7 +56,7 @@ static int alloc_profiler(struct gk20a *g,
56 struct dbg_profiler_object_data *prof; 56 struct dbg_profiler_object_data *prof;
57 *_prof = NULL; 57 *_prof = NULL;
58 58
59 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 59 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
60 60
61 prof = nvgpu_kzalloc(g, sizeof(*prof)); 61 prof = nvgpu_kzalloc(g, sizeof(*prof));
62 if (!prof) 62 if (!prof)
@@ -72,7 +72,7 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_
72 struct dbg_session_gk20a_linux *dbg_s_linux; 72 struct dbg_session_gk20a_linux *dbg_s_linux;
73 *_dbg_s_linux = NULL; 73 *_dbg_s_linux = NULL;
74 74
75 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 75 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
76 76
77 dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux)); 77 dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
78 if (!dbg_s_linux) 78 if (!dbg_s_linux)
@@ -142,8 +142,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
142 unsigned int mask = 0; 142 unsigned int mask = 0;
143 struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data; 143 struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
144 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s; 144 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
145 struct gk20a *g = dbg_s->g;
145 146
146 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 147 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
147 148
148 poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait); 149 poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
149 150
@@ -151,9 +152,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
151 152
152 if (dbg_s->dbg_events.events_enabled && 153 if (dbg_s->dbg_events.events_enabled &&
153 dbg_s->dbg_events.num_pending_events > 0) { 154 dbg_s->dbg_events.num_pending_events > 0) {
154 gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d", 155 nvgpu_log(g, gpu_dbg_gpu_dbg, "found pending event on session id %d",
155 dbg_s->id); 156 dbg_s->id);
156 gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", 157 nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
157 dbg_s->dbg_events.num_pending_events); 158 dbg_s->dbg_events.num_pending_events);
158 mask = (POLLPRI | POLLIN); 159 mask = (POLLPRI | POLLIN);
159 } 160 }
@@ -170,7 +171,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
170 struct gk20a *g = dbg_s->g; 171 struct gk20a *g = dbg_s->g;
171 struct dbg_profiler_object_data *prof_obj, *tmp_obj; 172 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
172 173
173 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name); 174 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
174 175
175 /* unbind channels */ 176 /* unbind channels */
176 dbg_unbind_all_channels_gk20a(dbg_s); 177 dbg_unbind_all_channels_gk20a(dbg_s);
@@ -213,7 +214,11 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
213 214
214int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp) 215int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
215{ 216{
216 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 217 struct nvgpu_os_linux *l = container_of(inode->i_cdev,
218 struct nvgpu_os_linux, prof.cdev);
219 struct gk20a *g = &l->g;
220
221 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
217 return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */); 222 return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
218} 223}
219 224
@@ -223,7 +228,7 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
223 int err; 228 int err;
224 struct gk20a *g = dbg_s->g; 229 struct gk20a *g = dbg_s->g;
225 230
226 gk20a_dbg_fn("powergate mode = %d", args->enable); 231 nvgpu_log_fn(g, "powergate mode = %d", args->enable);
227 232
228 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 233 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
229 err = nvgpu_dbg_timeout_enable(dbg_s, args->enable); 234 err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
@@ -356,7 +361,9 @@ static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
356 struct dbg_session_gk20a *dbg_s, 361 struct dbg_session_gk20a *dbg_s,
357 struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args) 362 struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
358{ 363{
359 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 364 struct gk20a *g = dbg_s->g;
365
366 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
360 367
361 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 368 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
362 369
@@ -373,7 +380,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
373 struct gk20a *g = dbg_s->g; 380 struct gk20a *g = dbg_s->g;
374 int err = 0; 381 int err = 0;
375 382
376 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d", 383 nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
377 timeout_mode); 384 timeout_mode);
378 385
379 switch (timeout_mode) { 386 switch (timeout_mode) {
@@ -401,7 +408,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
401 break; 408 break;
402 } 409 }
403 410
404 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s", 411 nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
405 g->timeouts_enabled ? "Yes" : "No"); 412 g->timeouts_enabled ? "Yes" : "No");
406 413
407 return err; 414 return err;
@@ -431,7 +438,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
431 438
432 dev = dev_from_gk20a(g); 439 dev = dev_from_gk20a(g);
433 440
434 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name); 441 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
435 442
436 err = alloc_session(g, &dbg_session_linux); 443 err = alloc_session(g, &dbg_session_linux);
437 if (err) 444 if (err)
@@ -482,7 +489,7 @@ static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
482 struct dbg_profiler_object_data *prof_obj, *tmp_obj; 489 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
483 struct dbg_session_channel_data_linux *ch_data_linux; 490 struct dbg_session_channel_data_linux *ch_data_linux;
484 491
485 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 492 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
486 493
487 chid = ch_data->chid; 494 chid = ch_data->chid;
488 495
@@ -527,7 +534,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
527 struct dbg_session_data *session_data; 534 struct dbg_session_data *session_data;
528 int err = 0; 535 int err = 0;
529 536
530 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", 537 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
531 g->name, args->channel_fd); 538 g->name, args->channel_fd);
532 539
533 /* 540 /*
@@ -541,12 +548,12 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
541 548
542 ch = gk20a_get_channel_from_file(args->channel_fd); 549 ch = gk20a_get_channel_from_file(args->channel_fd);
543 if (!ch) { 550 if (!ch) {
544 gk20a_dbg_fn("no channel found for fd"); 551 nvgpu_log_fn(g, "no channel found for fd");
545 err = -EINVAL; 552 err = -EINVAL;
546 goto out_fput; 553 goto out_fput;
547 } 554 }
548 555
549 gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid); 556 nvgpu_log_fn(g, "%s hwchid=%d", g->name, ch->chid);
550 557
551 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 558 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
552 nvgpu_mutex_acquire(&ch->dbg_s_lock); 559 nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -818,7 +825,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
818 struct gk20a *g = dbg_s->g; 825 struct gk20a *g = dbg_s->g;
819 struct channel_gk20a *ch; 826 struct channel_gk20a *ch;
820 827
821 gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); 828 nvgpu_log_fn(g, "%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
822 829
823 if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) { 830 if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) {
824 nvgpu_err(g, "regops limit exceeded"); 831 nvgpu_err(g, "regops limit exceeded");
@@ -890,10 +897,10 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
890 (args->ops + 897 (args->ops +
891 ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op)); 898 ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op));
892 899
893 gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu", 900 nvgpu_log_fn(g, "Regops fragment: start_op=%llu ops=%llu",
894 ops_offset, num_ops); 901 ops_offset, num_ops);
895 902
896 gk20a_dbg_fn("Copying regops from userspace"); 903 nvgpu_log_fn(g, "Copying regops from userspace");
897 904
898 if (copy_from_user(linux_fragment, 905 if (copy_from_user(linux_fragment,
899 fragment, fragment_size)) { 906 fragment, fragment_size)) {
@@ -917,7 +924,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
917 if (err) 924 if (err)
918 break; 925 break;
919 926
920 gk20a_dbg_fn("Copying result to userspace"); 927 nvgpu_log_fn(g, "Copying result to userspace");
921 928
922 if (copy_to_user(fragment, linux_fragment, 929 if (copy_to_user(fragment, linux_fragment,
923 fragment_size)) { 930 fragment_size)) {
@@ -955,7 +962,7 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
955{ 962{
956 int err; 963 int err;
957 struct gk20a *g = dbg_s->g; 964 struct gk20a *g = dbg_s->g;
958 gk20a_dbg_fn("%s powergate mode = %d", 965 nvgpu_log_fn(g, "%s powergate mode = %d",
959 g->name, args->mode); 966 g->name, args->mode);
960 967
961 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 968 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -978,7 +985,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
978 struct gk20a *g = dbg_s->g; 985 struct gk20a *g = dbg_s->g;
979 struct channel_gk20a *ch_gk20a; 986 struct channel_gk20a *ch_gk20a;
980 987
981 gk20a_dbg_fn("%s smpc ctxsw mode = %d", 988 nvgpu_log_fn(g, "%s smpc ctxsw mode = %d",
982 g->name, args->mode); 989 g->name, args->mode);
983 990
984 err = gk20a_busy(g); 991 err = gk20a_busy(g);
@@ -1075,7 +1082,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
1075 struct channel_gk20a *ch; 1082 struct channel_gk20a *ch;
1076 int err = 0, action = args->mode; 1083 int err = 0, action = args->mode;
1077 1084
1078 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode); 1085 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
1079 1086
1080 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1087 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1081 if (!ch) 1088 if (!ch)
@@ -1127,7 +1134,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
1127 struct gk20a *g = get_gk20a(dbg_session_linux->dev); 1134 struct gk20a *g = get_gk20a(dbg_session_linux->dev);
1128 struct dbg_profiler_object_data *prof_obj; 1135 struct dbg_profiler_object_data *prof_obj;
1129 1136
1130 gk20a_dbg_fn("%s", g->name); 1137 nvgpu_log_fn(g, "%s", g->name);
1131 1138
1132 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1139 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1133 1140
@@ -1171,7 +1178,7 @@ static int nvgpu_ioctl_free_profiler_object(
1171 struct dbg_profiler_object_data *prof_obj, *tmp_obj; 1178 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
1172 bool obj_found = false; 1179 bool obj_found = false;
1173 1180
1174 gk20a_dbg_fn("%s session_id = %d profiler_handle = %x", 1181 nvgpu_log_fn(g, "%s session_id = %d profiler_handle = %x",
1175 g->name, dbg_s->id, args->profiler_handle); 1182 g->name, dbg_s->id, args->profiler_handle);
1176 1183
1177 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1184 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -1253,7 +1260,9 @@ static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_
1253 1260
1254static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s) 1261static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
1255{ 1262{
1256 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1263 struct gk20a *g = dbg_s->g;
1264
1265 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1257 1266
1258 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 1267 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1259 1268
@@ -1265,7 +1274,9 @@ static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
1265 1274
1266static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s) 1275static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
1267{ 1276{
1268 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1277 struct gk20a *g = dbg_s->g;
1278
1279 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1269 1280
1270 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 1281 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1271 1282
@@ -1277,7 +1288,9 @@ static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
1277 1288
1278static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s) 1289static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
1279{ 1290{
1280 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1291 struct gk20a *g = dbg_s->g;
1292
1293 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1281 1294
1282 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 1295 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1283 1296
@@ -1294,13 +1307,13 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
1294{ 1307{
1295 int ret = 0; 1308 int ret = 0;
1296 struct channel_gk20a *ch; 1309 struct channel_gk20a *ch;
1310 struct gk20a *g = dbg_s->g;
1297 1311
1298 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd); 1312 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
1299 1313
1300 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1314 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1301 if (!ch) { 1315 if (!ch) {
1302 nvgpu_err(dbg_s->g, 1316 nvgpu_err(g, "no channel bound to dbg session");
1303 "no channel bound to dbg session");
1304 return -EINVAL; 1317 return -EINVAL;
1305 } 1318 }
1306 1319
@@ -1318,8 +1331,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
1318 break; 1331 break;
1319 1332
1320 default: 1333 default:
1321 nvgpu_err(dbg_s->g, 1334 nvgpu_err(g, "unrecognized dbg gpu events ctrl cmd: 0x%x",
1322 "unrecognized dbg gpu events ctrl cmd: 0x%x",
1323 args->cmd); 1335 args->cmd);
1324 ret = -EINVAL; 1336 ret = -EINVAL;
1325 break; 1337 break;
@@ -1422,7 +1434,7 @@ static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
1422 if (!ch) 1434 if (!ch)
1423 return -EINVAL; 1435 return -EINVAL;
1424 1436
1425 gk20a_dbg_fn(""); 1437 nvgpu_log_fn(g, " ");
1426 1438
1427 return g->ops.gr.update_pc_sampling ? 1439 return g->ops.gr.update_pc_sampling ?
1428 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL; 1440 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
@@ -1646,7 +1658,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
1646 struct dbg_profiler_object_data *prof_obj; 1658 struct dbg_profiler_object_data *prof_obj;
1647 int err = 0; 1659 int err = 0;
1648 1660
1649 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); 1661 nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
1650 1662
1651 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1663 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1652 1664
@@ -1678,7 +1690,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1678 struct dbg_profiler_object_data *prof_obj, *my_prof_obj; 1690 struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
1679 int err = 0; 1691 int err = 0;
1680 1692
1681 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); 1693 nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
1682 1694
1683 if (g->profiler_reservation_count < 0) { 1695 if (g->profiler_reservation_count < 0) {
1684 nvgpu_err(g, "Negative reservation count!"); 1696 nvgpu_err(g, "Negative reservation count!");
@@ -1782,12 +1794,12 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
1782 struct channel_gk20a *ch; 1794 struct channel_gk20a *ch;
1783 int err; 1795 int err;
1784 1796
1785 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", 1797 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
1786 g->name, args->channel_fd); 1798 g->name, args->channel_fd);
1787 1799
1788 ch = gk20a_get_channel_from_file(args->channel_fd); 1800 ch = gk20a_get_channel_from_file(args->channel_fd);
1789 if (!ch) { 1801 if (!ch) {
1790 gk20a_dbg_fn("no channel found for fd"); 1802 nvgpu_log_fn(g, "no channel found for fd");
1791 return -EINVAL; 1803 return -EINVAL;
1792 } 1804 }
1793 1805
@@ -1802,7 +1814,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
1802 nvgpu_mutex_release(&dbg_s->ch_list_lock); 1814 nvgpu_mutex_release(&dbg_s->ch_list_lock);
1803 1815
1804 if (!channel_found) { 1816 if (!channel_found) {
1805 gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd); 1817 nvgpu_log_fn(g, "channel not bounded, fd=%d\n", args->channel_fd);
1806 err = -EINVAL; 1818 err = -EINVAL;
1807 goto out; 1819 goto out;
1808 } 1820 }
@@ -1820,7 +1832,11 @@ out:
1820 1832
1821int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp) 1833int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
1822{ 1834{
1823 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1835 struct nvgpu_os_linux *l = container_of(inode->i_cdev,
1836 struct nvgpu_os_linux, dbg.cdev);
1837 struct gk20a *g = &l->g;
1838
1839 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1824 return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */); 1840 return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
1825} 1841}
1826 1842
@@ -1833,7 +1849,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1833 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE]; 1849 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
1834 int err = 0; 1850 int err = 0;
1835 1851
1836 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1852 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1837 1853
1838 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) || 1854 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
1839 (_IOC_NR(cmd) == 0) || 1855 (_IOC_NR(cmd) == 0) ||
@@ -1979,7 +1995,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1979 1995
1980 nvgpu_mutex_release(&dbg_s->ioctl_lock); 1996 nvgpu_mutex_release(&dbg_s->ioctl_lock);
1981 1997
1982 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); 1998 nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);
1983 1999
1984 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) 2000 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1985 err = copy_to_user((void __user *)arg, 2001 err = copy_to_user((void __user *)arg,
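Both device-open hooks in this file now recover the GPU context before their first trace by walking from the character device embedded in the inode back to its container. A compilable sketch of that container_of() walk; struct nvgpu_os_linux is trimmed here to two embedded cdevs, and the kernel's container_of additionally type-checks the pointer:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of; the kernel version adds a type check. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cdev { int dummy; };
struct gk20a { const char *name; };

/* Trimmed stand-in for struct nvgpu_os_linux: the Linux layer embeds
 * the char devices next to the gk20a object. */
struct nvgpu_os_linux {
	struct gk20a g;
	struct { struct cdev cdev; } dbg;
	struct { struct cdev cdev; } prof;
};

int main(void)
{
	struct nvgpu_os_linux l = { .g = { .name = "gp10b" } };
	struct cdev *i_cdev = &l.prof.cdev;  /* what inode->i_cdev holds */

	/* The same two steps the patch adds to the open() hooks: walk
	 * from the embedded cdev back to its container, then take g. */
	struct nvgpu_os_linux *owner =
		container_of(i_cdev, struct nvgpu_os_linux, prof.cdev);
	struct gk20a *g = &owner->g;

	printf("resolved device: %s\n", g->name);
	return 0;
}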
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
index be2315bd..d0bfd55a 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
@@ -175,6 +175,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
175 struct gk20a_event_id_data *event_id_data; 175 struct gk20a_event_id_data *event_id_data;
176 u32 event_id; 176 u32 event_id;
177 int err = 0; 177 int err = 0;
178 struct gk20a *g = tsg->g;
178 179
179 event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id); 180 event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
180 if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) 181 if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
@@ -187,7 +188,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
187 188
188 nvgpu_mutex_acquire(&event_id_data->lock); 189 nvgpu_mutex_acquire(&event_id_data->lock);
189 190
190 gk20a_dbg_info( 191 nvgpu_log_info(g,
191 "posting event for event_id=%d on tsg=%d\n", 192 "posting event for event_id=%d on tsg=%d\n",
192 event_id, tsg->tsgid); 193 event_id, tsg->tsgid);
193 event_id_data->event_posted = true; 194 event_id_data->event_posted = true;
@@ -205,14 +206,14 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
205 u32 event_id = event_id_data->event_id; 206 u32 event_id = event_id_data->event_id;
206 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id; 207 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
207 208
208 gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, ""); 209 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, " ");
209 210
210 poll_wait(filep, &event_id_data->event_id_wq.wq, wait); 211 poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
211 212
212 nvgpu_mutex_acquire(&event_id_data->lock); 213 nvgpu_mutex_acquire(&event_id_data->lock);
213 214
214 if (event_id_data->event_posted) { 215 if (event_id_data->event_posted) {
215 gk20a_dbg_info( 216 nvgpu_log_info(g,
216 "found pending event_id=%d on TSG=%d\n", 217 "found pending event_id=%d on TSG=%d\n",
217 event_id, tsg->tsgid); 218 event_id, tsg->tsgid);
218 mask = (POLLPRI | POLLIN); 219 mask = (POLLPRI | POLLIN);
@@ -363,7 +364,7 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
363 364
364 dev = dev_from_gk20a(g); 365 dev = dev_from_gk20a(g);
365 366
366 gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev)); 367 nvgpu_log(g, gpu_dbg_fn, "tsg: %s", dev_name(dev));
367 368
368 priv = nvgpu_kmalloc(g, sizeof(*priv)); 369 priv = nvgpu_kmalloc(g, sizeof(*priv));
369 if (!priv) { 370 if (!priv) {
@@ -397,12 +398,12 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
397 struct gk20a *g; 398 struct gk20a *g;
398 int ret; 399 int ret;
399 400
400 gk20a_dbg_fn("");
401
402 l = container_of(inode->i_cdev, 401 l = container_of(inode->i_cdev,
403 struct nvgpu_os_linux, tsg.cdev); 402 struct nvgpu_os_linux, tsg.cdev);
404 g = &l->g; 403 g = &l->g;
405 404
405 nvgpu_log_fn(g, " ");
406
406 ret = gk20a_busy(g); 407 ret = gk20a_busy(g);
407 if (ret) { 408 if (ret) {
408 nvgpu_err(g, "failed to power on, %d", ret); 409 nvgpu_err(g, "failed to power on, %d", ret);
@@ -412,7 +413,7 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
412 ret = nvgpu_ioctl_tsg_open(&l->g, filp); 413 ret = nvgpu_ioctl_tsg_open(&l->g, filp);
413 414
414 gk20a_idle(g); 415 gk20a_idle(g);
415 gk20a_dbg_fn("done"); 416 nvgpu_log_fn(g, "done");
416 return ret; 417 return ret;
417} 418}
418 419
@@ -445,7 +446,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
445 u32 level = arg->level; 446 u32 level = arg->level;
446 int err; 447 int err;
447 448
448 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 449 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
449 450
450 nvgpu_mutex_acquire(&sched->control_lock); 451 nvgpu_mutex_acquire(&sched->control_lock);
451 if (sched->control_locked) { 452 if (sched->control_locked) {
@@ -474,7 +475,7 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
474 struct gk20a_sched_ctrl *sched = &l->sched_ctrl; 475 struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
475 int err; 476 int err;
476 477
477 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 478 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
478 479
479 nvgpu_mutex_acquire(&sched->control_lock); 480 nvgpu_mutex_acquire(&sched->control_lock);
480 if (sched->control_locked) { 481 if (sched->control_locked) {
@@ -509,7 +510,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
509 u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE]; 510 u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
510 int err = 0; 511 int err = 0;
511 512
512 gk20a_dbg_fn("start %d", _IOC_NR(cmd)); 513 nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
513 514
514 if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) || 515 if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
515 (_IOC_NR(cmd) == 0) || 516 (_IOC_NR(cmd) == 0) ||
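nvgpu_ioctl_tsg_dev_ioctl(), like the dbg and ctrl handlers earlier in the patch, validates the command number before logging or dispatching. A userspace sketch of that triple check, using made-up DEMO_IOCTL_* values rather than the driver's real magic numbers:

#include <stdio.h>
#include <linux/ioctl.h>  /* _IOW, _IOC_TYPE, _IOC_NR on Linux */

/* Made-up command set mirroring the NVGPU_TSG_IOCTL_* scheme. */
#define DEMO_IOCTL_MAGIC 'T'
#define DEMO_IOCTL_BIND  _IOW(DEMO_IOCTL_MAGIC, 1, int)
#define DEMO_IOCTL_LAST  1

static long demo_ioctl(unsigned int cmd)
{
	/* The triple check the handlers run before dispatch: correct
	 * magic, non-zero number, within the command table. */
	if ((_IOC_TYPE(cmd) != DEMO_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > DEMO_IOCTL_LAST))
		return -1;  /* the driver returns -ENOTTY here */

	printf("dispatching nr=%u\n", _IOC_NR(cmd));
	return 0;
}

int main(void)
{
	demo_ioctl(DEMO_IOCTL_BIND);             /* accepted */
	printf("bad cmd: %ld\n", demo_ioctl(0)); /* rejected */
	return 0;
}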
diff --git a/drivers/gpu/nvgpu/common/linux/log.c b/drivers/gpu/nvgpu/common/linux/log.c
index 4dc8f667..ca29e0f3 100644
--- a/drivers/gpu/nvgpu/common/linux/log.c
+++ b/drivers/gpu/nvgpu/common/linux/log.c
@@ -38,8 +38,6 @@
38 */ 38 */
39#define LOG_FMT "nvgpu: %s %33s:%-4d [%s] %s\n" 39#define LOG_FMT "nvgpu: %s %33s:%-4d [%s] %s\n"
40 40
41u64 nvgpu_dbg_mask = NVGPU_DEFAULT_DBG_MASK;
42
43static const char *log_types[] = { 41static const char *log_types[] = {
44 "ERR", 42 "ERR",
45 "WRN", 43 "WRN",
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c
index 34850013..f00b3cce 100644
--- a/drivers/gpu/nvgpu/common/linux/module.c
+++ b/drivers/gpu/nvgpu/common/linux/module.c
@@ -218,7 +218,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
218 struct gk20a_platform *platform = gk20a_get_platform(dev); 218 struct gk20a_platform *platform = gk20a_get_platform(dev);
219 int err; 219 int err;
220 220
221 gk20a_dbg_fn(""); 221 nvgpu_log_fn(g, " ");
222 222
223 if (g->power_on) 223 if (g->power_on)
224 return 0; 224 return 0;
@@ -331,7 +331,7 @@ static int gk20a_pm_prepare_poweroff(struct device *dev)
331 struct gk20a_platform *platform = gk20a_get_platform(dev); 331 struct gk20a_platform *platform = gk20a_get_platform(dev);
332 bool irqs_enabled; 332 bool irqs_enabled;
333 333
334 gk20a_dbg_fn(""); 334 nvgpu_log_fn(g, " ");
335 335
336 nvgpu_mutex_acquire(&g->poweroff_lock); 336 nvgpu_mutex_acquire(&g->poweroff_lock);
337 337
@@ -1013,7 +1013,7 @@ static int gk20a_pm_init(struct device *dev)
1013 struct gk20a *g = get_gk20a(dev); 1013 struct gk20a *g = get_gk20a(dev);
1014 int err = 0; 1014 int err = 0;
1015 1015
1016 gk20a_dbg_fn(""); 1016 nvgpu_log_fn(g, " ");
1017 1017
1018 /* Initialise pm runtime */ 1018 /* Initialise pm runtime */
1019 if (g->railgate_delay) { 1019 if (g->railgate_delay) {
@@ -1043,7 +1043,7 @@ void gk20a_driver_start_unload(struct gk20a *g)
1043{ 1043{
1044 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 1044 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
1045 1045
1046 gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n"); 1046 nvgpu_log(g, gpu_dbg_shutdown, "Driver is now going down!\n");
1047 1047
1048 down_write(&l->busy_lock); 1048 down_write(&l->busy_lock);
1049 __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); 1049 __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
@@ -1134,8 +1134,6 @@ static int gk20a_probe(struct platform_device *dev)
1134 return -ENODATA; 1134 return -ENODATA;
1135 } 1135 }
1136 1136
1137 gk20a_dbg_fn("");
1138
1139 platform_set_drvdata(dev, platform); 1137 platform_set_drvdata(dev, platform);
1140 1138
1141 if (gk20a_gpu_is_virtual(&dev->dev)) 1139 if (gk20a_gpu_is_virtual(&dev->dev))
@@ -1148,6 +1146,9 @@ static int gk20a_probe(struct platform_device *dev)
1148 } 1146 }
1149 1147
1150 gk20a = &l->g; 1148 gk20a = &l->g;
1149
1150 nvgpu_log_fn(gk20a, " ");
1151
1151 nvgpu_init_gk20a(gk20a); 1152 nvgpu_init_gk20a(gk20a);
1152 set_gk20a(dev, gk20a); 1153 set_gk20a(dev, gk20a);
1153 l->dev = &dev->dev; 1154 l->dev = &dev->dev;
@@ -1248,7 +1249,7 @@ int nvgpu_remove(struct device *dev, struct class *class)
1248 struct gk20a_platform *platform = gk20a_get_platform(dev); 1249 struct gk20a_platform *platform = gk20a_get_platform(dev);
1249 int err; 1250 int err;
1250 1251
1251 gk20a_dbg_fn(""); 1252 nvgpu_log_fn(g, " ");
1252 1253
1253 err = nvgpu_quiesce(g); 1254 err = nvgpu_quiesce(g);
1254 WARN(err, "gpu failed to idle during driver removal"); 1255 WARN(err, "gpu failed to idle during driver removal");
@@ -1288,7 +1289,7 @@ int nvgpu_remove(struct device *dev, struct class *class)
1288 if (platform->remove) 1289 if (platform->remove)
1289 platform->remove(dev); 1290 platform->remove(dev);
1290 1291
1291 gk20a_dbg_fn("removed"); 1292 nvgpu_log_fn(g, "removed");
1292 1293
1293 return err; 1294 return err;
1294} 1295}
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 3cac13ba..015295ba 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -140,7 +140,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
140 WARN_ON(!ptr); 140 WARN_ON(!ptr);
141 data = ptr[w]; 141 data = ptr[w];
142#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 142#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
143 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); 143 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
144#endif 144#endif
145 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { 145 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
146 u32 value; 146 u32 value;
@@ -177,7 +177,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
177 memcpy(dest, src, size); 177 memcpy(dest, src, size);
178#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 178#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
179 if (size) 179 if (size)
180 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", 180 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
181 src, *dest, size); 181 src, *dest, size);
182#endif 182#endif
183 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { 183 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
@@ -215,7 +215,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
215 215
216 WARN_ON(!ptr); 216 WARN_ON(!ptr);
217#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 217#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
218 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); 218 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
219#endif 219#endif
220 ptr[w] = data; 220 ptr[w] = data;
221 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { 221 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
@@ -249,7 +249,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
249 WARN_ON(!mem->cpu_va); 249 WARN_ON(!mem->cpu_va);
250#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 250#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
251 if (size) 251 if (size)
252 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", 252 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
253 dest, *src, size); 253 dest, *src, size);
254#endif 254#endif
255 memcpy(dest, src, size); 255 memcpy(dest, src, size);
@@ -296,7 +296,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
296 WARN_ON(!mem->cpu_va); 296 WARN_ON(!mem->cpu_va);
297#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 297#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
298 if (size) 298 if (size)
299 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x [times %d]", 299 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x [times %d]",
300 dest, c, size); 300 dest, c, size);
301#endif 301#endif
302 memset(dest, c, size); 302 memset(dest, c, size);
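These accessors keep their per-word trace under CONFIG_TEGRA_SIMULATION_PLATFORM, where logging every access is useful rather than crippling. A toy version of the same compile-time gate; enable it with -DSIM_PLATFORM, and buf stands in for the mapped aperture:

#include <stdio.h>

static unsigned int buf[4];  /* stands in for the mapped aperture */

static void mem_wr32(unsigned int w, unsigned int data)
{
#ifdef SIM_PLATFORM
	/* Per-access trace, compiled in only for simulation builds. */
	printf(" %p = 0x%x\n", (void *)&buf[w], data);
#endif
	buf[w] = data;
}

int main(void)
{
	mem_wr32(0, 0xdeadbeef);
	printf("buf[0] = 0x%x\n", buf[0]);
	return 0;
}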
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
index 82648ca3..5301b13d 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
@@ -551,6 +551,7 @@ static void gk20a_tegra_scale_init(struct device *dev)
551 struct gk20a_platform *platform = gk20a_get_platform(dev); 551 struct gk20a_platform *platform = gk20a_get_platform(dev);
552 struct gk20a_scale_profile *profile = platform->g->scale_profile; 552 struct gk20a_scale_profile *profile = platform->g->scale_profile;
553 struct gk20a_emc_params *emc_params; 553 struct gk20a_emc_params *emc_params;
554 struct gk20a *g = platform->g;
554 555
555 if (!profile) 556 if (!profile)
556 return; 557 return;
@@ -568,7 +569,7 @@ static void gk20a_tegra_scale_init(struct device *dev)
568#ifdef CONFIG_TEGRA_BWMGR 569#ifdef CONFIG_TEGRA_BWMGR
569 emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU); 570 emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU);
570 if (!emc_params->bwmgr_cl) { 571 if (!emc_params->bwmgr_cl) {
571 gk20a_dbg_info("%s Missing GPU BWMGR client\n", __func__); 572 nvgpu_log_info(g, "%s Missing GPU BWMGR client\n", __func__);
572 return; 573 return;
573 } 574 }
574#endif 575#endif
@@ -767,6 +768,7 @@ static int gk20a_tegra_probe(struct device *dev)
767 struct device_node *np = dev->of_node; 768 struct device_node *np = dev->of_node;
768 bool joint_xpu_rail = false; 769 bool joint_xpu_rail = false;
769 int ret; 770 int ret;
771 struct gk20a *g = platform->g;
770 772
771#ifdef CONFIG_COMMON_CLK 773#ifdef CONFIG_COMMON_CLK
772 /* DVFS is not guaranteed to be initialized at the time of probe on 774 /* DVFS is not guaranteed to be initialized at the time of probe on
@@ -775,13 +777,13 @@ static int gk20a_tegra_probe(struct device *dev)
775 if (!platform->gpu_rail) { 777 if (!platform->gpu_rail) {
776 platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME); 778 platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME);
777 if (!platform->gpu_rail) { 779 if (!platform->gpu_rail) {
778 gk20a_dbg_info("deferring probe no gpu_rail\n"); 780 nvgpu_log_info(g, "deferring probe no gpu_rail");
779 return -EPROBE_DEFER; 781 return -EPROBE_DEFER;
780 } 782 }
781 } 783 }
782 784
783 if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) { 785 if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) {
784 gk20a_dbg_info("deferring probe gpu_rail not ready\n"); 786 nvgpu_log_info(g, "deferring probe gpu_rail not ready");
785 return -EPROBE_DEFER; 787 return -EPROBE_DEFER;
786 } 788 }
787#endif 789#endif
@@ -798,7 +800,7 @@ static int gk20a_tegra_probe(struct device *dev)
798#endif 800#endif
799 801
800 if (joint_xpu_rail) { 802 if (joint_xpu_rail) {
801 gk20a_dbg_info("XPU rails are joint\n"); 803 nvgpu_log_info(g, "XPU rails are joint\n");
802 platform->g->can_railgate = false; 804 platform->g->can_railgate = false;
803 } 805 }
804 806
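The two deferral messages in gk20a_tegra_probe() sit on -EPROBE_DEFER paths: if the DVFS rail is not registered or not ready, the probe asks to be retried once its dependency has initialized. A sketch of that retry contract; 517 is the Linux value of EPROBE_DEFER, and rail_ready stands in for the tegra_dvfs state:

#include <stdio.h>

#define EPROBE_DEFER 517  /* Linux: retry this probe later */

static int rail_ready;  /* set once the DVFS rail has registered */

static int tegra_probe(void)
{
	/* Same shape as gk20a_tegra_probe(): if the GPU rail is not
	 * there yet, defer and let the driver core retry the probe
	 * after the dependency initializes. */
	if (!rail_ready)
		return -EPROBE_DEFER;
	return 0;
}

int main(void)
{
	printf("first probe: %d\n", tegra_probe());
	rail_ready = 1;
	printf("second probe: %d\n", tegra_probe());
	return 0;
}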
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
index 6e54d00b..08c5df0f 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
@@ -273,11 +273,11 @@ void gp10b_tegra_prescale(struct device *dev)
273 struct gk20a *g = get_gk20a(dev); 273 struct gk20a *g = get_gk20a(dev);
274 u32 avg = 0; 274 u32 avg = 0;
275 275
276 gk20a_dbg_fn(""); 276 nvgpu_log_fn(g, " ");
277 277
278 nvgpu_pmu_load_norm(g, &avg); 278 nvgpu_pmu_load_norm(g, &avg);
279 279
280 gk20a_dbg_fn("done"); 280 nvgpu_log_fn(g, "done");
281} 281}
282 282
283void gp10b_tegra_postscale(struct device *pdev, 283void gp10b_tegra_postscale(struct device *pdev,
@@ -288,7 +288,7 @@ void gp10b_tegra_postscale(struct device *pdev,
288 struct gk20a *g = get_gk20a(pdev); 288 struct gk20a *g = get_gk20a(pdev);
289 unsigned long emc_rate; 289 unsigned long emc_rate;
290 290
291 gk20a_dbg_fn(""); 291 nvgpu_log_fn(g, " ");
292 if (profile && !platform->is_railgated(pdev)) { 292 if (profile && !platform->is_railgated(pdev)) {
293 unsigned long emc_scale; 293 unsigned long emc_scale;
294 294
@@ -306,7 +306,7 @@ void gp10b_tegra_postscale(struct device *pdev,
306 (struct tegra_bwmgr_client *)profile->private_data, 306 (struct tegra_bwmgr_client *)profile->private_data,
307 emc_rate, TEGRA_BWMGR_SET_EMC_FLOOR); 307 emc_rate, TEGRA_BWMGR_SET_EMC_FLOOR);
308 } 308 }
309 gk20a_dbg_fn("done"); 309 nvgpu_log_fn(g, "done");
310} 310}
311 311
312long gp10b_round_clk_rate(struct device *dev, unsigned long rate) 312long gp10b_round_clk_rate(struct device *dev, unsigned long rate)
@@ -328,6 +328,7 @@ int gp10b_clk_get_freqs(struct device *dev,
328 unsigned long **freqs, int *num_freqs) 328 unsigned long **freqs, int *num_freqs)
329{ 329{
330 struct gk20a_platform *platform = gk20a_get_platform(dev); 330 struct gk20a_platform *platform = gk20a_get_platform(dev);
331 struct gk20a *g = platform->g;
331 unsigned long max_rate; 332 unsigned long max_rate;
332 unsigned long new_rate = 0, prev_rate = 0; 333 unsigned long new_rate = 0, prev_rate = 0;
333 int i = 0, freq_counter = 0; 334 int i = 0, freq_counter = 0;
@@ -358,7 +359,7 @@ int gp10b_clk_get_freqs(struct device *dev,
358 *freqs = gp10b_freq_table; 359 *freqs = gp10b_freq_table;
359 *num_freqs = freq_counter; 360 *num_freqs = freq_counter;
360 361
361 gk20a_dbg_info("min rate: %ld max rate: %ld num_of_freq %d\n", 362 nvgpu_log_info(g, "min rate: %ld max rate: %ld num_of_freq %d\n",
362 gp10b_freq_table[0], max_rate, *num_freqs); 363 gp10b_freq_table[0], max_rate, *num_freqs);
363 364
364 return 0; 365 return 0;
diff --git a/drivers/gpu/nvgpu/common/linux/sched.c b/drivers/gpu/nvgpu/common/linux/sched.c
index a7da020c..2ad5aabf 100644
--- a/drivers/gpu/nvgpu/common/linux/sched.c
+++ b/drivers/gpu/nvgpu/common/linux/sched.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -37,10 +37,11 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
37 size_t size, loff_t *off) 37 size_t size, loff_t *off)
38{ 38{
39 struct gk20a_sched_ctrl *sched = filp->private_data; 39 struct gk20a_sched_ctrl *sched = filp->private_data;
40 struct gk20a *g = sched->g;
40 struct nvgpu_sched_event_arg event = { 0 }; 41 struct nvgpu_sched_event_arg event = { 0 };
41 int err; 42 int err;
42 43
43 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, 44 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched,
44 "filp=%p buf=%p size=%zu", filp, buf, size); 45 "filp=%p buf=%p size=%zu", filp, buf, size);
45 46
46 if (size < sizeof(event)) 47 if (size < sizeof(event))
@@ -77,9 +78,10 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
77unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait) 78unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
78{ 79{
79 struct gk20a_sched_ctrl *sched = filp->private_data; 80 struct gk20a_sched_ctrl *sched = filp->private_data;
81 struct gk20a *g = sched->g;
80 unsigned int mask = 0; 82 unsigned int mask = 0;
81 83
82 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 84 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
83 85
84 nvgpu_mutex_acquire(&sched->status_lock); 86 nvgpu_mutex_acquire(&sched->status_lock);
85 poll_wait(filp, &sched->readout_wq.wq, wait); 87 poll_wait(filp, &sched->readout_wq.wq, wait);
@@ -93,7 +95,9 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
93static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched, 95static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
94 struct nvgpu_sched_get_tsgs_args *arg) 96 struct nvgpu_sched_get_tsgs_args *arg)
95{ 97{
96 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", 98 struct gk20a *g = sched->g;
99
100 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
97 arg->size, arg->buffer); 101 arg->size, arg->buffer);
98 102
99 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { 103 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -115,7 +119,9 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
115static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched, 119static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
116 struct nvgpu_sched_get_tsgs_args *arg) 120 struct nvgpu_sched_get_tsgs_args *arg)
117{ 121{
118 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", 122 struct gk20a *g = sched->g;
123
124 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
119 arg->size, arg->buffer); 125 arg->size, arg->buffer);
120 126
121 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { 127 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -139,7 +145,8 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
139static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, 145static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
140 struct nvgpu_sched_get_tsgs_by_pid_args *arg) 146 struct nvgpu_sched_get_tsgs_by_pid_args *arg)
141{ 147{
142 struct fifo_gk20a *f = &sched->g->fifo; 148 struct gk20a *g = sched->g;
149 struct fifo_gk20a *f = &g->fifo;
143 struct tsg_gk20a *tsg; 150 struct tsg_gk20a *tsg;
144 u64 *bitmap; 151 u64 *bitmap;
145 unsigned int tsgid; 152 unsigned int tsgid;
@@ -147,7 +154,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
147 pid_t tgid = (pid_t)arg->pid; 154 pid_t tgid = (pid_t)arg->pid;
148 int err = 0; 155 int err = 0;
149 156
150 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx", 157 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx",
151 (pid_t)arg->pid, arg->size, arg->buffer); 158 (pid_t)arg->pid, arg->size, arg->buffer);
152 159
153 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { 160 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -186,7 +193,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
186 struct tsg_gk20a *tsg; 193 struct tsg_gk20a *tsg;
187 u32 tsgid = arg->tsgid; 194 u32 tsgid = arg->tsgid;
188 195
189 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 196 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
190 197
191 if (tsgid >= f->num_channels) 198 if (tsgid >= f->num_channels)
192 return -EINVAL; 199 return -EINVAL;
@@ -221,7 +228,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
221 u32 tsgid = arg->tsgid; 228 u32 tsgid = arg->tsgid;
222 int err; 229 int err;
223 230
224 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 231 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
225 232
226 if (tsgid >= f->num_channels) 233 if (tsgid >= f->num_channels)
227 return -EINVAL; 234 return -EINVAL;
@@ -256,7 +263,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
256 u32 tsgid = arg->tsgid; 263 u32 tsgid = arg->tsgid;
257 int err; 264 int err;
258 265
259 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 266 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
260 267
261 if (tsgid >= f->num_channels) 268 if (tsgid >= f->num_channels)
262 return -EINVAL; 269 return -EINVAL;
@@ -283,7 +290,9 @@ done:
283 290
284static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched) 291static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
285{ 292{
286 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 293 struct gk20a *g = sched->g;
294
295 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
287 296
288 nvgpu_mutex_acquire(&sched->control_lock); 297 nvgpu_mutex_acquire(&sched->control_lock);
289 sched->control_locked = true; 298 sched->control_locked = true;
@@ -293,7 +302,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
293 302
294static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched) 303static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
295{ 304{
296 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 305 struct gk20a *g = sched->g;
306
307 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
297 308
298 nvgpu_mutex_acquire(&sched->control_lock); 309 nvgpu_mutex_acquire(&sched->control_lock);
299 sched->control_locked = false; 310 sched->control_locked = false;
@@ -304,7 +315,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
304static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched, 315static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched,
305 struct nvgpu_sched_api_version_args *args) 316 struct nvgpu_sched_api_version_args *args)
306{ 317{
307 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 318 struct gk20a *g = sched->g;
319
320 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
308 321
309 args->version = NVGPU_SCHED_API_VERSION; 322 args->version = NVGPU_SCHED_API_VERSION;
310 return 0; 323 return 0;
@@ -318,7 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
318 struct tsg_gk20a *tsg; 331 struct tsg_gk20a *tsg;
319 u32 tsgid = arg->tsgid; 332 u32 tsgid = arg->tsgid;
320 333
321 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 334 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
322 335
323 if (tsgid >= f->num_channels) 336 if (tsgid >= f->num_channels)
324 return -EINVAL; 337 return -EINVAL;
@@ -355,7 +368,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
355 struct tsg_gk20a *tsg; 368 struct tsg_gk20a *tsg;
356 u32 tsgid = arg->tsgid; 369 u32 tsgid = arg->tsgid;
357 370
358 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 371 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
359 372
360 if (tsgid >= f->num_channels) 373 if (tsgid >= f->num_channels)
361 return -EINVAL; 374 return -EINVAL;
@@ -390,7 +403,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
390 return -ENODEV; 403 return -ENODEV;
391 sched = &l->sched_ctrl; 404 sched = &l->sched_ctrl;
392 405
393 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g); 406 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);
394 407
395 if (!sched->sw_ready) { 408 if (!sched->sw_ready) {
396 err = gk20a_busy(g); 409 err = gk20a_busy(g);
@@ -410,7 +423,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
410 memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size); 423 memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size);
411 424
412 filp->private_data = sched; 425 filp->private_data = sched;
413 gk20a_dbg(gpu_dbg_sched, "filp=%p sched=%p", filp, sched); 426 nvgpu_log(g, gpu_dbg_sched, "filp=%p sched=%p", filp, sched);
414 427
415free_ref: 428free_ref:
416 if (err) 429 if (err)
@@ -426,7 +439,7 @@ long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
426 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; 439 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
427 int err = 0; 440 int err = 0;
428 441
429 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd)); 442 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd));
430 443
431 if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) || 444 if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) ||
432 (_IOC_NR(cmd) == 0) || 445 (_IOC_NR(cmd) == 0) ||
@@ -509,7 +522,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
509 struct tsg_gk20a *tsg; 522 struct tsg_gk20a *tsg;
510 unsigned int tsgid; 523 unsigned int tsgid;
511 524
512 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched); 525 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);
513 526
514 /* release any reference to TSGs */ 527 /* release any reference to TSGs */
515 for (tsgid = 0; tsgid < f->num_channels; tsgid++) { 528 for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
@@ -535,7 +548,7 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
535 struct gk20a_sched_ctrl *sched = &l->sched_ctrl; 548 struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
536 int err; 549 int err;
537 550
538 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 551 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
539 552
540 if (!sched->sw_ready) { 553 if (!sched->sw_ready) {
541 err = gk20a_busy(g); 554 err = gk20a_busy(g);
@@ -560,7 +573,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
560 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 573 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
561 struct gk20a_sched_ctrl *sched = &l->sched_ctrl; 574 struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
562 575
563 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 576 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
564 577
565 nvgpu_mutex_acquire(&sched->status_lock); 578 nvgpu_mutex_acquire(&sched->status_lock);
566 NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap); 579 NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap);
@@ -592,7 +605,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
592 sched->bitmap_size = roundup(f->num_channels, 64) / 8; 605 sched->bitmap_size = roundup(f->num_channels, 64) / 8;
593 sched->status = 0; 606 sched->status = 0;
594 607
595 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu", 608 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
596 g, sched, sched->bitmap_size); 609 g, sched, sched->bitmap_size);
597 610
598 sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size); 611 sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
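The bitmap size logged at the end of gk20a_sched_ctrl_init() packs one bit per channel, padded to whole 64-bit words: roundup(f->num_channels, 64) / 8 bytes. A quick check of that arithmetic:

#include <stdio.h>

/* Round x up to a multiple of n, as the kernel's roundup() does. */
#define roundup(x, n) ((((x) + (n) - 1) / (n)) * (n))

int main(void)
{
	/* One bit per channel, padded to whole u64 words, then bits to
	 * bytes: 512 channels -> 64 bytes, 520 -> 72, 1 -> 8. */
	unsigned int counts[] = { 512, 520, 1 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%u channels -> %u-byte bitmap\n",
		       counts[i], roundup(counts[i], 64) / 8);
	return 0;
}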
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c
index 0bd8e2bc..0858e6b1 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c
@@ -33,7 +33,7 @@ static unsigned long vgpu_clk_get_rate(struct gk20a *g, u32 api_domain)
33 int err; 33 int err;
34 unsigned long ret = 0; 34 unsigned long ret = 0;
35 35
36 gk20a_dbg_fn(""); 36 nvgpu_log_fn(g, " ");
37 37
38 switch (api_domain) { 38 switch (api_domain) {
39 case CTRL_CLK_DOMAIN_GPCCLK: 39 case CTRL_CLK_DOMAIN_GPCCLK:
@@ -65,7 +65,7 @@ static int vgpu_clk_set_rate(struct gk20a *g,
65 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; 65 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
66 int err = -EINVAL; 66 int err = -EINVAL;
67 67
68 gk20a_dbg_fn(""); 68 nvgpu_log_fn(g, " ");
69 69
70 switch (api_domain) { 70 switch (api_domain) {
71 case CTRL_CLK_DOMAIN_GPCCLK: 71 case CTRL_CLK_DOMAIN_GPCCLK:
@@ -121,7 +121,7 @@ int vgpu_clk_get_freqs(struct device *dev,
121 unsigned int i; 121 unsigned int i;
122 int err; 122 int err;
123 123
124 gk20a_dbg_fn(""); 124 nvgpu_log_fn(g, " ");
125 125
126 msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE; 126 msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE;
127 msg.handle = vgpu_get_handle(g); 127 msg.handle = vgpu_get_handle(g);
@@ -152,7 +152,7 @@ int vgpu_clk_cap_rate(struct device *dev, unsigned long rate)
152 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; 152 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
153 int err = 0; 153 int err = 0;
154 154
155 gk20a_dbg_fn(""); 155 nvgpu_log_fn(g, " ");
156 156
157 msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE; 157 msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE;
158 msg.handle = vgpu_get_handle(g); 158 msg.handle = vgpu_get_handle(g);
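
clk_vgpu.c shows the other common variant: the bare function-entry trace gk20a_dbg_fn("") becomes nvgpu_log_fn(g, " "), keeping the near-empty message but tagging it with the device. Sketch under the same assumptions (the function name is hypothetical):

static int example_clk_op(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");   /* was: gk20a_dbg_fn(""); */
        return 0;
}

The same mechanical change repeats in css_vgpu.c, fecs_trace_vgpu.c, and vidmem.c below.
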
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c
index fe9dc670..ba2bf58b 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c
@@ -86,7 +86,7 @@ static int vgpu_css_init_snapshot_buffer(struct gr_gk20a *gr)
86 int err; 86 int err;
87 u64 size; 87 u64 size;
88 88
89 gk20a_dbg_fn(""); 89 nvgpu_log_fn(g, " ");
90 90
91 if (data->hw_snapshot) 91 if (data->hw_snapshot)
92 return 0; 92 return 0;
@@ -125,6 +125,7 @@ fail:
125void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr) 125void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
126{ 126{
127 struct gk20a_cs_snapshot *data = gr->cs_data; 127 struct gk20a_cs_snapshot *data = gr->cs_data;
128 struct gk20a *g = gr->g;
128 129
129 if (!data->hw_snapshot) 130 if (!data->hw_snapshot)
130 return; 131 return;
@@ -135,7 +136,7 @@ void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
135 vgpu_ivm_mempool_unreserve(css_cookie); 136 vgpu_ivm_mempool_unreserve(css_cookie);
136 css_cookie = NULL; 137 css_cookie = NULL;
137 138
138 gk20a_dbg_info("cyclestats(vgpu): buffer for snapshots released\n"); 139 nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n");
139} 140}
140 141
141int vgpu_css_flush_snapshots(struct channel_gk20a *ch, 142int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
@@ -148,7 +149,7 @@ int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
148 struct gk20a_cs_snapshot *data = gr->cs_data; 149 struct gk20a_cs_snapshot *data = gr->cs_data;
149 int err; 150 int err;
150 151
151 gk20a_dbg_fn(""); 152 nvgpu_log_fn(g, " ");
152 153
153 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; 154 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
154 msg.handle = vgpu_get_handle(g); 155 msg.handle = vgpu_get_handle(g);
@@ -176,7 +177,7 @@ static int vgpu_css_attach(struct channel_gk20a *ch,
176 &msg.params.cyclestats_snapshot; 177 &msg.params.cyclestats_snapshot;
177 int err; 178 int err;
178 179
179 gk20a_dbg_fn(""); 180 nvgpu_log_fn(g, " ");
180 181
181 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; 182 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
182 msg.handle = vgpu_get_handle(g); 183 msg.handle = vgpu_get_handle(g);
@@ -203,7 +204,7 @@ int vgpu_css_detach(struct channel_gk20a *ch,
203 &msg.params.cyclestats_snapshot; 204 &msg.params.cyclestats_snapshot;
204 int err; 205 int err;
205 206
206 gk20a_dbg_fn(""); 207 nvgpu_log_fn(g, " ");
207 208
208 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; 209 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
209 msg.handle = vgpu_get_handle(g); 210 msg.handle = vgpu_get_handle(g);
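
Where a converted function had no struct gk20a in scope, the patch first derives one from a containing object, as the new `struct gk20a *g = gr->g;` line in vgpu_css_release_snapshot_buffer() does above. A reduced, illustrative sketch of that shape (not the full release path):

void example_release_snapshot(struct gr_gk20a *gr)
{
        struct gk20a *g = gr->g;   /* derive the device before logging */

        nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released");
}
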
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c
index 31d89853..499a8eb4 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c
@@ -46,7 +46,7 @@ int vgpu_fecs_trace_init(struct gk20a *g)
46 u32 mempool; 46 u32 mempool;
47 int err; 47 int err;
48 48
49 gk20a_dbg_fn(""); 49 nvgpu_log_fn(g, " ");
50 50
51 vcst = nvgpu_kzalloc(g, sizeof(*vcst)); 51 vcst = nvgpu_kzalloc(g, sizeof(*vcst));
52 if (!vcst) 52 if (!vcst)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c
index c3d95b4a..5d3598b5 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c
@@ -142,7 +142,7 @@ int vgpu_pm_prepare_poweroff(struct device *dev)
142 struct gk20a *g = get_gk20a(dev); 142 struct gk20a *g = get_gk20a(dev);
143 int ret = 0; 143 int ret = 0;
144 144
145 gk20a_dbg_fn(""); 145 nvgpu_log_fn(g, " ");
146 146
147 if (!g->power_on) 147 if (!g->power_on)
148 return 0; 148 return 0;
@@ -162,7 +162,7 @@ int vgpu_pm_finalize_poweron(struct device *dev)
162 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 162 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
163 int err; 163 int err;
164 164
165 gk20a_dbg_fn(""); 165 nvgpu_log_fn(g, " ");
166 166
167 if (g->power_on) 167 if (g->power_on)
168 return 0; 168 return 0;
@@ -227,7 +227,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
227 u32 max_freq; 227 u32 max_freq;
228 int err; 228 int err;
229 229
230 gk20a_dbg_fn(""); 230 nvgpu_log_fn(g, " ");
231 231
232 max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS); 232 max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
233 err = vgpu_clk_cap_rate(profile->dev, max_freq); 233 err = vgpu_clk_cap_rate(profile->dev, max_freq);
@@ -277,7 +277,7 @@ static int vgpu_pm_init(struct device *dev)
277 int num_freqs; 277 int num_freqs;
278 int err = 0; 278 int err = 0;
279 279
280 gk20a_dbg_fn(""); 280 nvgpu_log_fn(g, " ");
281 281
282 if (nvgpu_platform_is_simulation(g)) 282 if (nvgpu_platform_is_simulation(g))
283 return 0; 283 return 0;
@@ -321,14 +321,15 @@ int vgpu_probe(struct platform_device *pdev)
321 return -ENODATA; 321 return -ENODATA;
322 } 322 }
323 323
324 gk20a_dbg_fn("");
325
326 l = kzalloc(sizeof(*l), GFP_KERNEL); 324 l = kzalloc(sizeof(*l), GFP_KERNEL);
327 if (!l) { 325 if (!l) {
328 dev_err(dev, "couldn't allocate gk20a support"); 326 dev_err(dev, "couldn't allocate gk20a support");
329 return -ENOMEM; 327 return -ENOMEM;
330 } 328 }
331 gk20a = &l->g; 329 gk20a = &l->g;
330
331 nvgpu_log_fn(gk20a, " ");
332
332 nvgpu_init_gk20a(gk20a); 333 nvgpu_init_gk20a(gk20a);
333 334
334 nvgpu_kmem_init(gk20a); 335 nvgpu_kmem_init(gk20a);
@@ -428,7 +429,7 @@ int vgpu_probe(struct platform_device *pdev)
428 vgpu_create_sysfs(dev); 429 vgpu_create_sysfs(dev);
429 gk20a_init_gr(gk20a); 430 gk20a_init_gr(gk20a);
430 431
431 gk20a_dbg_info("total ram pages : %lu", totalram_pages); 432 nvgpu_log_info(gk20a, "total ram pages : %lu", totalram_pages);
432 gk20a->gr.max_comptag_mem = totalram_pages 433 gk20a->gr.max_comptag_mem = totalram_pages
433 >> (10 - (PAGE_SHIFT - 10)); 434 >> (10 - (PAGE_SHIFT - 10));
434 435
@@ -442,7 +443,7 @@ int vgpu_remove(struct platform_device *pdev)
442 struct device *dev = &pdev->dev; 443 struct device *dev = &pdev->dev;
443 struct gk20a *g = get_gk20a(dev); 444 struct gk20a *g = get_gk20a(dev);
444 445
445 gk20a_dbg_fn(""); 446 nvgpu_log_fn(g, " ");
446 447
447 vgpu_pm_qos_remove(dev); 448 vgpu_pm_qos_remove(dev);
448 if (g->remove_support) 449 if (g->remove_support)
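
One vgpu_linux.c hunk is more than a rename: vgpu_probe() used to emit its entry trace before allocating anything, but nvgpu_log_fn() needs a gk20a, so the trace moves below the kzalloc() that creates it. A reduced sketch of that ordering constraint (error paths and the rest of probe init omitted):

int example_probe(struct platform_device *pdev)
{
        struct nvgpu_os_linux *l;
        struct gk20a *gk20a;

        l = kzalloc(sizeof(*l), GFP_KERNEL);
        if (!l)
                return -ENOMEM;
        gk20a = &l->g;

        nvgpu_log_fn(gk20a, " ");  /* only now is there a device to tag */
        return 0;
}
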
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 75572b93..baa77515 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -88,8 +88,9 @@ int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
88 u64 *offset) 88 u64 *offset)
89{ 89{
90 struct nvgpu_mapped_buf *mapped_buffer; 90 struct nvgpu_mapped_buf *mapped_buffer;
91 struct gk20a *g = gk20a_from_vm(vm);
91 92
92 gk20a_dbg_fn("gpu_va=0x%llx", gpu_va); 93 nvgpu_log_fn(g, "gpu_va=0x%llx", gpu_va);
93 94
94 nvgpu_mutex_acquire(&vm->update_gmmu_lock); 95 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
95 96
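
vm.c makes the same derive-then-log move, but through the driver's existing gk20a_from_vm() helper rather than a direct member access. Sketch (hypothetical reduced function):

void example_find_buf(struct vm_gk20a *vm, u64 gpu_va)
{
        struct gk20a *g = gk20a_from_vm(vm);

        nvgpu_log_fn(g, "gpu_va=0x%llx", gpu_va);
}
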
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 0fb423b6..37435f97 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -394,7 +394,7 @@ int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)
394{ 394{
395 struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator; 395 struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator;
396 396
397 gk20a_dbg_fn(""); 397 nvgpu_log_fn(g, " ");
398 398
399 if (!nvgpu_alloc_initialized(allocator)) 399 if (!nvgpu_alloc_initialized(allocator))
400 return -ENOSYS; 400 return -ENOSYS;
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
index 52c0a798..01f9262c 100644
--- a/drivers/gpu/nvgpu/common/vbios/bios.c
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -296,7 +296,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
296 struct pci_ext_data_struct *pci_ext_data; 296 struct pci_ext_data_struct *pci_ext_data;
297 297
298 pci_rom = (struct pci_exp_rom *)&g->bios.data[offset]; 298 pci_rom = (struct pci_exp_rom *)&g->bios.data[offset];
299 gk20a_dbg_fn("pci rom sig %04x ptr %04x block %x", 299 nvgpu_log_fn(g, "pci rom sig %04x ptr %04x block %x",
300 pci_rom->sig, pci_rom->pci_data_struct_ptr, 300 pci_rom->sig, pci_rom->pci_data_struct_ptr,
301 pci_rom->size_of_block); 301 pci_rom->size_of_block);
302 302
@@ -309,7 +309,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
309 pci_data = 309 pci_data =
310 (struct pci_data_struct *) 310 (struct pci_data_struct *)
311 &g->bios.data[offset + pci_rom->pci_data_struct_ptr]; 311 &g->bios.data[offset + pci_rom->pci_data_struct_ptr];
312 gk20a_dbg_fn("pci data sig %08x len %d image len %x type %x last %d max %08x", 312 nvgpu_log_fn(g, "pci data sig %08x len %d image len %x type %x last %d max %08x",
313 pci_data->sig, pci_data->pci_data_struct_len, 313 pci_data->sig, pci_data->pci_data_struct_len,
314 pci_data->image_len, pci_data->code_type, 314 pci_data->image_len, pci_data->code_type,
315 pci_data->last_image, 315 pci_data->last_image,
@@ -322,7 +322,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
322 pci_data->pci_data_struct_len + 322 pci_data->pci_data_struct_len +
323 0xf) 323 0xf)
324 & ~0xf]; 324 & ~0xf];
325 gk20a_dbg_fn("pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x", 325 nvgpu_log_fn(g, "pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x",
326 pci_ext_data->sig, 326 pci_ext_data->sig,
327 pci_ext_data->nv_pci_data_ext_rev, 327 pci_ext_data->nv_pci_data_ext_rev,
328 pci_ext_data->nv_pci_data_ext_len, 328 pci_ext_data->nv_pci_data_ext_len,
@@ -330,7 +330,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
330 pci_ext_data->priv_last_image, 330 pci_ext_data->priv_last_image,
331 pci_ext_data->flags); 331 pci_ext_data->flags);
332 332
333 gk20a_dbg_fn("expansion rom offset %x", 333 nvgpu_log_fn(g, "expansion rom offset %x",
334 pci_data->image_len * 512); 334 pci_data->image_len * 512);
335 g->bios.expansion_rom_offset = 335 g->bios.expansion_rom_offset =
336 pci_data->image_len * 512; 336 pci_data->image_len * 512;
@@ -342,7 +342,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
342 } 342 }
343 } 343 }
344 344
345 gk20a_dbg_info("read bios"); 345 nvgpu_log_info(g, "read bios");
346 for (i = 0; i < g->bios.size - 6; i++) { 346 for (i = 0; i < g->bios.size - 6; i++) {
347 if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID && 347 if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID &&
348 nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) { 348 nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) {
@@ -362,7 +362,7 @@ static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
362 struct biosdata biosdata; 362 struct biosdata biosdata;
363 363
364 memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata)); 364 memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata));
365 gk20a_dbg_fn("bios version %x, oem version %x", 365 nvgpu_log_fn(g, "bios version %x, oem version %x",
366 biosdata.version, 366 biosdata.version,
367 biosdata.oem_version); 367 biosdata.oem_version);
368 368
@@ -375,9 +375,9 @@ static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, int offset)
375 struct nvinit_ptrs nvinit_ptrs; 375 struct nvinit_ptrs nvinit_ptrs;
376 376
377 memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs)); 377 memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs));
378 gk20a_dbg_fn("devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr, 378 nvgpu_log_fn(g, "devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr,
379 nvinit_ptrs.devinit_tables_size); 379 nvinit_ptrs.devinit_tables_size);
380 gk20a_dbg_fn("bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr, 380 nvgpu_log_fn(g, "bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr,
381 nvinit_ptrs.bootscripts_size); 381 nvinit_ptrs.bootscripts_size);
382 382
383 g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr]; 383 g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr];
@@ -449,7 +449,7 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
449 struct devinit_engine_interface interface; 449 struct devinit_engine_interface interface;
450 450
451 memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface)); 451 memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface));
452 gk20a_dbg_fn("devinit version %x tables phys %x script phys %x size %d", 452 nvgpu_log_fn(g, "devinit version %x tables phys %x script phys %x size %d",
453 interface.version, 453 interface.version,
454 interface.tables_phys_base, 454 interface.tables_phys_base,
455 interface.script_phys_base, 455 interface.script_phys_base,
@@ -468,7 +468,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
468 468
469 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); 469 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
470 470
471 gk20a_dbg_fn("appInfoHdr ver %d size %d entrySize %d entryCount %d", 471 nvgpu_log_fn(g, "appInfoHdr ver %d size %d entrySize %d entryCount %d",
472 hdr.version, hdr.header_size, 472 hdr.version, hdr.header_size,
473 hdr.entry_size, hdr.entry_count); 473 hdr.entry_size, hdr.entry_count);
474 474
@@ -481,7 +481,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
481 481
482 memcpy(&entry, &g->bios.data[offset], sizeof(entry)); 482 memcpy(&entry, &g->bios.data[offset], sizeof(entry));
483 483
484 gk20a_dbg_fn("appInfo id %d dmem_offset %d", 484 nvgpu_log_fn(g, "appInfo id %d dmem_offset %d",
485 entry.id, entry.dmem_offset); 485 entry.id, entry.dmem_offset);
486 486
487 if (entry.id == APPINFO_ID_DEVINIT) 487 if (entry.id == APPINFO_ID_DEVINIT)
@@ -530,26 +530,26 @@ static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
530 memcpy(&desc, &udesc, sizeof(udesc.v2)); 530 memcpy(&desc, &udesc, sizeof(udesc.v2));
531 break; 531 break;
532 default: 532 default:
533 gk20a_dbg_info("invalid version"); 533 nvgpu_log_info(g, "invalid version");
534 return -EINVAL; 534 return -EINVAL;
535 } 535 }
536 536
537 gk20a_dbg_info("falcon ucode desc version %x len %x", version, desc_size); 537 nvgpu_log_info(g, "falcon ucode desc version %x len %x", version, desc_size);
538 538
539 gk20a_dbg_info("falcon ucode desc stored size %x uncompressed size %x", 539 nvgpu_log_info(g, "falcon ucode desc stored size %x uncompressed size %x",
540 desc.stored_size, desc.uncompressed_size); 540 desc.stored_size, desc.uncompressed_size);
541 gk20a_dbg_info("falcon ucode desc virtualEntry %x, interfaceOffset %x", 541 nvgpu_log_info(g, "falcon ucode desc virtualEntry %x, interfaceOffset %x",
542 desc.virtual_entry, desc.interface_offset); 542 desc.virtual_entry, desc.interface_offset);
543 gk20a_dbg_info("falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x", 543 nvgpu_log_info(g, "falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x",
544 desc.imem_phys_base, desc.imem_load_size, 544 desc.imem_phys_base, desc.imem_load_size,
545 desc.imem_virt_base, desc.imem_sec_base, 545 desc.imem_virt_base, desc.imem_sec_base,
546 desc.imem_sec_size); 546 desc.imem_sec_size);
547 gk20a_dbg_info("falcon ucode DMEM offset %x phys base %x, load size %x", 547 nvgpu_log_info(g, "falcon ucode DMEM offset %x phys base %x, load size %x",
548 desc.dmem_offset, desc.dmem_phys_base, 548 desc.dmem_offset, desc.dmem_phys_base,
549 desc.dmem_load_size); 549 desc.dmem_load_size);
550 550
551 if (desc.stored_size != desc.uncompressed_size) { 551 if (desc.stored_size != desc.uncompressed_size) {
552 gk20a_dbg_info("does not match"); 552 nvgpu_log_info(g, "does not match");
553 return -EINVAL; 553 return -EINVAL;
554 } 554 }
555 555
@@ -575,7 +575,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
575 int i; 575 int i;
576 576
577 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); 577 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
578 gk20a_dbg_fn("falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d", 578 nvgpu_log_fn(g, "falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d",
579 hdr.version, hdr.header_size, 579 hdr.version, hdr.header_size,
580 hdr.entry_size, hdr.entry_count, 580 hdr.entry_size, hdr.entry_count,
581 hdr.desc_version, hdr.desc_size); 581 hdr.desc_version, hdr.desc_size);
@@ -590,7 +590,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
590 590
591 memcpy(&entry, &g->bios.data[offset], sizeof(entry)); 591 memcpy(&entry, &g->bios.data[offset], sizeof(entry));
592 592
593 gk20a_dbg_fn("falcon ucode table entry appid %x targetId %x descPtr %x", 593 nvgpu_log_fn(g, "falcon ucode table entry appid %x targetId %x descPtr %x",
594 entry.application_id, entry.target_id, 594 entry.application_id, entry.target_id,
595 entry.desc_ptr); 595 entry.desc_ptr);
596 596
@@ -638,7 +638,7 @@ static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
638 int err; 638 int err;
639 639
640 memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data)); 640 memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data));
641 gk20a_dbg_fn("falcon ucode table ptr %x", 641 nvgpu_log_fn(g, "falcon ucode table ptr %x",
642 falcon_data.falcon_ucode_table_ptr); 642 falcon_data.falcon_ucode_table_ptr);
643 err = nvgpu_bios_parse_falcon_ucode_table(g, 643 err = nvgpu_bios_parse_falcon_ucode_table(g,
644 falcon_data.falcon_ucode_table_ptr); 644 falcon_data.falcon_ucode_table_ptr);
@@ -676,7 +676,7 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
676 676
677 if (table_id < (ptoken->data_size/data_size)) { 677 if (table_id < (ptoken->data_size/data_size)) {
678 678
679 gk20a_dbg_info("Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x", 679 nvgpu_log_info(g, "Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x",
680 (ptoken->data_ptr + 680 (ptoken->data_ptr +
681 (table_id * data_size)), 681 (table_id * data_size)),
682 perf_table_id_offset); 682 perf_table_id_offset);
@@ -705,18 +705,18 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
705 struct bit_token bit_token; 705 struct bit_token bit_token;
706 int i; 706 int i;
707 707
708 gk20a_dbg_fn(""); 708 nvgpu_log_fn(g, " ");
709 memcpy(&bit, &g->bios.data[offset], sizeof(bit)); 709 memcpy(&bit, &g->bios.data[offset], sizeof(bit));
710 710
711 gk20a_dbg_info("BIT header: %04x %08x", bit.id, bit.signature); 711 nvgpu_log_info(g, "BIT header: %04x %08x", bit.id, bit.signature);
712 gk20a_dbg_info("tokens: %d entries * %d bytes", 712 nvgpu_log_info(g, "tokens: %d entries * %d bytes",
713 bit.token_entries, bit.token_size); 713 bit.token_entries, bit.token_size);
714 714
715 offset += bit.header_size; 715 offset += bit.header_size;
716 for (i = 0; i < bit.token_entries; i++) { 716 for (i = 0; i < bit.token_entries; i++) {
717 memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token)); 717 memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token));
718 718
719 gk20a_dbg_info("BIT token id %d ptr %d size %d ver %d", 719 nvgpu_log_info(g, "BIT token id %d ptr %d size %d ver %d",
720 bit_token.token_id, bit_token.data_ptr, 720 bit_token.token_id, bit_token.data_ptr,
721 bit_token.data_size, bit_token.data_version); 721 bit_token.data_size, bit_token.data_version);
722 722
@@ -753,7 +753,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
753 753
754 offset += bit.token_size; 754 offset += bit.token_size;
755 } 755 }
756 gk20a_dbg_fn("done"); 756 nvgpu_log_fn(g, "done");
757} 757}
758 758
759static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset) 759static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset)
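
Finally, bios.c keeps the entry/exit pairing the old API had: nvgpu_log_fn(g, " ") on entry and nvgpu_log_fn(g, "done") on return, as in nvgpu_bios_parse_bit() above. Illustrative sketch:

static void example_parse(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");    /* entry trace */
        /* ... table parsing ... */
        nvgpu_log_fn(g, "done"); /* exit trace, as in nvgpu_bios_parse_bit() */
}
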