author    Terje Bergstrom <tbergstrom@nvidia.com>              2018-04-18 22:39:46 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-05-09 21:26:04 -0400
commit    dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree      806ba8575d146367ad1be00086ca0cdae35a6b28
parent    7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are intentionally
left in place because they are used from other repositories.

Because the new functions do not work without a pointer to struct gk20a,
and piping it through just for logging is excessive, some log messages are
deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
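The conversion below follows one mechanical pattern throughout the tree. As a
minimal sketch of that pattern (the function name example_destruct is
hypothetical; only the two macros and the struct gk20a / struct boardobj types
come from this change):

    /* Before: gk20a_dbg_info() takes only a printf-style format string
     * and needs no device context. */
    static u32 example_destruct(struct boardobj *pboardobj)
    {
            gk20a_dbg_info(" ");

            if (pboardobj == NULL)
                    return -EINVAL;
            return 0;
    }

    /* After: nvgpu_log_info() is scoped to one GPU instance, so the
     * struct gk20a pointer is fetched from the object being logged about
     * and passed as the first argument. */
    static u32 example_destruct(struct boardobj *pboardobj)
    {
            struct gk20a *g = pboardobj->g;

            nvgpu_log_info(g, " ");

            if (pboardobj == NULL)
                    return -EINVAL;
            return 0;
    }

Where no struct gk20a pointer is conveniently reachable, the log call is
dropped instead (for example, boardobjgrpe32hdrset() in the diff below simply
loses its gk20a_dbg_info("") line).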
-rw-r--r--  drivers/gpu/nvgpu/boardobj/boardobj.c  14
-rw-r--r--  drivers/gpu/nvgpu/boardobj/boardobjgrp.c  52
-rw-r--r--  drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c  6
-rw-r--r--  drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c  6
-rw-r--r--  drivers/gpu/nvgpu/clk/clk.c  4
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_domain.c  56
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_fll.c  28
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_freq_controller.c  14
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_prog.c  46
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_vf_point.c  26
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_vin.c  36
-rw-r--r--  drivers/gpu/nvgpu/common/as.c  19
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde.c  54
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde_gp10b.c  8
-rw-r--r--  drivers/gpu/nvgpu/common/linux/channel.c  10
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ctxsw_trace.c  54
-rw-r--r--  drivers/gpu/nvgpu/common/linux/debug.c  4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/debug_fifo.c  7
-rw-r--r--  drivers/gpu/nvgpu/common/linux/driver_common.c  2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/intr.c  4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/io.c  22
-rw-r--r--  drivers/gpu/nvgpu/common/linux/io_usermode.c  2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl.c  5
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c  33
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c  22
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c  7
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c  42
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_dbg.c  100
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_tsg.c  21
-rw-r--r--  drivers/gpu/nvgpu/common/linux/log.c  2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/module.c  17
-rw-r--r--  drivers/gpu/nvgpu/common/linux/nvgpu_mem.c  10
-rw-r--r--  drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c  10
-rw-r--r--  drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c  11
-rw-r--r--  drivers/gpu/nvgpu/common/linux/sched.c  57
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c  8
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c  11
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c  2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c  17
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c  3
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vidmem.c  2
-rw-r--r--  drivers/gpu/nvgpu/common/vbios/bios.c  54
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ce2_gk20a.c  14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  46
-rw-r--r--  drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c  6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c  17
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fb_gk20a.c  6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c  36
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c  150
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c  82
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  364
-rw-r--r--  drivers/gpu/nvgpu/gk20a/hal.c  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mc_gk20a.c  8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c  28
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  52
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pramin_gk20a.c  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c  8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c  16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/therm_gk20a.c  8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c  18
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c  132
-rw-r--r--  drivers/gpu/nvgpu/gm20b/bus_gm20b.c  4
-rw-r--r--  drivers/gpu/nvgpu/gm20b/clk_gm20b.c  41
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fb_gm20b.c  2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fifo_gm20b.c  2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c  48
-rw-r--r--  drivers/gpu/nvgpu/gm20b/ltc_gm20b.c  16
-rw-r--r--  drivers/gpu/nvgpu/gm20b/mm_gm20b.c  8
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.c  40
-rw-r--r--  drivers/gpu/nvgpu/gm20b/therm_gm20b.c  4
-rw-r--r--  drivers/gpu/nvgpu/gp106/acr_gp106.c  98
-rw-r--r--  drivers/gpu/nvgpu/gp106/bios_gp106.c  20
-rw-r--r--  drivers/gpu/nvgpu/gp106/clk_gp106.c  7
-rw-r--r--  drivers/gpu/nvgpu/gp106/fb_gp106.c  4
-rw-r--r--  drivers/gpu/nvgpu/gp106/gr_gp106.c  10
-rw-r--r--  drivers/gpu/nvgpu/gp106/hal_gp106.c  4
-rw-r--r--  drivers/gpu/nvgpu/gp106/mclk_gp106.c  12
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.c  18
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.h  6
-rw-r--r--  drivers/gpu/nvgpu/gp106/sec2_gp106.c  12
-rw-r--r--  drivers/gpu/nvgpu/gp106/therm_gp106.c  6
-rw-r--r--  drivers/gpu/nvgpu/gp106/xve_gp106.c  60
-rw-r--r--  drivers/gpu/nvgpu/gp106/xve_gp106.h  10
-rw-r--r--  drivers/gpu/nvgpu/gp10b/ce_gp10b.c  10
-rw-r--r--  drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c  4
-rw-r--r--  drivers/gpu/nvgpu/gp10b/fifo_gp10b.c  25
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c  123
-rw-r--r--  drivers/gpu/nvgpu/gp10b/ltc_gp10b.c  14
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mc_gp10b.c  4
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mm_gp10b.c  16
-rw-r--r--  drivers/gpu/nvgpu/gp10b/pmu_gp10b.c  26
-rw-r--r--  drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c  14
-rw-r--r--  drivers/gpu/nvgpu/gp10b/therm_gp10b.c  8
-rw-r--r--  drivers/gpu/nvgpu/gv11b/acr_gv11b.c  38
-rw-r--r--  drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c  6
-rw-r--r--  drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c  4
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.c  2
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c  61
-rw-r--r--  drivers/gpu/nvgpu/gv11b/gr_gv11b.c  132
-rw-r--r--  drivers/gpu/nvgpu/gv11b/ltc_gv11b.c  4
-rw-r--r--  drivers/gpu/nvgpu/gv11b/mm_gv11b.c  2
-rw-r--r--  drivers/gpu/nvgpu/gv11b/pmu_gv11b.c  22
-rw-r--r--  drivers/gpu/nvgpu/gv11b/therm_gv11b.c  4
-rw-r--r--  drivers/gpu/nvgpu/lpwr/lpwr.c  18
-rw-r--r--  drivers/gpu/nvgpu/perf/perf.c  6
-rw-r--r--  drivers/gpu/nvgpu/perf/vfe_equ.c  30
-rw-r--r--  drivers/gpu/nvgpu/perf/vfe_var.c  58
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrdev.c  14
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrmonitor.c  18
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrpolicy.c  12
-rw-r--r--  drivers/gpu/nvgpu/pstate/pstate.c  16
-rw-r--r--  drivers/gpu/nvgpu/therm/thrmchannel.c  14
-rw-r--r--  drivers/gpu/nvgpu/therm/thrmdev.c  14
-rw-r--r--  drivers/gpu/nvgpu/vgpu/ce2_vgpu.c  2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/dbg_vgpu.c  6
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c  70
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c  14
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c  2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c  82
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c  2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c  3
-rw-r--r--  drivers/gpu/nvgpu/vgpu/ltc_vgpu.c  6
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c  23
-rw-r--r--  drivers/gpu/nvgpu/vgpu/tsg_vgpu.c  15
-rw-r--r--  drivers/gpu/nvgpu/vgpu/vgpu.c  12
-rw-r--r--  drivers/gpu/nvgpu/volt/volt_dev.c  12
-rw-r--r--  drivers/gpu/nvgpu/volt/volt_pmu.c  2
-rw-r--r--  drivers/gpu/nvgpu/volt/volt_policy.c  12
-rw-r--r--  drivers/gpu/nvgpu/volt/volt_rail.c  20
131 files changed, 1726 insertions, 1637 deletions
diff --git a/drivers/gpu/nvgpu/boardobj/boardobj.c b/drivers/gpu/nvgpu/boardobj/boardobj.c
index f9be6981..f38c7c4a 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobj.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobj.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ u32 boardobj_construct_super(struct gk20a *g, struct boardobj **ppboardobj,
 	struct boardobj *pboardobj = NULL;
 	struct boardobj *devtmp = (struct boardobj *)args;
 
-	gk20a_dbg_info(" ");
+	nvgpu_log_info(g, " ");
 
 	if (devtmp == NULL)
 		return -EINVAL;
@@ -61,7 +61,9 @@ u32 boardobj_construct_super(struct gk20a *g, struct boardobj **ppboardobj,
 
 u32 boardobj_destruct_super(struct boardobj *pboardobj)
 {
-	gk20a_dbg_info("");
+	struct gk20a *g = pboardobj->g;
+
+	nvgpu_log_info(g, " ");
 	if (pboardobj == NULL)
 		return -EINVAL;
 
@@ -75,7 +77,7 @@ u32 boardobj_destruct_super(struct boardobj *pboardobj)
 bool boardobj_implements_super(struct gk20a *g, struct boardobj *pboardobj,
 		u8 type)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	return (0 != (pboardobj->type_mask & BIT(type)));
 }
@@ -83,12 +85,12 @@ bool boardobj_implements_super(struct gk20a *g, struct boardobj *pboardobj,
 u32 boardobj_pmudatainit_super(struct gk20a *g, struct boardobj *pboardobj,
 		struct nv_pmu_boardobj *pmudata)
 {
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	if (pboardobj == NULL)
 		return -EINVAL;
 	if (pmudata == NULL)
 		return -EINVAL;
 	pmudata->type = pboardobj->type;
-	gk20a_dbg_info(" Done");
+	nvgpu_log_info(g, " Done");
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
index 64c02a84..7343f66c 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
@@ -50,7 +50,7 @@ struct boardobjgrp_pmucmdhandler_params {
50 50
51u32 boardobjgrp_construct_super(struct gk20a *g, struct boardobjgrp *pboardobjgrp) 51u32 boardobjgrp_construct_super(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
52{ 52{
53 gk20a_dbg_info(""); 53 nvgpu_log_info(g, " ");
54 54
55 if (pboardobjgrp == NULL) 55 if (pboardobjgrp == NULL)
56 return -EINVAL; 56 return -EINVAL;
@@ -101,7 +101,9 @@ u32 boardobjgrp_construct_super(struct gk20a *g, struct boardobjgrp *pboardobjgr
101 101
102u32 boardobjgrp_destruct_impl(struct boardobjgrp *pboardobjgrp) 102u32 boardobjgrp_destruct_impl(struct boardobjgrp *pboardobjgrp)
103{ 103{
104 gk20a_dbg_info(""); 104 struct gk20a *g = pboardobjgrp->g;
105
106 nvgpu_log_info(g, " ");
105 107
106 if (pboardobjgrp == NULL) 108 if (pboardobjgrp == NULL)
107 return -EINVAL; 109 return -EINVAL;
@@ -120,7 +122,7 @@ u32 boardobjgrp_destruct_super(struct boardobjgrp *pboardobjgrp)
120 u32 stat; 122 u32 stat;
121 u8 index; 123 u8 index;
122 124
123 gk20a_dbg_info(""); 125 nvgpu_log_info(g, " ");
124 126
125 if (pboardobjgrp->mask == NULL) 127 if (pboardobjgrp->mask == NULL)
126 return -EINVAL; 128 return -EINVAL;
@@ -165,7 +167,7 @@ u32 boardobjgrp_pmucmd_construct_impl(struct gk20a *g, struct boardobjgrp
165 *pboardobjgrp, struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid, 167 *pboardobjgrp, struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid,
166 u8 hdrsize, u8 entrysize, u16 fbsize, u32 ss_offset, u8 rpc_func_id) 168 u8 hdrsize, u8 entrysize, u16 fbsize, u32 ss_offset, u8 rpc_func_id)
167{ 169{
168 gk20a_dbg_info(""); 170 nvgpu_log_info(g, " ");
169 171
170 /* Copy the parameters into the CMD*/ 172 /* Copy the parameters into the CMD*/
171 cmd->id = id; 173 cmd->id = id;
@@ -234,7 +236,7 @@ u32 boardobjgrp_pmucmd_pmuinithandle_impl(struct gk20a *g,
234 u32 status = 0; 236 u32 status = 0;
235 struct nvgpu_mem *sysmem_desc = &pcmd->surf.sysmem_desc; 237 struct nvgpu_mem *sysmem_desc = &pcmd->surf.sysmem_desc;
236 238
237 gk20a_dbg_info(""); 239 nvgpu_log_info(g, " ");
238 240
239 if (g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid(g, 241 if (g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid(g,
240 pboardobjgrp, pcmd)) 242 pboardobjgrp, pcmd))
@@ -259,7 +261,7 @@ u32 boardobjgrp_pmuinithandle_impl(struct gk20a *g,
259{ 261{
260 u32 status = 0; 262 u32 status = 0;
261 263
262 gk20a_dbg_info(""); 264 nvgpu_log_info(g, " ");
263 265
264 status = boardobjgrp_pmucmd_pmuinithandle_impl(g, pboardobjgrp, 266 status = boardobjgrp_pmucmd_pmuinithandle_impl(g, pboardobjgrp,
265 &pboardobjgrp->pmu.set); 267 &pboardobjgrp->pmu.set);
@@ -295,7 +297,7 @@ u32 boardobjgrp_pmuhdrdatainit_super(struct gk20a *g, struct boardobjgrp
295 *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu, 297 *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu,
296 struct boardobjgrpmask *mask) 298 struct boardobjgrpmask *mask)
297{ 299{
298 gk20a_dbg_info(""); 300 nvgpu_log_info(g, " ");
299 301
300 if (pboardobjgrp == NULL) 302 if (pboardobjgrp == NULL)
301 return -EINVAL; 303 return -EINVAL;
@@ -306,7 +308,7 @@ u32 boardobjgrp_pmuhdrdatainit_super(struct gk20a *g, struct boardobjgrp
306 pboardobjgrppmu->obj_slots = BOARDOBJGRP_PMU_SLOTS_GET(pboardobjgrp); 308 pboardobjgrppmu->obj_slots = BOARDOBJGRP_PMU_SLOTS_GET(pboardobjgrp);
307 pboardobjgrppmu->flags = 0; 309 pboardobjgrppmu->flags = 0;
308 310
309 gk20a_dbg_info(" Done"); 311 nvgpu_log_info(g, " Done");
310 return 0; 312 return 0;
311} 313}
312 314
@@ -314,7 +316,7 @@ static u32 boardobjgrp_pmudatainstget_stub(struct gk20a *g,
314 struct nv_pmu_boardobjgrp *boardobjgrppmu, 316 struct nv_pmu_boardobjgrp *boardobjgrppmu,
315 struct nv_pmu_boardobj **ppboardobjpmudata, u8 idx) 317 struct nv_pmu_boardobj **ppboardobjpmudata, u8 idx)
316{ 318{
317 gk20a_dbg_info(""); 319 nvgpu_log_info(g, " ");
318 return -EINVAL; 320 return -EINVAL;
319} 321}
320 322
@@ -323,7 +325,7 @@ static u32 boardobjgrp_pmustatusinstget_stub(struct gk20a *g,
323 void *pboardobjgrppmu, 325 void *pboardobjgrppmu,
324 struct nv_pmu_boardobj_query **ppBoardobjpmustatus, u8 idx) 326 struct nv_pmu_boardobj_query **ppBoardobjpmustatus, u8 idx)
325{ 327{
326 gk20a_dbg_info(""); 328 nvgpu_log_info(g, " ");
327 return -EINVAL; 329 return -EINVAL;
328} 330}
329 331
@@ -336,7 +338,7 @@ u32 boardobjgrp_pmudatainit_legacy(struct gk20a *g,
336 struct nv_pmu_boardobj *ppmudata = NULL; 338 struct nv_pmu_boardobj *ppmudata = NULL;
337 u8 index; 339 u8 index;
338 340
339 gk20a_dbg_info(""); 341 nvgpu_log_info(g, " ");
340 342
341 if (pboardobjgrp == NULL) 343 if (pboardobjgrp == NULL)
342 return -EINVAL; 344 return -EINVAL;
@@ -374,7 +376,7 @@ u32 boardobjgrp_pmudatainit_legacy(struct gk20a *g,
374 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END 376 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END
375 377
376boardobjgrppmudatainit_legacy_done: 378boardobjgrppmudatainit_legacy_done:
377 gk20a_dbg_info(" Done"); 379 nvgpu_log_info(g, " Done");
378 return status; 380 return status;
379} 381}
380 382
@@ -386,7 +388,7 @@ u32 boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp
386 struct nv_pmu_boardobj *ppmudata = NULL; 388 struct nv_pmu_boardobj *ppmudata = NULL;
387 u8 index; 389 u8 index;
388 390
389 gk20a_dbg_info(""); 391 nvgpu_log_info(g, " ");
390 392
391 if (pboardobjgrp == NULL) 393 if (pboardobjgrp == NULL)
392 return -EINVAL; 394 return -EINVAL;
@@ -420,7 +422,7 @@ u32 boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp
420 } 422 }
421 423
422boardobjgrppmudatainit_super_done: 424boardobjgrppmudatainit_super_done:
423 gk20a_dbg_info(" Done"); 425 nvgpu_log_info(g, " Done");
424 return status; 426 return status;
425} 427}
426 428
@@ -452,7 +454,7 @@ u32 boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
452 struct boardobjgrp_pmu_cmd *pcmd = 454 struct boardobjgrp_pmu_cmd *pcmd =
453 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); 455 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set);
454 456
455 gk20a_dbg_info(""); 457 nvgpu_log_info(g, " ");
456 458
457 if (check_boardobjgrp_param(g, pboardobjgrp)) 459 if (check_boardobjgrp_param(g, pboardobjgrp))
458 return -EINVAL; 460 return -EINVAL;
@@ -511,7 +513,7 @@ u32 boardobjgrp_pmuset_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjgrp
511 struct boardobjgrp_pmu_cmd *pcmd = 513 struct boardobjgrp_pmu_cmd *pcmd =
512 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); 514 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set);
513 515
514 gk20a_dbg_info(""); 516 nvgpu_log_info(g, " ");
515 517
516 if (check_boardobjgrp_param(g, pboardobjgrp)) 518 if (check_boardobjgrp_param(g, pboardobjgrp))
517 return -EINVAL; 519 return -EINVAL;
@@ -568,7 +570,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
568 struct boardobjgrp_pmu_cmd *pset = 570 struct boardobjgrp_pmu_cmd *pset =
569 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); 571 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set);
570 572
571 gk20a_dbg_info(""); 573 nvgpu_log_info(g, " ");
572 574
573 if (check_boardobjgrp_param(g, pboardobjgrp)) 575 if (check_boardobjgrp_param(g, pboardobjgrp))
574 return -EINVAL; 576 return -EINVAL;
@@ -635,7 +637,7 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg
635 struct boardobjgrp_pmu_cmd *pcmd = 637 struct boardobjgrp_pmu_cmd *pcmd =
636 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.getstatus); 638 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.getstatus);
637 639
638 gk20a_dbg_info(""); 640 nvgpu_log_info(g, " ");
639 641
640 if (check_boardobjgrp_param(g, pboardobjgrp)) 642 if (check_boardobjgrp_param(g, pboardobjgrp))
641 return -EINVAL; 643 return -EINVAL;
@@ -690,8 +692,9 @@ static u32
690boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp, 692boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp,
691 struct boardobj *pboardobj, u8 index) 693 struct boardobj *pboardobj, u8 index)
692{ 694{
695 struct gk20a *g = pboardobjgrp->g;
693 696
694 gk20a_dbg_info(""); 697 nvgpu_log_info(g, " ");
695 698
696 if (pboardobjgrp == NULL) 699 if (pboardobjgrp == NULL)
697 return -EINVAL; 700 return -EINVAL;
@@ -719,7 +722,7 @@ boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp,
719 722
720 pboardobjgrp->objmask |= BIT(index); 723 pboardobjgrp->objmask |= BIT(index);
721 724
722 gk20a_dbg_info(" Done"); 725 nvgpu_log_info(g, " Done");
723 726
724 return boardobjgrpmask_bitset(pboardobjgrp->mask, index); 727 return boardobjgrpmask_bitset(pboardobjgrp->mask, index);
725} 728}
@@ -789,8 +792,9 @@ static u32 boardobjgrp_objremoveanddestroy_final(
789{ 792{
790 u32 status = 0; 793 u32 status = 0;
791 u32 stat; 794 u32 stat;
795 struct gk20a *g = pboardobjgrp->g;
792 796
793 gk20a_dbg_info(""); 797 nvgpu_log_info(g, " ");
794 798
795 if (!boardobjgrp_idxisvalid(pboardobjgrp, index)) 799 if (!boardobjgrp_idxisvalid(pboardobjgrp, index))
796 return -EINVAL; 800 return -EINVAL;
@@ -824,8 +828,6 @@ void boardobjgrpe32hdrset(struct nv_pmu_boardobjgrp *hdr, u32 objmask)
824{ 828{
825 u32 slots = objmask; 829 u32 slots = objmask;
826 830
827 gk20a_dbg_info("");
828
829 HIGHESTBITIDX_32(slots); 831 HIGHESTBITIDX_32(slots);
830 slots++; 832 slots++;
831 833
@@ -844,7 +846,7 @@ static void boardobjgrp_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
844 struct boardobjgrp *pboardobjgrp = phandlerparams->pboardobjgrp; 846 struct boardobjgrp *pboardobjgrp = phandlerparams->pboardobjgrp;
845 struct boardobjgrp_pmu_cmd *pgrpcmd = phandlerparams->pcmd; 847 struct boardobjgrp_pmu_cmd *pgrpcmd = phandlerparams->pcmd;
846 848
847 gk20a_dbg_info(""); 849 nvgpu_log_info(g, " ");
848 850
849 pgrpmsg = &msg->msg.boardobj.grp; 851 pgrpmsg = &msg->msg.boardobj.grp;
850 852
@@ -895,7 +897,7 @@ static u32 boardobjgrp_pmucmdsend(struct gk20a *g,
895 u32 seqdesc; 897 u32 seqdesc;
896 u32 status = 0; 898 u32 status = 0;
897 899
898 gk20a_dbg_info(""); 900 nvgpu_log_info(g, " ");
899 901
900 memset(&payload, 0, sizeof(payload)); 902 memset(&payload, 0, sizeof(payload));
901 memset(&handlerparams, 0, sizeof(handlerparams)); 903 memset(&handlerparams, 0, sizeof(handlerparams));
diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c
index 7aabb89e..1f2cd836 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ u32 boardobjgrpconstruct_e255(struct gk20a *g,
 	u32 status = 0;
 	u8 objslots;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	objslots = 255;
 	status = boardobjgrpmask_e255_init(&pboardobjgrp_e255->mask, NULL);
@@ -65,7 +65,7 @@ u32 boardobjgrp_pmuhdrdatainit_e255(struct gk20a *g,
 		(struct nv_pmu_boardobjgrp_e255 *)pboardobjgrppmu;
 	u32 status;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	if (pboardobjgrp == NULL)
 		return -EINVAL;
diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c
index e793e34c..6d4b4520 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,7 +34,7 @@ u32 boardobjgrpconstruct_e32(struct gk20a *g,
 	u32 status;
 	u8 objslots;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 	objslots = 32;
 
 	status = boardobjgrpmask_e32_init(&pboardobjgrp_e32->mask, NULL);
@@ -65,7 +65,7 @@ u32 boardobjgrp_pmuhdrdatainit_e32(struct gk20a *g,
 		(struct nv_pmu_boardobjgrp_e32 *)pboardobjgrppmu;
 	u32 status;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	if (pboardobjgrp == NULL)
 		return -EINVAL;
diff --git a/drivers/gpu/nvgpu/clk/clk.c b/drivers/gpu/nvgpu/clk/clk.c
index ecc352b1..a8d99bbb 100644
--- a/drivers/gpu/nvgpu/clk/clk.c
+++ b/drivers/gpu/nvgpu/clk/clk.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
 	struct clkrpc_pmucmdhandler_params *phandlerparams =
 		(struct clkrpc_pmucmdhandler_params *)param;
 
-	gk20a_dbg_info("");
+	nvgpu_log_info(g, " ");
 
 	if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) {
 		nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x",
diff --git a/drivers/gpu/nvgpu/clk/clk_domain.c b/drivers/gpu/nvgpu/clk/clk_domain.c
index 1d47d2d5..f306cf56 100644
--- a/drivers/gpu/nvgpu/clk/clk_domain.c
+++ b/drivers/gpu/nvgpu/clk/clk_domain.c
@@ -153,7 +153,7 @@ static u32 _clk_domains_pmudata_instget(struct gk20a *g,
153 (struct nv_pmu_clk_clk_domain_boardobj_grp_set *) 153 (struct nv_pmu_clk_clk_domain_boardobj_grp_set *)
154 pmuboardobjgrp; 154 pmuboardobjgrp;
155 155
156 gk20a_dbg_info(""); 156 nvgpu_log_info(g, " ");
157 157
158 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 158 /*check whether pmuboardobjgrp has a valid boardobj in index*/
159 if (((u32)BIT(idx) & 159 if (((u32)BIT(idx) &
@@ -162,7 +162,7 @@ static u32 _clk_domains_pmudata_instget(struct gk20a *g,
162 162
163 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 163 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
164 &pgrp_set->objects[idx].data.board_obj; 164 &pgrp_set->objects[idx].data.board_obj;
165 gk20a_dbg_info(" Done"); 165 nvgpu_log_info(g, " Done");
166 return 0; 166 return 0;
167} 167}
168 168
@@ -176,7 +176,7 @@ u32 clk_domain_sw_setup(struct gk20a *g)
176 struct clk_domain_3x_slave *pdomain_slave; 176 struct clk_domain_3x_slave *pdomain_slave;
177 u8 i; 177 u8 i;
178 178
179 gk20a_dbg_info(""); 179 nvgpu_log_info(g, " ");
180 180
181 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.clk_domainobjs.super); 181 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.clk_domainobjs.super);
182 if (status) { 182 if (status) {
@@ -255,7 +255,7 @@ u32 clk_domain_sw_setup(struct gk20a *g)
255 } 255 }
256 256
257done: 257done:
258 gk20a_dbg_info(" done status %x", status); 258 nvgpu_log_info(g, " done status %x", status);
259 return status; 259 return status;
260} 260}
261 261
@@ -264,7 +264,7 @@ u32 clk_domain_pmu_setup(struct gk20a *g)
264 u32 status; 264 u32 status;
265 struct boardobjgrp *pboardobjgrp = NULL; 265 struct boardobjgrp *pboardobjgrp = NULL;
266 266
267 gk20a_dbg_info(""); 267 nvgpu_log_info(g, " ");
268 268
269 pboardobjgrp = &g->clk_pmu.clk_domainobjs.super.super; 269 pboardobjgrp = &g->clk_pmu.clk_domainobjs.super.super;
270 270
@@ -273,7 +273,7 @@ u32 clk_domain_pmu_setup(struct gk20a *g)
273 273
274 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 274 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
275 275
276 gk20a_dbg_info("Done"); 276 nvgpu_log_info(g, "Done");
277 return status; 277 return status;
278} 278}
279 279
@@ -298,7 +298,7 @@ static u32 devinit_get_clocks_table(struct gk20a *g,
298 struct clk_domain_3x_slave v3x_slave; 298 struct clk_domain_3x_slave v3x_slave;
299 } clk_domain_data; 299 } clk_domain_data;
300 300
301 gk20a_dbg_info(""); 301 nvgpu_log_info(g, " ");
302 302
303 clocks_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 303 clocks_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
304 g->bios.clock_token, CLOCKS_TABLE); 304 g->bios.clock_token, CLOCKS_TABLE);
@@ -459,7 +459,7 @@ static u32 devinit_get_clocks_table(struct gk20a *g,
459 } 459 }
460 460
461done: 461done:
462 gk20a_dbg_info(" done status %x", status); 462 nvgpu_log_info(g, " done status %x", status);
463 return status; 463 return status;
464} 464}
465 465
@@ -467,7 +467,7 @@ static u32 clkdomainclkproglink_not_supported(struct gk20a *g,
467 struct clk_pmupstate *pclk, 467 struct clk_pmupstate *pclk,
468 struct clk_domain *pdomain) 468 struct clk_domain *pdomain)
469{ 469{
470 gk20a_dbg_info(""); 470 nvgpu_log_info(g, " ");
471 return -EINVAL; 471 return -EINVAL;
472} 472}
473 473
@@ -480,7 +480,7 @@ static int clkdomainvfsearch_stub(
480 u8 rail) 480 u8 rail)
481 481
482{ 482{
483 gk20a_dbg_info(""); 483 nvgpu_log_info(g, " ");
484 return -EINVAL; 484 return -EINVAL;
485} 485}
486 486
@@ -492,7 +492,7 @@ static u32 clkdomaingetfpoints_stub(
492 u16 *pfreqpointsinmhz, 492 u16 *pfreqpointsinmhz,
493 u8 rail) 493 u8 rail)
494{ 494{
495 gk20a_dbg_info(""); 495 nvgpu_log_info(g, " ");
496 return -EINVAL; 496 return -EINVAL;
497} 497}
498 498
@@ -541,7 +541,7 @@ static u32 _clk_domain_pmudatainit_3x(struct gk20a *g,
541 struct clk_domain_3x *pclk_domain_3x; 541 struct clk_domain_3x *pclk_domain_3x;
542 struct nv_pmu_clk_clk_domain_3x_boardobj_set *pset; 542 struct nv_pmu_clk_clk_domain_3x_boardobj_set *pset;
543 543
544 gk20a_dbg_info(""); 544 nvgpu_log_info(g, " ");
545 545
546 status = clk_domain_pmudatainit_super(g, board_obj_ptr, ppmudata); 546 status = clk_domain_pmudatainit_super(g, board_obj_ptr, ppmudata);
547 if (status != 0) 547 if (status != 0)
@@ -592,7 +592,7 @@ static u32 clkdomainclkproglink_3x_prog(struct gk20a *g,
592 struct clk_prog *pprog = NULL; 592 struct clk_prog *pprog = NULL;
593 u8 i; 593 u8 i;
594 594
595 gk20a_dbg_info(""); 595 nvgpu_log_info(g, " ");
596 596
597 for (i = p3xprog->clk_prog_idx_first; 597 for (i = p3xprog->clk_prog_idx_first;
598 i <= p3xprog->clk_prog_idx_last; 598 i <= p3xprog->clk_prog_idx_last;
@@ -616,7 +616,7 @@ static int clkdomaingetslaveclk(struct gk20a *g,
616 u8 slaveidx; 616 u8 slaveidx;
617 struct clk_domain_3x_master *p3xmaster; 617 struct clk_domain_3x_master *p3xmaster;
618 618
619 gk20a_dbg_info(""); 619 nvgpu_log_info(g, " ");
620 620
621 if (pclkmhz == NULL) 621 if (pclkmhz == NULL)
622 return -EINVAL; 622 return -EINVAL;
@@ -657,7 +657,7 @@ static int clkdomainvfsearch(struct gk20a *g,
657 u16 bestclkmhz; 657 u16 bestclkmhz;
658 u32 bestvoltuv; 658 u32 bestvoltuv;
659 659
660 gk20a_dbg_info(""); 660 nvgpu_log_info(g, " ");
661 661
662 if ((pclkmhz == NULL) || (pvoltuv == NULL)) 662 if ((pclkmhz == NULL) || (pvoltuv == NULL))
663 return -EINVAL; 663 return -EINVAL;
@@ -719,7 +719,7 @@ static int clkdomainvfsearch(struct gk20a *g,
719 goto done; 719 goto done;
720 } 720 }
721done: 721done:
722 gk20a_dbg_info("done status %x", status); 722 nvgpu_log_info(g, "done status %x", status);
723 return status; 723 return status;
724} 724}
725 725
@@ -744,7 +744,7 @@ static u32 clkdomaingetfpoints
744 u16 *freqpointsdata; 744 u16 *freqpointsdata;
745 u8 i; 745 u8 i;
746 746
747 gk20a_dbg_info(""); 747 nvgpu_log_info(g, " ");
748 748
749 if (pfpointscount == NULL) 749 if (pfpointscount == NULL)
750 return -EINVAL; 750 return -EINVAL;
@@ -783,7 +783,7 @@ static u32 clkdomaingetfpoints
783 783
784 *pfpointscount = totalcount; 784 *pfpointscount = totalcount;
785done: 785done:
786 gk20a_dbg_info("done status %x", status); 786 nvgpu_log_info(g, "done status %x", status);
787 return status; 787 return status;
788} 788}
789 789
@@ -796,7 +796,7 @@ static u32 _clk_domain_pmudatainit_3x_prog(struct gk20a *g,
796 struct nv_pmu_clk_clk_domain_3x_prog_boardobj_set *pset; 796 struct nv_pmu_clk_clk_domain_3x_prog_boardobj_set *pset;
797 struct clk_domains *pdomains = &(g->clk_pmu.clk_domainobjs); 797 struct clk_domains *pdomains = &(g->clk_pmu.clk_domainobjs);
798 798
799 gk20a_dbg_info(""); 799 nvgpu_log_info(g, " ");
800 800
801 status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata); 801 status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata);
802 if (status != 0) 802 if (status != 0)
@@ -876,7 +876,7 @@ static u32 _clk_domain_pmudatainit_3x_slave(struct gk20a *g,
876 struct clk_domain_3x_slave *pclk_domain_3x_slave; 876 struct clk_domain_3x_slave *pclk_domain_3x_slave;
877 struct nv_pmu_clk_clk_domain_3x_slave_boardobj_set *pset; 877 struct nv_pmu_clk_clk_domain_3x_slave_boardobj_set *pset;
878 878
879 gk20a_dbg_info(""); 879 nvgpu_log_info(g, " ");
880 880
881 status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata); 881 status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata);
882 if (status != 0) 882 if (status != 0)
@@ -935,7 +935,7 @@ static u32 clkdomainclkproglink_3x_master(struct gk20a *g,
935 u16 freq_max_last_mhz = 0; 935 u16 freq_max_last_mhz = 0;
936 u8 i; 936 u8 i;
937 937
938 gk20a_dbg_info(""); 938 nvgpu_log_info(g, " ");
939 939
940 status = clkdomainclkproglink_3x_prog(g, pclk, pdomain); 940 status = clkdomainclkproglink_3x_prog(g, pclk, pdomain);
941 if (status) 941 if (status)
@@ -961,7 +961,7 @@ static u32 clkdomainclkproglink_3x_master(struct gk20a *g,
961 goto done; 961 goto done;
962 } 962 }
963done: 963done:
964 gk20a_dbg_info("done status %x", status); 964 nvgpu_log_info(g, "done status %x", status);
965 return status; 965 return status;
966} 966}
967 967
@@ -973,7 +973,7 @@ static u32 _clk_domain_pmudatainit_3x_master(struct gk20a *g,
973 struct clk_domain_3x_master *pclk_domain_3x_master; 973 struct clk_domain_3x_master *pclk_domain_3x_master;
974 struct nv_pmu_clk_clk_domain_3x_master_boardobj_set *pset; 974 struct nv_pmu_clk_clk_domain_3x_master_boardobj_set *pset;
975 975
976 gk20a_dbg_info(""); 976 nvgpu_log_info(g, " ");
977 977
978 status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata); 978 status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata);
979 if (status != 0) 979 if (status != 0)
@@ -1021,7 +1021,7 @@ static u32 clkdomainclkproglink_fixed(struct gk20a *g,
1021 struct clk_pmupstate *pclk, 1021 struct clk_pmupstate *pclk,
1022 struct clk_domain *pdomain) 1022 struct clk_domain *pdomain)
1023{ 1023{
1024 gk20a_dbg_info(""); 1024 nvgpu_log_info(g, " ");
1025 return 0; 1025 return 0;
1026} 1026}
1027 1027
@@ -1033,7 +1033,7 @@ static u32 _clk_domain_pmudatainit_3x_fixed(struct gk20a *g,
1033 struct clk_domain_3x_fixed *pclk_domain_3x_fixed; 1033 struct clk_domain_3x_fixed *pclk_domain_3x_fixed;
1034 struct nv_pmu_clk_clk_domain_3x_fixed_boardobj_set *pset; 1034 struct nv_pmu_clk_clk_domain_3x_fixed_boardobj_set *pset;
1035 1035
1036 gk20a_dbg_info(""); 1036 nvgpu_log_info(g, " ");
1037 1037
1038 status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata); 1038 status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata);
1039 if (status != 0) 1039 if (status != 0)
@@ -1085,7 +1085,7 @@ static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs)
1085 struct boardobj *board_obj_ptr = NULL; 1085 struct boardobj *board_obj_ptr = NULL;
1086 u32 status; 1086 u32 status;
1087 1087
1088 gk20a_dbg_info(" %d", BOARDOBJ_GET_TYPE(pargs)); 1088 nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs));
1089 switch (BOARDOBJ_GET_TYPE(pargs)) { 1089 switch (BOARDOBJ_GET_TYPE(pargs)) {
1090 case CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED: 1090 case CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED:
1091 status = clk_domain_construct_3x_fixed(g, &board_obj_ptr, 1091 status = clk_domain_construct_3x_fixed(g, &board_obj_ptr,
@@ -1109,7 +1109,7 @@ static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs)
1109 if (status) 1109 if (status)
1110 return NULL; 1110 return NULL;
1111 1111
1112 gk20a_dbg_info(" Done"); 1112 nvgpu_log_info(g, " Done");
1113 1113
1114 return (struct clk_domain *)board_obj_ptr; 1114 return (struct clk_domain *)board_obj_ptr;
1115} 1115}
@@ -1122,7 +1122,7 @@ static u32 clk_domain_pmudatainit_super(struct gk20a *g,
1122 struct clk_domain *pclk_domain; 1122 struct clk_domain *pclk_domain;
1123 struct nv_pmu_clk_clk_domain_boardobj_set *pset; 1123 struct nv_pmu_clk_clk_domain_boardobj_set *pset;
1124 1124
1125 gk20a_dbg_info(""); 1125 nvgpu_log_info(g, " ");
1126 1126
1127 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 1127 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
1128 if (status != 0) 1128 if (status != 0)
diff --git a/drivers/gpu/nvgpu/clk/clk_fll.c b/drivers/gpu/nvgpu/clk/clk_fll.c
index 15d386d5..87222b90 100644
--- a/drivers/gpu/nvgpu/clk/clk_fll.c
+++ b/drivers/gpu/nvgpu/clk/clk_fll.c
@@ -50,7 +50,7 @@ static u32 _clk_fll_devgrp_pmudatainit_super(struct gk20a *g,
50 pboardobjgrp; 50 pboardobjgrp;
51 u32 status = 0; 51 u32 status = 0;
52 52
53 gk20a_dbg_info(""); 53 nvgpu_log_info(g, " ");
54 54
55 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 55 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu);
56 if (status) { 56 if (status) {
@@ -67,7 +67,7 @@ static u32 _clk_fll_devgrp_pmudatainit_super(struct gk20a *g,
67 pfll_objs->lut_prog_master_mask.super.bitcount, 67 pfll_objs->lut_prog_master_mask.super.bitcount,
68 &pset->lut_prog_master_mask.super); 68 &pset->lut_prog_master_mask.super);
69 69
70 gk20a_dbg_info(" Done"); 70 nvgpu_log_info(g, " Done");
71 return status; 71 return status;
72} 72}
73 73
@@ -80,7 +80,7 @@ static u32 _clk_fll_devgrp_pmudata_instget(struct gk20a *g,
80 (struct nv_pmu_clk_clk_fll_device_boardobj_grp_set *) 80 (struct nv_pmu_clk_clk_fll_device_boardobj_grp_set *)
81 pmuboardobjgrp; 81 pmuboardobjgrp;
82 82
83 gk20a_dbg_info(""); 83 nvgpu_log_info(g, " ");
84 84
85 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 85 /*check whether pmuboardobjgrp has a valid boardobj in index*/
86 if (((u32)BIT(idx) & 86 if (((u32)BIT(idx) &
@@ -89,7 +89,7 @@ static u32 _clk_fll_devgrp_pmudata_instget(struct gk20a *g,
89 89
90 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 90 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
91 &pgrp_set->objects[idx].data.board_obj; 91 &pgrp_set->objects[idx].data.board_obj;
92 gk20a_dbg_info(" Done"); 92 nvgpu_log_info(g, " Done");
93 return 0; 93 return 0;
94} 94}
95 95
@@ -123,7 +123,7 @@ u32 clk_fll_sw_setup(struct gk20a *g)
123 u8 i; 123 u8 i;
124 u8 j; 124 u8 j;
125 125
126 gk20a_dbg_info(""); 126 nvgpu_log_info(g, " ");
127 127
128 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_fllobjs.super); 128 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_fllobjs.super);
129 if (status) { 129 if (status) {
@@ -202,7 +202,7 @@ u32 clk_fll_sw_setup(struct gk20a *g)
202 } 202 }
203 } 203 }
204done: 204done:
205 gk20a_dbg_info(" done status %x", status); 205 nvgpu_log_info(g, " done status %x", status);
206 return status; 206 return status;
207} 207}
208 208
@@ -211,7 +211,7 @@ u32 clk_fll_pmu_setup(struct gk20a *g)
211 u32 status; 211 u32 status;
212 struct boardobjgrp *pboardobjgrp = NULL; 212 struct boardobjgrp *pboardobjgrp = NULL;
213 213
214 gk20a_dbg_info(""); 214 nvgpu_log_info(g, " ");
215 215
216 pboardobjgrp = &g->clk_pmu.avfs_fllobjs.super.super; 216 pboardobjgrp = &g->clk_pmu.avfs_fllobjs.super.super;
217 217
@@ -220,7 +220,7 @@ u32 clk_fll_pmu_setup(struct gk20a *g)
220 220
221 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 221 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
222 222
223 gk20a_dbg_info("Done"); 223 nvgpu_log_info(g, "Done");
224 return status; 224 return status;
225} 225}
226 226
@@ -241,7 +241,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g,
241 u32 vbios_domain = NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP; 241 u32 vbios_domain = NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP;
242 struct avfsvinobjs *pvinobjs = &g->clk_pmu.avfs_vinobjs; 242 struct avfsvinobjs *pvinobjs = &g->clk_pmu.avfs_vinobjs;
243 243
244 gk20a_dbg_info(""); 244 nvgpu_log_info(g, " ");
245 245
246 fll_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 246 fll_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
247 g->bios.clock_token, FLL_TABLE); 247 g->bios.clock_token, FLL_TABLE);
@@ -350,7 +350,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g,
350 } 350 }
351 351
352done: 352done:
353 gk20a_dbg_info(" done status %x", status); 353 nvgpu_log_info(g, " done status %x", status);
354 return status; 354 return status;
355} 355}
356 356
@@ -399,7 +399,7 @@ static struct fll_device *construct_fll_device(struct gk20a *g,
399 struct fll_device *board_obj_fll_ptr = NULL; 399 struct fll_device *board_obj_fll_ptr = NULL;
400 u32 status; 400 u32 status;
401 401
402 gk20a_dbg_info(""); 402 nvgpu_log_info(g, " ");
403 status = boardobj_construct_super(g, &board_obj_ptr, 403 status = boardobj_construct_super(g, &board_obj_ptr,
404 sizeof(struct fll_device), pargs); 404 sizeof(struct fll_device), pargs);
405 if (status) 405 if (status)
@@ -429,7 +429,7 @@ static struct fll_device *construct_fll_device(struct gk20a *g,
429 boardobjgrpmask_e32_init( 429 boardobjgrpmask_e32_init(
430 &board_obj_fll_ptr->lut_prog_broadcast_slave_mask, NULL); 430 &board_obj_fll_ptr->lut_prog_broadcast_slave_mask, NULL);
431 431
432 gk20a_dbg_info(" Done"); 432 nvgpu_log_info(g, " Done");
433 433
434 return (struct fll_device *)board_obj_ptr; 434 return (struct fll_device *)board_obj_ptr;
435} 435}
@@ -442,7 +442,7 @@ static u32 fll_device_init_pmudata_super(struct gk20a *g,
442 struct fll_device *pfll_dev; 442 struct fll_device *pfll_dev;
443 struct nv_pmu_clk_clk_fll_device_boardobj_set *perf_pmu_data; 443 struct nv_pmu_clk_clk_fll_device_boardobj_set *perf_pmu_data;
444 444
445 gk20a_dbg_info(""); 445 nvgpu_log_info(g, " ");
446 446
447 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 447 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
448 if (status != 0) 448 if (status != 0)
@@ -473,7 +473,7 @@ static u32 fll_device_init_pmudata_super(struct gk20a *g,
473 pfll_dev->lut_prog_broadcast_slave_mask.super.bitcount, 473 pfll_dev->lut_prog_broadcast_slave_mask.super.bitcount,
474 &perf_pmu_data->lut_prog_broadcast_slave_mask.super); 474 &perf_pmu_data->lut_prog_broadcast_slave_mask.super);
475 475
476 gk20a_dbg_info(" Done"); 476 nvgpu_log_info(g, " Done");
477 477
478 return status; 478 return status;
479} 479}
diff --git a/drivers/gpu/nvgpu/clk/clk_freq_controller.c b/drivers/gpu/nvgpu/clk/clk_freq_controller.c
index fce177a7..9091f71b 100644
--- a/drivers/gpu/nvgpu/clk/clk_freq_controller.c
+++ b/drivers/gpu/nvgpu/clk/clk_freq_controller.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -321,7 +321,7 @@ u32 clk_freq_controller_pmu_setup(struct gk20a *g)
321 u32 status; 321 u32 status;
322 struct boardobjgrp *pboardobjgrp = NULL; 322 struct boardobjgrp *pboardobjgrp = NULL;
323 323
324 gk20a_dbg_info(""); 324 nvgpu_log_info(g, " ");
325 325
326 pboardobjgrp = &g->clk_pmu.clk_freq_controllers.super.super; 326 pboardobjgrp = &g->clk_pmu.clk_freq_controllers.super.super;
327 327
@@ -330,7 +330,7 @@ u32 clk_freq_controller_pmu_setup(struct gk20a *g)
330 330
331 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 331 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
332 332
333 gk20a_dbg_info("Done"); 333 nvgpu_log_info(g, "Done");
334 return status; 334 return status;
335} 335}
336 336
@@ -343,7 +343,7 @@ static u32 _clk_freq_controller_devgrp_pmudata_instget(struct gk20a *g,
343 (struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set *) 343 (struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set *)
344 pmuboardobjgrp; 344 pmuboardobjgrp;
345 345
346 gk20a_dbg_info(""); 346 nvgpu_log_info(g, " ");
347 347
348 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 348 /*check whether pmuboardobjgrp has a valid boardobj in index*/
349 if (((u32)BIT(idx) & 349 if (((u32)BIT(idx) &
@@ -352,7 +352,7 @@ static u32 _clk_freq_controller_devgrp_pmudata_instget(struct gk20a *g,
352 352
353 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 353 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
354 &pgrp_set->objects[idx].data.board_obj; 354 &pgrp_set->objects[idx].data.board_obj;
355 gk20a_dbg_info(" Done"); 355 nvgpu_log_info(g, " Done");
356 return 0; 356 return 0;
357} 357}
358 358
@@ -392,7 +392,7 @@ u32 clk_freq_controller_sw_setup(struct gk20a *g)
392 u8 i; 392 u8 i;
393 u8 j; 393 u8 j;
394 394
395 gk20a_dbg_info(""); 395 nvgpu_log_info(g, " ");
396 396
397 pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers; 397 pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers;
398 status = boardobjgrpconstruct_e32(g, &pclk_freq_controllers->super); 398 status = boardobjgrpconstruct_e32(g, &pclk_freq_controllers->super);
@@ -447,6 +447,6 @@ u32 clk_freq_controller_sw_setup(struct gk20a *g)
447 freq_ctrl_load_mask.super, i); 447 freq_ctrl_load_mask.super, i);
448 } 448 }
449done: 449done:
450 gk20a_dbg_info(" done status %x", status); 450 nvgpu_log_info(g, " done status %x", status);
451 return status; 451 return status;
452} 452}
diff --git a/drivers/gpu/nvgpu/clk/clk_prog.c b/drivers/gpu/nvgpu/clk/clk_prog.c
index 6b5315b4..8926b9f5 100644
--- a/drivers/gpu/nvgpu/clk/clk_prog.c
+++ b/drivers/gpu/nvgpu/clk/clk_prog.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -72,7 +72,7 @@ static u32 _clk_progs_pmudata_instget(struct gk20a *g,
72 struct nv_pmu_clk_clk_prog_boardobj_grp_set *pgrp_set = 72 struct nv_pmu_clk_clk_prog_boardobj_grp_set *pgrp_set =
73 (struct nv_pmu_clk_clk_prog_boardobj_grp_set *)pmuboardobjgrp; 73 (struct nv_pmu_clk_clk_prog_boardobj_grp_set *)pmuboardobjgrp;
74 74
75 gk20a_dbg_info(""); 75 nvgpu_log_info(g, " ");
76 76
77 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 77 /*check whether pmuboardobjgrp has a valid boardobj in index*/
78 if (((u32)BIT(idx) & 78 if (((u32)BIT(idx) &
@@ -81,7 +81,7 @@ static u32 _clk_progs_pmudata_instget(struct gk20a *g,
81 81
82 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 82 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
83 &pgrp_set->objects[idx].data.board_obj; 83 &pgrp_set->objects[idx].data.board_obj;
84 gk20a_dbg_info(" Done"); 84 nvgpu_log_info(g, " Done");
85 return 0; 85 return 0;
86} 86}
87 87
@@ -91,7 +91,7 @@ u32 clk_prog_sw_setup(struct gk20a *g)
91 struct boardobjgrp *pboardobjgrp = NULL; 91 struct boardobjgrp *pboardobjgrp = NULL;
92 struct clk_progs *pclkprogobjs; 92 struct clk_progs *pclkprogobjs;
93 93
94 gk20a_dbg_info(""); 94 nvgpu_log_info(g, " ");
95 95
96 status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_progobjs.super); 96 status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_progobjs.super);
97 if (status) { 97 if (status) {
@@ -130,7 +130,7 @@ u32 clk_prog_sw_setup(struct gk20a *g)
130 130
131 131
132done: 132done:
133 gk20a_dbg_info(" done status %x", status); 133 nvgpu_log_info(g, " done status %x", status);
134 return status; 134 return status;
135} 135}
136 136
@@ -139,7 +139,7 @@ u32 clk_prog_pmu_setup(struct gk20a *g)
139 u32 status; 139 u32 status;
140 struct boardobjgrp *pboardobjgrp = NULL; 140 struct boardobjgrp *pboardobjgrp = NULL;
141 141
142 gk20a_dbg_info(""); 142 nvgpu_log_info(g, " ");
143 143
144 pboardobjgrp = &g->clk_pmu.clk_progobjs.super.super; 144 pboardobjgrp = &g->clk_pmu.clk_progobjs.super.super;
145 145
@@ -148,7 +148,7 @@ u32 clk_prog_pmu_setup(struct gk20a *g)
148 148
149 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 149 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
150 150
151 gk20a_dbg_info("Done"); 151 nvgpu_log_info(g, "Done");
152 return status; 152 return status;
153} 153}
154 154
@@ -186,7 +186,7 @@ static u32 devinit_get_clk_prog_table(struct gk20a *g,
186 struct clk_prog_1x_master_table v1x_master_table; 186 struct clk_prog_1x_master_table v1x_master_table;
187 } prog_data; 187 } prog_data;
188 188
189 gk20a_dbg_info(""); 189 nvgpu_log_info(g, " ");
190 190
191 clkprogs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 191 clkprogs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
192 g->bios.clock_token, CLOCK_PROGRAMMING_TABLE); 192 g->bios.clock_token, CLOCK_PROGRAMMING_TABLE);
@@ -372,7 +372,7 @@ static u32 devinit_get_clk_prog_table(struct gk20a *g,
372 } 372 }
373 } 373 }
374done: 374done:
375 gk20a_dbg_info(" done status %x", status); 375 nvgpu_log_info(g, " done status %x", status);
376 return status; 376 return status;
377} 377}
378 378
@@ -382,7 +382,7 @@ static u32 _clk_prog_pmudatainit_super(struct gk20a *g,
382{ 382{
383 u32 status = 0; 383 u32 status = 0;
384 384
385 gk20a_dbg_info(""); 385 nvgpu_log_info(g, " ");
386 386
387 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 387 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
388 return status; 388 return status;
@@ -396,7 +396,7 @@ static u32 _clk_prog_pmudatainit_1x(struct gk20a *g,
396 struct clk_prog_1x *pclk_prog_1x; 396 struct clk_prog_1x *pclk_prog_1x;
397 struct nv_pmu_clk_clk_prog_1x_boardobj_set *pset; 397 struct nv_pmu_clk_clk_prog_1x_boardobj_set *pset;
398 398
399 gk20a_dbg_info(""); 399 nvgpu_log_info(g, " ");
400 400
401 status = _clk_prog_pmudatainit_super(g, board_obj_ptr, ppmudata); 401 status = _clk_prog_pmudatainit_super(g, board_obj_ptr, ppmudata);
402 if (status != 0) 402 if (status != 0)
@@ -424,7 +424,7 @@ static u32 _clk_prog_pmudatainit_1x_master(struct gk20a *g,
424 u32 vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * 424 u32 vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) *
425 g->clk_pmu.clk_progobjs.vf_entry_count; 425 g->clk_pmu.clk_progobjs.vf_entry_count;
426 426
427 gk20a_dbg_info(""); 427 nvgpu_log_info(g, " ");
428 428
429 status = _clk_prog_pmudatainit_1x(g, board_obj_ptr, ppmudata); 429 status = _clk_prog_pmudatainit_1x(g, board_obj_ptr, ppmudata);
430 430
@@ -455,7 +455,7 @@ static u32 _clk_prog_pmudatainit_1x_master_ratio(struct gk20a *g,
455 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 455 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) *
456 g->clk_pmu.clk_progobjs.slave_entry_count; 456 g->clk_pmu.clk_progobjs.slave_entry_count;
457 457
458 gk20a_dbg_info(""); 458 nvgpu_log_info(g, " ");
459 459
460 status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata); 460 status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata);
461 if (status != 0) 461 if (status != 0)
@@ -483,7 +483,7 @@ static u32 _clk_prog_pmudatainit_1x_master_table(struct gk20a *g,
483 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 483 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) *
484 g->clk_pmu.clk_progobjs.slave_entry_count; 484 g->clk_pmu.clk_progobjs.slave_entry_count;
485 485
486 gk20a_dbg_info(""); 486 nvgpu_log_info(g, " ");
487 487
488 status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata); 488 status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata);
489 if (status != 0) 489 if (status != 0)
@@ -510,7 +510,7 @@ static u32 _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g,
510 struct clk_vf_point *p_vf_point; 510 struct clk_vf_point *p_vf_point;
511 u32 status; 511 u32 status;
512 512
513 gk20a_dbg_info(""); 513 nvgpu_log_info(g, " ");
514 514
515 p_vf_point = construct_clk_vf_point(g, (void *)p_vf_point_tmp); 515 p_vf_point = construct_clk_vf_point(g, (void *)p_vf_point_tmp);
516 if (p_vf_point == NULL) { 516 if (p_vf_point == NULL) {
@@ -527,7 +527,7 @@ static u32 _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g,
527 p_vf_rail->vf_point_idx_last = (*p_vf_point_idx)++; 527 p_vf_rail->vf_point_idx_last = (*p_vf_point_idx)++;
528 528
529done: 529done:
530 gk20a_dbg_info("done status %x", status); 530 nvgpu_log_info(g, "done status %x", status);
531 return status; 531 return status;
532} 532}
533 533
@@ -561,7 +561,7 @@ static u32 clk_prog_construct_1x(struct gk20a *g,
561 (struct clk_prog_1x *)pargs; 561 (struct clk_prog_1x *)pargs;
562 u32 status = 0; 562 u32 status = 0;
563 563
564 gk20a_dbg_info(" "); 564 nvgpu_log_info(g, " ");
565 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X); 565 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X);
566 status = clk_prog_construct_super(g, ppboardobj, size, pargs); 566 status = clk_prog_construct_super(g, ppboardobj, size, pargs);
567 if (status) 567 if (status)
@@ -592,7 +592,7 @@ static u32 clk_prog_construct_1x_master(struct gk20a *g,
592 g->clk_pmu.clk_progobjs.vf_entry_count; 592 g->clk_pmu.clk_progobjs.vf_entry_count;
593 u8 railidx; 593 u8 railidx;
594 594
595 gk20a_dbg_info(" type - %x", BOARDOBJ_GET_TYPE(pargs)); 595 nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs));
596 596
597 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X_MASTER); 597 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X_MASTER);
598 status = clk_prog_construct_1x(g, ppboardobj, size, pargs); 598 status = clk_prog_construct_1x(g, ppboardobj, size, pargs);
@@ -686,7 +686,7 @@ static u32 clk_prog_construct_1x_master_table(struct gk20a *g,
686 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 686 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) *
687 g->clk_pmu.clk_progobjs.slave_entry_count; 687 g->clk_pmu.clk_progobjs.slave_entry_count;
688 688
689 gk20a_dbg_info("type - %x", BOARDOBJ_GET_TYPE(pargs)); 689 nvgpu_log_info(g, "type - %x", BOARDOBJ_GET_TYPE(pargs));
690 690
691 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE) 691 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE)
692 return -EINVAL; 692 return -EINVAL;
@@ -727,7 +727,7 @@ static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs)
727 struct boardobj *board_obj_ptr = NULL; 727 struct boardobj *board_obj_ptr = NULL;
728 u32 status; 728 u32 status;
729 729
730 gk20a_dbg_info(" type - %x", BOARDOBJ_GET_TYPE(pargs)); 730 nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs));
731 switch (BOARDOBJ_GET_TYPE(pargs)) { 731 switch (BOARDOBJ_GET_TYPE(pargs)) {
732 case CTRL_CLK_CLK_PROG_TYPE_1X: 732 case CTRL_CLK_CLK_PROG_TYPE_1X:
733 status = clk_prog_construct_1x(g, &board_obj_ptr, 733 status = clk_prog_construct_1x(g, &board_obj_ptr,
@@ -754,7 +754,7 @@ static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs)
754 return NULL; 754 return NULL;
755 } 755 }
756 756
757 gk20a_dbg_info(" Done"); 757 nvgpu_log_info(g, " Done");
758 758
759 return (struct clk_prog *)board_obj_ptr; 759 return (struct clk_prog *)board_obj_ptr;
760} 760}
@@ -777,7 +777,7 @@ static u32 vfflatten_prog_1x_master(struct gk20a *g,
777 u8 vf_point_idx; 777 u8 vf_point_idx;
778 u8 vf_rail_idx; 778 u8 vf_rail_idx;
779 779
780 gk20a_dbg_info(""); 780 nvgpu_log_info(g, " ");
781 memset(&vf_point_data, 0x0, sizeof(vf_point_data)); 781 memset(&vf_point_data, 0x0, sizeof(vf_point_data));
782 782
783 vf_point_idx = BOARDOBJGRP_NEXT_EMPTY_IDX( 783 vf_point_idx = BOARDOBJGRP_NEXT_EMPTY_IDX(
@@ -851,7 +851,7 @@ static u32 vfflatten_prog_1x_master(struct gk20a *g,
851 *pfreqmaxlastmhz = p1xmaster->super.freq_max_mhz; 851 *pfreqmaxlastmhz = p1xmaster->super.freq_max_mhz;
852 852
853done: 853done:
854 gk20a_dbg_info("done status %x", status); 854 nvgpu_log_info(g, "done status %x", status);
855 return status; 855 return status;
856} 856}
857 857
diff --git a/drivers/gpu/nvgpu/clk/clk_vf_point.c b/drivers/gpu/nvgpu/clk/clk_vf_point.c
index 8333b2b0..b459c012 100644
--- a/drivers/gpu/nvgpu/clk/clk_vf_point.c
+++ b/drivers/gpu/nvgpu/clk/clk_vf_point.c
@@ -59,7 +59,7 @@ static u32 _clk_vf_points_pmudata_instget(struct gk20a *g,
59 (struct nv_pmu_clk_clk_vf_point_boardobj_grp_set *) 59 (struct nv_pmu_clk_clk_vf_point_boardobj_grp_set *)
60 pmuboardobjgrp; 60 pmuboardobjgrp;
61 61
62 gk20a_dbg_info(""); 62 nvgpu_log_info(g, " ");
63 63
64 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 64 /*check whether pmuboardobjgrp has a valid boardobj in index*/
65 if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS) 65 if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS)
@@ -67,7 +67,7 @@ static u32 _clk_vf_points_pmudata_instget(struct gk20a *g,
67 67
68 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 68 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
69 &pgrp_set->objects[idx].data.board_obj; 69 &pgrp_set->objects[idx].data.board_obj;
70 gk20a_dbg_info(" Done"); 70 nvgpu_log_info(g, " Done");
71 return 0; 71 return 0;
72} 72}
73 73
@@ -94,7 +94,7 @@ u32 clk_vf_point_sw_setup(struct gk20a *g)
94 u32 status; 94 u32 status;
95 struct boardobjgrp *pboardobjgrp = NULL; 95 struct boardobjgrp *pboardobjgrp = NULL;
96 96
97 gk20a_dbg_info(""); 97 nvgpu_log_info(g, " ");
98 98
99 status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_vf_pointobjs.super); 99 status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_vf_pointobjs.super);
100 if (status) { 100 if (status) {
@@ -132,7 +132,7 @@ u32 clk_vf_point_sw_setup(struct gk20a *g)
132 pboardobjgrp->pmustatusinstget = _clk_vf_points_pmustatus_instget; 132 pboardobjgrp->pmustatusinstget = _clk_vf_points_pmustatus_instget;
133 133
134done: 134done:
135 gk20a_dbg_info(" done status %x", status); 135 nvgpu_log_info(g, " done status %x", status);
136 return status; 136 return status;
137} 137}
138 138
@@ -141,7 +141,7 @@ u32 clk_vf_point_pmu_setup(struct gk20a *g)
141 u32 status; 141 u32 status;
142 struct boardobjgrp *pboardobjgrp = NULL; 142 struct boardobjgrp *pboardobjgrp = NULL;
143 143
144 gk20a_dbg_info(""); 144 nvgpu_log_info(g, " ");
145 145
146 pboardobjgrp = &g->clk_pmu.clk_vf_pointobjs.super.super; 146 pboardobjgrp = &g->clk_pmu.clk_vf_pointobjs.super.super;
147 147
@@ -150,7 +150,7 @@ u32 clk_vf_point_pmu_setup(struct gk20a *g)
150 150
151 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 151 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
152 152
153 gk20a_dbg_info("Done"); 153 nvgpu_log_info(g, "Done");
154 return status; 154 return status;
155} 155}
156 156
@@ -187,7 +187,7 @@ static u32 _clk_vf_point_pmudatainit_volt(struct gk20a *g,
187 struct clk_vf_point_volt *pclk_vf_point_volt; 187 struct clk_vf_point_volt *pclk_vf_point_volt;
188 struct nv_pmu_clk_clk_vf_point_volt_boardobj_set *pset; 188 struct nv_pmu_clk_clk_vf_point_volt_boardobj_set *pset;
189 189
190 gk20a_dbg_info(""); 190 nvgpu_log_info(g, " ");
191 191
192 status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata); 192 status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata);
193 if (status != 0) 193 if (status != 0)
@@ -214,7 +214,7 @@ static u32 _clk_vf_point_pmudatainit_freq(struct gk20a *g,
214 struct clk_vf_point_freq *pclk_vf_point_freq; 214 struct clk_vf_point_freq *pclk_vf_point_freq;
215 struct nv_pmu_clk_clk_vf_point_freq_boardobj_set *pset; 215 struct nv_pmu_clk_clk_vf_point_freq_boardobj_set *pset;
216 216
217 gk20a_dbg_info(""); 217 nvgpu_log_info(g, " ");
218 218
219 status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata); 219 status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata);
220 if (status != 0) 220 if (status != 0)
@@ -297,7 +297,7 @@ struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs)
297 struct boardobj *board_obj_ptr = NULL; 297 struct boardobj *board_obj_ptr = NULL;
298 u32 status; 298 u32 status;
299 299
300 gk20a_dbg_info(""); 300 nvgpu_log_info(g, " ");
301 switch (BOARDOBJ_GET_TYPE(pargs)) { 301 switch (BOARDOBJ_GET_TYPE(pargs)) {
302 case CTRL_CLK_CLK_VF_POINT_TYPE_FREQ: 302 case CTRL_CLK_CLK_VF_POINT_TYPE_FREQ:
303 status = clk_vf_point_construct_freq(g, &board_obj_ptr, 303 status = clk_vf_point_construct_freq(g, &board_obj_ptr,
@@ -316,7 +316,7 @@ struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs)
316 if (status) 316 if (status)
317 return NULL; 317 return NULL;
318 318
319 gk20a_dbg_info(" Done"); 319 nvgpu_log_info(g, " Done");
320 320
321 return (struct clk_vf_point *)board_obj_ptr; 321 return (struct clk_vf_point *)board_obj_ptr;
322} 322}
@@ -329,7 +329,7 @@ static u32 _clk_vf_point_pmudatainit_super(struct gk20a *g,
329 struct clk_vf_point *pclk_vf_point; 329 struct clk_vf_point *pclk_vf_point;
330 struct nv_pmu_clk_clk_vf_point_boardobj_set *pset; 330 struct nv_pmu_clk_clk_vf_point_boardobj_set *pset;
331 331
332 gk20a_dbg_info(""); 332 nvgpu_log_info(g, " ");
333 333
334 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 334 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
335 if (status != 0) 335 if (status != 0)
@@ -355,7 +355,7 @@ static u32 clk_vf_point_update(struct gk20a *g,
355 struct clk_vf_point *pclk_vf_point; 355 struct clk_vf_point *pclk_vf_point;
356 struct nv_pmu_clk_clk_vf_point_boardobj_get_status *pstatus; 356 struct nv_pmu_clk_clk_vf_point_boardobj_get_status *pstatus;
357 357
358 gk20a_dbg_info(""); 358 nvgpu_log_info(g, " ");
359 359
360 360
361 pclk_vf_point = 361 pclk_vf_point =
@@ -388,7 +388,7 @@ u32 clk_vf_point_cache(struct gk20a *g)
388 u32 status; 388 u32 status;
389 u8 index; 389 u8 index;
390 390
391 gk20a_dbg_info(""); 391 nvgpu_log_info(g, " ");
392 pclk_vf_points = &g->clk_pmu.clk_vf_pointobjs; 392 pclk_vf_points = &g->clk_pmu.clk_vf_pointobjs;
393 pboardobjgrp = &pclk_vf_points->super.super; 393 pboardobjgrp = &pclk_vf_points->super.super;
394 pboardobjgrpmask = &pclk_vf_points->super.mask.super; 394 pboardobjgrpmask = &pclk_vf_points->super.mask.super;
diff --git a/drivers/gpu/nvgpu/clk/clk_vin.c b/drivers/gpu/nvgpu/clk/clk_vin.c
index 74bcd247..66efefef 100644
--- a/drivers/gpu/nvgpu/clk/clk_vin.c
+++ b/drivers/gpu/nvgpu/clk/clk_vin.c
@@ -323,13 +323,13 @@ static u32 _clk_vin_devgrp_pmudatainit_super(struct gk20a *g,
323 struct avfsvinobjs *pvin_obbj = (struct avfsvinobjs *)pboardobjgrp; 323 struct avfsvinobjs *pvin_obbj = (struct avfsvinobjs *)pboardobjgrp;
324 u32 status = 0; 324 u32 status = 0;
325 325
326 gk20a_dbg_info(""); 326 nvgpu_log_info(g, " ");
327 327
328 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 328 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu);
329 329
330 pset->b_vin_is_disable_allowed = pvin_obbj->vin_is_disable_allowed; 330 pset->b_vin_is_disable_allowed = pvin_obbj->vin_is_disable_allowed;
331 331
332 gk20a_dbg_info(" Done"); 332 nvgpu_log_info(g, " Done");
333 return status; 333 return status;
334} 334}
335 335
@@ -342,7 +342,7 @@ static u32 _clk_vin_devgrp_pmudata_instget(struct gk20a *g,
342 (struct nv_pmu_clk_clk_vin_device_boardobj_grp_set *) 342 (struct nv_pmu_clk_clk_vin_device_boardobj_grp_set *)
343 pmuboardobjgrp; 343 pmuboardobjgrp;
344 344
345 gk20a_dbg_info(""); 345 nvgpu_log_info(g, " ");
346 346
347 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 347 /*check whether pmuboardobjgrp has a valid boardobj in index*/
348 if (((u32)BIT(idx) & 348 if (((u32)BIT(idx) &
@@ -351,7 +351,7 @@ static u32 _clk_vin_devgrp_pmudata_instget(struct gk20a *g,
351 351
352 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 352 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
353 &pgrp_set->objects[idx].data.board_obj; 353 &pgrp_set->objects[idx].data.board_obj;
354 gk20a_dbg_info(" Done"); 354 nvgpu_log_info(g, " Done");
355 return 0; 355 return 0;
356} 356}
357 357
@@ -381,7 +381,7 @@ u32 clk_vin_sw_setup(struct gk20a *g)
381 struct vin_device_v20 *pvindev = NULL; 381 struct vin_device_v20 *pvindev = NULL;
382 struct avfsvinobjs *pvinobjs; 382 struct avfsvinobjs *pvinobjs;
383 383
384 gk20a_dbg_info(""); 384 nvgpu_log_info(g, " ");
385 385
386 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_vinobjs.super); 386 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_vinobjs.super);
387 if (status) { 387 if (status) {
@@ -427,7 +427,7 @@ u32 clk_vin_sw_setup(struct gk20a *g)
427 } 427 }
428 428
429done: 429done:
430 gk20a_dbg_info(" done status %x", status); 430 nvgpu_log_info(g, " done status %x", status);
431 return status; 431 return status;
432} 432}
433 433
@@ -436,7 +436,7 @@ u32 clk_vin_pmu_setup(struct gk20a *g)
436 u32 status; 436 u32 status;
437 struct boardobjgrp *pboardobjgrp = NULL; 437 struct boardobjgrp *pboardobjgrp = NULL;
438 438
439 gk20a_dbg_info(""); 439 nvgpu_log_info(g, " ");
440 440
441 pboardobjgrp = &g->clk_pmu.avfs_vinobjs.super.super; 441 pboardobjgrp = &g->clk_pmu.avfs_vinobjs.super.super;
442 442
@@ -445,7 +445,7 @@ u32 clk_vin_pmu_setup(struct gk20a *g)
445 445
446 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 446 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
447 447
448 gk20a_dbg_info("Done"); 448 nvgpu_log_info(g, "Done");
449 return status; 449 return status;
450} 450}
451 451
@@ -470,7 +470,7 @@ static u32 devinit_get_vin_device_table(struct gk20a *g,
470 struct vin_device_v20 vin_device_v20; 470 struct vin_device_v20 vin_device_v20;
471 } vin_device_data; 471 } vin_device_data;
472 472
473 gk20a_dbg_info(""); 473 nvgpu_log_info(g, " ");
474 474
475 vin_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 475 vin_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
476 g->bios.clock_token, VIN_TABLE); 476 g->bios.clock_token, VIN_TABLE);
@@ -557,7 +557,7 @@ static u32 devinit_get_vin_device_table(struct gk20a *g,
557 } 557 }
558 558
559done: 559done:
560 gk20a_dbg_info(" done status %x", status); 560 nvgpu_log_info(g, " done status %x", status);
561 return status; 561 return status;
562} 562}
563 563
@@ -645,7 +645,7 @@ static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs)
645 struct boardobj *board_obj_ptr = NULL; 645 struct boardobj *board_obj_ptr = NULL;
646 u32 status; 646 u32 status;
647 647
648 gk20a_dbg_info(" %d", BOARDOBJ_GET_TYPE(pargs)); 648 nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs));
649 switch (BOARDOBJ_GET_TYPE(pargs)) { 649 switch (BOARDOBJ_GET_TYPE(pargs)) {
650 case CTRL_CLK_VIN_TYPE_V10: 650 case CTRL_CLK_VIN_TYPE_V10:
651 status = vin_device_construct_v10(g, &board_obj_ptr, 651 status = vin_device_construct_v10(g, &board_obj_ptr,
@@ -664,7 +664,7 @@ static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs)
664 if (status) 664 if (status)
665 return NULL; 665 return NULL;
666 666
667 gk20a_dbg_info(" Done"); 667 nvgpu_log_info(g, " Done");
668 668
669 return (struct vin_device *)board_obj_ptr; 669 return (struct vin_device *)board_obj_ptr;
670} 670}
@@ -679,7 +679,7 @@ static u32 vin_device_init_pmudata_v10(struct gk20a *g,
679 struct vin_device_v20 *pvin_dev_v20; 679 struct vin_device_v20 *pvin_dev_v20;
680 struct nv_pmu_clk_clk_vin_device_v10_boardobj_set *perf_pmu_data; 680 struct nv_pmu_clk_clk_vin_device_v10_boardobj_set *perf_pmu_data;
681 681
682 gk20a_dbg_info(""); 682 nvgpu_log_info(g, " ");
683 683
684 status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata); 684 status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata);
685 if (status != 0) 685 if (status != 0)
@@ -692,7 +692,7 @@ static u32 vin_device_init_pmudata_v10(struct gk20a *g,
692 perf_pmu_data->data.vin_cal.intercept = pvin_dev_v20->data.vin_cal.cal_v10.intercept; 692 perf_pmu_data->data.vin_cal.intercept = pvin_dev_v20->data.vin_cal.cal_v10.intercept;
693 perf_pmu_data->data.vin_cal.slope = pvin_dev_v20->data.vin_cal.cal_v10.slope; 693 perf_pmu_data->data.vin_cal.slope = pvin_dev_v20->data.vin_cal.cal_v10.slope;
694 694
695 gk20a_dbg_info(" Done"); 695 nvgpu_log_info(g, " Done");
696 696
697 return status; 697 return status;
698} 698}
@@ -705,7 +705,7 @@ static u32 vin_device_init_pmudata_v20(struct gk20a *g,
705 struct vin_device_v20 *pvin_dev_v20; 705 struct vin_device_v20 *pvin_dev_v20;
706 struct nv_pmu_clk_clk_vin_device_v20_boardobj_set *perf_pmu_data; 706 struct nv_pmu_clk_clk_vin_device_v20_boardobj_set *perf_pmu_data;
707 707
708 gk20a_dbg_info(""); 708 nvgpu_log_info(g, " ");
709 709
710 status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata); 710 status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata);
711 if (status != 0) 711 if (status != 0)
@@ -718,7 +718,7 @@ static u32 vin_device_init_pmudata_v20(struct gk20a *g,
718 perf_pmu_data->data.vin_cal.cal_v20.offset = pvin_dev_v20->data.vin_cal.cal_v20.offset; 718 perf_pmu_data->data.vin_cal.cal_v20.offset = pvin_dev_v20->data.vin_cal.cal_v20.offset;
719 perf_pmu_data->data.vin_cal.cal_v20.gain = pvin_dev_v20->data.vin_cal.cal_v20.gain; 719 perf_pmu_data->data.vin_cal.cal_v20.gain = pvin_dev_v20->data.vin_cal.cal_v20.gain;
720 720
721 gk20a_dbg_info(" Done"); 721 nvgpu_log_info(g, " Done");
722 722
723 return status; 723 return status;
724} 724}
@@ -731,7 +731,7 @@ static u32 vin_device_init_pmudata_super(struct gk20a *g,
731 struct vin_device *pvin_dev; 731 struct vin_device *pvin_dev;
732 struct nv_pmu_clk_clk_vin_device_boardobj_set *perf_pmu_data; 732 struct nv_pmu_clk_clk_vin_device_boardobj_set *perf_pmu_data;
733 733
734 gk20a_dbg_info(""); 734 nvgpu_log_info(g, " ");
735 735
736 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 736 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
737 if (status != 0) 737 if (status != 0)
@@ -745,7 +745,7 @@ static u32 vin_device_init_pmudata_super(struct gk20a *g,
745 perf_pmu_data->volt_domain = pvin_dev->volt_domain; 745 perf_pmu_data->volt_domain = pvin_dev->volt_domain;
746 perf_pmu_data->flls_shared_mask = pvin_dev->flls_shared_mask; 746 perf_pmu_data->flls_shared_mask = pvin_dev->flls_shared_mask;
747 747
748 gk20a_dbg_info(" Done"); 748 nvgpu_log_info(g, " Done");
749 749
750 return status; 750 return status;
751} 751}
diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 5b76cf0e..77f088b7 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GK20A Address Spaces 2 * GK20A Address Spaces
3 * 3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -34,13 +34,17 @@
34/* dumb allocator... */ 34/* dumb allocator... */
35static int generate_as_share_id(struct gk20a_as *as) 35static int generate_as_share_id(struct gk20a_as *as)
36{ 36{
37 gk20a_dbg_fn(""); 37 struct gk20a *g = gk20a_from_as(as);
38
39 nvgpu_log_fn(g, " ");
38 return ++as->last_share_id; 40 return ++as->last_share_id;
39} 41}
40/* still dumb */ 42/* still dumb */
41static void release_as_share_id(struct gk20a_as *as, int id) 43static void release_as_share_id(struct gk20a_as *as, int id)
42{ 44{
43 gk20a_dbg_fn(""); 45 struct gk20a *g = gk20a_from_as(as);
46
47 nvgpu_log_fn(g, " ");
44 return; 48 return;
45} 49}
46 50
@@ -56,7 +60,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
56 const bool userspace_managed = 60 const bool userspace_managed =
57 (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0; 61 (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
58 62
59 gk20a_dbg_fn(""); 63 nvgpu_log_fn(g, " ");
60 64
61 if (big_page_size == 0) { 65 if (big_page_size == 0) {
62 big_page_size = g->ops.mm.get_default_big_page_size(); 66 big_page_size = g->ops.mm.get_default_big_page_size();
@@ -92,7 +96,7 @@ int gk20a_as_alloc_share(struct gk20a *g,
92 struct gk20a_as_share *as_share; 96 struct gk20a_as_share *as_share;
93 int err = 0; 97 int err = 0;
94 98
95 gk20a_dbg_fn(""); 99 nvgpu_log_fn(g, " ");
96 g = gk20a_get(g); 100 g = gk20a_get(g);
97 if (!g) 101 if (!g)
98 return -ENODEV; 102 return -ENODEV;
@@ -126,8 +130,9 @@ failed:
126int gk20a_vm_release_share(struct gk20a_as_share *as_share) 130int gk20a_vm_release_share(struct gk20a_as_share *as_share)
127{ 131{
128 struct vm_gk20a *vm = as_share->vm; 132 struct vm_gk20a *vm = as_share->vm;
133 struct gk20a *g = gk20a_from_vm(vm);
129 134
130 gk20a_dbg_fn(""); 135 nvgpu_log_fn(g, " ");
131 136
132 vm->as_share = NULL; 137 vm->as_share = NULL;
133 as_share->vm = NULL; 138 as_share->vm = NULL;
@@ -146,7 +151,7 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
146 struct gk20a *g = as_share->vm->mm->g; 151 struct gk20a *g = as_share->vm->mm->g;
147 int err; 152 int err;
148 153
149 gk20a_dbg_fn(""); 154 nvgpu_log_fn(g, " ");
150 155
151 err = gk20a_busy(g); 156 err = gk20a_busy(g);
152 157
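as.c shows the other half of the conversion: when only a struct gk20a_as * or struct vm_gk20a * is at hand, a local g is derived first. A minimal sketch of that pattern (the function names are invented; the helpers are the ones used in the hunks above):

    static void as_log_sketch(struct gk20a_as *as)
    {
            struct gk20a *g = gk20a_from_as(as);    /* reach gk20a from the address space */

            nvgpu_log_fn(g, " ");
    }

    static void vm_log_sketch(struct vm_gk20a *vm)
    {
            struct gk20a *g = gk20a_from_vm(vm);    /* reach gk20a from the VM */

            nvgpu_log_fn(g, " ");
    }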
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 7c92246c..511d564f 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -464,7 +464,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
464 new_data = cde_ctx->user_param_values[user_id]; 464 new_data = cde_ctx->user_param_values[user_id];
465 } 465 }
466 466
467 gk20a_dbg(gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx", 467 nvgpu_log(g, gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx",
468 i, param->id, param->target_buf, 468 i, param->id, param->target_buf,
469 param->target_byte_offset, new_data, 469 param->target_byte_offset, new_data,
470 param->data_offset, param->type, param->shift, 470 param->data_offset, param->type, param->shift,
@@ -790,8 +790,9 @@ __acquires(&cde_app->mutex)
790__releases(&cde_app->mutex) 790__releases(&cde_app->mutex)
791{ 791{
792 struct gk20a_cde_app *cde_app = &cde_ctx->l->cde_app; 792 struct gk20a_cde_app *cde_app = &cde_ctx->l->cde_app;
793 struct gk20a *g = &cde_ctx->l->g;
793 794
794 gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx); 795 nvgpu_log(g, gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
795 trace_gk20a_cde_release(cde_ctx); 796 trace_gk20a_cde_release(cde_ctx);
796 797
797 nvgpu_mutex_acquire(&cde_app->mutex); 798 nvgpu_mutex_acquire(&cde_app->mutex);
@@ -801,7 +802,7 @@ __releases(&cde_app->mutex)
801 nvgpu_list_move(&cde_ctx->list, &cde_app->free_contexts); 802 nvgpu_list_move(&cde_ctx->list, &cde_app->free_contexts);
802 cde_app->ctx_usecount--; 803 cde_app->ctx_usecount--;
803 } else { 804 } else {
804 gk20a_dbg_info("double release cde context %p", cde_ctx); 805 nvgpu_log_info(g, "double release cde context %p", cde_ctx);
805 } 806 }
806 807
807 nvgpu_mutex_release(&cde_app->mutex); 808 nvgpu_mutex_release(&cde_app->mutex);
@@ -823,7 +824,7 @@ __releases(&cde_app->mutex)
823 if (cde_ctx->in_use || !cde_app->initialised) 824 if (cde_ctx->in_use || !cde_app->initialised)
824 return; 825 return;
825 826
826 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 827 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
827 "cde: attempting to delete temporary %p", cde_ctx); 828 "cde: attempting to delete temporary %p", cde_ctx);
828 829
829 err = gk20a_busy(g); 830 err = gk20a_busy(g);
@@ -837,7 +838,7 @@ __releases(&cde_app->mutex)
837 838
838 nvgpu_mutex_acquire(&cde_app->mutex); 839 nvgpu_mutex_acquire(&cde_app->mutex);
839 if (cde_ctx->in_use || !cde_app->initialised) { 840 if (cde_ctx->in_use || !cde_app->initialised) {
840 gk20a_dbg(gpu_dbg_cde_ctx, 841 nvgpu_log(g, gpu_dbg_cde_ctx,
841 "cde: context use raced, not deleting %p", 842 "cde: context use raced, not deleting %p",
842 cde_ctx); 843 cde_ctx);
843 goto out; 844 goto out;
@@ -847,7 +848,7 @@ __releases(&cde_app->mutex)
847 "double pending %p", cde_ctx); 848 "double pending %p", cde_ctx);
848 849
849 gk20a_cde_remove_ctx(cde_ctx); 850 gk20a_cde_remove_ctx(cde_ctx);
850 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 851 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
851 "cde: destroyed %p count=%d use=%d max=%d", 852 "cde: destroyed %p count=%d use=%d max=%d",
852 cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount, 853 cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount,
853 cde_app->ctx_count_top); 854 cde_app->ctx_count_top);
@@ -874,7 +875,7 @@ __must_hold(&cde_app->mutex)
874 if (!nvgpu_list_empty(&cde_app->free_contexts)) { 875 if (!nvgpu_list_empty(&cde_app->free_contexts)) {
875 cde_ctx = nvgpu_list_first_entry(&cde_app->free_contexts, 876 cde_ctx = nvgpu_list_first_entry(&cde_app->free_contexts,
876 gk20a_cde_ctx, list); 877 gk20a_cde_ctx, list);
877 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 878 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
878 "cde: got free %p count=%d use=%d max=%d", 879 "cde: got free %p count=%d use=%d max=%d",
879 cde_ctx, cde_app->ctx_count, 880 cde_ctx, cde_app->ctx_count,
880 cde_app->ctx_usecount, 881 cde_app->ctx_usecount,
@@ -893,7 +894,7 @@ __must_hold(&cde_app->mutex)
893 894
894 /* no free contexts, get a temporary one */ 895 /* no free contexts, get a temporary one */
895 896
896 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 897 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
897 "cde: no free contexts, count=%d", 898 "cde: no free contexts, count=%d",
898 cde_app->ctx_count); 899 cde_app->ctx_count);
899 900
@@ -967,7 +968,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l
967 INIT_DELAYED_WORK(&cde_ctx->ctx_deleter_work, 968 INIT_DELAYED_WORK(&cde_ctx->ctx_deleter_work,
968 gk20a_cde_ctx_deleter_fn); 969 gk20a_cde_ctx_deleter_fn);
969 970
970 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx); 971 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
971 trace_gk20a_cde_allocate_context(cde_ctx); 972 trace_gk20a_cde_allocate_context(cde_ctx);
972 return cde_ctx; 973 return cde_ctx;
973} 974}
@@ -1005,7 +1006,7 @@ __releases(&l->cde_app->mutex)
1005 u32 submit_op; 1006 u32 submit_op;
1006 struct dma_buf_attachment *attachment; 1007 struct dma_buf_attachment *attachment;
1007 1008
1008 gk20a_dbg(gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu", 1009 nvgpu_log(g, gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
1009 compbits_byte_offset, scatterbuffer_byte_offset); 1010 compbits_byte_offset, scatterbuffer_byte_offset);
1010 1011
1011 /* scatter buffer must be after compbits buffer */ 1012 /* scatter buffer must be after compbits buffer */
@@ -1055,11 +1056,11 @@ __releases(&l->cde_app->mutex)
1055 compbits_byte_offset; 1056 compbits_byte_offset;
1056 } 1057 }
1057 1058
1058 gk20a_dbg(gpu_dbg_cde, "map_offset=%llu map_size=%llu", 1059 nvgpu_log(g, gpu_dbg_cde, "map_offset=%llu map_size=%llu",
1059 map_offset, map_size); 1060 map_offset, map_size);
1060 gk20a_dbg(gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu", 1061 nvgpu_log(g, gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu",
1061 mapped_compbits_offset, compbits_size); 1062 mapped_compbits_offset, compbits_size);
1062 gk20a_dbg(gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu", 1063 nvgpu_log(g, gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu",
1063 mapped_scatterbuffer_offset, scatterbuffer_size); 1064 mapped_scatterbuffer_offset, scatterbuffer_size);
1064 1065
1065 1066
@@ -1096,7 +1097,7 @@ __releases(&l->cde_app->mutex)
1096 1097
1097 scatter_buffer = surface + scatterbuffer_byte_offset; 1098 scatter_buffer = surface + scatterbuffer_byte_offset;
1098 1099
1099 gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p", 1100 nvgpu_log(g, gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
1100 surface, scatter_buffer); 1101 surface, scatter_buffer);
1101 sgt = gk20a_mm_pin(dev_from_gk20a(g), compbits_scatter_buf, 1102 sgt = gk20a_mm_pin(dev_from_gk20a(g), compbits_scatter_buf,
1102 &attachment); 1103 &attachment);
@@ -1163,11 +1164,11 @@ __releases(&l->cde_app->mutex)
1163 goto exit_unmap_surface; 1164 goto exit_unmap_surface;
1164 } 1165 }
1165 1166
1166 gk20a_dbg(gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n", 1167 nvgpu_log(g, gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n",
1167 g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr); 1168 g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr);
1168 gk20a_dbg(gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n", 1169 nvgpu_log(g, gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n",
1169 cde_ctx->compbit_size, cde_ctx->compbit_vaddr); 1170 cde_ctx->compbit_size, cde_ctx->compbit_vaddr);
1170 gk20a_dbg(gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n", 1171 nvgpu_log(g, gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n",
1171 cde_ctx->scatterbuffer_size, cde_ctx->scatterbuffer_vaddr); 1172 cde_ctx->scatterbuffer_size, cde_ctx->scatterbuffer_vaddr);
1172 1173
1173 /* take always the postfence as it is needed for protecting the 1174 /* take always the postfence as it is needed for protecting the
@@ -1234,9 +1235,9 @@ __releases(&cde_app->mutex)
1234 return; 1235 return;
1235 1236
1236 trace_gk20a_cde_finished_ctx_cb(cde_ctx); 1237 trace_gk20a_cde_finished_ctx_cb(cde_ctx);
1237 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx); 1238 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx);
1238 if (!cde_ctx->in_use) 1239 if (!cde_ctx->in_use)
1239 gk20a_dbg_info("double finish cde context %p on channel %p", 1240 nvgpu_log_info(g, "double finish cde context %p on channel %p",
1240 cde_ctx, ch); 1241 cde_ctx, ch);
1241 1242
1242 if (ch->has_timedout) { 1243 if (ch->has_timedout) {
@@ -1406,12 +1407,13 @@ __acquires(&cde_app->mutex)
1406__releases(&cde_app->mutex) 1407__releases(&cde_app->mutex)
1407{ 1408{
1408 struct gk20a_cde_app *cde_app = &l->cde_app; 1409 struct gk20a_cde_app *cde_app = &l->cde_app;
1410 struct gk20a *g = &l->g;
1409 int err; 1411 int err;
1410 1412
1411 if (cde_app->initialised) 1413 if (cde_app->initialised)
1412 return 0; 1414 return 0;
1413 1415
1414 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init"); 1416 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
1415 1417
1416 err = nvgpu_mutex_init(&cde_app->mutex); 1418 err = nvgpu_mutex_init(&cde_app->mutex);
1417 if (err) 1419 if (err)
@@ -1430,7 +1432,7 @@ __releases(&cde_app->mutex)
1430 cde_app->initialised = true; 1432 cde_app->initialised = true;
1431 1433
1432 nvgpu_mutex_release(&cde_app->mutex); 1434 nvgpu_mutex_release(&cde_app->mutex);
1433 gk20a_dbg(gpu_dbg_cde_ctx, "cde: init finished: %d", err); 1435 nvgpu_log(g, gpu_dbg_cde_ctx, "cde: init finished: %d", err);
1434 1436
1435 if (err) 1437 if (err)
1436 nvgpu_mutex_destroy(&cde_app->mutex); 1438 nvgpu_mutex_destroy(&cde_app->mutex);
@@ -1528,14 +1530,14 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
1528 nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)", 1530 nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
1529 xtiles, ytiles); 1531 xtiles, ytiles);
1530 1532
1531 gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx", 1533 nvgpu_log(g, gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
1532 width, height, block_height_log2, 1534 width, height, block_height_log2,
1533 compbits_hoffset, compbits_voffset, scatterbuffer_offset); 1535 compbits_hoffset, compbits_voffset, scatterbuffer_offset);
1534 gk20a_dbg(gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)", 1536 nvgpu_log(g, gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)",
1535 width, height, xtiles, ytiles); 1537 width, height, xtiles, ytiles);
1536 gk20a_dbg(gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)", 1538 nvgpu_log(g, gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)",
1537 wgx, wgy, gridw_h, gridh_h, gridw_v, gridh_v); 1539 wgx, wgy, gridw_h, gridh_h, gridw_v, gridh_v);
1538 gk20a_dbg(gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d", 1540 nvgpu_log(g, gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d",
1539 hprog, 1541 hprog,
1540 l->cde_app.arrays[ARRAY_PROGRAM_OFFSET][hprog], 1542 l->cde_app.arrays[ARRAY_PROGRAM_OFFSET][hprog],
1541 l->cde_app.arrays[ARRAY_REGISTER_COUNT][hprog], 1543 l->cde_app.arrays[ARRAY_REGISTER_COUNT][hprog],
@@ -1634,7 +1636,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
1634 if (!l->cde_app.initialised) 1636 if (!l->cde_app.initialised)
1635 return -ENOSYS; 1637 return -ENOSYS;
1636 1638
1637 gk20a_dbg(gpu_dbg_cde, "firmware version = %d\n", 1639 nvgpu_log(g, gpu_dbg_cde, "firmware version = %d\n",
1638 l->cde_app.firmware_version); 1640 l->cde_app.firmware_version);
1639 1641
1640 if (l->cde_app.firmware_version == 1) { 1642 if (l->cde_app.firmware_version == 1) {
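In cde.c the mask-based gk20a_dbg(gpu_dbg_*, ...) calls become nvgpu_log(g, gpu_dbg_*, ...) with the same categories, and g comes from the nvgpu_os_linux wrapper when only l or cde_ctx is in scope. A sketch (hypothetical function; the field layout matches the hunks above):

    static void cde_log_sketch(struct nvgpu_os_linux *l,
                               struct gk20a_cde_ctx *cde_ctx)
    {
            struct gk20a *g = &l->g;        /* gk20a is embedded in the Linux wrapper */

            nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
            nvgpu_log_info(g, "double release cde context %p", cde_ctx);
    }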
diff --git a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c b/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
index 483a3ee7..5c0e79a7 100644
--- a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
+++ b/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GP10B CDE 2 * GP10B CDE
3 * 3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -117,7 +117,7 @@ int gp10b_populate_scatter_buffer(struct gk20a *g,
117 u64 surf_pa = sg_phys(sg); 117 u64 surf_pa = sg_phys(sg);
118 unsigned int n = (int)(sg->length >> page_size_log2); 118 unsigned int n = (int)(sg->length >> page_size_log2);
119 119
120 gk20a_dbg(gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n); 120 nvgpu_log(g, gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n);
121 121
122 for (j=0; j < n && pages_left > 0; j++, surf_pa += page_size) { 122 for (j=0; j < n && pages_left > 0; j++, surf_pa += page_size) {
123 u32 addr = (((u32)(surf_pa>>7)) & getSliceMaskGP10B) >> page_size_shift; 123 u32 addr = (((u32)(surf_pa>>7)) & getSliceMaskGP10B) >> page_size_shift;
@@ -143,9 +143,9 @@ int gp10b_populate_scatter_buffer(struct gk20a *g,
143 scatter_buffer[page >> 3] = d; 143 scatter_buffer[page >> 3] = d;
144 144
145 if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) { 145 if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) {
146 gk20a_dbg(gpu_dbg_cde, "scatterBuffer content:"); 146 nvgpu_log(g, gpu_dbg_cde, "scatterBuffer content:");
147 for (i = 0; i < page >> 3; i++) { 147 for (i = 0; i < page >> 3; i++) {
148 gk20a_dbg(gpu_dbg_cde, " %x", scatter_buffer[i]); 148 nvgpu_log(g, gpu_dbg_cde, " %x", scatter_buffer[i]);
149 } 149 }
150 } 150 }
151 151
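gp10b_populate_scatter_buffer() keeps its nvgpu_log_mask_enabled() guard, so the per-word dump loop is skipped entirely unless gpu_dbg_cde is enabled. Trimmed sketch of that guard (buffer and length names are placeholders):

    static void dump_scatter_sketch(struct gk20a *g, u32 *scatter_buffer, u32 words)
    {
            u32 i;

            /* Skip the verbose dump when the category is masked off. */
            if (!nvgpu_log_mask_enabled(g, gpu_dbg_cde))
                    return;

            nvgpu_log(g, gpu_dbg_cde, "scatterBuffer content:");
            for (i = 0; i < words; i++)
                    nvgpu_log(g, gpu_dbg_cde, " %x", scatter_buffer[i]);
    }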
diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c
index 8f2adc3a..d767374b 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.c
+++ b/drivers/gpu/nvgpu/common/linux/channel.c
@@ -834,7 +834,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
834 /* update debug settings */ 834 /* update debug settings */
835 nvgpu_ltc_sync_enabled(g); 835 nvgpu_ltc_sync_enabled(g);
836 836
837 gk20a_dbg_info("channel %d", c->chid); 837 nvgpu_log_info(g, "channel %d", c->chid);
838 838
839 /* 839 /*
840 * Job tracking is necessary for any of the following conditions: 840 * Job tracking is necessary for any of the following conditions:
@@ -943,7 +943,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
943 fence ? fence->id : 0, 943 fence ? fence->id : 0,
944 fence ? fence->value : 0); 944 fence ? fence->value : 0);
945 945
946 gk20a_dbg_info("pre-submit put %d, get %d, size %d", 946 nvgpu_log_info(g, "pre-submit put %d, get %d, size %d",
947 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); 947 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
948 948
949 /* 949 /*
@@ -1023,18 +1023,18 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
1023 post_fence ? post_fence->syncpt_id : 0, 1023 post_fence ? post_fence->syncpt_id : 0,
1024 post_fence ? post_fence->syncpt_value : 0); 1024 post_fence ? post_fence->syncpt_value : 0);
1025 1025
1026 gk20a_dbg_info("post-submit put %d, get %d, size %d", 1026 nvgpu_log_info(g, "post-submit put %d, get %d, size %d",
1027 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); 1027 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
1028 1028
1029 if (profile) 1029 if (profile)
1030 profile->timestamp[PROFILE_END] = sched_clock(); 1030 profile->timestamp[PROFILE_END] = sched_clock();
1031 gk20a_dbg_fn("done"); 1031 nvgpu_log_fn(g, "done");
1032 return err; 1032 return err;
1033 1033
1034clean_up_job: 1034clean_up_job:
1035 channel_gk20a_free_job(c, job); 1035 channel_gk20a_free_job(c, job);
1036clean_up: 1036clean_up:
1037 gk20a_dbg_fn("fail"); 1037 nvgpu_log_fn(g, "fail");
1038 gk20a_fence_put(post_fence); 1038 gk20a_fence_put(post_fence);
1039 if (c->deterministic) 1039 if (c->deterministic)
1040 nvgpu_rwsem_up_read(&g->deterministic_busy); 1040 nvgpu_rwsem_up_read(&g->deterministic_busy);
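channel.c keeps its entry/progress/exit tracing, just routed through the new macros. A condensed sketch of the submit path's logging shape (the wrapper function and its body are invented; the log calls mirror the hunks above):

    static int submit_log_sketch(struct gk20a *g, struct channel_gk20a *c)
    {
            int err = 0;

            nvgpu_log_info(g, "channel %d", c->chid);

            /* ... build and submit the gpfifo entries ... */

            if (err) {
                    nvgpu_log_fn(g, "fail");
                    return err;
            }

            nvgpu_log_fn(g, "done");
            return 0;
    }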
diff --git a/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c b/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c
index 8268bf60..2f0c3e89 100644
--- a/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c
+++ b/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -79,13 +79,14 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
79 loff_t *off) 79 loff_t *off)
80{ 80{
81 struct gk20a_ctxsw_dev *dev = filp->private_data; 81 struct gk20a_ctxsw_dev *dev = filp->private_data;
82 struct gk20a *g = dev->g;
82 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; 83 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr;
83 struct nvgpu_ctxsw_trace_entry __user *entry = 84 struct nvgpu_ctxsw_trace_entry __user *entry =
84 (struct nvgpu_ctxsw_trace_entry *) buf; 85 (struct nvgpu_ctxsw_trace_entry *) buf;
85 size_t copied = 0; 86 size_t copied = 0;
86 int err; 87 int err;
87 88
88 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, 89 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
89 "filp=%p buf=%p size=%zu", filp, buf, size); 90 "filp=%p buf=%p size=%zu", filp, buf, size);
90 91
91 nvgpu_mutex_acquire(&dev->write_lock); 92 nvgpu_mutex_acquire(&dev->write_lock);
@@ -119,7 +120,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
119 size -= sizeof(*entry); 120 size -= sizeof(*entry);
120 } 121 }
121 122
122 gk20a_dbg(gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied, 123 nvgpu_log(g, gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied,
123 hdr->read_idx); 124 hdr->read_idx);
124 125
125 *off = hdr->read_idx; 126 *off = hdr->read_idx;
@@ -130,7 +131,9 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
130 131
131static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev) 132static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev)
132{ 133{
133 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled"); 134 struct gk20a *g = dev->g;
135
136 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled");
134 nvgpu_mutex_acquire(&dev->write_lock); 137 nvgpu_mutex_acquire(&dev->write_lock);
135 dev->write_enabled = true; 138 dev->write_enabled = true;
136 nvgpu_mutex_release(&dev->write_lock); 139 nvgpu_mutex_release(&dev->write_lock);
@@ -140,7 +143,9 @@ static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev)
140 143
141static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev) 144static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev)
142{ 145{
143 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled"); 146 struct gk20a *g = dev->g;
147
148 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled");
144 dev->g->ops.fecs_trace.disable(dev->g); 149 dev->g->ops.fecs_trace.disable(dev->g);
145 nvgpu_mutex_acquire(&dev->write_lock); 150 nvgpu_mutex_acquire(&dev->write_lock);
146 dev->write_enabled = false; 151 dev->write_enabled = false;
@@ -168,7 +173,7 @@ static int gk20a_ctxsw_dev_alloc_buffer(struct gk20a_ctxsw_dev *dev,
168 dev->size = size; 173 dev->size = size;
169 dev->num_ents = dev->hdr->num_ents; 174 dev->num_ents = dev->hdr->num_ents;
170 175
171 gk20a_dbg(gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d", 176 nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d",
172 dev->size, dev->hdr, dev->ents, dev->hdr->num_ents); 177 dev->size, dev->hdr, dev->ents, dev->hdr->num_ents);
173 return 0; 178 return 0;
174} 179}
@@ -208,10 +213,11 @@ int gk20a_ctxsw_dev_ring_free(struct gk20a *g)
208static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev, 213static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev,
209 struct nvgpu_ctxsw_ring_setup_args *args) 214 struct nvgpu_ctxsw_ring_setup_args *args)
210{ 215{
216 struct gk20a *g = dev->g;
211 size_t size = args->size; 217 size_t size = args->size;
212 int ret; 218 int ret;
213 219
214 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size); 220 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size);
215 221
216 if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE) 222 if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE)
217 return -EINVAL; 223 return -EINVAL;
@@ -252,7 +258,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev)
252 struct gk20a *g = dev->g; 258 struct gk20a *g = dev->g;
253 int err; 259 int err;
254 260
255 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); 261 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
256 262
257 err = gk20a_busy(g); 263 err = gk20a_busy(g);
258 if (err) 264 if (err)
@@ -286,7 +292,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
286 if (!g) 292 if (!g)
287 return -ENODEV; 293 return -ENODEV;
288 294
289 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g); 295 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g);
290 296
291 if (!capable(CAP_SYS_ADMIN)) { 297 if (!capable(CAP_SYS_ADMIN)) {
292 err = -EPERM; 298 err = -EPERM;
@@ -322,13 +328,13 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
322 328
323 size = sizeof(struct nvgpu_ctxsw_ring_header) + 329 size = sizeof(struct nvgpu_ctxsw_ring_header) +
324 n * sizeof(struct nvgpu_ctxsw_trace_entry); 330 n * sizeof(struct nvgpu_ctxsw_trace_entry);
325 gk20a_dbg(gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu", 331 nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu",
326 size, n, sizeof(struct nvgpu_ctxsw_trace_entry)); 332 size, n, sizeof(struct nvgpu_ctxsw_trace_entry));
327 333
328 err = gk20a_ctxsw_dev_alloc_buffer(dev, size); 334 err = gk20a_ctxsw_dev_alloc_buffer(dev, size);
329 if (!err) { 335 if (!err) {
330 filp->private_data = dev; 336 filp->private_data = dev;
331 gk20a_dbg(gpu_dbg_ctxsw, "filp=%p dev=%p size=%zu", 337 nvgpu_log(g, gpu_dbg_ctxsw, "filp=%p dev=%p size=%zu",
332 filp, dev, size); 338 filp, dev, size);
333 } 339 }
334 340
@@ -348,7 +354,7 @@ int gk20a_ctxsw_dev_release(struct inode *inode, struct file *filp)
348 struct gk20a_ctxsw_dev *dev = filp->private_data; 354 struct gk20a_ctxsw_dev *dev = filp->private_data;
349 struct gk20a *g = dev->g; 355 struct gk20a *g = dev->g;
350 356
351 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev); 357 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev);
352 358
353 g->ops.fecs_trace.disable(g); 359 g->ops.fecs_trace.disable(g);
354 360
@@ -372,7 +378,7 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd,
372 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; 378 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
373 int err = 0; 379 int err = 0;
374 380
375 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd)); 381 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd));
376 382
377 if ((_IOC_TYPE(cmd) != NVGPU_CTXSW_IOCTL_MAGIC) || 383 if ((_IOC_TYPE(cmd) != NVGPU_CTXSW_IOCTL_MAGIC) ||
378 (_IOC_NR(cmd) == 0) || 384 (_IOC_NR(cmd) == 0) ||
@@ -423,10 +429,11 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd,
423unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait) 429unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
424{ 430{
425 struct gk20a_ctxsw_dev *dev = filp->private_data; 431 struct gk20a_ctxsw_dev *dev = filp->private_data;
432 struct gk20a *g = dev->g;
426 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; 433 struct nvgpu_ctxsw_ring_header *hdr = dev->hdr;
427 unsigned int mask = 0; 434 unsigned int mask = 0;
428 435
429 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); 436 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
430 437
431 nvgpu_mutex_acquire(&dev->write_lock); 438 nvgpu_mutex_acquire(&dev->write_lock);
432 poll_wait(filp, &dev->readout_wq.wq, wait); 439 poll_wait(filp, &dev->readout_wq.wq, wait);
@@ -440,18 +447,20 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
440static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma) 447static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma)
441{ 448{
442 struct gk20a_ctxsw_dev *dev = vma->vm_private_data; 449 struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
450 struct gk20a *g = dev->g;
443 451
444 nvgpu_atomic_inc(&dev->vma_ref); 452 nvgpu_atomic_inc(&dev->vma_ref);
445 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", 453 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
446 nvgpu_atomic_read(&dev->vma_ref)); 454 nvgpu_atomic_read(&dev->vma_ref));
447} 455}
448 456
449static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma) 457static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma)
450{ 458{
451 struct gk20a_ctxsw_dev *dev = vma->vm_private_data; 459 struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
460 struct gk20a *g = dev->g;
452 461
453 nvgpu_atomic_dec(&dev->vma_ref); 462 nvgpu_atomic_dec(&dev->vma_ref);
454 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", 463 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
455 nvgpu_atomic_read(&dev->vma_ref)); 464 nvgpu_atomic_read(&dev->vma_ref));
456} 465}
457 466
@@ -469,9 +478,10 @@ int gk20a_ctxsw_dev_mmap_buffer(struct gk20a *g,
469int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma) 478int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma)
470{ 479{
471 struct gk20a_ctxsw_dev *dev = filp->private_data; 480 struct gk20a_ctxsw_dev *dev = filp->private_data;
481 struct gk20a *g = dev->g;
472 int ret; 482 int ret;
473 483
474 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx", 484 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx",
475 vma->vm_start, vma->vm_end); 485 vma->vm_start, vma->vm_end);
476 486
477 ret = dev->g->ops.fecs_trace.mmap_user_buffer(dev->g, vma); 487 ret = dev->g->ops.fecs_trace.mmap_user_buffer(dev->g, vma);
@@ -513,7 +523,7 @@ int gk20a_ctxsw_trace_init(struct gk20a *g)
513 struct gk20a_ctxsw_trace *trace = g->ctxsw_trace; 523 struct gk20a_ctxsw_trace *trace = g->ctxsw_trace;
514 int err; 524 int err;
515 525
516 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace); 526 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace);
517 527
518 /* if tracing is not supported, skip this */ 528 /* if tracing is not supported, skip this */
519 if (!g->ops.fecs_trace.init) 529 if (!g->ops.fecs_trace.init)
@@ -590,7 +600,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
590 dev = &g->ctxsw_trace->devs[entry->vmid]; 600 dev = &g->ctxsw_trace->devs[entry->vmid];
591 hdr = dev->hdr; 601 hdr = dev->hdr;
592 602
593 gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, 603 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
594 "dev=%p hdr=%p", dev, hdr); 604 "dev=%p hdr=%p", dev, hdr);
595 605
596 nvgpu_mutex_acquire(&dev->write_lock); 606 nvgpu_mutex_acquire(&dev->write_lock);
@@ -630,7 +640,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
630 goto filter; 640 goto filter;
631 } 641 }
632 642
633 gk20a_dbg(gpu_dbg_ctxsw, 643 nvgpu_log(g, gpu_dbg_ctxsw,
634 "seqno=%d context_id=%08x pid=%lld tag=%x timestamp=%llx", 644 "seqno=%d context_id=%08x pid=%lld tag=%x timestamp=%llx",
635 entry->seqno, entry->context_id, entry->pid, 645 entry->seqno, entry->context_id, entry->pid,
636 entry->tag, entry->timestamp); 646 entry->tag, entry->timestamp);
@@ -644,7 +654,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
644 if (unlikely(write_idx >= hdr->num_ents)) 654 if (unlikely(write_idx >= hdr->num_ents))
645 write_idx = 0; 655 write_idx = 0;
646 hdr->write_idx = write_idx; 656 hdr->write_idx = write_idx;
647 gk20a_dbg(gpu_dbg_ctxsw, "added: read=%d write=%d len=%d", 657 nvgpu_log(g, gpu_dbg_ctxsw, "added: read=%d write=%d len=%d",
648 hdr->read_idx, hdr->write_idx, ring_len(hdr)); 658 hdr->read_idx, hdr->write_idx, ring_len(hdr));
649 659
650 nvgpu_mutex_release(&dev->write_lock); 660 nvgpu_mutex_release(&dev->write_lock);
@@ -657,7 +667,7 @@ drop:
657 hdr->drop_count++; 667 hdr->drop_count++;
658 668
659filter: 669filter:
660 gk20a_dbg(gpu_dbg_ctxsw, 670 nvgpu_log(g, gpu_dbg_ctxsw,
661 "dropping seqno=%d context_id=%08x pid=%lld " 671 "dropping seqno=%d context_id=%08x pid=%lld "
662 "tag=%x time=%llx (%s)", 672 "tag=%x time=%llx (%s)",
663 entry->seqno, entry->context_id, entry->pid, 673 entry->seqno, entry->context_id, entry->pid,
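In ctxsw_trace.c the gk20a pointer sits one hop away in the device's private data, so each converted function first adds struct gk20a *g = dev->g;. Minimal sketch (hypothetical function name):

    static void ctxsw_log_sketch(struct gk20a_ctxsw_dev *dev)
    {
            struct gk20a *g = dev->g;       /* the trace device carries its gk20a */

            nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "dev=%p", dev);
    }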
diff --git a/drivers/gpu/nvgpu/common/linux/debug.c b/drivers/gpu/nvgpu/common/linux/debug.c
index a458a3d4..e8c0417a 100644
--- a/drivers/gpu/nvgpu/common/linux/debug.c
+++ b/drivers/gpu/nvgpu/common/linux/debug.c
@@ -307,10 +307,6 @@ void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
307 debugfs_create_u32("disable_syncpoints", S_IRUGO, 307 debugfs_create_u32("disable_syncpoints", S_IRUGO,
308 l->debugfs, &g->disable_syncpoints); 308 l->debugfs, &g->disable_syncpoints);
309 309
310 /* Legacy debugging API. */
311 debugfs_create_u64("dbg_mask", S_IRUGO|S_IWUSR,
312 l->debugfs, &nvgpu_dbg_mask);
313
314 /* New debug logging API. */ 310 /* New debug logging API. */
315 debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR, 311 debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR,
316 l->debugfs, &g->log_mask); 312 l->debugfs, &g->log_mask);
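With the legacy dbg_mask node removed, runtime control is left to the per-GPU log_mask entry, which the new macros consult through g->log_mask. The check below is only an approximation of how that gating presumably works, not the implementation in this tree:

    /* Approximate gating check; assumes the gpu_dbg_* categories are bit
     * flags matched against the u64 exposed as debugfs "log_mask". */
    static inline bool log_mask_enabled_sketch(struct gk20a *g, u64 log_mask)
    {
            return (log_mask & g->log_mask) != 0;
    }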
diff --git a/drivers/gpu/nvgpu/common/linux/debug_fifo.c b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
index aeab0c92..b2a87e0d 100644
--- a/drivers/gpu/nvgpu/common/linux/debug_fifo.c
+++ b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2017 NVIDIA Corporation. All rights reserved. 2 * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
3 * 3 *
4 * This software is licensed under the terms of the GNU General Public 4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and 5 * License version 2, as published by the Free Software Foundation, and
@@ -108,6 +108,7 @@ static const struct seq_operations gk20a_fifo_sched_debugfs_seq_ops = {
108static int gk20a_fifo_sched_debugfs_open(struct inode *inode, 108static int gk20a_fifo_sched_debugfs_open(struct inode *inode,
109 struct file *file) 109 struct file *file)
110{ 110{
111 struct gk20a *g = inode->i_private;
111 int err; 112 int err;
112 113
113 if (!capable(CAP_SYS_ADMIN)) 114 if (!capable(CAP_SYS_ADMIN))
@@ -117,7 +118,7 @@ static int gk20a_fifo_sched_debugfs_open(struct inode *inode,
117 if (err) 118 if (err)
118 return err; 119 return err;
119 120
120 gk20a_dbg(gpu_dbg_info, "i_private=%p", inode->i_private); 121 nvgpu_log(g, gpu_dbg_info, "i_private=%p", inode->i_private);
121 122
122 ((struct seq_file *)file->private_data)->private = inode->i_private; 123 ((struct seq_file *)file->private_data)->private = inode->i_private;
123 return 0; 124 return 0;
@@ -301,7 +302,7 @@ void gk20a_fifo_debugfs_init(struct gk20a *g)
301 if (IS_ERR_OR_NULL(fifo_root)) 302 if (IS_ERR_OR_NULL(fifo_root))
302 return; 303 return;
303 304
304 gk20a_dbg(gpu_dbg_info, "g=%p", g); 305 nvgpu_log(g, gpu_dbg_info, "g=%p", g);
305 306
306 debugfs_create_file("sched", 0600, fifo_root, g, 307 debugfs_create_file("sched", 0600, fifo_root, g,
307 &gk20a_fifo_sched_debugfs_fops); 308 &gk20a_fifo_sched_debugfs_fops);
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
index 53789423..769f7e03 100644
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/common/linux/driver_common.c
@@ -87,7 +87,7 @@ static void nvgpu_init_gr_vars(struct gk20a *g)
87{ 87{
88 gk20a_init_gr(g); 88 gk20a_init_gr(g);
89 89
90 gk20a_dbg_info("total ram pages : %lu", totalram_pages); 90 nvgpu_log_info(g, "total ram pages : %lu", totalram_pages);
91 g->gr.max_comptag_mem = totalram_pages 91 g->gr.max_comptag_mem = totalram_pages
92 >> (10 - (PAGE_SHIFT - 10)); 92 >> (10 - (PAGE_SHIFT - 10));
93} 93}
diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c
index 05dd3f2a..7ffc7e87 100644
--- a/drivers/gpu/nvgpu/common/linux/intr.c
+++ b/drivers/gpu/nvgpu/common/linux/intr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
50 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 50 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
51 int hw_irq_count; 51 int hw_irq_count;
52 52
53 gk20a_dbg(gpu_dbg_intr, "interrupt thread launched"); 53 nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");
54 54
55 trace_mc_gk20a_intr_thread_stall(g->name); 55 trace_mc_gk20a_intr_thread_stall(g->name);
56 56
diff --git a/drivers/gpu/nvgpu/common/linux/io.c b/drivers/gpu/nvgpu/common/linux/io.c
index cde90ddd..c06512a5 100644
--- a/drivers/gpu/nvgpu/common/linux/io.c
+++ b/drivers/gpu/nvgpu/common/linux/io.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -23,11 +23,11 @@ void nvgpu_writel(struct gk20a *g, u32 r, u32 v)
23 23
24 if (unlikely(!l->regs)) { 24 if (unlikely(!l->regs)) {
25 __gk20a_warn_on_no_regs(); 25 __gk20a_warn_on_no_regs();
26 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); 26 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
27 } else { 27 } else {
28 writel_relaxed(v, l->regs + r); 28 writel_relaxed(v, l->regs + r);
29 nvgpu_wmb(); 29 nvgpu_wmb();
30 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); 30 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
31 } 31 }
32} 32}
33 33
@@ -48,10 +48,10 @@ u32 __nvgpu_readl(struct gk20a *g, u32 r)
48 48
49 if (unlikely(!l->regs)) { 49 if (unlikely(!l->regs)) {
50 __gk20a_warn_on_no_regs(); 50 __gk20a_warn_on_no_regs();
51 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); 51 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
52 } else { 52 } else {
53 v = readl(l->regs + r); 53 v = readl(l->regs + r);
54 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); 54 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
55 } 55 }
56 56
57 return v; 57 return v;
@@ -63,13 +63,13 @@ void nvgpu_writel_check(struct gk20a *g, u32 r, u32 v)
63 63
64 if (unlikely(!l->regs)) { 64 if (unlikely(!l->regs)) {
65 __gk20a_warn_on_no_regs(); 65 __gk20a_warn_on_no_regs();
66 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); 66 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
67 } else { 67 } else {
68 nvgpu_wmb(); 68 nvgpu_wmb();
69 do { 69 do {
70 writel_relaxed(v, l->regs + r); 70 writel_relaxed(v, l->regs + r);
71 } while (readl(l->regs + r) != v); 71 } while (readl(l->regs + r) != v);
72 gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); 72 nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v);
73 } 73 }
74} 74}
75 75
@@ -79,11 +79,11 @@ void nvgpu_bar1_writel(struct gk20a *g, u32 b, u32 v)
79 79
80 if (unlikely(!l->bar1)) { 80 if (unlikely(!l->bar1)) {
81 __gk20a_warn_on_no_regs(); 81 __gk20a_warn_on_no_regs();
82 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); 82 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v);
83 } else { 83 } else {
84 nvgpu_wmb(); 84 nvgpu_wmb();
85 writel_relaxed(v, l->bar1 + b); 85 writel_relaxed(v, l->bar1 + b);
86 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); 86 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v);
87 } 87 }
88} 88}
89 89
@@ -94,10 +94,10 @@ u32 nvgpu_bar1_readl(struct gk20a *g, u32 b)
94 94
95 if (unlikely(!l->bar1)) { 95 if (unlikely(!l->bar1)) {
96 __gk20a_warn_on_no_regs(); 96 __gk20a_warn_on_no_regs();
97 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); 97 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v);
98 } else { 98 } else {
99 v = readl(l->bar1 + b); 99 v = readl(l->bar1 + b);
100 gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); 100 nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v);
101 } 101 }
102 102
103 return v; 103 return v;
diff --git a/drivers/gpu/nvgpu/common/linux/io_usermode.c b/drivers/gpu/nvgpu/common/linux/io_usermode.c
index 888be318..a7b728dd 100644
--- a/drivers/gpu/nvgpu/common/linux/io_usermode.c
+++ b/drivers/gpu/nvgpu/common/linux/io_usermode.c
@@ -25,5 +25,5 @@ void nvgpu_usermode_writel(struct gk20a *g, u32 r, u32 v)
25 void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r()); 25 void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r());
26 26
27 writel_relaxed(v, reg); 27 writel_relaxed(v, reg);
28 gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v); 28 nvgpu_log(g, gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v);
29} 29}
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl.c b/drivers/gpu/nvgpu/common/linux/ioctl.c
index 04974786..359e5103 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * NVGPU IOCTLs 2 * NVGPU IOCTLs
3 * 3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -139,8 +139,9 @@ static int gk20a_create_device(
139{ 139{
140 struct device *subdev; 140 struct device *subdev;
141 int err; 141 int err;
142 struct gk20a *g = gk20a_from_dev(dev);
142 143
143 gk20a_dbg_fn(""); 144 nvgpu_log_fn(g, " ");
144 145
145 cdev_init(cdev, ops); 146 cdev_init(cdev, ops);
146 cdev->owner = THIS_MODULE; 147 cdev->owner = THIS_MODULE;
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index e09e099b..41bbdfcb 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -50,8 +50,9 @@ static int gk20a_as_ioctl_bind_channel(
50{ 50{
51 int err = 0; 51 int err = 0;
52 struct channel_gk20a *ch; 52 struct channel_gk20a *ch;
53 struct gk20a *g = gk20a_from_vm(as_share->vm);
53 54
54 gk20a_dbg_fn(""); 55 nvgpu_log_fn(g, " ");
55 56
56 ch = gk20a_get_channel_from_file(args->channel_fd); 57 ch = gk20a_get_channel_from_file(args->channel_fd);
57 if (!ch) 58 if (!ch)
@@ -76,7 +77,7 @@ static int gk20a_as_ioctl_alloc_space(
76{ 77{
77 struct gk20a *g = gk20a_from_vm(as_share->vm); 78 struct gk20a *g = gk20a_from_vm(as_share->vm);
78 79
79 gk20a_dbg_fn(""); 80 nvgpu_log_fn(g, " ");
80 return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size, 81 return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
81 &args->o_a.offset, 82 &args->o_a.offset,
82 gk20a_as_translate_linux_flags(g, 83 gk20a_as_translate_linux_flags(g,
@@ -87,7 +88,9 @@ static int gk20a_as_ioctl_free_space(
87 struct gk20a_as_share *as_share, 88 struct gk20a_as_share *as_share,
88 struct nvgpu_as_free_space_args *args) 89 struct nvgpu_as_free_space_args *args)
89{ 90{
90 gk20a_dbg_fn(""); 91 struct gk20a *g = gk20a_from_vm(as_share->vm);
92
93 nvgpu_log_fn(g, " ");
91 return nvgpu_vm_area_free(as_share->vm, args->offset); 94 return nvgpu_vm_area_free(as_share->vm, args->offset);
92} 95}
93 96
@@ -95,7 +98,9 @@ static int gk20a_as_ioctl_map_buffer_ex(
95 struct gk20a_as_share *as_share, 98 struct gk20a_as_share *as_share,
96 struct nvgpu_as_map_buffer_ex_args *args) 99 struct nvgpu_as_map_buffer_ex_args *args)
97{ 100{
98 gk20a_dbg_fn(""); 101 struct gk20a *g = gk20a_from_vm(as_share->vm);
102
103 nvgpu_log_fn(g, " ");
99 104
100 /* unsupported, direct kind control must be used */ 105 /* unsupported, direct kind control must be used */
101 if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) { 106 if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
@@ -117,7 +122,9 @@ static int gk20a_as_ioctl_unmap_buffer(
117 struct gk20a_as_share *as_share, 122 struct gk20a_as_share *as_share,
118 struct nvgpu_as_unmap_buffer_args *args) 123 struct nvgpu_as_unmap_buffer_args *args)
119{ 124{
120 gk20a_dbg_fn(""); 125 struct gk20a *g = gk20a_from_vm(as_share->vm);
126
127 nvgpu_log_fn(g, " ");
121 128
122 nvgpu_vm_unmap(as_share->vm, args->offset, NULL); 129 nvgpu_vm_unmap(as_share->vm, args->offset, NULL);
123 130
@@ -128,6 +135,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
128 struct gk20a_as_share *as_share, 135 struct gk20a_as_share *as_share,
129 struct nvgpu_as_map_buffer_batch_args *args) 136 struct nvgpu_as_map_buffer_batch_args *args)
130{ 137{
138 struct gk20a *g = gk20a_from_vm(as_share->vm);
131 u32 i; 139 u32 i;
132 int err = 0; 140 int err = 0;
133 141
@@ -140,7 +148,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
140 148
141 struct vm_gk20a_mapping_batch batch; 149 struct vm_gk20a_mapping_batch batch;
142 150
143 gk20a_dbg_fn(""); 151 nvgpu_log_fn(g, " ");
144 152
145 if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT || 153 if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT ||
146 args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT) 154 args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT)
@@ -220,9 +228,10 @@ static int gk20a_as_ioctl_get_va_regions(
220 unsigned int write_entries; 228 unsigned int write_entries;
221 struct nvgpu_as_va_region __user *user_region_ptr; 229 struct nvgpu_as_va_region __user *user_region_ptr;
222 struct vm_gk20a *vm = as_share->vm; 230 struct vm_gk20a *vm = as_share->vm;
231 struct gk20a *g = gk20a_from_vm(vm);
223 unsigned int page_sizes = gmmu_page_size_kernel; 232 unsigned int page_sizes = gmmu_page_size_kernel;
224 233
225 gk20a_dbg_fn(""); 234 nvgpu_log_fn(g, " ");
226 235
227 if (!vm->big_pages) 236 if (!vm->big_pages)
228 page_sizes--; 237 page_sizes--;
@@ -293,14 +302,14 @@ int gk20a_as_dev_open(struct inode *inode, struct file *filp)
293 struct gk20a *g; 302 struct gk20a *g;
294 int err; 303 int err;
295 304
296 gk20a_dbg_fn("");
297
298 l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev); 305 l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev);
299 g = &l->g; 306 g = &l->g;
300 307
308 nvgpu_log_fn(g, " ");
309
301 err = gk20a_as_alloc_share(g, 0, 0, &as_share); 310 err = gk20a_as_alloc_share(g, 0, 0, &as_share);
302 if (err) { 311 if (err) {
303 gk20a_dbg_fn("failed to alloc share"); 312 nvgpu_log_fn(g, "failed to alloc share");
304 return err; 313 return err;
305 } 314 }
306 315
@@ -312,8 +321,6 @@ int gk20a_as_dev_release(struct inode *inode, struct file *filp)
312{ 321{
313 struct gk20a_as_share *as_share = filp->private_data; 322 struct gk20a_as_share *as_share = filp->private_data;
314 323
315 gk20a_dbg_fn("");
316
317 if (!as_share) 324 if (!as_share)
318 return 0; 325 return 0;
319 326
@@ -328,7 +335,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
328 335
329 u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE]; 336 u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];
330 337
331 gk20a_dbg_fn("start %d", _IOC_NR(cmd)); 338 nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
332 339
333 if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) || 340 if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
334 (_IOC_NR(cmd) == 0) || 341 (_IOC_NR(cmd) == 0) ||
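
Note: the ioctl_as.c hunks above all follow one recipe: derive the owning struct gk20a from a handle the function already holds (here via gk20a_from_vm(as_share->vm)) and pass it as the first argument of nvgpu_log_fn()/nvgpu_log(). The sketch below restates that recipe in one place using only identifiers that appear in the hunks; it assumes the usual nvgpu headers and types are in scope and is an illustration, not part of the patch.

    /* Sketch only: assumes the nvgpu log header plus the gk20a/vm types
     * used in the hunks above; not compilable outside the driver tree. */
    static int example_as_ioctl(struct gk20a_as_share *as_share)
    {
            /* Derive the device pointer the new log API needs. */
            struct gk20a *g = gk20a_from_vm(as_share->vm);

            /* Old form was gk20a_dbg_fn(""); the new form carries g: */
            nvgpu_log_fn(g, " ");

            return 0;
    }
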
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 06dfb180..606c5251 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -476,7 +476,7 @@ static int __gk20a_channel_open(struct gk20a *g,
476 struct channel_gk20a *ch; 476 struct channel_gk20a *ch;
477 struct channel_priv *priv; 477 struct channel_priv *priv;
478 478
479 gk20a_dbg_fn(""); 479 nvgpu_log_fn(g, " ");
480 480
481 g = gk20a_get(g); 481 g = gk20a_get(g);
482 if (!g) 482 if (!g)
@@ -529,10 +529,10 @@ int gk20a_channel_open(struct inode *inode, struct file *filp)
529 struct gk20a *g = &l->g; 529 struct gk20a *g = &l->g;
530 int ret; 530 int ret;
531 531
532 gk20a_dbg_fn("start"); 532 nvgpu_log_fn(g, "start");
533 ret = __gk20a_channel_open(g, filp, -1); 533 ret = __gk20a_channel_open(g, filp, -1);
534 534
535 gk20a_dbg_fn("end"); 535 nvgpu_log_fn(g, "end");
536 return ret; 536 return ret;
537} 537}
538 538
@@ -676,7 +676,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
676 int remain, ret = 0; 676 int remain, ret = 0;
677 u64 end; 677 u64 end;
678 678
679 gk20a_dbg_fn(""); 679 nvgpu_log_fn(g, " ");
680 680
681 if (ch->has_timedout) 681 if (ch->has_timedout)
682 return -ETIMEDOUT; 682 return -ETIMEDOUT;
@@ -760,7 +760,7 @@ static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
760 struct gk20a *g = ch->g; 760 struct gk20a *g = ch->g;
761 struct gr_gk20a *gr = &g->gr; 761 struct gr_gk20a *gr = &g->gr;
762 762
763 gk20a_dbg_fn(""); 763 nvgpu_log_fn(gr->g, " ");
764 764
765 return g->ops.gr.bind_ctxsw_zcull(g, gr, ch, 765 return g->ops.gr.bind_ctxsw_zcull(g, gr, ch,
766 args->gpu_va, args->mode); 766 args->gpu_va, args->mode);
@@ -775,9 +775,10 @@ static int gk20a_ioctl_channel_submit_gpfifo(
775 struct fifo_profile_gk20a *profile = NULL; 775 struct fifo_profile_gk20a *profile = NULL;
776 u32 submit_flags = 0; 776 u32 submit_flags = 0;
777 int fd = -1; 777 int fd = -1;
778 struct gk20a *g = ch->g;
778 779
779 int ret = 0; 780 int ret = 0;
780 gk20a_dbg_fn(""); 781 nvgpu_log_fn(g, " ");
781 782
782#ifdef CONFIG_DEBUG_FS 783#ifdef CONFIG_DEBUG_FS
783 profile = gk20a_fifo_profile_acquire(ch->g); 784 profile = gk20a_fifo_profile_acquire(ch->g);
@@ -1064,8 +1065,9 @@ long gk20a_channel_ioctl(struct file *filp,
1064 struct device *dev = dev_from_gk20a(ch->g); 1065 struct device *dev = dev_from_gk20a(ch->g);
1065 u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0}; 1066 u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
1066 int err = 0; 1067 int err = 0;
1068 struct gk20a *g = ch->g;
1067 1069
1068 gk20a_dbg_fn("start %d", _IOC_NR(cmd)); 1070 nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
1069 1071
1070 if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) || 1072 if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
1071 (_IOC_NR(cmd) == 0) || 1073 (_IOC_NR(cmd) == 0) ||
@@ -1224,7 +1226,7 @@ long gk20a_channel_ioctl(struct file *filp,
1224 { 1226 {
1225 u32 timeout = 1227 u32 timeout =
1226 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout; 1228 (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
1227 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", 1229 nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1228 timeout, ch->chid); 1230 timeout, ch->chid);
1229 ch->timeout_ms_max = timeout; 1231 ch->timeout_ms_max = timeout;
1230 gk20a_channel_trace_sched_param( 1232 gk20a_channel_trace_sched_param(
@@ -1238,7 +1240,7 @@ long gk20a_channel_ioctl(struct file *filp,
1238 bool timeout_debug_dump = !((u32) 1240 bool timeout_debug_dump = !((u32)
1239 ((struct nvgpu_set_timeout_ex_args *)buf)->flags & 1241 ((struct nvgpu_set_timeout_ex_args *)buf)->flags &
1240 (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP)); 1242 (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
1241 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", 1243 nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
1242 timeout, ch->chid); 1244 timeout, ch->chid);
1243 ch->timeout_ms_max = timeout; 1245 ch->timeout_ms_max = timeout;
1244 ch->timeout_debug_dump = timeout_debug_dump; 1246 ch->timeout_debug_dump = timeout_debug_dump;
@@ -1367,7 +1369,7 @@ long gk20a_channel_ioctl(struct file *filp,
1367 1369
1368 gk20a_channel_put(ch); 1370 gk20a_channel_put(ch);
1369 1371
1370 gk20a_dbg_fn("end"); 1372 nvgpu_log_fn(g, "end");
1371 1373
1372 return err; 1374 return err;
1373} 1375}
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c b/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c
index 039f65f8..3ab8cf9e 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c
@@ -209,9 +209,10 @@ static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
209static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev, 209static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev,
210 struct nvgpu_gpu_set_event_filter_args *args) 210 struct nvgpu_gpu_set_event_filter_args *args)
211{ 211{
212 struct gk20a *g = dev->session->g;
212 u32 mask; 213 u32 mask;
213 214
214 gk20a_dbg(gpu_dbg_fn, ""); 215 nvgpu_log(g, gpu_dbg_fn, " ");
215 216
216 if (args->flags) 217 if (args->flags)
217 return -EINVAL; 218 return -EINVAL;
@@ -237,7 +238,7 @@ static long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd,
237 u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE]; 238 u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE];
238 int err = 0; 239 int err = 0;
239 240
240 gk20a_dbg(gpu_dbg_fn, "nr=%d", _IOC_NR(cmd)); 241 nvgpu_log(g, gpu_dbg_fn, "nr=%d", _IOC_NR(cmd));
241 242
242 if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) 243 if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0)
243 || (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST)) 244 || (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST))
@@ -681,7 +682,7 @@ int nvgpu_clk_arb_debugfs_init(struct gk20a *g)
681 struct dentry *gpu_root = l->debugfs; 682 struct dentry *gpu_root = l->debugfs;
682 struct dentry *d; 683 struct dentry *d;
683 684
684 gk20a_dbg(gpu_dbg_info, "g=%p", g); 685 nvgpu_log(g, gpu_dbg_info, "g=%p", g);
685 686
686 d = debugfs_create_file( 687 d = debugfs_create_file(
687 "arb_stats", 688 "arb_stats",
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
index 70707a5c..7bb97369 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
@@ -62,14 +62,14 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
62 struct gk20a_ctrl_priv *priv; 62 struct gk20a_ctrl_priv *priv;
63 int err = 0; 63 int err = 0;
64 64
65 gk20a_dbg_fn("");
66
67 l = container_of(inode->i_cdev, 65 l = container_of(inode->i_cdev,
68 struct nvgpu_os_linux, ctrl.cdev); 66 struct nvgpu_os_linux, ctrl.cdev);
69 g = gk20a_get(&l->g); 67 g = gk20a_get(&l->g);
70 if (!g) 68 if (!g)
71 return -ENODEV; 69 return -ENODEV;
72 70
71 nvgpu_log_fn(g, " ");
72
73 priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv)); 73 priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv));
74 if (!priv) { 74 if (!priv) {
75 err = -ENOMEM; 75 err = -ENOMEM;
@@ -102,7 +102,7 @@ int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
102 struct gk20a_ctrl_priv *priv = filp->private_data; 102 struct gk20a_ctrl_priv *priv = filp->private_data;
103 struct gk20a *g = priv->g; 103 struct gk20a *g = priv->g;
104 104
105 gk20a_dbg_fn(""); 105 nvgpu_log_fn(g, " ");
106 106
107 if (priv->clk_session) 107 if (priv->clk_session)
108 nvgpu_clk_arb_release_session(g, priv->clk_session); 108 nvgpu_clk_arb_release_session(g, priv->clk_session);
@@ -684,7 +684,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
684 /* Copy to user space - pointed by "args->pwarpstate" */ 684 /* Copy to user space - pointed by "args->pwarpstate" */
685 if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate, 685 if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate,
686 w_state, ioctl_size)) { 686 w_state, ioctl_size)) {
687 gk20a_dbg_fn("copy_to_user failed!"); 687 nvgpu_log_fn(g, "copy_to_user failed!");
688 err = -EFAULT; 688 err = -EFAULT;
689 } 689 }
690 690
@@ -901,7 +901,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
901 u32 align = args->in.alignment ? args->in.alignment : SZ_4K; 901 u32 align = args->in.alignment ? args->in.alignment : SZ_4K;
902 int fd; 902 int fd;
903 903
904 gk20a_dbg_fn(""); 904 nvgpu_log_fn(g, " ");
905 905
906 /* not yet supported */ 906 /* not yet supported */
907 if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK)) 907 if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK))
@@ -933,7 +933,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
933 933
934 args->out.dmabuf_fd = fd; 934 args->out.dmabuf_fd = fd;
935 935
936 gk20a_dbg_fn("done, fd=%d", fd); 936 nvgpu_log_fn(g, "done, fd=%d", fd);
937 937
938 return 0; 938 return 0;
939} 939}
@@ -943,7 +943,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g,
943{ 943{
944 int err; 944 int err;
945 945
946 gk20a_dbg_fn(""); 946 nvgpu_log_fn(g, " ");
947 947
948 if (args->reserved[0] || args->reserved[1] || 948 if (args->reserved[0] || args->reserved[1] ||
949 args->reserved[2] || args->reserved[3]) 949 args->reserved[2] || args->reserved[3])
@@ -951,7 +951,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g,
951 951
952 err = nvgpu_vidmem_get_space(g, &args->total_free_bytes); 952 err = nvgpu_vidmem_get_space(g, &args->total_free_bytes);
953 953
954 gk20a_dbg_fn("done, err=%d, bytes=%lld", err, args->total_free_bytes); 954 nvgpu_log_fn(g, "done, err=%d, bytes=%lld", err, args->total_free_bytes);
955 955
956 return err; 956 return err;
957} 957}
@@ -973,7 +973,7 @@ static int nvgpu_gpu_clk_get_vf_points(struct gk20a *g,
973 u16 min_mhz; 973 u16 min_mhz;
974 u16 max_mhz; 974 u16 max_mhz;
975 975
976 gk20a_dbg_fn(""); 976 nvgpu_log_fn(g, " ");
977 977
978 if (!session || args->flags) 978 if (!session || args->flags)
979 return -EINVAL; 979 return -EINVAL;
@@ -1059,7 +1059,7 @@ static int nvgpu_gpu_clk_get_range(struct gk20a *g,
1059 int err; 1059 int err;
1060 u16 min_mhz, max_mhz; 1060 u16 min_mhz, max_mhz;
1061 1061
1062 gk20a_dbg_fn(""); 1062 nvgpu_log_fn(g, " ");
1063 1063
1064 if (!session) 1064 if (!session)
1065 return -EINVAL; 1065 return -EINVAL;
@@ -1138,7 +1138,7 @@ static int nvgpu_gpu_clk_set_info(struct gk20a *g,
1138 int i; 1138 int i;
1139 int ret; 1139 int ret;
1140 1140
1141 gk20a_dbg_fn(""); 1141 nvgpu_log_fn(g, " ");
1142 1142
1143 if (!session || args->flags) 1143 if (!session || args->flags)
1144 return -EINVAL; 1144 return -EINVAL;
@@ -1201,7 +1201,7 @@ static int nvgpu_gpu_clk_get_info(struct gk20a *g,
1201 int err; 1201 int err;
1202 int bit; 1202 int bit;
1203 1203
1204 gk20a_dbg_fn(""); 1204 nvgpu_log_fn(g, " ");
1205 1205
1206 if (!session) 1206 if (!session)
1207 return -EINVAL; 1207 return -EINVAL;
@@ -1287,7 +1287,7 @@ static int nvgpu_gpu_get_event_fd(struct gk20a *g,
1287{ 1287{
1288 struct nvgpu_clk_session *session = priv->clk_session; 1288 struct nvgpu_clk_session *session = priv->clk_session;
1289 1289
1290 gk20a_dbg_fn(""); 1290 nvgpu_log_fn(g, " ");
1291 1291
1292 if (!session) 1292 if (!session)
1293 return -EINVAL; 1293 return -EINVAL;
@@ -1301,7 +1301,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g,
1301{ 1301{
1302 int err = -EINVAL; 1302 int err = -EINVAL;
1303 1303
1304 gk20a_dbg_fn(""); 1304 nvgpu_log_fn(g, " ");
1305 1305
1306 if (args->reserved) 1306 if (args->reserved)
1307 return -EINVAL; 1307 return -EINVAL;
@@ -1337,7 +1337,7 @@ static int nvgpu_gpu_get_current(struct gk20a *g,
1337{ 1337{
1338 int err; 1338 int err;
1339 1339
1340 gk20a_dbg_fn(""); 1340 nvgpu_log_fn(g, " ");
1341 1341
1342 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1342 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1343 return -EINVAL; 1343 return -EINVAL;
@@ -1361,7 +1361,7 @@ static int nvgpu_gpu_get_power(struct gk20a *g,
1361{ 1361{
1362 int err; 1362 int err;
1363 1363
1364 gk20a_dbg_fn(""); 1364 nvgpu_log_fn(g, " ");
1365 1365
1366 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1366 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1367 return -EINVAL; 1367 return -EINVAL;
@@ -1386,7 +1386,7 @@ static int nvgpu_gpu_get_temperature(struct gk20a *g,
1386 int err; 1386 int err;
1387 u32 temp_f24_8; 1387 u32 temp_f24_8;
1388 1388
1389 gk20a_dbg_fn(""); 1389 nvgpu_log_fn(g, " ");
1390 1390
1391 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1391 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1392 return -EINVAL; 1392 return -EINVAL;
@@ -1415,7 +1415,7 @@ static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
1415{ 1415{
1416 int err; 1416 int err;
1417 1417
1418 gk20a_dbg_fn(""); 1418 nvgpu_log_fn(g, " ");
1419 1419
1420 if (args->reserved[0] || args->reserved[1] || args->reserved[2]) 1420 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1421 return -EINVAL; 1421 return -EINVAL;
@@ -1491,7 +1491,7 @@ static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g,
1491 u32 i = 0; 1491 u32 i = 0;
1492 int err = 0; 1492 int err = 0;
1493 1493
1494 gk20a_dbg_fn(""); 1494 nvgpu_log_fn(g, " ");
1495 1495
1496 user_channels = (int __user *)(uintptr_t)args->channels; 1496 user_channels = (int __user *)(uintptr_t)args->channels;
1497 1497
@@ -1556,7 +1556,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
1556 struct zbc_query_params *zbc_tbl; 1556 struct zbc_query_params *zbc_tbl;
1557 int i, err = 0; 1557 int i, err = 0;
1558 1558
1559 gk20a_dbg_fn("start %d", _IOC_NR(cmd)); 1559 nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
1560 1560
1561 if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) || 1561 if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) ||
1562 (_IOC_NR(cmd) == 0) || 1562 (_IOC_NR(cmd) == 0) ||
@@ -1855,7 +1855,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
1855 break; 1855 break;
1856 1856
1857 default: 1857 default:
1858 gk20a_dbg_info("unrecognized gpu ioctl cmd: 0x%x", cmd); 1858 nvgpu_log_info(g, "unrecognized gpu ioctl cmd: 0x%x", cmd);
1859 err = -ENOTTY; 1859 err = -ENOTTY;
1860 break; 1860 break;
1861 } 1861 }
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
index a53d1cfb..2aba2664 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -56,7 +56,7 @@ static int alloc_profiler(struct gk20a *g,
56 struct dbg_profiler_object_data *prof; 56 struct dbg_profiler_object_data *prof;
57 *_prof = NULL; 57 *_prof = NULL;
58 58
59 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 59 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
60 60
61 prof = nvgpu_kzalloc(g, sizeof(*prof)); 61 prof = nvgpu_kzalloc(g, sizeof(*prof));
62 if (!prof) 62 if (!prof)
@@ -72,7 +72,7 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_
72 struct dbg_session_gk20a_linux *dbg_s_linux; 72 struct dbg_session_gk20a_linux *dbg_s_linux;
73 *_dbg_s_linux = NULL; 73 *_dbg_s_linux = NULL;
74 74
75 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 75 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
76 76
77 dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux)); 77 dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
78 if (!dbg_s_linux) 78 if (!dbg_s_linux)
@@ -142,8 +142,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
142 unsigned int mask = 0; 142 unsigned int mask = 0;
143 struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data; 143 struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
144 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s; 144 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
145 struct gk20a *g = dbg_s->g;
145 146
146 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 147 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
147 148
148 poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait); 149 poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
149 150
@@ -151,9 +152,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
151 152
152 if (dbg_s->dbg_events.events_enabled && 153 if (dbg_s->dbg_events.events_enabled &&
153 dbg_s->dbg_events.num_pending_events > 0) { 154 dbg_s->dbg_events.num_pending_events > 0) {
154 gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d", 155 nvgpu_log(g, gpu_dbg_gpu_dbg, "found pending event on session id %d",
155 dbg_s->id); 156 dbg_s->id);
156 gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", 157 nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
157 dbg_s->dbg_events.num_pending_events); 158 dbg_s->dbg_events.num_pending_events);
158 mask = (POLLPRI | POLLIN); 159 mask = (POLLPRI | POLLIN);
159 } 160 }
@@ -170,7 +171,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
170 struct gk20a *g = dbg_s->g; 171 struct gk20a *g = dbg_s->g;
171 struct dbg_profiler_object_data *prof_obj, *tmp_obj; 172 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
172 173
173 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name); 174 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
174 175
175 /* unbind channels */ 176 /* unbind channels */
176 dbg_unbind_all_channels_gk20a(dbg_s); 177 dbg_unbind_all_channels_gk20a(dbg_s);
@@ -213,7 +214,11 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
213 214
214int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp) 215int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
215{ 216{
216 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 217 struct nvgpu_os_linux *l = container_of(inode->i_cdev,
218 struct nvgpu_os_linux, prof.cdev);
219 struct gk20a *g = &l->g;
220
221 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
217 return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */); 222 return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
218} 223}
219 224
@@ -223,7 +228,7 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
223 int err; 228 int err;
224 struct gk20a *g = dbg_s->g; 229 struct gk20a *g = dbg_s->g;
225 230
226 gk20a_dbg_fn("powergate mode = %d", args->enable); 231 nvgpu_log_fn(g, "powergate mode = %d", args->enable);
227 232
228 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 233 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
229 err = nvgpu_dbg_timeout_enable(dbg_s, args->enable); 234 err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
@@ -356,7 +361,9 @@ static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
356 struct dbg_session_gk20a *dbg_s, 361 struct dbg_session_gk20a *dbg_s,
357 struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args) 362 struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
358{ 363{
359 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 364 struct gk20a *g = dbg_s->g;
365
366 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
360 367
361 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 368 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
362 369
@@ -373,7 +380,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
373 struct gk20a *g = dbg_s->g; 380 struct gk20a *g = dbg_s->g;
374 int err = 0; 381 int err = 0;
375 382
376 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d", 383 nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
377 timeout_mode); 384 timeout_mode);
378 385
379 switch (timeout_mode) { 386 switch (timeout_mode) {
@@ -401,7 +408,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
401 break; 408 break;
402 } 409 }
403 410
404 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s", 411 nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
405 g->timeouts_enabled ? "Yes" : "No"); 412 g->timeouts_enabled ? "Yes" : "No");
406 413
407 return err; 414 return err;
@@ -431,7 +438,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
431 438
432 dev = dev_from_gk20a(g); 439 dev = dev_from_gk20a(g);
433 440
434 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name); 441 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
435 442
436 err = alloc_session(g, &dbg_session_linux); 443 err = alloc_session(g, &dbg_session_linux);
437 if (err) 444 if (err)
@@ -482,7 +489,7 @@ static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
482 struct dbg_profiler_object_data *prof_obj, *tmp_obj; 489 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
483 struct dbg_session_channel_data_linux *ch_data_linux; 490 struct dbg_session_channel_data_linux *ch_data_linux;
484 491
485 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 492 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
486 493
487 chid = ch_data->chid; 494 chid = ch_data->chid;
488 495
@@ -527,7 +534,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
527 struct dbg_session_data *session_data; 534 struct dbg_session_data *session_data;
528 int err = 0; 535 int err = 0;
529 536
530 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", 537 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
531 g->name, args->channel_fd); 538 g->name, args->channel_fd);
532 539
533 /* 540 /*
@@ -541,12 +548,12 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
541 548
542 ch = gk20a_get_channel_from_file(args->channel_fd); 549 ch = gk20a_get_channel_from_file(args->channel_fd);
543 if (!ch) { 550 if (!ch) {
544 gk20a_dbg_fn("no channel found for fd"); 551 nvgpu_log_fn(g, "no channel found for fd");
545 err = -EINVAL; 552 err = -EINVAL;
546 goto out_fput; 553 goto out_fput;
547 } 554 }
548 555
549 gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid); 556 nvgpu_log_fn(g, "%s hwchid=%d", g->name, ch->chid);
550 557
551 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 558 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
552 nvgpu_mutex_acquire(&ch->dbg_s_lock); 559 nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -818,7 +825,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
818 struct gk20a *g = dbg_s->g; 825 struct gk20a *g = dbg_s->g;
819 struct channel_gk20a *ch; 826 struct channel_gk20a *ch;
820 827
821 gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); 828 nvgpu_log_fn(g, "%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
822 829
823 if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) { 830 if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) {
824 nvgpu_err(g, "regops limit exceeded"); 831 nvgpu_err(g, "regops limit exceeded");
@@ -890,10 +897,10 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
890 (args->ops + 897 (args->ops +
891 ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op)); 898 ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op));
892 899
893 gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu", 900 nvgpu_log_fn(g, "Regops fragment: start_op=%llu ops=%llu",
894 ops_offset, num_ops); 901 ops_offset, num_ops);
895 902
896 gk20a_dbg_fn("Copying regops from userspace"); 903 nvgpu_log_fn(g, "Copying regops from userspace");
897 904
898 if (copy_from_user(linux_fragment, 905 if (copy_from_user(linux_fragment,
899 fragment, fragment_size)) { 906 fragment, fragment_size)) {
@@ -917,7 +924,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
917 if (err) 924 if (err)
918 break; 925 break;
919 926
920 gk20a_dbg_fn("Copying result to userspace"); 927 nvgpu_log_fn(g, "Copying result to userspace");
921 928
922 if (copy_to_user(fragment, linux_fragment, 929 if (copy_to_user(fragment, linux_fragment,
923 fragment_size)) { 930 fragment_size)) {
@@ -955,7 +962,7 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
955{ 962{
956 int err; 963 int err;
957 struct gk20a *g = dbg_s->g; 964 struct gk20a *g = dbg_s->g;
958 gk20a_dbg_fn("%s powergate mode = %d", 965 nvgpu_log_fn(g, "%s powergate mode = %d",
959 g->name, args->mode); 966 g->name, args->mode);
960 967
961 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 968 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -978,7 +985,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
978 struct gk20a *g = dbg_s->g; 985 struct gk20a *g = dbg_s->g;
979 struct channel_gk20a *ch_gk20a; 986 struct channel_gk20a *ch_gk20a;
980 987
981 gk20a_dbg_fn("%s smpc ctxsw mode = %d", 988 nvgpu_log_fn(g, "%s smpc ctxsw mode = %d",
982 g->name, args->mode); 989 g->name, args->mode);
983 990
984 err = gk20a_busy(g); 991 err = gk20a_busy(g);
@@ -1075,7 +1082,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
1075 struct channel_gk20a *ch; 1082 struct channel_gk20a *ch;
1076 int err = 0, action = args->mode; 1083 int err = 0, action = args->mode;
1077 1084
1078 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode); 1085 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
1079 1086
1080 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1087 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1081 if (!ch) 1088 if (!ch)
@@ -1127,7 +1134,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
1127 struct gk20a *g = get_gk20a(dbg_session_linux->dev); 1134 struct gk20a *g = get_gk20a(dbg_session_linux->dev);
1128 struct dbg_profiler_object_data *prof_obj; 1135 struct dbg_profiler_object_data *prof_obj;
1129 1136
1130 gk20a_dbg_fn("%s", g->name); 1137 nvgpu_log_fn(g, "%s", g->name);
1131 1138
1132 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1139 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1133 1140
@@ -1171,7 +1178,7 @@ static int nvgpu_ioctl_free_profiler_object(
1171 struct dbg_profiler_object_data *prof_obj, *tmp_obj; 1178 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
1172 bool obj_found = false; 1179 bool obj_found = false;
1173 1180
1174 gk20a_dbg_fn("%s session_id = %d profiler_handle = %x", 1181 nvgpu_log_fn(g, "%s session_id = %d profiler_handle = %x",
1175 g->name, dbg_s->id, args->profiler_handle); 1182 g->name, dbg_s->id, args->profiler_handle);
1176 1183
1177 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1184 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -1253,7 +1260,9 @@ static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_
1253 1260
1254static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s) 1261static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
1255{ 1262{
1256 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1263 struct gk20a *g = dbg_s->g;
1264
1265 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1257 1266
1258 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 1267 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1259 1268
@@ -1265,7 +1274,9 @@ static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
1265 1274
1266static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s) 1275static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
1267{ 1276{
1268 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1277 struct gk20a *g = dbg_s->g;
1278
1279 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1269 1280
1270 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 1281 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1271 1282
@@ -1277,7 +1288,9 @@ static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
1277 1288
1278static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s) 1289static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
1279{ 1290{
1280 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1291 struct gk20a *g = dbg_s->g;
1292
1293 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1281 1294
1282 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); 1295 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1283 1296
@@ -1294,13 +1307,13 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
1294{ 1307{
1295 int ret = 0; 1308 int ret = 0;
1296 struct channel_gk20a *ch; 1309 struct channel_gk20a *ch;
1310 struct gk20a *g = dbg_s->g;
1297 1311
1298 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd); 1312 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
1299 1313
1300 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1314 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1301 if (!ch) { 1315 if (!ch) {
1302 nvgpu_err(dbg_s->g, 1316 nvgpu_err(g, "no channel bound to dbg session");
1303 "no channel bound to dbg session");
1304 return -EINVAL; 1317 return -EINVAL;
1305 } 1318 }
1306 1319
@@ -1318,8 +1331,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
1318 break; 1331 break;
1319 1332
1320 default: 1333 default:
1321 nvgpu_err(dbg_s->g, 1334 nvgpu_err(g, "unrecognized dbg gpu events ctrl cmd: 0x%x",
1322 "unrecognized dbg gpu events ctrl cmd: 0x%x",
1323 args->cmd); 1335 args->cmd);
1324 ret = -EINVAL; 1336 ret = -EINVAL;
1325 break; 1337 break;
@@ -1422,7 +1434,7 @@ static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
1422 if (!ch) 1434 if (!ch)
1423 return -EINVAL; 1435 return -EINVAL;
1424 1436
1425 gk20a_dbg_fn(""); 1437 nvgpu_log_fn(g, " ");
1426 1438
1427 return g->ops.gr.update_pc_sampling ? 1439 return g->ops.gr.update_pc_sampling ?
1428 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL; 1440 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
@@ -1646,7 +1658,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
1646 struct dbg_profiler_object_data *prof_obj; 1658 struct dbg_profiler_object_data *prof_obj;
1647 int err = 0; 1659 int err = 0;
1648 1660
1649 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); 1661 nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
1650 1662
1651 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1663 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1652 1664
@@ -1678,7 +1690,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1678 struct dbg_profiler_object_data *prof_obj, *my_prof_obj; 1690 struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
1679 int err = 0; 1691 int err = 0;
1680 1692
1681 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); 1693 nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
1682 1694
1683 if (g->profiler_reservation_count < 0) { 1695 if (g->profiler_reservation_count < 0) {
1684 nvgpu_err(g, "Negative reservation count!"); 1696 nvgpu_err(g, "Negative reservation count!");
@@ -1782,12 +1794,12 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
1782 struct channel_gk20a *ch; 1794 struct channel_gk20a *ch;
1783 int err; 1795 int err;
1784 1796
1785 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", 1797 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
1786 g->name, args->channel_fd); 1798 g->name, args->channel_fd);
1787 1799
1788 ch = gk20a_get_channel_from_file(args->channel_fd); 1800 ch = gk20a_get_channel_from_file(args->channel_fd);
1789 if (!ch) { 1801 if (!ch) {
1790 gk20a_dbg_fn("no channel found for fd"); 1802 nvgpu_log_fn(g, "no channel found for fd");
1791 return -EINVAL; 1803 return -EINVAL;
1792 } 1804 }
1793 1805
@@ -1802,7 +1814,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
1802 nvgpu_mutex_release(&dbg_s->ch_list_lock); 1814 nvgpu_mutex_release(&dbg_s->ch_list_lock);
1803 1815
1804 if (!channel_found) { 1816 if (!channel_found) {
1805 gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd); 1817 nvgpu_log_fn(g, "channel not bounded, fd=%d\n", args->channel_fd);
1806 err = -EINVAL; 1818 err = -EINVAL;
1807 goto out; 1819 goto out;
1808 } 1820 }
@@ -1820,7 +1832,11 @@ out:
1820 1832
1821int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp) 1833int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
1822{ 1834{
1823 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1835 struct nvgpu_os_linux *l = container_of(inode->i_cdev,
1836 struct nvgpu_os_linux, dbg.cdev);
1837 struct gk20a *g = &l->g;
1838
1839 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1824 return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */); 1840 return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
1825} 1841}
1826 1842
@@ -1833,7 +1849,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1833 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE]; 1849 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
1834 int err = 0; 1850 int err = 0;
1835 1851
1836 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 1852 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
1837 1853
1838 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) || 1854 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
1839 (_IOC_NR(cmd) == 0) || 1855 (_IOC_NR(cmd) == 0) ||
@@ -1979,7 +1995,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1979 1995
1980 nvgpu_mutex_release(&dbg_s->ioctl_lock); 1996 nvgpu_mutex_release(&dbg_s->ioctl_lock);
1981 1997
1982 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); 1998 nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);
1983 1999
1984 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) 2000 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1985 err = copy_to_user((void __user *)arg, 2001 err = copy_to_user((void __user *)arg,
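
Note: the dev_open hunks above (gk20a_dbg_gpu_dev_open, gk20a_prof_gpu_dev_open) previously logged before any gk20a pointer existed; they now recover it with container_of() on inode->i_cdev, walking back from the embedded cdev to the enclosing struct nvgpu_os_linux. The standalone user-space toy below illustrates only that container_of idiom; the structure names are stand-ins, not nvgpu types.

    /* Standalone illustration of the container_of idiom used in the
     * dev_open hunks above; compiles as ordinary user-space C. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cdev_stub { int dummy; };

    struct os_ctx {                   /* stand-in for struct nvgpu_os_linux */
            const char *name;         /* stand-in for the embedded gk20a    */
            struct cdev_stub dbg_cdev;
    };

    int main(void)
    {
            struct os_ctx ctx = { .name = "gpu0" };
            struct cdev_stub *cdev = &ctx.dbg_cdev;  /* what open() receives */

            /* Recover the enclosing context from the member pointer. */
            struct os_ctx *owner = container_of(cdev, struct os_ctx, dbg_cdev);

            printf("owner = %s\n", owner->name);
            return 0;
    }
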
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
index be2315bd..d0bfd55a 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
@@ -175,6 +175,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
175 struct gk20a_event_id_data *event_id_data; 175 struct gk20a_event_id_data *event_id_data;
176 u32 event_id; 176 u32 event_id;
177 int err = 0; 177 int err = 0;
178 struct gk20a *g = tsg->g;
178 179
179 event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id); 180 event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
180 if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) 181 if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
@@ -187,7 +188,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
187 188
188 nvgpu_mutex_acquire(&event_id_data->lock); 189 nvgpu_mutex_acquire(&event_id_data->lock);
189 190
190 gk20a_dbg_info( 191 nvgpu_log_info(g,
191 "posting event for event_id=%d on tsg=%d\n", 192 "posting event for event_id=%d on tsg=%d\n",
192 event_id, tsg->tsgid); 193 event_id, tsg->tsgid);
193 event_id_data->event_posted = true; 194 event_id_data->event_posted = true;
@@ -205,14 +206,14 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
205 u32 event_id = event_id_data->event_id; 206 u32 event_id = event_id_data->event_id;
206 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id; 207 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
207 208
208 gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, ""); 209 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, " ");
209 210
210 poll_wait(filep, &event_id_data->event_id_wq.wq, wait); 211 poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
211 212
212 nvgpu_mutex_acquire(&event_id_data->lock); 213 nvgpu_mutex_acquire(&event_id_data->lock);
213 214
214 if (event_id_data->event_posted) { 215 if (event_id_data->event_posted) {
215 gk20a_dbg_info( 216 nvgpu_log_info(g,
216 "found pending event_id=%d on TSG=%d\n", 217 "found pending event_id=%d on TSG=%d\n",
217 event_id, tsg->tsgid); 218 event_id, tsg->tsgid);
218 mask = (POLLPRI | POLLIN); 219 mask = (POLLPRI | POLLIN);
@@ -363,7 +364,7 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
363 364
364 dev = dev_from_gk20a(g); 365 dev = dev_from_gk20a(g);
365 366
366 gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev)); 367 nvgpu_log(g, gpu_dbg_fn, "tsg: %s", dev_name(dev));
367 368
368 priv = nvgpu_kmalloc(g, sizeof(*priv)); 369 priv = nvgpu_kmalloc(g, sizeof(*priv));
369 if (!priv) { 370 if (!priv) {
@@ -397,12 +398,12 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
397 struct gk20a *g; 398 struct gk20a *g;
398 int ret; 399 int ret;
399 400
400 gk20a_dbg_fn("");
401
402 l = container_of(inode->i_cdev, 401 l = container_of(inode->i_cdev,
403 struct nvgpu_os_linux, tsg.cdev); 402 struct nvgpu_os_linux, tsg.cdev);
404 g = &l->g; 403 g = &l->g;
405 404
405 nvgpu_log_fn(g, " ");
406
406 ret = gk20a_busy(g); 407 ret = gk20a_busy(g);
407 if (ret) { 408 if (ret) {
408 nvgpu_err(g, "failed to power on, %d", ret); 409 nvgpu_err(g, "failed to power on, %d", ret);
@@ -412,7 +413,7 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
412 ret = nvgpu_ioctl_tsg_open(&l->g, filp); 413 ret = nvgpu_ioctl_tsg_open(&l->g, filp);
413 414
414 gk20a_idle(g); 415 gk20a_idle(g);
415 gk20a_dbg_fn("done"); 416 nvgpu_log_fn(g, "done");
416 return ret; 417 return ret;
417} 418}
418 419
@@ -445,7 +446,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
445 u32 level = arg->level; 446 u32 level = arg->level;
446 int err; 447 int err;
447 448
448 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 449 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
449 450
450 nvgpu_mutex_acquire(&sched->control_lock); 451 nvgpu_mutex_acquire(&sched->control_lock);
451 if (sched->control_locked) { 452 if (sched->control_locked) {
@@ -474,7 +475,7 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
474 struct gk20a_sched_ctrl *sched = &l->sched_ctrl; 475 struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
475 int err; 476 int err;
476 477
477 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 478 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
478 479
479 nvgpu_mutex_acquire(&sched->control_lock); 480 nvgpu_mutex_acquire(&sched->control_lock);
480 if (sched->control_locked) { 481 if (sched->control_locked) {
@@ -509,7 +510,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
509 u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE]; 510 u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
510 int err = 0; 511 int err = 0;
511 512
512 gk20a_dbg_fn("start %d", _IOC_NR(cmd)); 513 nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
513 514
514 if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) || 515 if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
515 (_IOC_NR(cmd) == 0) || 516 (_IOC_NR(cmd) == 0) ||
diff --git a/drivers/gpu/nvgpu/common/linux/log.c b/drivers/gpu/nvgpu/common/linux/log.c
index 4dc8f667..ca29e0f3 100644
--- a/drivers/gpu/nvgpu/common/linux/log.c
+++ b/drivers/gpu/nvgpu/common/linux/log.c
@@ -38,8 +38,6 @@
38 */ 38 */
39#define LOG_FMT "nvgpu: %s %33s:%-4d [%s] %s\n" 39#define LOG_FMT "nvgpu: %s %33s:%-4d [%s] %s\n"
40 40
41u64 nvgpu_dbg_mask = NVGPU_DEFAULT_DBG_MASK;
42
43static const char *log_types[] = { 41static const char *log_types[] = {
44 "ERR", 42 "ERR",
45 "WRN", 43 "WRN",
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c
index 34850013..f00b3cce 100644
--- a/drivers/gpu/nvgpu/common/linux/module.c
+++ b/drivers/gpu/nvgpu/common/linux/module.c
@@ -218,7 +218,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
218 struct gk20a_platform *platform = gk20a_get_platform(dev); 218 struct gk20a_platform *platform = gk20a_get_platform(dev);
219 int err; 219 int err;
220 220
221 gk20a_dbg_fn(""); 221 nvgpu_log_fn(g, " ");
222 222
223 if (g->power_on) 223 if (g->power_on)
224 return 0; 224 return 0;
@@ -331,7 +331,7 @@ static int gk20a_pm_prepare_poweroff(struct device *dev)
331 struct gk20a_platform *platform = gk20a_get_platform(dev); 331 struct gk20a_platform *platform = gk20a_get_platform(dev);
332 bool irqs_enabled; 332 bool irqs_enabled;
333 333
334 gk20a_dbg_fn(""); 334 nvgpu_log_fn(g, " ");
335 335
336 nvgpu_mutex_acquire(&g->poweroff_lock); 336 nvgpu_mutex_acquire(&g->poweroff_lock);
337 337
@@ -1013,7 +1013,7 @@ static int gk20a_pm_init(struct device *dev)
1013 struct gk20a *g = get_gk20a(dev); 1013 struct gk20a *g = get_gk20a(dev);
1014 int err = 0; 1014 int err = 0;
1015 1015
1016 gk20a_dbg_fn(""); 1016 nvgpu_log_fn(g, " ");
1017 1017
1018 /* Initialise pm runtime */ 1018 /* Initialise pm runtime */
1019 if (g->railgate_delay) { 1019 if (g->railgate_delay) {
@@ -1043,7 +1043,7 @@ void gk20a_driver_start_unload(struct gk20a *g)
1043{ 1043{
1044 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 1044 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
1045 1045
1046 gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n"); 1046 nvgpu_log(g, gpu_dbg_shutdown, "Driver is now going down!\n");
1047 1047
1048 down_write(&l->busy_lock); 1048 down_write(&l->busy_lock);
1049 __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); 1049 __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
@@ -1134,8 +1134,6 @@ static int gk20a_probe(struct platform_device *dev)
1134 return -ENODATA; 1134 return -ENODATA;
1135 } 1135 }
1136 1136
1137 gk20a_dbg_fn("");
1138
1139 platform_set_drvdata(dev, platform); 1137 platform_set_drvdata(dev, platform);
1140 1138
1141 if (gk20a_gpu_is_virtual(&dev->dev)) 1139 if (gk20a_gpu_is_virtual(&dev->dev))
@@ -1148,6 +1146,9 @@ static int gk20a_probe(struct platform_device *dev)
1148 } 1146 }
1149 1147
1150 gk20a = &l->g; 1148 gk20a = &l->g;
1149
1150 nvgpu_log_fn(gk20a, " ");
1151
1151 nvgpu_init_gk20a(gk20a); 1152 nvgpu_init_gk20a(gk20a);
1152 set_gk20a(dev, gk20a); 1153 set_gk20a(dev, gk20a);
1153 l->dev = &dev->dev; 1154 l->dev = &dev->dev;
@@ -1248,7 +1249,7 @@ int nvgpu_remove(struct device *dev, struct class *class)
1248 struct gk20a_platform *platform = gk20a_get_platform(dev); 1249 struct gk20a_platform *platform = gk20a_get_platform(dev);
1249 int err; 1250 int err;
1250 1251
1251 gk20a_dbg_fn(""); 1252 nvgpu_log_fn(g, " ");
1252 1253
1253 err = nvgpu_quiesce(g); 1254 err = nvgpu_quiesce(g);
1254 WARN(err, "gpu failed to idle during driver removal"); 1255 WARN(err, "gpu failed to idle during driver removal");
@@ -1288,7 +1289,7 @@ int nvgpu_remove(struct device *dev, struct class *class)
1288 if (platform->remove) 1289 if (platform->remove)
1289 platform->remove(dev); 1290 platform->remove(dev);
1290 1291
1291 gk20a_dbg_fn("removed"); 1292 nvgpu_log_fn(g, "removed");
1292 1293
1293 return err; 1294 return err;
1294} 1295}
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 3cac13ba..015295ba 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -140,7 +140,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
140 WARN_ON(!ptr); 140 WARN_ON(!ptr);
141 data = ptr[w]; 141 data = ptr[w];
142#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 142#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
143 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); 143 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
144#endif 144#endif
145 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { 145 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
146 u32 value; 146 u32 value;
@@ -177,7 +177,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
177 memcpy(dest, src, size); 177 memcpy(dest, src, size);
178#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 178#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
179 if (size) 179 if (size)
180 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", 180 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
181 src, *dest, size); 181 src, *dest, size);
182#endif 182#endif
183 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { 183 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
@@ -215,7 +215,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
215 215
216 WARN_ON(!ptr); 216 WARN_ON(!ptr);
217#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 217#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
218 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); 218 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
219#endif 219#endif
220 ptr[w] = data; 220 ptr[w] = data;
221 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { 221 } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
@@ -249,7 +249,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
249 WARN_ON(!mem->cpu_va); 249 WARN_ON(!mem->cpu_va);
250#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 250#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
251 if (size) 251 if (size)
252 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", 252 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
253 dest, *src, size); 253 dest, *src, size);
254#endif 254#endif
255 memcpy(dest, src, size); 255 memcpy(dest, src, size);
@@ -296,7 +296,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
296 WARN_ON(!mem->cpu_va); 296 WARN_ON(!mem->cpu_va);
297#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM 297#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
298 if (size) 298 if (size)
299 gk20a_dbg(gpu_dbg_mem, " %p = 0x%x [times %d]", 299 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x [times %d]",
300 dest, c, size); 300 dest, c, size);
301#endif 301#endif
302 memset(dest, c, size); 302 memset(dest, c, size);
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
index 82648ca3..5301b13d 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
@@ -551,6 +551,7 @@ static void gk20a_tegra_scale_init(struct device *dev)
551 struct gk20a_platform *platform = gk20a_get_platform(dev); 551 struct gk20a_platform *platform = gk20a_get_platform(dev);
552 struct gk20a_scale_profile *profile = platform->g->scale_profile; 552 struct gk20a_scale_profile *profile = platform->g->scale_profile;
553 struct gk20a_emc_params *emc_params; 553 struct gk20a_emc_params *emc_params;
554 struct gk20a *g = platform->g;
554 555
555 if (!profile) 556 if (!profile)
556 return; 557 return;
@@ -568,7 +569,7 @@ static void gk20a_tegra_scale_init(struct device *dev)
568#ifdef CONFIG_TEGRA_BWMGR 569#ifdef CONFIG_TEGRA_BWMGR
569 emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU); 570 emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU);
570 if (!emc_params->bwmgr_cl) { 571 if (!emc_params->bwmgr_cl) {
571 gk20a_dbg_info("%s Missing GPU BWMGR client\n", __func__); 572 nvgpu_log_info(g, "%s Missing GPU BWMGR client\n", __func__);
572 return; 573 return;
573 } 574 }
574#endif 575#endif
@@ -767,6 +768,7 @@ static int gk20a_tegra_probe(struct device *dev)
767 struct device_node *np = dev->of_node; 768 struct device_node *np = dev->of_node;
768 bool joint_xpu_rail = false; 769 bool joint_xpu_rail = false;
769 int ret; 770 int ret;
771 struct gk20a *g = platform->g;
770 772
771#ifdef CONFIG_COMMON_CLK 773#ifdef CONFIG_COMMON_CLK
772 /* DVFS is not guaranteed to be initialized at the time of probe on 774 /* DVFS is not guaranteed to be initialized at the time of probe on
@@ -775,13 +777,13 @@ static int gk20a_tegra_probe(struct device *dev)
775 if (!platform->gpu_rail) { 777 if (!platform->gpu_rail) {
776 platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME); 778 platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME);
777 if (!platform->gpu_rail) { 779 if (!platform->gpu_rail) {
778 gk20a_dbg_info("deferring probe no gpu_rail\n"); 780 nvgpu_log_info(g, "deferring probe no gpu_rail");
779 return -EPROBE_DEFER; 781 return -EPROBE_DEFER;
780 } 782 }
781 } 783 }
782 784
783 if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) { 785 if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) {
784 gk20a_dbg_info("deferring probe gpu_rail not ready\n"); 786 nvgpu_log_info(g, "deferring probe gpu_rail not ready");
785 return -EPROBE_DEFER; 787 return -EPROBE_DEFER;
786 } 788 }
787#endif 789#endif
@@ -798,7 +800,7 @@ static int gk20a_tegra_probe(struct device *dev)
798#endif 800#endif
799 801
800 if (joint_xpu_rail) { 802 if (joint_xpu_rail) {
801 gk20a_dbg_info("XPU rails are joint\n"); 803 nvgpu_log_info(g, "XPU rails are joint\n");
802 platform->g->can_railgate = false; 804 platform->g->can_railgate = false;
803 } 805 }
804 806
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
index 6e54d00b..08c5df0f 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
@@ -273,11 +273,11 @@ void gp10b_tegra_prescale(struct device *dev)
273 struct gk20a *g = get_gk20a(dev); 273 struct gk20a *g = get_gk20a(dev);
274 u32 avg = 0; 274 u32 avg = 0;
275 275
276 gk20a_dbg_fn(""); 276 nvgpu_log_fn(g, " ");
277 277
278 nvgpu_pmu_load_norm(g, &avg); 278 nvgpu_pmu_load_norm(g, &avg);
279 279
280 gk20a_dbg_fn("done"); 280 nvgpu_log_fn(g, "done");
281} 281}
282 282
283void gp10b_tegra_postscale(struct device *pdev, 283void gp10b_tegra_postscale(struct device *pdev,
@@ -288,7 +288,7 @@ void gp10b_tegra_postscale(struct device *pdev,
288 struct gk20a *g = get_gk20a(pdev); 288 struct gk20a *g = get_gk20a(pdev);
289 unsigned long emc_rate; 289 unsigned long emc_rate;
290 290
291 gk20a_dbg_fn(""); 291 nvgpu_log_fn(g, " ");
292 if (profile && !platform->is_railgated(pdev)) { 292 if (profile && !platform->is_railgated(pdev)) {
293 unsigned long emc_scale; 293 unsigned long emc_scale;
294 294
@@ -306,7 +306,7 @@ void gp10b_tegra_postscale(struct device *pdev,
306 (struct tegra_bwmgr_client *)profile->private_data, 306 (struct tegra_bwmgr_client *)profile->private_data,
307 emc_rate, TEGRA_BWMGR_SET_EMC_FLOOR); 307 emc_rate, TEGRA_BWMGR_SET_EMC_FLOOR);
308 } 308 }
309 gk20a_dbg_fn("done"); 309 nvgpu_log_fn(g, "done");
310} 310}
311 311
312long gp10b_round_clk_rate(struct device *dev, unsigned long rate) 312long gp10b_round_clk_rate(struct device *dev, unsigned long rate)
@@ -328,6 +328,7 @@ int gp10b_clk_get_freqs(struct device *dev,
328 unsigned long **freqs, int *num_freqs) 328 unsigned long **freqs, int *num_freqs)
329{ 329{
330 struct gk20a_platform *platform = gk20a_get_platform(dev); 330 struct gk20a_platform *platform = gk20a_get_platform(dev);
331 struct gk20a *g = platform->g;
331 unsigned long max_rate; 332 unsigned long max_rate;
332 unsigned long new_rate = 0, prev_rate = 0; 333 unsigned long new_rate = 0, prev_rate = 0;
333 int i = 0, freq_counter = 0; 334 int i = 0, freq_counter = 0;
@@ -358,7 +359,7 @@ int gp10b_clk_get_freqs(struct device *dev,
358 *freqs = gp10b_freq_table; 359 *freqs = gp10b_freq_table;
359 *num_freqs = freq_counter; 360 *num_freqs = freq_counter;
360 361
361 gk20a_dbg_info("min rate: %ld max rate: %ld num_of_freq %d\n", 362 nvgpu_log_info(g, "min rate: %ld max rate: %ld num_of_freq %d\n",
362 gp10b_freq_table[0], max_rate, *num_freqs); 363 gp10b_freq_table[0], max_rate, *num_freqs);
363 364
364 return 0; 365 return 0;
diff --git a/drivers/gpu/nvgpu/common/linux/sched.c b/drivers/gpu/nvgpu/common/linux/sched.c
index a7da020c..2ad5aabf 100644
--- a/drivers/gpu/nvgpu/common/linux/sched.c
+++ b/drivers/gpu/nvgpu/common/linux/sched.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -37,10 +37,11 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
37 size_t size, loff_t *off) 37 size_t size, loff_t *off)
38{ 38{
39 struct gk20a_sched_ctrl *sched = filp->private_data; 39 struct gk20a_sched_ctrl *sched = filp->private_data;
40 struct gk20a *g = sched->g;
40 struct nvgpu_sched_event_arg event = { 0 }; 41 struct nvgpu_sched_event_arg event = { 0 };
41 int err; 42 int err;
42 43
43 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, 44 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched,
44 "filp=%p buf=%p size=%zu", filp, buf, size); 45 "filp=%p buf=%p size=%zu", filp, buf, size);
45 46
46 if (size < sizeof(event)) 47 if (size < sizeof(event))
@@ -77,9 +78,10 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
77unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait) 78unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
78{ 79{
79 struct gk20a_sched_ctrl *sched = filp->private_data; 80 struct gk20a_sched_ctrl *sched = filp->private_data;
81 struct gk20a *g = sched->g;
80 unsigned int mask = 0; 82 unsigned int mask = 0;
81 83
82 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 84 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
83 85
84 nvgpu_mutex_acquire(&sched->status_lock); 86 nvgpu_mutex_acquire(&sched->status_lock);
85 poll_wait(filp, &sched->readout_wq.wq, wait); 87 poll_wait(filp, &sched->readout_wq.wq, wait);
@@ -93,7 +95,9 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
93static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched, 95static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
94 struct nvgpu_sched_get_tsgs_args *arg) 96 struct nvgpu_sched_get_tsgs_args *arg)
95{ 97{
96 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", 98 struct gk20a *g = sched->g;
99
100 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
97 arg->size, arg->buffer); 101 arg->size, arg->buffer);
98 102
99 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { 103 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -115,7 +119,9 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
115static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched, 119static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
116 struct nvgpu_sched_get_tsgs_args *arg) 120 struct nvgpu_sched_get_tsgs_args *arg)
117{ 121{
118 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", 122 struct gk20a *g = sched->g;
123
124 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
119 arg->size, arg->buffer); 125 arg->size, arg->buffer);
120 126
121 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { 127 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -139,7 +145,8 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
139static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, 145static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
140 struct nvgpu_sched_get_tsgs_by_pid_args *arg) 146 struct nvgpu_sched_get_tsgs_by_pid_args *arg)
141{ 147{
142 struct fifo_gk20a *f = &sched->g->fifo; 148 struct gk20a *g = sched->g;
149 struct fifo_gk20a *f = &g->fifo;
143 struct tsg_gk20a *tsg; 150 struct tsg_gk20a *tsg;
144 u64 *bitmap; 151 u64 *bitmap;
145 unsigned int tsgid; 152 unsigned int tsgid;
@@ -147,7 +154,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
147 pid_t tgid = (pid_t)arg->pid; 154 pid_t tgid = (pid_t)arg->pid;
148 int err = 0; 155 int err = 0;
149 156
150 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx", 157 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx",
151 (pid_t)arg->pid, arg->size, arg->buffer); 158 (pid_t)arg->pid, arg->size, arg->buffer);
152 159
153 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { 160 if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -186,7 +193,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
186 struct tsg_gk20a *tsg; 193 struct tsg_gk20a *tsg;
187 u32 tsgid = arg->tsgid; 194 u32 tsgid = arg->tsgid;
188 195
189 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 196 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
190 197
191 if (tsgid >= f->num_channels) 198 if (tsgid >= f->num_channels)
192 return -EINVAL; 199 return -EINVAL;
@@ -221,7 +228,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
221 u32 tsgid = arg->tsgid; 228 u32 tsgid = arg->tsgid;
222 int err; 229 int err;
223 230
224 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 231 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
225 232
226 if (tsgid >= f->num_channels) 233 if (tsgid >= f->num_channels)
227 return -EINVAL; 234 return -EINVAL;
@@ -256,7 +263,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
256 u32 tsgid = arg->tsgid; 263 u32 tsgid = arg->tsgid;
257 int err; 264 int err;
258 265
259 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 266 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
260 267
261 if (tsgid >= f->num_channels) 268 if (tsgid >= f->num_channels)
262 return -EINVAL; 269 return -EINVAL;
@@ -283,7 +290,9 @@ done:
283 290
284static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched) 291static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
285{ 292{
286 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 293 struct gk20a *g = sched->g;
294
295 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
287 296
288 nvgpu_mutex_acquire(&sched->control_lock); 297 nvgpu_mutex_acquire(&sched->control_lock);
289 sched->control_locked = true; 298 sched->control_locked = true;
@@ -293,7 +302,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
293 302
294static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched) 303static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
295{ 304{
296 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 305 struct gk20a *g = sched->g;
306
307 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
297 308
298 nvgpu_mutex_acquire(&sched->control_lock); 309 nvgpu_mutex_acquire(&sched->control_lock);
299 sched->control_locked = false; 310 sched->control_locked = false;
@@ -304,7 +315,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
304static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched, 315static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched,
305 struct nvgpu_sched_api_version_args *args) 316 struct nvgpu_sched_api_version_args *args)
306{ 317{
307 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); 318 struct gk20a *g = sched->g;
319
320 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
308 321
309 args->version = NVGPU_SCHED_API_VERSION; 322 args->version = NVGPU_SCHED_API_VERSION;
310 return 0; 323 return 0;
@@ -318,7 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
318 struct tsg_gk20a *tsg; 331 struct tsg_gk20a *tsg;
319 u32 tsgid = arg->tsgid; 332 u32 tsgid = arg->tsgid;
320 333
321 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 334 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
322 335
323 if (tsgid >= f->num_channels) 336 if (tsgid >= f->num_channels)
324 return -EINVAL; 337 return -EINVAL;
@@ -355,7 +368,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
355 struct tsg_gk20a *tsg; 368 struct tsg_gk20a *tsg;
356 u32 tsgid = arg->tsgid; 369 u32 tsgid = arg->tsgid;
357 370
358 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); 371 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
359 372
360 if (tsgid >= f->num_channels) 373 if (tsgid >= f->num_channels)
361 return -EINVAL; 374 return -EINVAL;
@@ -390,7 +403,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
390 return -ENODEV; 403 return -ENODEV;
391 sched = &l->sched_ctrl; 404 sched = &l->sched_ctrl;
392 405
393 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g); 406 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);
394 407
395 if (!sched->sw_ready) { 408 if (!sched->sw_ready) {
396 err = gk20a_busy(g); 409 err = gk20a_busy(g);
@@ -410,7 +423,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
410 memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size); 423 memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size);
411 424
412 filp->private_data = sched; 425 filp->private_data = sched;
413 gk20a_dbg(gpu_dbg_sched, "filp=%p sched=%p", filp, sched); 426 nvgpu_log(g, gpu_dbg_sched, "filp=%p sched=%p", filp, sched);
414 427
415free_ref: 428free_ref:
416 if (err) 429 if (err)
@@ -426,7 +439,7 @@ long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
426 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; 439 u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
427 int err = 0; 440 int err = 0;
428 441
429 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd)); 442 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd));
430 443
431 if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) || 444 if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) ||
432 (_IOC_NR(cmd) == 0) || 445 (_IOC_NR(cmd) == 0) ||
@@ -509,7 +522,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
509 struct tsg_gk20a *tsg; 522 struct tsg_gk20a *tsg;
510 unsigned int tsgid; 523 unsigned int tsgid;
511 524
512 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched); 525 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);
513 526
514 /* release any reference to TSGs */ 527 /* release any reference to TSGs */
515 for (tsgid = 0; tsgid < f->num_channels; tsgid++) { 528 for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
@@ -535,7 +548,7 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
535 struct gk20a_sched_ctrl *sched = &l->sched_ctrl; 548 struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
536 int err; 549 int err;
537 550
538 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 551 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
539 552
540 if (!sched->sw_ready) { 553 if (!sched->sw_ready) {
541 err = gk20a_busy(g); 554 err = gk20a_busy(g);
@@ -560,7 +573,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
560 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 573 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
561 struct gk20a_sched_ctrl *sched = &l->sched_ctrl; 574 struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
562 575
563 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); 576 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
564 577
565 nvgpu_mutex_acquire(&sched->status_lock); 578 nvgpu_mutex_acquire(&sched->status_lock);
566 NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap); 579 NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap);
@@ -592,7 +605,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
592 sched->bitmap_size = roundup(f->num_channels, 64) / 8; 605 sched->bitmap_size = roundup(f->num_channels, 64) / 8;
593 sched->status = 0; 606 sched->status = 0;
594 607
595 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu", 608 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
596 g, sched, sched->bitmap_size); 609 g, sched, sched->bitmap_size);
597 610
598 sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size); 611 sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
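The sched.c hunks above all follow one pattern: each gk20a_dbg*() call gains a struct gk20a argument, fetched from the object the function already holds (sched->g here), and empty format strings become " ". The sketch below is a self-contained, user-space illustration of that pattern only; struct gk20a, the log_mask field, and the nvgpu_log() macro body are simplified stand-ins for illustration, not the driver's real definitions.

#include <stdio.h>

struct gk20a { unsigned long log_mask; };
struct gk20a_sched_ctrl { struct gk20a *g; };

#define gpu_dbg_sched (1UL << 0)

/* stand-in for nvgpu_log(g, mask, ...): logs only if the mask is enabled on this instance */
#define nvgpu_log(g, mask, fmt, ...) \
	do { \
		if ((g)->log_mask & (mask)) \
			printf("[nvgpu] " fmt "\n", ##__VA_ARGS__); \
	} while (0)

static void sched_op(struct gk20a_sched_ctrl *sched, unsigned int tsgid)
{
	/* before: gk20a_dbg(gpu_dbg_sched, "tsgid=%u", tsgid); needed no g */
	struct gk20a *g = sched->g;	/* after: derive g from the owning object */

	nvgpu_log(g, gpu_dbg_sched, "tsgid=%u", tsgid);
}

int main(void)
{
	struct gk20a g = { .log_mask = gpu_dbg_sched };
	struct gk20a_sched_ctrl sched = { .g = &g };

	sched_op(&sched, 3);
	return 0;
}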
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c
index 0bd8e2bc..0858e6b1 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c
@@ -33,7 +33,7 @@ static unsigned long vgpu_clk_get_rate(struct gk20a *g, u32 api_domain)
33 int err; 33 int err;
34 unsigned long ret = 0; 34 unsigned long ret = 0;
35 35
36 gk20a_dbg_fn(""); 36 nvgpu_log_fn(g, " ");
37 37
38 switch (api_domain) { 38 switch (api_domain) {
39 case CTRL_CLK_DOMAIN_GPCCLK: 39 case CTRL_CLK_DOMAIN_GPCCLK:
@@ -65,7 +65,7 @@ static int vgpu_clk_set_rate(struct gk20a *g,
65 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; 65 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
66 int err = -EINVAL; 66 int err = -EINVAL;
67 67
68 gk20a_dbg_fn(""); 68 nvgpu_log_fn(g, " ");
69 69
70 switch (api_domain) { 70 switch (api_domain) {
71 case CTRL_CLK_DOMAIN_GPCCLK: 71 case CTRL_CLK_DOMAIN_GPCCLK:
@@ -121,7 +121,7 @@ int vgpu_clk_get_freqs(struct device *dev,
121 unsigned int i; 121 unsigned int i;
122 int err; 122 int err;
123 123
124 gk20a_dbg_fn(""); 124 nvgpu_log_fn(g, " ");
125 125
126 msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE; 126 msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE;
127 msg.handle = vgpu_get_handle(g); 127 msg.handle = vgpu_get_handle(g);
@@ -152,7 +152,7 @@ int vgpu_clk_cap_rate(struct device *dev, unsigned long rate)
152 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; 152 struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
153 int err = 0; 153 int err = 0;
154 154
155 gk20a_dbg_fn(""); 155 nvgpu_log_fn(g, " ");
156 156
157 msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE; 157 msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE;
158 msg.handle = vgpu_get_handle(g); 158 msg.handle = vgpu_get_handle(g);
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c
index fe9dc670..ba2bf58b 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c
@@ -86,7 +86,7 @@ static int vgpu_css_init_snapshot_buffer(struct gr_gk20a *gr)
86 int err; 86 int err;
87 u64 size; 87 u64 size;
88 88
89 gk20a_dbg_fn(""); 89 nvgpu_log_fn(g, " ");
90 90
91 if (data->hw_snapshot) 91 if (data->hw_snapshot)
92 return 0; 92 return 0;
@@ -125,6 +125,7 @@ fail:
125void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr) 125void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
126{ 126{
127 struct gk20a_cs_snapshot *data = gr->cs_data; 127 struct gk20a_cs_snapshot *data = gr->cs_data;
128 struct gk20a *g = gr->g;
128 129
129 if (!data->hw_snapshot) 130 if (!data->hw_snapshot)
130 return; 131 return;
@@ -135,7 +136,7 @@ void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr)
135 vgpu_ivm_mempool_unreserve(css_cookie); 136 vgpu_ivm_mempool_unreserve(css_cookie);
136 css_cookie = NULL; 137 css_cookie = NULL;
137 138
138 gk20a_dbg_info("cyclestats(vgpu): buffer for snapshots released\n"); 139 nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n");
139} 140}
140 141
141int vgpu_css_flush_snapshots(struct channel_gk20a *ch, 142int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
@@ -148,7 +149,7 @@ int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
148 struct gk20a_cs_snapshot *data = gr->cs_data; 149 struct gk20a_cs_snapshot *data = gr->cs_data;
149 int err; 150 int err;
150 151
151 gk20a_dbg_fn(""); 152 nvgpu_log_fn(g, " ");
152 153
153 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; 154 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
154 msg.handle = vgpu_get_handle(g); 155 msg.handle = vgpu_get_handle(g);
@@ -176,7 +177,7 @@ static int vgpu_css_attach(struct channel_gk20a *ch,
176 &msg.params.cyclestats_snapshot; 177 &msg.params.cyclestats_snapshot;
177 int err; 178 int err;
178 179
179 gk20a_dbg_fn(""); 180 nvgpu_log_fn(g, " ");
180 181
181 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; 182 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
182 msg.handle = vgpu_get_handle(g); 183 msg.handle = vgpu_get_handle(g);
@@ -203,7 +204,7 @@ int vgpu_css_detach(struct channel_gk20a *ch,
203 &msg.params.cyclestats_snapshot; 204 &msg.params.cyclestats_snapshot;
204 int err; 205 int err;
205 206
206 gk20a_dbg_fn(""); 207 nvgpu_log_fn(g, " ");
207 208
208 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; 209 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT;
209 msg.handle = vgpu_get_handle(g); 210 msg.handle = vgpu_get_handle(g);
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c
index 31d89853..499a8eb4 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c
@@ -46,7 +46,7 @@ int vgpu_fecs_trace_init(struct gk20a *g)
46 u32 mempool; 46 u32 mempool;
47 int err; 47 int err;
48 48
49 gk20a_dbg_fn(""); 49 nvgpu_log_fn(g, " ");
50 50
51 vcst = nvgpu_kzalloc(g, sizeof(*vcst)); 51 vcst = nvgpu_kzalloc(g, sizeof(*vcst));
52 if (!vcst) 52 if (!vcst)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c
index c3d95b4a..5d3598b5 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c
@@ -142,7 +142,7 @@ int vgpu_pm_prepare_poweroff(struct device *dev)
142 struct gk20a *g = get_gk20a(dev); 142 struct gk20a *g = get_gk20a(dev);
143 int ret = 0; 143 int ret = 0;
144 144
145 gk20a_dbg_fn(""); 145 nvgpu_log_fn(g, " ");
146 146
147 if (!g->power_on) 147 if (!g->power_on)
148 return 0; 148 return 0;
@@ -162,7 +162,7 @@ int vgpu_pm_finalize_poweron(struct device *dev)
162 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 162 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
163 int err; 163 int err;
164 164
165 gk20a_dbg_fn(""); 165 nvgpu_log_fn(g, " ");
166 166
167 if (g->power_on) 167 if (g->power_on)
168 return 0; 168 return 0;
@@ -227,7 +227,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
227 u32 max_freq; 227 u32 max_freq;
228 int err; 228 int err;
229 229
230 gk20a_dbg_fn(""); 230 nvgpu_log_fn(g, " ");
231 231
232 max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS); 232 max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
233 err = vgpu_clk_cap_rate(profile->dev, max_freq); 233 err = vgpu_clk_cap_rate(profile->dev, max_freq);
@@ -277,7 +277,7 @@ static int vgpu_pm_init(struct device *dev)
277 int num_freqs; 277 int num_freqs;
278 int err = 0; 278 int err = 0;
279 279
280 gk20a_dbg_fn(""); 280 nvgpu_log_fn(g, " ");
281 281
282 if (nvgpu_platform_is_simulation(g)) 282 if (nvgpu_platform_is_simulation(g))
283 return 0; 283 return 0;
@@ -321,14 +321,15 @@ int vgpu_probe(struct platform_device *pdev)
321 return -ENODATA; 321 return -ENODATA;
322 } 322 }
323 323
324 gk20a_dbg_fn("");
325
326 l = kzalloc(sizeof(*l), GFP_KERNEL); 324 l = kzalloc(sizeof(*l), GFP_KERNEL);
327 if (!l) { 325 if (!l) {
328 dev_err(dev, "couldn't allocate gk20a support"); 326 dev_err(dev, "couldn't allocate gk20a support");
329 return -ENOMEM; 327 return -ENOMEM;
330 } 328 }
331 gk20a = &l->g; 329 gk20a = &l->g;
330
331 nvgpu_log_fn(gk20a, " ");
332
332 nvgpu_init_gk20a(gk20a); 333 nvgpu_init_gk20a(gk20a);
333 334
334 nvgpu_kmem_init(gk20a); 335 nvgpu_kmem_init(gk20a);
@@ -428,7 +429,7 @@ int vgpu_probe(struct platform_device *pdev)
428 vgpu_create_sysfs(dev); 429 vgpu_create_sysfs(dev);
429 gk20a_init_gr(gk20a); 430 gk20a_init_gr(gk20a);
430 431
431 gk20a_dbg_info("total ram pages : %lu", totalram_pages); 432 nvgpu_log_info(gk20a, "total ram pages : %lu", totalram_pages);
432 gk20a->gr.max_comptag_mem = totalram_pages 433 gk20a->gr.max_comptag_mem = totalram_pages
433 >> (10 - (PAGE_SHIFT - 10)); 434 >> (10 - (PAGE_SHIFT - 10));
434 435
@@ -442,7 +443,7 @@ int vgpu_remove(struct platform_device *pdev)
442 struct device *dev = &pdev->dev; 443 struct device *dev = &pdev->dev;
443 struct gk20a *g = get_gk20a(dev); 444 struct gk20a *g = get_gk20a(dev);
444 445
445 gk20a_dbg_fn(""); 446 nvgpu_log_fn(g, " ");
446 447
447 vgpu_pm_qos_remove(dev); 448 vgpu_pm_qos_remove(dev);
448 if (g->remove_support) 449 if (g->remove_support)
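One hunk in vgpu_linux.c above does more than substitute the call: in vgpu_probe() the entry trace moves below the allocation of struct nvgpu_os_linux, because nvgpu_log_fn() dereferences its gk20a argument and that pointer is only valid once l exists. A rough stand-alone sketch of the ordering constraint follows; the types and the macro are simplified stand-ins, not the driver's own.

#include <stdio.h>
#include <stdlib.h>

struct gk20a { int log_enabled; };
struct nvgpu_os_linux { struct gk20a g; };

/* stand-in for nvgpu_log_fn(g, ...): a valid g is required before any trace */
#define nvgpu_log_fn(g, fmt, ...) \
	do { \
		if ((g)->log_enabled) \
			printf("%s: " fmt "\n", __func__, ##__VA_ARGS__); \
	} while (0)

static int probe_sketch(void)
{
	struct nvgpu_os_linux *l;
	struct gk20a *gk20a;

	/* the old gk20a_dbg_fn("") could run here, before any allocation */
	l = calloc(1, sizeof(*l));
	if (!l)
		return -1;	/* the driver returns -ENOMEM at this point */

	gk20a = &l->g;
	gk20a->log_enabled = 1;

	nvgpu_log_fn(gk20a, " ");	/* trace only after gk20a points at real storage */

	free(l);
	return 0;
}

int main(void)
{
	return probe_sketch();
}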
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 75572b93..baa77515 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -88,8 +88,9 @@ int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
88 u64 *offset) 88 u64 *offset)
89{ 89{
90 struct nvgpu_mapped_buf *mapped_buffer; 90 struct nvgpu_mapped_buf *mapped_buffer;
91 struct gk20a *g = gk20a_from_vm(vm);
91 92
92 gk20a_dbg_fn("gpu_va=0x%llx", gpu_va); 93 nvgpu_log_fn(g, "gpu_va=0x%llx", gpu_va);
93 94
94 nvgpu_mutex_acquire(&vm->update_gmmu_lock); 95 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
95 96
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 0fb423b6..37435f97 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -394,7 +394,7 @@ int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)
394{ 394{
395 struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator; 395 struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator;
396 396
397 gk20a_dbg_fn(""); 397 nvgpu_log_fn(g, " ");
398 398
399 if (!nvgpu_alloc_initialized(allocator)) 399 if (!nvgpu_alloc_initialized(allocator))
400 return -ENOSYS; 400 return -ENOSYS;
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
index 52c0a798..01f9262c 100644
--- a/drivers/gpu/nvgpu/common/vbios/bios.c
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -296,7 +296,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
296 struct pci_ext_data_struct *pci_ext_data; 296 struct pci_ext_data_struct *pci_ext_data;
297 297
298 pci_rom = (struct pci_exp_rom *)&g->bios.data[offset]; 298 pci_rom = (struct pci_exp_rom *)&g->bios.data[offset];
299 gk20a_dbg_fn("pci rom sig %04x ptr %04x block %x", 299 nvgpu_log_fn(g, "pci rom sig %04x ptr %04x block %x",
300 pci_rom->sig, pci_rom->pci_data_struct_ptr, 300 pci_rom->sig, pci_rom->pci_data_struct_ptr,
301 pci_rom->size_of_block); 301 pci_rom->size_of_block);
302 302
@@ -309,7 +309,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
309 pci_data = 309 pci_data =
310 (struct pci_data_struct *) 310 (struct pci_data_struct *)
311 &g->bios.data[offset + pci_rom->pci_data_struct_ptr]; 311 &g->bios.data[offset + pci_rom->pci_data_struct_ptr];
312 gk20a_dbg_fn("pci data sig %08x len %d image len %x type %x last %d max %08x", 312 nvgpu_log_fn(g, "pci data sig %08x len %d image len %x type %x last %d max %08x",
313 pci_data->sig, pci_data->pci_data_struct_len, 313 pci_data->sig, pci_data->pci_data_struct_len,
314 pci_data->image_len, pci_data->code_type, 314 pci_data->image_len, pci_data->code_type,
315 pci_data->last_image, 315 pci_data->last_image,
@@ -322,7 +322,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
322 pci_data->pci_data_struct_len + 322 pci_data->pci_data_struct_len +
323 0xf) 323 0xf)
324 & ~0xf]; 324 & ~0xf];
325 gk20a_dbg_fn("pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x", 325 nvgpu_log_fn(g, "pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x",
326 pci_ext_data->sig, 326 pci_ext_data->sig,
327 pci_ext_data->nv_pci_data_ext_rev, 327 pci_ext_data->nv_pci_data_ext_rev,
328 pci_ext_data->nv_pci_data_ext_len, 328 pci_ext_data->nv_pci_data_ext_len,
@@ -330,7 +330,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
330 pci_ext_data->priv_last_image, 330 pci_ext_data->priv_last_image,
331 pci_ext_data->flags); 331 pci_ext_data->flags);
332 332
333 gk20a_dbg_fn("expansion rom offset %x", 333 nvgpu_log_fn(g, "expansion rom offset %x",
334 pci_data->image_len * 512); 334 pci_data->image_len * 512);
335 g->bios.expansion_rom_offset = 335 g->bios.expansion_rom_offset =
336 pci_data->image_len * 512; 336 pci_data->image_len * 512;
@@ -342,7 +342,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
342 } 342 }
343 } 343 }
344 344
345 gk20a_dbg_info("read bios"); 345 nvgpu_log_info(g, "read bios");
346 for (i = 0; i < g->bios.size - 6; i++) { 346 for (i = 0; i < g->bios.size - 6; i++) {
347 if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID && 347 if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID &&
348 nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) { 348 nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) {
@@ -362,7 +362,7 @@ static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
362 struct biosdata biosdata; 362 struct biosdata biosdata;
363 363
364 memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata)); 364 memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata));
365 gk20a_dbg_fn("bios version %x, oem version %x", 365 nvgpu_log_fn(g, "bios version %x, oem version %x",
366 biosdata.version, 366 biosdata.version,
367 biosdata.oem_version); 367 biosdata.oem_version);
368 368
@@ -375,9 +375,9 @@ static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, int offset)
375 struct nvinit_ptrs nvinit_ptrs; 375 struct nvinit_ptrs nvinit_ptrs;
376 376
377 memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs)); 377 memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs));
378 gk20a_dbg_fn("devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr, 378 nvgpu_log_fn(g, "devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr,
379 nvinit_ptrs.devinit_tables_size); 379 nvinit_ptrs.devinit_tables_size);
380 gk20a_dbg_fn("bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr, 380 nvgpu_log_fn(g, "bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr,
381 nvinit_ptrs.bootscripts_size); 381 nvinit_ptrs.bootscripts_size);
382 382
383 g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr]; 383 g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr];
@@ -449,7 +449,7 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
449 struct devinit_engine_interface interface; 449 struct devinit_engine_interface interface;
450 450
451 memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface)); 451 memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface));
452 gk20a_dbg_fn("devinit version %x tables phys %x script phys %x size %d", 452 nvgpu_log_fn(g, "devinit version %x tables phys %x script phys %x size %d",
453 interface.version, 453 interface.version,
454 interface.tables_phys_base, 454 interface.tables_phys_base,
455 interface.script_phys_base, 455 interface.script_phys_base,
@@ -468,7 +468,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
468 468
469 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); 469 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
470 470
471 gk20a_dbg_fn("appInfoHdr ver %d size %d entrySize %d entryCount %d", 471 nvgpu_log_fn(g, "appInfoHdr ver %d size %d entrySize %d entryCount %d",
472 hdr.version, hdr.header_size, 472 hdr.version, hdr.header_size,
473 hdr.entry_size, hdr.entry_count); 473 hdr.entry_size, hdr.entry_count);
474 474
@@ -481,7 +481,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
481 481
482 memcpy(&entry, &g->bios.data[offset], sizeof(entry)); 482 memcpy(&entry, &g->bios.data[offset], sizeof(entry));
483 483
484 gk20a_dbg_fn("appInfo id %d dmem_offset %d", 484 nvgpu_log_fn(g, "appInfo id %d dmem_offset %d",
485 entry.id, entry.dmem_offset); 485 entry.id, entry.dmem_offset);
486 486
487 if (entry.id == APPINFO_ID_DEVINIT) 487 if (entry.id == APPINFO_ID_DEVINIT)
@@ -530,26 +530,26 @@ static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
530 memcpy(&desc, &udesc, sizeof(udesc.v2)); 530 memcpy(&desc, &udesc, sizeof(udesc.v2));
531 break; 531 break;
532 default: 532 default:
533 gk20a_dbg_info("invalid version"); 533 nvgpu_log_info(g, "invalid version");
534 return -EINVAL; 534 return -EINVAL;
535 } 535 }
536 536
537 gk20a_dbg_info("falcon ucode desc version %x len %x", version, desc_size); 537 nvgpu_log_info(g, "falcon ucode desc version %x len %x", version, desc_size);
538 538
539 gk20a_dbg_info("falcon ucode desc stored size %x uncompressed size %x", 539 nvgpu_log_info(g, "falcon ucode desc stored size %x uncompressed size %x",
540 desc.stored_size, desc.uncompressed_size); 540 desc.stored_size, desc.uncompressed_size);
541 gk20a_dbg_info("falcon ucode desc virtualEntry %x, interfaceOffset %x", 541 nvgpu_log_info(g, "falcon ucode desc virtualEntry %x, interfaceOffset %x",
542 desc.virtual_entry, desc.interface_offset); 542 desc.virtual_entry, desc.interface_offset);
543 gk20a_dbg_info("falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x", 543 nvgpu_log_info(g, "falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x",
544 desc.imem_phys_base, desc.imem_load_size, 544 desc.imem_phys_base, desc.imem_load_size,
545 desc.imem_virt_base, desc.imem_sec_base, 545 desc.imem_virt_base, desc.imem_sec_base,
546 desc.imem_sec_size); 546 desc.imem_sec_size);
547 gk20a_dbg_info("falcon ucode DMEM offset %x phys base %x, load size %x", 547 nvgpu_log_info(g, "falcon ucode DMEM offset %x phys base %x, load size %x",
548 desc.dmem_offset, desc.dmem_phys_base, 548 desc.dmem_offset, desc.dmem_phys_base,
549 desc.dmem_load_size); 549 desc.dmem_load_size);
550 550
551 if (desc.stored_size != desc.uncompressed_size) { 551 if (desc.stored_size != desc.uncompressed_size) {
552 gk20a_dbg_info("does not match"); 552 nvgpu_log_info(g, "does not match");
553 return -EINVAL; 553 return -EINVAL;
554 } 554 }
555 555
@@ -575,7 +575,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
575 int i; 575 int i;
576 576
577 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); 577 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
578 gk20a_dbg_fn("falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d", 578 nvgpu_log_fn(g, "falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d",
579 hdr.version, hdr.header_size, 579 hdr.version, hdr.header_size,
580 hdr.entry_size, hdr.entry_count, 580 hdr.entry_size, hdr.entry_count,
581 hdr.desc_version, hdr.desc_size); 581 hdr.desc_version, hdr.desc_size);
@@ -590,7 +590,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
590 590
591 memcpy(&entry, &g->bios.data[offset], sizeof(entry)); 591 memcpy(&entry, &g->bios.data[offset], sizeof(entry));
592 592
593 gk20a_dbg_fn("falcon ucode table entry appid %x targetId %x descPtr %x", 593 nvgpu_log_fn(g, "falcon ucode table entry appid %x targetId %x descPtr %x",
594 entry.application_id, entry.target_id, 594 entry.application_id, entry.target_id,
595 entry.desc_ptr); 595 entry.desc_ptr);
596 596
@@ -638,7 +638,7 @@ static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
638 int err; 638 int err;
639 639
640 memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data)); 640 memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data));
641 gk20a_dbg_fn("falcon ucode table ptr %x", 641 nvgpu_log_fn(g, "falcon ucode table ptr %x",
642 falcon_data.falcon_ucode_table_ptr); 642 falcon_data.falcon_ucode_table_ptr);
643 err = nvgpu_bios_parse_falcon_ucode_table(g, 643 err = nvgpu_bios_parse_falcon_ucode_table(g,
644 falcon_data.falcon_ucode_table_ptr); 644 falcon_data.falcon_ucode_table_ptr);
@@ -676,7 +676,7 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
676 676
677 if (table_id < (ptoken->data_size/data_size)) { 677 if (table_id < (ptoken->data_size/data_size)) {
678 678
679 gk20a_dbg_info("Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x", 679 nvgpu_log_info(g, "Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x",
680 (ptoken->data_ptr + 680 (ptoken->data_ptr +
681 (table_id * data_size)), 681 (table_id * data_size)),
682 perf_table_id_offset); 682 perf_table_id_offset);
@@ -705,18 +705,18 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
705 struct bit_token bit_token; 705 struct bit_token bit_token;
706 int i; 706 int i;
707 707
708 gk20a_dbg_fn(""); 708 nvgpu_log_fn(g, " ");
709 memcpy(&bit, &g->bios.data[offset], sizeof(bit)); 709 memcpy(&bit, &g->bios.data[offset], sizeof(bit));
710 710
711 gk20a_dbg_info("BIT header: %04x %08x", bit.id, bit.signature); 711 nvgpu_log_info(g, "BIT header: %04x %08x", bit.id, bit.signature);
712 gk20a_dbg_info("tokens: %d entries * %d bytes", 712 nvgpu_log_info(g, "tokens: %d entries * %d bytes",
713 bit.token_entries, bit.token_size); 713 bit.token_entries, bit.token_size);
714 714
715 offset += bit.header_size; 715 offset += bit.header_size;
716 for (i = 0; i < bit.token_entries; i++) { 716 for (i = 0; i < bit.token_entries; i++) {
717 memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token)); 717 memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token));
718 718
719 gk20a_dbg_info("BIT token id %d ptr %d size %d ver %d", 719 nvgpu_log_info(g, "BIT token id %d ptr %d size %d ver %d",
720 bit_token.token_id, bit_token.data_ptr, 720 bit_token.token_id, bit_token.data_ptr,
721 bit_token.data_size, bit_token.data_version); 721 bit_token.data_size, bit_token.data_version);
722 722
@@ -753,7 +753,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
753 753
754 offset += bit.token_size; 754 offset += bit.token_size;
755 } 755 }
756 gk20a_dbg_fn("done"); 756 nvgpu_log_fn(g, "done");
757} 757}
758 758
759static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset) 759static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset)
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 0280bbbb..086d4e7b 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -50,21 +50,21 @@
50 50
51static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr) 51static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr)
52{ 52{
53 gk20a_dbg(gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n"); 53 nvgpu_log(g, gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n");
54 54
55 return ce2_intr_status_nonblockpipe_pending_f(); 55 return ce2_intr_status_nonblockpipe_pending_f();
56} 56}
57 57
58static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr) 58static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr)
59{ 59{
60 gk20a_dbg(gpu_dbg_intr, "ce2 blocking pipe interrupt\n"); 60 nvgpu_log(g, gpu_dbg_intr, "ce2 blocking pipe interrupt\n");
61 61
62 return ce2_intr_status_blockpipe_pending_f(); 62 return ce2_intr_status_blockpipe_pending_f();
63} 63}
64 64
65static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr) 65static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr)
66{ 66{
67 gk20a_dbg(gpu_dbg_intr, "ce2 launch error interrupt\n"); 67 nvgpu_log(g, gpu_dbg_intr, "ce2 launch error interrupt\n");
68 68
69 return ce2_intr_status_launcherr_pending_f(); 69 return ce2_intr_status_launcherr_pending_f();
70} 70}
@@ -74,7 +74,7 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
74 u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); 74 u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());
75 u32 clear_intr = 0; 75 u32 clear_intr = 0;
76 76
77 gk20a_dbg(gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr); 77 nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);
78 78
79 /* clear blocking interrupts: they exibit broken behavior */ 79 /* clear blocking interrupts: they exibit broken behavior */
80 if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) 80 if (ce2_intr & ce2_intr_status_blockpipe_pending_f())
@@ -92,7 +92,7 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
92 int ops = 0; 92 int ops = 0;
93 u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); 93 u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r());
94 94
95 gk20a_dbg(gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr); 95 nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr);
96 96
97 if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) { 97 if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) {
98 gk20a_writel(g, ce2_intr_status_r(), 98 gk20a_writel(g, ce2_intr_status_r(),
@@ -340,7 +340,7 @@ int gk20a_init_ce_support(struct gk20a *g)
340 return 0; 340 return 0;
341 } 341 }
342 342
343 gk20a_dbg(gpu_dbg_fn, "ce: init"); 343 nvgpu_log(g, gpu_dbg_fn, "ce: init");
344 344
345 err = nvgpu_mutex_init(&ce_app->app_mutex); 345 err = nvgpu_mutex_init(&ce_app->app_mutex);
346 if (err) 346 if (err)
@@ -355,7 +355,7 @@ int gk20a_init_ce_support(struct gk20a *g)
355 ce_app->app_state = NVGPU_CE_ACTIVE; 355 ce_app->app_state = NVGPU_CE_ACTIVE;
356 356
357 nvgpu_mutex_release(&ce_app->app_mutex); 357 nvgpu_mutex_release(&ce_app->app_mutex);
358 gk20a_dbg(gpu_dbg_cde_ctx, "ce: init finished"); 358 nvgpu_log(g, gpu_dbg_cde_ctx, "ce: init finished");
359 359
360 return 0; 360 return 0;
361} 361}
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index e65ed278..21abdf9a 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -116,7 +116,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
116{ 116{
117 struct gk20a *g = c->g; 117 struct gk20a *g = c->g;
118 118
119 gk20a_dbg_fn(""); 119 nvgpu_log_fn(g, " ");
120 120
121 g->ops.mm.init_inst_block(&c->inst_block, c->vm, 121 g->ops.mm.init_inst_block(&c->inst_block, c->vm,
122 c->vm->gmmu_page_sizes[gmmu_page_size_big]); 122 c->vm->gmmu_page_sizes[gmmu_page_size_big]);
@@ -208,7 +208,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
208 208
209void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt) 209void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
210{ 210{
211 gk20a_dbg_fn(""); 211 nvgpu_log_fn(ch->g, " ");
212 212
213 if (gk20a_is_channel_marked_as_tsg(ch)) 213 if (gk20a_is_channel_marked_as_tsg(ch))
214 return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt); 214 return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt);
@@ -291,7 +291,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
291 struct dbg_session_channel_data *ch_data, *tmp; 291 struct dbg_session_channel_data *ch_data, *tmp;
292 int err; 292 int err;
293 293
294 gk20a_dbg_fn(""); 294 nvgpu_log_fn(g, " ");
295 295
296 WARN_ON(ch->g == NULL); 296 WARN_ON(ch->g == NULL);
297 297
@@ -351,7 +351,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
351 /* if engine reset was deferred, perform it now */ 351 /* if engine reset was deferred, perform it now */
352 nvgpu_mutex_acquire(&f->deferred_reset_mutex); 352 nvgpu_mutex_acquire(&f->deferred_reset_mutex);
353 if (g->fifo.deferred_reset_pending) { 353 if (g->fifo.deferred_reset_pending) {
354 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was" 354 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
355 " deferred, running now"); 355 " deferred, running now");
356 /* if lock is already taken, a reset is taking place 356 /* if lock is already taken, a reset is taking place
357 so no need to repeat */ 357 so no need to repeat */
@@ -365,7 +365,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
365 if (!gk20a_channel_as_bound(ch)) 365 if (!gk20a_channel_as_bound(ch))
366 goto unbind; 366 goto unbind;
367 367
368 gk20a_dbg_info("freeing bound channel context, timeout=%ld", 368 nvgpu_log_info(g, "freeing bound channel context, timeout=%ld",
369 timeout); 369 timeout);
370 370
371#ifdef CONFIG_GK20A_CTXSW_TRACE 371#ifdef CONFIG_GK20A_CTXSW_TRACE
@@ -626,7 +626,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
626 runlist_id = gk20a_fifo_get_gr_runlist_id(g); 626 runlist_id = gk20a_fifo_get_gr_runlist_id(g);
627 } 627 }
628 628
629 gk20a_dbg_fn(""); 629 nvgpu_log_fn(g, " ");
630 630
631 ch = allocate_channel(f); 631 ch = allocate_channel(f);
632 if (ch == NULL) { 632 if (ch == NULL) {
@@ -765,7 +765,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
765 u32 free_count; 765 u32 free_count;
766 u32 size = orig_size; 766 u32 size = orig_size;
767 767
768 gk20a_dbg_fn("size %d", orig_size); 768 nvgpu_log_fn(c->g, "size %d", orig_size);
769 769
770 if (!e) { 770 if (!e) {
771 nvgpu_err(c->g, 771 nvgpu_err(c->g,
@@ -779,7 +779,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
779 if (q->put + size > q->size) 779 if (q->put + size > q->size)
780 size = orig_size + (q->size - q->put); 780 size = orig_size + (q->size - q->put);
781 781
782 gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d", 782 nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d",
783 c->chid, q->get, q->put); 783 c->chid, q->get, q->put);
784 784
785 free_count = (q->size - (q->put - q->get) - 1) % q->size; 785 free_count = (q->size - (q->put - q->get) - 1) % q->size;
@@ -812,7 +812,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
812 nvgpu_smp_wmb(); 812 nvgpu_smp_wmb();
813 813
814 e->valid = true; 814 e->valid = true;
815 gk20a_dbg_fn("done"); 815 nvgpu_log_fn(c->g, "done");
816 816
817 return 0; 817 return 0;
818} 818}
@@ -1132,7 +1132,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
1132 c->gpfifo.entry_num = gpfifo_size; 1132 c->gpfifo.entry_num = gpfifo_size;
1133 c->gpfifo.get = c->gpfifo.put = 0; 1133 c->gpfifo.get = c->gpfifo.put = 0;
1134 1134
1135 gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d", 1135 nvgpu_log_info(g, "channel %d : gpfifo_base 0x%016llx, size %d",
1136 c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num); 1136 c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
1137 1137
1138 g->ops.fifo.setup_userd(c); 1138 g->ops.fifo.setup_userd(c);
@@ -1184,7 +1184,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
1184 1184
1185 g->ops.fifo.bind_channel(c); 1185 g->ops.fifo.bind_channel(c);
1186 1186
1187 gk20a_dbg_fn("done"); 1187 nvgpu_log_fn(g, "done");
1188 return 0; 1188 return 0;
1189 1189
1190clean_up_priv_cmd: 1190clean_up_priv_cmd:
@@ -1400,7 +1400,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
1400 u64 pb_get; 1400 u64 pb_get;
1401 u64 new_pb_get; 1401 u64 new_pb_get;
1402 1402
1403 gk20a_dbg_fn(""); 1403 nvgpu_log_fn(g, " ");
1404 1404
1405 /* Get status and clear the timer */ 1405 /* Get status and clear the timer */
1406 nvgpu_raw_spinlock_acquire(&ch->timeout.lock); 1406 nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
@@ -1480,7 +1480,7 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g)
1480 */ 1480 */
1481static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch) 1481static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch)
1482{ 1482{
1483 gk20a_dbg_fn(""); 1483 nvgpu_log_fn(ch->g, " ");
1484 1484
1485 gk20a_channel_clean_up_jobs(ch, true); 1485 gk20a_channel_clean_up_jobs(ch, true);
1486 1486
@@ -1499,7 +1499,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
1499{ 1499{
1500 int put; 1500 int put;
1501 1501
1502 gk20a_dbg_fn(""); 1502 nvgpu_log_fn(g, " ");
1503 1503
1504 /* 1504 /*
1505 * Currently, the only work type is associated with a lock, which deals 1505 * Currently, the only work type is associated with a lock, which deals
@@ -1596,7 +1596,7 @@ static int gk20a_channel_poll_worker(void *arg)
1596 struct nvgpu_timeout timeout; 1596 struct nvgpu_timeout timeout;
1597 int get = 0; 1597 int get = 0;
1598 1598
1599 gk20a_dbg_fn(""); 1599 nvgpu_log_fn(g, " ");
1600 1600
1601 nvgpu_timeout_init(g, &timeout, watchdog_interval, 1601 nvgpu_timeout_init(g, &timeout, watchdog_interval,
1602 NVGPU_TIMER_CPU_TIMER); 1602 NVGPU_TIMER_CPU_TIMER);
@@ -1699,7 +1699,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
1699{ 1699{
1700 struct gk20a *g = ch->g; 1700 struct gk20a *g = ch->g;
1701 1701
1702 gk20a_dbg_fn(""); 1702 nvgpu_log_fn(g, " ");
1703 1703
1704 /* 1704 /*
1705 * Warn if worker thread cannot run 1705 * Warn if worker thread cannot run
@@ -2142,12 +2142,12 @@ int gk20a_channel_suspend(struct gk20a *g)
2142 bool channels_in_use = false; 2142 bool channels_in_use = false;
2143 u32 active_runlist_ids = 0; 2143 u32 active_runlist_ids = 0;
2144 2144
2145 gk20a_dbg_fn(""); 2145 nvgpu_log_fn(g, " ");
2146 2146
2147 for (chid = 0; chid < f->num_channels; chid++) { 2147 for (chid = 0; chid < f->num_channels; chid++) {
2148 struct channel_gk20a *ch = &f->channel[chid]; 2148 struct channel_gk20a *ch = &f->channel[chid];
2149 if (gk20a_channel_get(ch)) { 2149 if (gk20a_channel_get(ch)) {
2150 gk20a_dbg_info("suspend channel %d", chid); 2150 nvgpu_log_info(g, "suspend channel %d", chid);
2151 /* disable channel */ 2151 /* disable channel */
2152 gk20a_disable_channel_tsg(g, ch); 2152 gk20a_disable_channel_tsg(g, ch);
2153 /* preempt the channel */ 2153 /* preempt the channel */
@@ -2175,7 +2175,7 @@ int gk20a_channel_suspend(struct gk20a *g)
2175 } 2175 }
2176 } 2176 }
2177 2177
2178 gk20a_dbg_fn("done"); 2178 nvgpu_log_fn(g, "done");
2179 return 0; 2179 return 0;
2180} 2180}
2181 2181
@@ -2186,11 +2186,11 @@ int gk20a_channel_resume(struct gk20a *g)
2186 bool channels_in_use = false; 2186 bool channels_in_use = false;
2187 u32 active_runlist_ids = 0; 2187 u32 active_runlist_ids = 0;
2188 2188
2189 gk20a_dbg_fn(""); 2189 nvgpu_log_fn(g, " ");
2190 2190
2191 for (chid = 0; chid < f->num_channels; chid++) { 2191 for (chid = 0; chid < f->num_channels; chid++) {
2192 if (gk20a_channel_get(&f->channel[chid])) { 2192 if (gk20a_channel_get(&f->channel[chid])) {
2193 gk20a_dbg_info("resume channel %d", chid); 2193 nvgpu_log_info(g, "resume channel %d", chid);
2194 g->ops.fifo.bind_channel(&f->channel[chid]); 2194 g->ops.fifo.bind_channel(&f->channel[chid]);
2195 channels_in_use = true; 2195 channels_in_use = true;
2196 active_runlist_ids |= BIT(f->channel[chid].runlist_id); 2196 active_runlist_ids |= BIT(f->channel[chid].runlist_id);
@@ -2201,7 +2201,7 @@ int gk20a_channel_resume(struct gk20a *g)
2201 if (channels_in_use) 2201 if (channels_in_use)
2202 gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true); 2202 gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true);
2203 2203
2204 gk20a_dbg_fn("done"); 2204 nvgpu_log_fn(g, "done");
2205 return 0; 2205 return 0;
2206} 2206}
2207 2207
@@ -2210,7 +2210,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
2210 struct fifo_gk20a *f = &g->fifo; 2210 struct fifo_gk20a *f = &g->fifo;
2211 u32 chid; 2211 u32 chid;
2212 2212
2213 gk20a_dbg_fn(""); 2213 nvgpu_log_fn(g, " ");
2214 2214
2215 /* 2215 /*
2216 * Ensure that all pending writes are actually done before trying to 2216 * Ensure that all pending writes are actually done before trying to
diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
index 114386a2..0fc39bf4 100644
--- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GK20A Cycle stats snapshots support (subsystem for gr_gk20a). 2 * GK20A Cycle stats snapshots support (subsystem for gr_gk20a).
3 * 3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -189,7 +189,7 @@ int css_hw_enable_snapshot(struct channel_gk20a *ch,
189 perf_pmasys_mem_block_valid_true_f() | 189 perf_pmasys_mem_block_valid_true_f() |
190 perf_pmasys_mem_block_target_lfb_f()); 190 perf_pmasys_mem_block_target_lfb_f());
191 191
192 gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); 192 nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n");
193 193
194 return 0; 194 return 0;
195 195
@@ -227,7 +227,7 @@ void css_hw_disable_snapshot(struct gr_gk20a *gr)
227 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); 227 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
228 data->hw_snapshot = NULL; 228 data->hw_snapshot = NULL;
229 229
230 gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); 230 nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");
231} 231}
232 232
233static void css_gr_free_shared_data(struct gr_gk20a *gr) 233static void css_gr_free_shared_data(struct gr_gk20a *gr)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index ce06e78b..97de7138 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -90,8 +90,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
90{ 90{
91 struct dbg_session_data *session_data; 91 struct dbg_session_data *session_data;
92 struct dbg_session_gk20a *dbg_s; 92 struct dbg_session_gk20a *dbg_s;
93 struct gk20a *g = ch->g;
93 94
94 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 95 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
95 96
96 /* guard against the session list being modified */ 97 /* guard against the session list being modified */
97 nvgpu_mutex_acquire(&ch->dbg_s_lock); 98 nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -100,9 +101,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
100 dbg_session_data, dbg_s_entry) { 101 dbg_session_data, dbg_s_entry) {
101 dbg_s = session_data->dbg_s; 102 dbg_s = session_data->dbg_s;
102 if (dbg_s->dbg_events.events_enabled) { 103 if (dbg_s->dbg_events.events_enabled) {
103 gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d", 104 nvgpu_log(g, gpu_dbg_gpu_dbg, "posting event on session id %d",
104 dbg_s->id); 105 dbg_s->id);
105 gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", 106 nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
106 dbg_s->dbg_events.num_pending_events); 107 dbg_s->dbg_events.num_pending_events);
107 108
108 dbg_s->dbg_events.num_pending_events++; 109 dbg_s->dbg_events.num_pending_events++;
@@ -119,8 +120,9 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
119 struct dbg_session_data *session_data; 120 struct dbg_session_data *session_data;
120 struct dbg_session_gk20a *dbg_s; 121 struct dbg_session_gk20a *dbg_s;
121 bool broadcast = false; 122 bool broadcast = false;
123 struct gk20a *g = ch->g;
122 124
123 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); 125 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
124 126
125 /* guard against the session list being modified */ 127 /* guard against the session list being modified */
126 nvgpu_mutex_acquire(&ch->dbg_s_lock); 128 nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -129,7 +131,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
129 dbg_session_data, dbg_s_entry) { 131 dbg_session_data, dbg_s_entry) {
130 dbg_s = session_data->dbg_s; 132 dbg_s = session_data->dbg_s;
131 if (dbg_s->broadcast_stop_trigger) { 133 if (dbg_s->broadcast_stop_trigger) {
132 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, 134 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
133 "stop trigger broadcast enabled"); 135 "stop trigger broadcast enabled");
134 broadcast = true; 136 broadcast = true;
135 break; 137 break;
@@ -145,8 +147,9 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
145{ 147{
146 struct dbg_session_data *session_data; 148 struct dbg_session_data *session_data;
147 struct dbg_session_gk20a *dbg_s; 149 struct dbg_session_gk20a *dbg_s;
150 struct gk20a *g = ch->g;
148 151
149 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); 152 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
150 153
151 /* guard against the session list being modified */ 154 /* guard against the session list being modified */
152 nvgpu_mutex_acquire(&ch->dbg_s_lock); 155 nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -155,7 +158,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
155 dbg_session_data, dbg_s_entry) { 158 dbg_session_data, dbg_s_entry) {
156 dbg_s = session_data->dbg_s; 159 dbg_s = session_data->dbg_s;
157 if (dbg_s->broadcast_stop_trigger) { 160 if (dbg_s->broadcast_stop_trigger) {
158 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, 161 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
159 "stop trigger broadcast disabled"); 162 "stop trigger broadcast disabled");
160 dbg_s->broadcast_stop_trigger = false; 163 dbg_s->broadcast_stop_trigger = false;
161 } 164 }
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
index e3052701..c4be3313 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GK20A memory interface 2 * GK20A memory interface
3 * 3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@ void fb_gk20a_reset(struct gk20a *g)
36{ 36{
37 u32 val; 37 u32 val;
38 38
39 gk20a_dbg_info("reset gk20a fb"); 39 nvgpu_log_info(g, "reset gk20a fb");
40 40
41 g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | 41 g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
42 mc_enable_l2_enabled_f() | 42 mc_enable_l2_enabled_f() |
@@ -63,7 +63,7 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
63 u32 addr_lo; 63 u32 addr_lo;
64 u32 data; 64 u32 data;
65 65
66 gk20a_dbg_fn(""); 66 nvgpu_log_fn(g, " ");
67 67
68 /* pagetables are considered sw states which are preserved after 68 /* pagetables are considered sw states which are preserved after
69 prepare_poweroff. When gk20a deinit releases those pagetables, 69 prepare_poweroff. When gk20a deinit releases those pagetables,
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 4fda0d2e..c9d7ea06 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -137,7 +137,7 @@ static int gk20a_fecs_trace_get_write_index(struct gk20a *g)
137 137
138static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index) 138static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index)
139{ 139{
140 gk20a_dbg(gpu_dbg_ctxsw, "set read=%d", index); 140 nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index);
141 return gr_gk20a_elpg_protected_call(g, 141 return gr_gk20a_elpg_protected_call(g,
142 (gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0)); 142 (gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0));
143} 143}
@@ -148,12 +148,12 @@ void gk20a_fecs_trace_hash_dump(struct gk20a *g)
148 struct gk20a_fecs_trace_hash_ent *ent; 148 struct gk20a_fecs_trace_hash_ent *ent;
149 struct gk20a_fecs_trace *trace = g->fecs_trace; 149 struct gk20a_fecs_trace *trace = g->fecs_trace;
150 150
151 gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table"); 151 nvgpu_log(g, gpu_dbg_ctxsw, "dumping hash table");
152 152
153 nvgpu_mutex_acquire(&trace->hash_lock); 153 nvgpu_mutex_acquire(&trace->hash_lock);
154 hash_for_each(trace->pid_hash_table, bkt, ent, node) 154 hash_for_each(trace->pid_hash_table, bkt, ent, node)
155 { 155 {
156 gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", 156 nvgpu_log(g, gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d",
157 ent, bkt, ent->context_ptr, ent->pid); 157 ent, bkt, ent->context_ptr, ent->pid);
158 158
159 } 159 }
@@ -165,7 +165,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid
165 struct gk20a_fecs_trace_hash_ent *he; 165 struct gk20a_fecs_trace_hash_ent *he;
166 struct gk20a_fecs_trace *trace = g->fecs_trace; 166 struct gk20a_fecs_trace *trace = g->fecs_trace;
167 167
168 gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, 168 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
169 "adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid); 169 "adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid);
170 170
171 he = nvgpu_kzalloc(g, sizeof(*he)); 171 he = nvgpu_kzalloc(g, sizeof(*he));
@@ -190,7 +190,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
190 struct gk20a_fecs_trace_hash_ent *ent; 190 struct gk20a_fecs_trace_hash_ent *ent;
191 struct gk20a_fecs_trace *trace = g->fecs_trace; 191 struct gk20a_fecs_trace *trace = g->fecs_trace;
192 192
193 gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, 193 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
194 "freeing hash entry context_ptr=%x", context_ptr); 194 "freeing hash entry context_ptr=%x", context_ptr);
195 195
196 nvgpu_mutex_acquire(&trace->hash_lock); 196 nvgpu_mutex_acquire(&trace->hash_lock);
@@ -198,7 +198,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
198 context_ptr) { 198 context_ptr) {
199 if (ent->context_ptr == context_ptr) { 199 if (ent->context_ptr == context_ptr) {
200 hash_del(&ent->node); 200 hash_del(&ent->node);
201 gk20a_dbg(gpu_dbg_ctxsw, 201 nvgpu_log(g, gpu_dbg_ctxsw,
202 "freed hash entry=%p context_ptr=%x", ent, 202 "freed hash entry=%p context_ptr=%x", ent,
203 ent->context_ptr); 203 ent->context_ptr);
204 nvgpu_kfree(g, ent); 204 nvgpu_kfree(g, ent);
@@ -215,7 +215,7 @@ static void gk20a_fecs_trace_free_hash_table(struct gk20a *g)
215 struct gk20a_fecs_trace_hash_ent *ent; 215 struct gk20a_fecs_trace_hash_ent *ent;
216 struct gk20a_fecs_trace *trace = g->fecs_trace; 216 struct gk20a_fecs_trace *trace = g->fecs_trace;
217 217
218 gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); 218 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace);
219 219
220 nvgpu_mutex_acquire(&trace->hash_lock); 220 nvgpu_mutex_acquire(&trace->hash_lock);
221 hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) { 221 hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) {
@@ -235,7 +235,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr)
235 nvgpu_mutex_acquire(&trace->hash_lock); 235 nvgpu_mutex_acquire(&trace->hash_lock);
236 hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) { 236 hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) {
237 if (ent->context_ptr == context_ptr) { 237 if (ent->context_ptr == context_ptr) {
238 gk20a_dbg(gpu_dbg_ctxsw, 238 nvgpu_log(g, gpu_dbg_ctxsw,
239 "found context_ptr=%x -> pid=%d", 239 "found context_ptr=%x -> pid=%d",
240 ent->context_ptr, ent->pid); 240 ent->context_ptr, ent->pid);
241 pid = ent->pid; 241 pid = ent->pid;
@@ -265,7 +265,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
265 struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record( 265 struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record(
266 trace, index); 266 trace, index);
267 267
268 gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, 268 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
269 "consuming record trace=%p read=%d record=%p", trace, index, r); 269 "consuming record trace=%p read=%d record=%p", trace, index, r);
270 270
271 if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) { 271 if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) {
@@ -284,7 +284,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
284 cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr); 284 cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr);
285 new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr); 285 new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr);
286 286
287 gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, 287 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
288 "context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)", 288 "context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)",
289 r->context_ptr, cur_pid, r->new_context_ptr, new_pid); 289 r->context_ptr, cur_pid, r->new_context_ptr, new_pid);
290 290
@@ -298,7 +298,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
298 entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]); 298 entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]);
299 entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT; 299 entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT;
300 300
301 gk20a_dbg(gpu_dbg_ctxsw, 301 nvgpu_log(g, gpu_dbg_ctxsw,
302 "tag=%x timestamp=%llx context_id=%08x new_context_id=%08x", 302 "tag=%x timestamp=%llx context_id=%08x new_context_id=%08x",
303 entry.tag, entry.timestamp, r->context_id, 303 entry.tag, entry.timestamp, r->context_id,
304 r->new_context_id); 304 r->new_context_id);
@@ -327,7 +327,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
327 continue; 327 continue;
328 } 328 }
329 329
330 gk20a_dbg(gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld", 330 nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld",
331 entry.tag, entry.context_id, entry.pid); 331 entry.tag, entry.context_id, entry.pid);
332 332
333 if (!entry.context_id) 333 if (!entry.context_id)
@@ -368,7 +368,7 @@ int gk20a_fecs_trace_poll(struct gk20a *g)
368 if (!cnt) 368 if (!cnt)
369 goto done; 369 goto done;
370 370
371 gk20a_dbg(gpu_dbg_ctxsw, 371 nvgpu_log(g, gpu_dbg_ctxsw,
372 "circular buffer: read=%d (mailbox=%d) write=%d cnt=%d", 372 "circular buffer: read=%d (mailbox=%d) write=%d cnt=%d",
373 read, gk20a_fecs_trace_get_read_index(g), write, cnt); 373 read, gk20a_fecs_trace_get_read_index(g), write, cnt);
374 374
@@ -633,7 +633,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
633 pid_t pid; 633 pid_t pid;
634 u32 aperture; 634 u32 aperture;
635 635
636 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, 636 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
637 "chid=%d context_ptr=%x inst_block=%llx", 637 "chid=%d context_ptr=%x inst_block=%llx",
638 ch->chid, context_ptr, 638 ch->chid, context_ptr,
639 nvgpu_inst_block_addr(g, &ch->inst_block)); 639 nvgpu_inst_block_addr(g, &ch->inst_block));
@@ -662,7 +662,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
662 lo = u64_lo32(pa); 662 lo = u64_lo32(pa);
663 hi = u64_hi32(pa); 663 hi = u64_hi32(pa);
664 664
665 gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi, 665 nvgpu_log(g, gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
666 lo, GK20A_FECS_TRACE_NUM_RECORDS); 666 lo, GK20A_FECS_TRACE_NUM_RECORDS);
667 667
668 nvgpu_mem_wr(g, mem, 668 nvgpu_mem_wr(g, mem,
@@ -696,7 +696,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)
696 u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch); 696 u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch);
697 697
698 if (g->fecs_trace) { 698 if (g->fecs_trace) {
699 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, 699 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
700 "ch=%p context_ptr=%x", ch, context_ptr); 700 "ch=%p context_ptr=%x", ch, context_ptr);
701 701
702 if (g->ops.fecs_trace.is_enabled(g)) { 702 if (g->ops.fecs_trace.is_enabled(g)) {
@@ -711,7 +711,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)
711 711
712int gk20a_fecs_trace_reset(struct gk20a *g) 712int gk20a_fecs_trace_reset(struct gk20a *g)
713{ 713{
714 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); 714 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
715 715
716 if (!g->ops.fecs_trace.is_enabled(g)) 716 if (!g->ops.fecs_trace.is_enabled(g))
717 return 0; 717 return 0;
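
The fecs_trace hunks above all follow one mechanical pattern: gk20a_dbg(flags, fmt, ...) becomes nvgpu_log(g, flags, fmt, ...), with the struct gk20a pointer these trace helpers already receive passed as the new first argument. A minimal sketch of the before/after shape; log_hash_ent_example() is an illustrative name, not part of the driver:

static void log_hash_ent_example(struct gk20a *g,
		struct gk20a_fecs_trace_hash_ent *ent)
{
	/* old form, no device pointer:
	 * gk20a_dbg(gpu_dbg_ctxsw, "context_ptr=%x pid=%d",
	 *	     ent->context_ptr, ent->pid);
	 */

	/* new form, the gk20a instance is passed explicitly */
	nvgpu_log(g, gpu_dbg_ctxsw, "context_ptr=%x pid=%d",
		  ent->context_ptr, ent->pid);
}
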
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 75d66968..cc63c3b8 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -94,7 +94,7 @@ u32 gk20a_fifo_get_engine_ids(struct gk20a *g,
94 engine_id[instance_cnt] = active_engine_id; 94 engine_id[instance_cnt] = active_engine_id;
95 ++instance_cnt; 95 ++instance_cnt;
96 } else { 96 } else {
97 gk20a_dbg_info("warning engine_id table sz is small %d", 97 nvgpu_log_info(g, "warning engine_id table sz is small %d",
98 engine_id_sz); 98 engine_id_sz);
99 } 99 }
100 } 100 }
@@ -320,7 +320,7 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
320{ 320{
321 int ret = ENGINE_INVAL_GK20A; 321 int ret = ENGINE_INVAL_GK20A;
322 322
323 gk20a_dbg_info("engine type %d", engine_type); 323 nvgpu_log_info(g, "engine type %d", engine_type);
324 if (engine_type == top_device_info_type_enum_graphics_v()) 324 if (engine_type == top_device_info_type_enum_graphics_v())
325 ret = ENGINE_GR_GK20A; 325 ret = ENGINE_GR_GK20A;
326 else if ((engine_type >= top_device_info_type_enum_copy0_v()) && 326 else if ((engine_type >= top_device_info_type_enum_copy0_v()) &&
@@ -354,7 +354,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
354 u32 gr_runlist_id = ~0; 354 u32 gr_runlist_id = ~0;
355 bool found_pbdma_for_runlist = false; 355 bool found_pbdma_for_runlist = false;
356 356
357 gk20a_dbg_fn(""); 357 nvgpu_log_fn(g, " ");
358 358
359 f->num_engines = 0; 359 f->num_engines = 0;
360 360
@@ -367,7 +367,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
367 if (top_device_info_engine_v(table_entry)) { 367 if (top_device_info_engine_v(table_entry)) {
368 engine_id = 368 engine_id =
369 top_device_info_engine_enum_v(table_entry); 369 top_device_info_engine_enum_v(table_entry);
370 gk20a_dbg_info("info: engine_id %d", 370 nvgpu_log_info(g, "info: engine_id %d",
371 top_device_info_engine_enum_v(table_entry)); 371 top_device_info_engine_enum_v(table_entry));
372 } 372 }
373 373
@@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
375 if (top_device_info_runlist_v(table_entry)) { 375 if (top_device_info_runlist_v(table_entry)) {
376 runlist_id = 376 runlist_id =
377 top_device_info_runlist_enum_v(table_entry); 377 top_device_info_runlist_enum_v(table_entry);
378 gk20a_dbg_info("gr info: runlist_id %d", runlist_id); 378 nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id);
379 379
380 runlist_bit = BIT(runlist_id); 380 runlist_bit = BIT(runlist_id);
381 381
@@ -384,7 +384,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
384 pbdma_id++) { 384 pbdma_id++) {
385 if (f->pbdma_map[pbdma_id] & 385 if (f->pbdma_map[pbdma_id] &
386 runlist_bit) { 386 runlist_bit) {
387 gk20a_dbg_info( 387 nvgpu_log_info(g,
388 "gr info: pbdma_map[%d]=%d", 388 "gr info: pbdma_map[%d]=%d",
389 pbdma_id, 389 pbdma_id,
390 f->pbdma_map[pbdma_id]); 390 f->pbdma_map[pbdma_id]);
@@ -402,13 +402,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
402 if (top_device_info_intr_v(table_entry)) { 402 if (top_device_info_intr_v(table_entry)) {
403 intr_id = 403 intr_id =
404 top_device_info_intr_enum_v(table_entry); 404 top_device_info_intr_enum_v(table_entry);
405 gk20a_dbg_info("gr info: intr_id %d", intr_id); 405 nvgpu_log_info(g, "gr info: intr_id %d", intr_id);
406 } 406 }
407 407
408 if (top_device_info_reset_v(table_entry)) { 408 if (top_device_info_reset_v(table_entry)) {
409 reset_id = 409 reset_id =
410 top_device_info_reset_enum_v(table_entry); 410 top_device_info_reset_enum_v(table_entry);
411 gk20a_dbg_info("gr info: reset_id %d", 411 nvgpu_log_info(g, "gr info: reset_id %d",
412 reset_id); 412 reset_id);
413 } 413 }
414 } else if (entry == top_device_info_entry_engine_type_v()) { 414 } else if (entry == top_device_info_entry_engine_type_v()) {
@@ -538,7 +538,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
538 struct gk20a *g = f->g; 538 struct gk20a *g = f->g;
539 unsigned int i = 0; 539 unsigned int i = 0;
540 540
541 gk20a_dbg_fn(""); 541 nvgpu_log_fn(g, " ");
542 542
543 nvgpu_channel_worker_deinit(g); 543 nvgpu_channel_worker_deinit(g);
544 /* 544 /*
@@ -616,7 +616,7 @@ static void fifo_pbdma_exception_status(struct gk20a *g,
616 get_exception_pbdma_info(g, eng_info); 616 get_exception_pbdma_info(g, eng_info);
617 e = &eng_info->pbdma_exception_info; 617 e = &eng_info->pbdma_exception_info;
618 618
619 gk20a_dbg_fn("pbdma_id %d, " 619 nvgpu_log_fn(g, "pbdma_id %d, "
620 "id_type %s, id %d, chan_status %d, " 620 "id_type %s, id %d, chan_status %d, "
621 "next_id_type %s, next_id %d, " 621 "next_id_type %s, next_id %d, "
622 "chsw_in_progress %d", 622 "chsw_in_progress %d",
@@ -657,7 +657,7 @@ static void fifo_engine_exception_status(struct gk20a *g,
657 get_exception_engine_info(g, eng_info); 657 get_exception_engine_info(g, eng_info);
658 e = &eng_info->engine_exception_info; 658 e = &eng_info->engine_exception_info;
659 659
660 gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, " 660 nvgpu_log_fn(g, "engine_id %d, id_type %s, id %d, ctx_status %d, "
661 "faulted %d, idle %d, ctxsw_in_progress %d, ", 661 "faulted %d, idle %d, ctxsw_in_progress %d, ",
662 eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid", 662 eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
663 e->id, e->ctx_status_v, 663 e->id, e->ctx_status_v,
@@ -745,7 +745,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
745 745
746clean_up_runlist: 746clean_up_runlist:
747 gk20a_fifo_delete_runlist(f); 747 gk20a_fifo_delete_runlist(f);
748 gk20a_dbg_fn("fail"); 748 nvgpu_log_fn(g, "fail");
749 return -ENOMEM; 749 return -ENOMEM;
750} 750}
751 751
@@ -784,7 +784,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
784 unsigned int i; 784 unsigned int i;
785 u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); 785 u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
786 786
787 gk20a_dbg_fn(""); 787 nvgpu_log_fn(g, " ");
788 788
789 /* enable pmc pfifo */ 789 /* enable pmc pfifo */
790 g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); 790 g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
@@ -805,7 +805,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
805 timeout = gk20a_readl(g, fifo_fb_timeout_r()); 805 timeout = gk20a_readl(g, fifo_fb_timeout_r());
806 timeout = set_field(timeout, fifo_fb_timeout_period_m(), 806 timeout = set_field(timeout, fifo_fb_timeout_period_m(),
807 fifo_fb_timeout_period_max_f()); 807 fifo_fb_timeout_period_max_f());
808 gk20a_dbg_info("fifo_fb_timeout reg val = 0x%08x", timeout); 808 nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
809 gk20a_writel(g, fifo_fb_timeout_r(), timeout); 809 gk20a_writel(g, fifo_fb_timeout_r(), timeout);
810 810
811 /* write pbdma timeout value */ 811 /* write pbdma timeout value */
@@ -813,7 +813,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
813 timeout = gk20a_readl(g, pbdma_timeout_r(i)); 813 timeout = gk20a_readl(g, pbdma_timeout_r(i));
814 timeout = set_field(timeout, pbdma_timeout_period_m(), 814 timeout = set_field(timeout, pbdma_timeout_period_m(),
815 pbdma_timeout_period_max_f()); 815 pbdma_timeout_period_max_f());
816 gk20a_dbg_info("pbdma_timeout reg val = 0x%08x", timeout); 816 nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", timeout);
817 gk20a_writel(g, pbdma_timeout_r(i), timeout); 817 gk20a_writel(g, pbdma_timeout_r(i), timeout);
818 } 818 }
819 if (g->ops.fifo.apply_pb_timeout) 819 if (g->ops.fifo.apply_pb_timeout)
@@ -837,10 +837,10 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
837 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); 837 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
838 intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f(); 838 intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
839 gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall); 839 gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
840 gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); 840 nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
841 gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); 841 gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
842 842
843 gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, 843 nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i,
844 ~pbdma_intr_en_0_lbreq_enabled_f()); 844 ~pbdma_intr_en_0_lbreq_enabled_f());
845 gk20a_writel(g, pbdma_intr_en_1_r(i), 845 gk20a_writel(g, pbdma_intr_en_1_r(i),
846 ~pbdma_intr_en_0_lbreq_enabled_f()); 846 ~pbdma_intr_en_0_lbreq_enabled_f());
@@ -852,12 +852,12 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
852 /* clear and enable pfifo interrupt */ 852 /* clear and enable pfifo interrupt */
853 gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); 853 gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
854 mask = gk20a_fifo_intr_0_en_mask(g); 854 mask = gk20a_fifo_intr_0_en_mask(g);
855 gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); 855 nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
856 gk20a_writel(g, fifo_intr_en_0_r(), mask); 856 gk20a_writel(g, fifo_intr_en_0_r(), mask);
857 gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); 857 nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
858 gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); 858 gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
859 859
860 gk20a_dbg_fn("done"); 860 nvgpu_log_fn(g, "done");
861 861
862 return 0; 862 return 0;
863} 863}
@@ -868,7 +868,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
868 unsigned int chid, i; 868 unsigned int chid, i;
869 int err = 0; 869 int err = 0;
870 870
871 gk20a_dbg_fn(""); 871 nvgpu_log_fn(g, " ");
872 872
873 f->g = g; 873 f->g = g;
874 874
@@ -945,7 +945,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
945 goto clean_up; 945 goto clean_up;
946 } 946 }
947 947
948 gk20a_dbg_fn("done"); 948 nvgpu_log_fn(g, "done");
949 return 0; 949 return 0;
950 950
951clean_up: 951clean_up:
@@ -972,10 +972,10 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
972 u64 userd_base; 972 u64 userd_base;
973 int err = 0; 973 int err = 0;
974 974
975 gk20a_dbg_fn(""); 975 nvgpu_log_fn(g, " ");
976 976
977 if (f->sw_ready) { 977 if (f->sw_ready) {
978 gk20a_dbg_fn("skip init"); 978 nvgpu_log_fn(g, "skip init");
979 return 0; 979 return 0;
980 } 980 }
981 981
@@ -997,7 +997,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
997 nvgpu_err(g, "userd memory allocation failed"); 997 nvgpu_err(g, "userd memory allocation failed");
998 goto clean_up; 998 goto clean_up;
999 } 999 }
1000 gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); 1000 nvgpu_log(g, gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
1001 1001
1002 userd_base = nvgpu_mem_get_addr(g, &f->userd); 1002 userd_base = nvgpu_mem_get_addr(g, &f->userd);
1003 for (chid = 0; chid < f->num_channels; chid++) { 1003 for (chid = 0; chid < f->num_channels; chid++) {
@@ -1013,11 +1013,11 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
1013 1013
1014 f->sw_ready = true; 1014 f->sw_ready = true;
1015 1015
1016 gk20a_dbg_fn("done"); 1016 nvgpu_log_fn(g, "done");
1017 return 0; 1017 return 0;
1018 1018
1019clean_up: 1019clean_up:
1020 gk20a_dbg_fn("fail"); 1020 nvgpu_log_fn(g, "fail");
1021 if (nvgpu_mem_is_valid(&f->userd)) { 1021 if (nvgpu_mem_is_valid(&f->userd)) {
1022 if (g->ops.mm.is_bar1_supported(g)) 1022 if (g->ops.mm.is_bar1_supported(g))
1023 nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd); 1023 nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd);
@@ -1032,7 +1032,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g)
1032{ 1032{
1033 u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r()); 1033 u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
1034 1034
1035 gk20a_dbg(gpu_dbg_intr, "runlist event %08x", 1035 nvgpu_log(g, gpu_dbg_intr, "runlist event %08x",
1036 runlist_event); 1036 runlist_event);
1037 1037
1038 gk20a_writel(g, fifo_intr_runlist_r(), runlist_event); 1038 gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
@@ -1042,7 +1042,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
1042{ 1042{
1043 struct fifo_gk20a *f = &g->fifo; 1043 struct fifo_gk20a *f = &g->fifo;
1044 1044
1045 gk20a_dbg_fn(""); 1045 nvgpu_log_fn(g, " ");
1046 1046
1047 /* test write, read through bar1 @ userd region before 1047 /* test write, read through bar1 @ userd region before
1048 * turning on the snooping */ 1048 * turning on the snooping */
@@ -1053,7 +1053,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
1053 u32 bar1_vaddr = f->userd.gpu_va; 1053 u32 bar1_vaddr = f->userd.gpu_va;
1054 volatile u32 *cpu_vaddr = f->userd.cpu_va; 1054 volatile u32 *cpu_vaddr = f->userd.cpu_va;
1055 1055
1056 gk20a_dbg_info("test bar1 @ vaddr 0x%x", 1056 nvgpu_log_info(g, "test bar1 @ vaddr 0x%x",
1057 bar1_vaddr); 1057 bar1_vaddr);
1058 1058
1059 v = gk20a_bar1_readl(g, bar1_vaddr); 1059 v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -1093,7 +1093,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
1093 fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) | 1093 fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
1094 fifo_bar1_base_valid_true_f()); 1094 fifo_bar1_base_valid_true_f());
1095 1095
1096 gk20a_dbg_fn("done"); 1096 nvgpu_log_fn(g, "done");
1097 1097
1098 return 0; 1098 return 0;
1099} 1099}
@@ -1261,7 +1261,7 @@ void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
1261 u32 fault_info; 1261 u32 fault_info;
1262 u32 addr_lo, addr_hi; 1262 u32 addr_lo, addr_hi;
1263 1263
1264 gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); 1264 nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id);
1265 1265
1266 memset(mmfault, 0, sizeof(*mmfault)); 1266 memset(mmfault, 0, sizeof(*mmfault));
1267 1267
@@ -1291,7 +1291,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
1291 u32 engine_enum = ENGINE_INVAL_GK20A; 1291 u32 engine_enum = ENGINE_INVAL_GK20A;
1292 struct fifo_engine_info_gk20a *engine_info; 1292 struct fifo_engine_info_gk20a *engine_info;
1293 1293
1294 gk20a_dbg_fn(""); 1294 nvgpu_log_fn(g, " ");
1295 1295
1296 if (!g) 1296 if (!g)
1297 return; 1297 return;
@@ -1489,7 +1489,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
1489 struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid]; 1489 struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid];
1490 struct channel_gk20a *ch; 1490 struct channel_gk20a *ch;
1491 1491
1492 gk20a_dbg_fn(""); 1492 nvgpu_log_fn(g, " ");
1493 1493
1494 g->ops.fifo.disable_tsg(tsg); 1494 g->ops.fifo.disable_tsg(tsg);
1495 1495
@@ -1556,7 +1556,7 @@ static bool gk20a_fifo_handle_mmu_fault(
1556 bool verbose = true; 1556 bool verbose = true;
1557 u32 grfifo_ctl; 1557 u32 grfifo_ctl;
1558 1558
1559 gk20a_dbg_fn(""); 1559 nvgpu_log_fn(g, " ");
1560 1560
1561 g->fifo.deferred_reset_pending = false; 1561 g->fifo.deferred_reset_pending = false;
1562 1562
@@ -1693,7 +1693,7 @@ static bool gk20a_fifo_handle_mmu_fault(
1693 1693
1694 /* handled during channel free */ 1694 /* handled during channel free */
1695 g->fifo.deferred_reset_pending = true; 1695 g->fifo.deferred_reset_pending = true;
1696 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 1696 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
1697 "sm debugger attached," 1697 "sm debugger attached,"
1698 " deferring channel recovery to channel free"); 1698 " deferring channel recovery to channel free");
1699 } else { 1699 } else {
@@ -2196,6 +2196,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2196 struct channel_gk20a *ch; 2196 struct channel_gk20a *ch;
2197 bool recover = false; 2197 bool recover = false;
2198 bool progress = false; 2198 bool progress = false;
2199 struct gk20a *g = tsg->g;
2199 2200
2200 *verbose = false; 2201 *verbose = false;
2201 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; 2202 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
@@ -2221,7 +2222,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2221 * this resets timeout for channels that already completed their work 2222 * this resets timeout for channels that already completed their work
2222 */ 2223 */
2223 if (progress) { 2224 if (progress) {
2224 gk20a_dbg_info("progress on tsg=%d ch=%d", 2225 nvgpu_log_info(g, "progress on tsg=%d ch=%d",
2225 tsg->tsgid, ch->chid); 2226 tsg->tsgid, ch->chid);
2226 gk20a_channel_put(ch); 2227 gk20a_channel_put(ch);
2227 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; 2228 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
@@ -2239,7 +2240,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2239 * caused the problem, so set timeout error notifier for all channels. 2240 * caused the problem, so set timeout error notifier for all channels.
2240 */ 2241 */
2241 if (recover) { 2242 if (recover) {
2242 gk20a_dbg_info("timeout on tsg=%d ch=%d", 2243 nvgpu_log_info(g, "timeout on tsg=%d ch=%d",
2243 tsg->tsgid, ch->chid); 2244 tsg->tsgid, ch->chid);
2244 *ms = ch->timeout_accumulated_ms; 2245 *ms = ch->timeout_accumulated_ms;
2245 gk20a_channel_put(ch); 2246 gk20a_channel_put(ch);
@@ -2311,7 +2312,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g)
2311 is_tsg, true, verbose, 2312 is_tsg, true, verbose,
2312 RC_TYPE_CTXSW_TIMEOUT); 2313 RC_TYPE_CTXSW_TIMEOUT);
2313 } else { 2314 } else {
2314 gk20a_dbg_info( 2315 nvgpu_log_info(g,
2315 "fifo is waiting for ctx switch for %d ms, " 2316 "fifo is waiting for ctx switch for %d ms, "
2316 "%s=%d", ms, is_tsg ? "tsg" : "ch", id); 2317 "%s=%d", ms, is_tsg ? "tsg" : "ch", id);
2317 } 2318 }
@@ -2330,7 +2331,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
2330 bool print_channel_reset_log = false; 2331 bool print_channel_reset_log = false;
2331 u32 handled = 0; 2332 u32 handled = 0;
2332 2333
2333 gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); 2334 nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);
2334 2335
2335 if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { 2336 if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
2336 /* pio mode is unused. this shouldn't happen, ever. */ 2337 /* pio mode is unused. this shouldn't happen, ever. */
@@ -2381,7 +2382,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
2381 engine_id++) { 2382 engine_id++) {
2382 u32 active_engine_id = g->fifo.active_engines_list[engine_id]; 2383 u32 active_engine_id = g->fifo.active_engines_list[engine_id];
2383 u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; 2384 u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
2384 gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum, 2385 nvgpu_log_fn(g, "enum:%d -> engine_id:%d", engine_enum,
2385 active_engine_id); 2386 active_engine_id);
2386 fifo_pbdma_exception_status(g, 2387 fifo_pbdma_exception_status(g,
2387 &g->fifo.engine_info[active_engine_id]); 2388 &g->fifo.engine_info[active_engine_id]);
@@ -2632,7 +2633,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
2632 2633
2633 for (i = 0; i < host_num_pbdma; i++) { 2634 for (i = 0; i < host_num_pbdma; i++) {
2634 if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { 2635 if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
2635 gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); 2636 nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i);
2636 clear_intr |= 2637 clear_intr |=
2637 gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES); 2638 gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES);
2638 } 2639 }
@@ -2653,7 +2654,7 @@ void gk20a_fifo_isr(struct gk20a *g)
2653 * in a threaded interrupt context... */ 2654 * in a threaded interrupt context... */
2654 nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); 2655 nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);
2655 2656
2656 gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); 2657 nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
2657 2658
2658 /* handle runlist update */ 2659 /* handle runlist update */
2659 if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { 2660 if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
@@ -2681,7 +2682,7 @@ int gk20a_fifo_nonstall_isr(struct gk20a *g)
2681 u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); 2682 u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
2682 u32 clear_intr = 0; 2683 u32 clear_intr = 0;
2683 2684
2684 gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); 2685 nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
2685 2686
2686 if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) 2687 if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
2687 clear_intr = fifo_intr_0_channel_intr_pending_f(); 2688 clear_intr = fifo_intr_0_channel_intr_pending_f();
@@ -2769,7 +2770,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2769 int ret; 2770 int ret;
2770 unsigned int id_type; 2771 unsigned int id_type;
2771 2772
2772 gk20a_dbg_fn("%d", id); 2773 nvgpu_log_fn(g, "%d", id);
2773 2774
2774 /* issue preempt */ 2775 /* issue preempt */
2775 gk20a_fifo_issue_preempt(g, id, is_tsg); 2776 gk20a_fifo_issue_preempt(g, id, is_tsg);
@@ -2794,7 +2795,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
2794 u32 mutex_ret = 0; 2795 u32 mutex_ret = 0;
2795 u32 i; 2796 u32 i;
2796 2797
2797 gk20a_dbg_fn("%d", chid); 2798 nvgpu_log_fn(g, "%d", chid);
2798 2799
2799 /* we have no idea which runlist we are using. lock all */ 2800 /* we have no idea which runlist we are using. lock all */
2800 for (i = 0; i < g->fifo.max_runlists; i++) 2801 for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2821,7 +2822,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
2821 u32 mutex_ret = 0; 2822 u32 mutex_ret = 0;
2822 u32 i; 2823 u32 i;
2823 2824
2824 gk20a_dbg_fn("%d", tsgid); 2825 nvgpu_log_fn(g, "%d", tsgid);
2825 2826
2826 /* we have no idea which runlist we are using. lock all */ 2827 /* we have no idea which runlist we are using. lock all */
2827 for (i = 0; i < g->fifo.max_runlists; i++) 2828 for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2938,7 +2939,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
2938 u32 mutex_ret; 2939 u32 mutex_ret;
2939 u32 err = 0; 2940 u32 err = 0;
2940 2941
2941 gk20a_dbg_fn(""); 2942 nvgpu_log_fn(g, " ");
2942 2943
2943 gr_stat = 2944 gr_stat =
2944 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); 2945 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
@@ -2988,12 +2989,12 @@ clean_up:
2988 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2989 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2989 2990
2990 if (err) { 2991 if (err) {
2991 gk20a_dbg_fn("failed"); 2992 nvgpu_log_fn(g, "failed");
2992 if (gk20a_fifo_enable_engine_activity(g, eng_info)) 2993 if (gk20a_fifo_enable_engine_activity(g, eng_info))
2993 nvgpu_err(g, 2994 nvgpu_err(g,
2994 "failed to enable gr engine activity"); 2995 "failed to enable gr engine activity");
2995 } else { 2996 } else {
2996 gk20a_dbg_fn("done"); 2997 nvgpu_log_fn(g, "done");
2997 } 2998 }
2998 return err; 2999 return err;
2999} 3000}
@@ -3129,8 +3130,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
3129 bool skip_next = false; 3130 bool skip_next = false;
3130 u32 tsgid, count = 0; 3131 u32 tsgid, count = 0;
3131 u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); 3132 u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32);
3133 struct gk20a *g = f->g;
3132 3134
3133 gk20a_dbg_fn(""); 3135 nvgpu_log_fn(g, " ");
3134 3136
3135 /* for each TSG, T, on this level, insert all higher-level channels 3137 /* for each TSG, T, on this level, insert all higher-level channels
3136 and TSGs before inserting T. */ 3138 and TSGs before inserting T. */
@@ -3156,9 +3158,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
3156 return NULL; 3158 return NULL;
3157 3159
3158 /* add TSG entry */ 3160 /* add TSG entry */
3159 gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid); 3161 nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid);
3160 f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry); 3162 f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry);
3161 gk20a_dbg_info("tsg runlist count %d runlist [0] %x [1] %x\n", 3163 nvgpu_log_info(g, "tsg runlist count %d runlist [0] %x [1] %x\n",
3162 count, runlist_entry[0], runlist_entry[1]); 3164 count, runlist_entry[0], runlist_entry[1]);
3163 runlist_entry += runlist_entry_words; 3165 runlist_entry += runlist_entry_words;
3164 count++; 3166 count++;
@@ -3177,10 +3179,10 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
3177 return NULL; 3179 return NULL;
3178 } 3180 }
3179 3181
3180 gk20a_dbg_info("add channel %d to runlist", 3182 nvgpu_log_info(g, "add channel %d to runlist",
3181 ch->chid); 3183 ch->chid);
3182 f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); 3184 f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
3183 gk20a_dbg_info( 3185 nvgpu_log_info(g,
3184 "run list count %d runlist [0] %x [1] %x\n", 3186 "run list count %d runlist [0] %x [1] %x\n",
3185 count, runlist_entry[0], runlist_entry[1]); 3187 count, runlist_entry[0], runlist_entry[1]);
3186 count++; 3188 count++;
@@ -3222,7 +3224,7 @@ int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
3222 u32 runlist_id, 3224 u32 runlist_id,
3223 u32 new_level) 3225 u32 new_level)
3224{ 3226{
3225 gk20a_dbg_fn(""); 3227 nvgpu_log_fn(g, " ");
3226 3228
3227 g->fifo.tsg[id].interleave_level = new_level; 3229 g->fifo.tsg[id].interleave_level = new_level;
3228 3230
@@ -3313,7 +3315,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
3313 3315
3314 runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); 3316 runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]);
3315 3317
3316 gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx", 3318 nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
3317 runlist_id, (u64)runlist_iova); 3319 runlist_id, (u64)runlist_iova);
3318 3320
3319 if (!runlist_iova) { 3321 if (!runlist_iova) {
@@ -3445,7 +3447,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
3445 u32 mutex_ret; 3447 u32 mutex_ret;
3446 u32 ret = 0; 3448 u32 ret = 0;
3447 3449
3448 gk20a_dbg_fn(""); 3450 nvgpu_log_fn(g, " ");
3449 3451
3450 runlist = &f->runlist_info[runlist_id]; 3452 runlist = &f->runlist_info[runlist_id];
3451 3453
@@ -3465,7 +3467,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
3465 3467
3466int gk20a_fifo_suspend(struct gk20a *g) 3468int gk20a_fifo_suspend(struct gk20a *g)
3467{ 3469{
3468 gk20a_dbg_fn(""); 3470 nvgpu_log_fn(g, " ");
3469 3471
3470 /* stop bar1 snooping */ 3472 /* stop bar1 snooping */
3471 if (g->ops.mm.is_bar1_supported(g)) 3473 if (g->ops.mm.is_bar1_supported(g))
@@ -3476,7 +3478,7 @@ int gk20a_fifo_suspend(struct gk20a *g)
3476 gk20a_writel(g, fifo_intr_en_0_r(), 0); 3478 gk20a_writel(g, fifo_intr_en_0_r(), 0);
3477 gk20a_writel(g, fifo_intr_en_1_r(), 0); 3479 gk20a_writel(g, fifo_intr_en_1_r(), 0);
3478 3480
3479 gk20a_dbg_fn("done"); 3481 nvgpu_log_fn(g, "done");
3480 return 0; 3482 return 0;
3481} 3483}
3482 3484
@@ -3511,7 +3513,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
3511 int ret = -ETIMEDOUT; 3513 int ret = -ETIMEDOUT;
3512 u32 i, host_num_engines; 3514 u32 i, host_num_engines;
3513 3515
3514 gk20a_dbg_fn(""); 3516 nvgpu_log_fn(g, " ");
3515 3517
3516 host_num_engines = 3518 host_num_engines =
3517 nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); 3519 nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
@@ -3533,12 +3535,12 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
3533 } while (!nvgpu_timeout_expired(&timeout)); 3535 } while (!nvgpu_timeout_expired(&timeout));
3534 3536
3535 if (ret) { 3537 if (ret) {
3536 gk20a_dbg_info("cannot idle engine %u", i); 3538 nvgpu_log_info(g, "cannot idle engine %u", i);
3537 break; 3539 break;
3538 } 3540 }
3539 } 3541 }
3540 3542
3541 gk20a_dbg_fn("done"); 3543 nvgpu_log_fn(g, "done");
3542 3544
3543 return ret; 3545 return ret;
3544} 3546}
@@ -3839,7 +3841,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)
3839{ 3841{
3840 struct gk20a *g = ch_gk20a->g; 3842 struct gk20a *g = ch_gk20a->g;
3841 3843
3842 gk20a_dbg_fn(""); 3844 nvgpu_log_fn(g, " ");
3843 3845
3844 if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { 3846 if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
3845 gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), 3847 gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
@@ -3854,12 +3856,12 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
3854 u32 addr_hi; 3856 u32 addr_hi;
3855 struct gk20a *g = c->g; 3857 struct gk20a *g = c->g;
3856 3858
3857 gk20a_dbg_fn(""); 3859 nvgpu_log_fn(g, " ");
3858 3860
3859 addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); 3861 addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
3860 addr_hi = u64_hi32(c->userd_iova); 3862 addr_hi = u64_hi32(c->userd_iova);
3861 3863
3862 gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", 3864 nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx",
3863 c->chid, (u64)c->userd_iova); 3865 c->chid, (u64)c->userd_iova);
3864 3866
3865 nvgpu_mem_wr32(g, &c->inst_block, 3867 nvgpu_mem_wr32(g, &c->inst_block,
@@ -3885,7 +3887,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
3885 struct gk20a *g = c->g; 3887 struct gk20a *g = c->g;
3886 struct nvgpu_mem *mem = &c->inst_block; 3888 struct nvgpu_mem *mem = &c->inst_block;
3887 3889
3888 gk20a_dbg_fn(""); 3890 nvgpu_log_fn(g, " ");
3889 3891
3890 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); 3892 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
3891 3893
@@ -3946,7 +3948,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
3946 struct gk20a *g = c->g; 3948 struct gk20a *g = c->g;
3947 struct nvgpu_mem *mem = &c->inst_block; 3949 struct nvgpu_mem *mem = &c->inst_block;
3948 3950
3949 gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid); 3951 nvgpu_log_info(g, "channel %d : set ramfc privileged_channel", c->chid);
3950 3952
3951 /* Enable HCE priv mode for phys mode transfer */ 3953 /* Enable HCE priv mode for phys mode transfer */
3952 nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), 3954 nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
@@ -3959,7 +3961,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
3959 struct nvgpu_mem *mem; 3961 struct nvgpu_mem *mem;
3960 u32 offset; 3962 u32 offset;
3961 3963
3962 gk20a_dbg_fn(""); 3964 nvgpu_log_fn(g, " ");
3963 3965
3964 if (nvgpu_mem_is_valid(&c->usermode_userd)) { 3966 if (nvgpu_mem_is_valid(&c->usermode_userd)) {
3965 mem = &c->usermode_userd; 3967 mem = &c->usermode_userd;
@@ -3987,16 +3989,16 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
3987{ 3989{
3988 int err; 3990 int err;
3989 3991
3990 gk20a_dbg_fn(""); 3992 nvgpu_log_fn(g, " ");
3991 3993
3992 err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); 3994 err = g->ops.mm.alloc_inst_block(g, &ch->inst_block);
3993 if (err) 3995 if (err)
3994 return err; 3996 return err;
3995 3997
3996 gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx", 3998 nvgpu_log_info(g, "channel %d inst block physical addr: 0x%16llx",
3997 ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); 3999 ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block));
3998 4000
3999 gk20a_dbg_fn("done"); 4001 nvgpu_log_fn(g, "done");
4000 return 0; 4002 return 0;
4001} 4003}
4002 4004
@@ -4086,7 +4088,7 @@ void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
4086 struct priv_cmd_entry *cmd, u32 off, 4088 struct priv_cmd_entry *cmd, u32 off,
4087 u32 id, u32 thresh, u64 gpu_va) 4089 u32 id, u32 thresh, u64 gpu_va)
4088{ 4090{
4089 gk20a_dbg_fn(""); 4091 nvgpu_log_fn(g, " ");
4090 4092
4091 off = cmd->off + off; 4093 off = cmd->off + off;
4092 /* syncpoint_a */ 4094 /* syncpoint_a */
@@ -4115,7 +4117,7 @@ void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
4115{ 4117{
4116 u32 off = cmd->off; 4118 u32 off = cmd->off;
4117 4119
4118 gk20a_dbg_fn(""); 4120 nvgpu_log_fn(g, " ");
4119 if (wfi_cmd) { 4121 if (wfi_cmd) {
4120 /* wfi */ 4122 /* wfi */
4121 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); 4123 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E);
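
Two hunks in fifo_gk20a.c (in gk20a_fifo_check_tsg_ctxsw_timeout() and gk20a_runlist_construct_locked()) do slightly more than swap macro names: because nvgpu_log_fn()/nvgpu_log_info() need a struct gk20a pointer and those functions are only handed a tsg_gk20a or fifo_gk20a, they first recover the device through the existing back-pointer (tsg->g, f->g). A hedged sketch of that shape; runlist_log_example() is a made-up name and the body is illustrative only:

static void runlist_log_example(struct fifo_gk20a *f)
{
	struct gk20a *g = f->g;	/* back-pointer gives nvgpu_log_* its device */

	nvgpu_log_fn(g, " ");
	nvgpu_log_info(g, "runlist entry size %u bytes",
		f->runlist_entry_size);
}
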
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index e8008937..e862f2e4 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -77,7 +77,7 @@ int gk20a_detect_chip(struct gk20a *g)
77 77
78 gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev); 78 gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev);
79 79
80 gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n", 80 nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n",
81 g->params.gpu_arch, 81 g->params.gpu_arch,
82 g->params.gpu_impl, 82 g->params.gpu_impl,
83 g->params.gpu_rev); 83 g->params.gpu_rev);
@@ -89,7 +89,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
89{ 89{
90 int ret = 0; 90 int ret = 0;
91 91
92 gk20a_dbg_fn(""); 92 nvgpu_log_fn(g, " ");
93 93
94 if (g->ops.fifo.channel_suspend) { 94 if (g->ops.fifo.channel_suspend) {
95 ret = g->ops.fifo.channel_suspend(g); 95 ret = g->ops.fifo.channel_suspend(g);
@@ -126,7 +126,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
126 u32 nr_pages; 126 u32 nr_pages;
127#endif 127#endif
128 128
129 gk20a_dbg_fn(""); 129 nvgpu_log_fn(g, " ");
130 130
131 if (g->power_on) 131 if (g->power_on)
132 return 0; 132 return 0;
@@ -434,7 +434,7 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)
434 struct gk20a *g = container_of(refcount, 434 struct gk20a *g = container_of(refcount,
435 struct gk20a, refcount); 435 struct gk20a, refcount);
436 436
437 gk20a_dbg(gpu_dbg_shutdown, "Freeing GK20A struct!"); 437 nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!");
438 438
439 gk20a_ce_destroy(g); 439 gk20a_ce_destroy(g);
440 440
@@ -465,7 +465,7 @@ struct gk20a * __must_check gk20a_get(struct gk20a *g)
465 */ 465 */
466 success = nvgpu_ref_get_unless_zero(&g->refcount); 466 success = nvgpu_ref_get_unless_zero(&g->refcount);
467 467
468 gk20a_dbg(gpu_dbg_shutdown, "GET: refs currently %d %s", 468 nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s",
469 nvgpu_atomic_read(&g->refcount.refcount), 469 nvgpu_atomic_read(&g->refcount.refcount),
470 success ? "" : "(FAILED)"); 470 success ? "" : "(FAILED)");
471 471
@@ -490,7 +490,7 @@ void gk20a_put(struct gk20a *g)
490 * ... PUT: refs currently 2 490 * ... PUT: refs currently 2
491 * ... Freeing GK20A struct! 491 * ... Freeing GK20A struct!
492 */ 492 */
493 gk20a_dbg(gpu_dbg_shutdown, "PUT: refs currently %d", 493 nvgpu_log(g, gpu_dbg_shutdown, "PUT: refs currently %d",
494 nvgpu_atomic_read(&g->refcount.refcount)); 494 nvgpu_atomic_read(&g->refcount.refcount));
495 495
496 nvgpu_ref_put(&g->refcount, gk20a_free_cb); 496 nvgpu_ref_put(&g->refcount, gk20a_free_cb);
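
The gk20a.c changes also show the convention for bare function-entry markers: gk20a_dbg_fn("") becomes nvgpu_log_fn(g, " "), the empty format string replaced by a single space, presumably so the macro is always handed a non-empty format; several of the fifo paths above pair it with a "done" marker at exit. A small sketch of that entry/exit pattern; poweroff_example() is an invented name and its body is illustrative only:

static int poweroff_example(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");		/* entry marker */

	if (!g->power_on)
		return 0;

	/* ... teardown elided ... */

	nvgpu_log_fn(g, "done");	/* exit marker */
	return 0;
}
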
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
index 7120059c..f829cb3a 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
@@ -1,9 +1,7 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/gr_ctx_gk20a.c
3 *
4 * GK20A Graphics Context 2 * GK20A Graphics Context
5 * 3 *
6 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
7 * 5 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -79,7 +77,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
79 u32 i, major_v = ~0, major_v_hw, netlist_num; 77 u32 i, major_v = ~0, major_v_hw, netlist_num;
80 int net, max, err = -ENOENT; 78 int net, max, err = -ENOENT;
81 79
82 gk20a_dbg_fn(""); 80 nvgpu_log_fn(g, " ");
83 81
84 if (g->ops.gr_ctx.is_fw_defined()) { 82 if (g->ops.gr_ctx.is_fw_defined()) {
85 net = NETLIST_FINAL; 83 net = NETLIST_FINAL;
@@ -114,63 +112,63 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
114 112
115 switch (netlist->regions[i].region_id) { 113 switch (netlist->regions[i].region_id) {
116 case NETLIST_REGIONID_FECS_UCODE_DATA: 114 case NETLIST_REGIONID_FECS_UCODE_DATA:
117 gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA"); 115 nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA");
118 err = gr_gk20a_alloc_load_netlist_u32(g, 116 err = gr_gk20a_alloc_load_netlist_u32(g,
119 src, size, &g->gr.ctx_vars.ucode.fecs.data); 117 src, size, &g->gr.ctx_vars.ucode.fecs.data);
120 if (err) 118 if (err)
121 goto clean_up; 119 goto clean_up;
122 break; 120 break;
123 case NETLIST_REGIONID_FECS_UCODE_INST: 121 case NETLIST_REGIONID_FECS_UCODE_INST:
124 gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST"); 122 nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST");
125 err = gr_gk20a_alloc_load_netlist_u32(g, 123 err = gr_gk20a_alloc_load_netlist_u32(g,
126 src, size, &g->gr.ctx_vars.ucode.fecs.inst); 124 src, size, &g->gr.ctx_vars.ucode.fecs.inst);
127 if (err) 125 if (err)
128 goto clean_up; 126 goto clean_up;
129 break; 127 break;
130 case NETLIST_REGIONID_GPCCS_UCODE_DATA: 128 case NETLIST_REGIONID_GPCCS_UCODE_DATA:
131 gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA"); 129 nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA");
132 err = gr_gk20a_alloc_load_netlist_u32(g, 130 err = gr_gk20a_alloc_load_netlist_u32(g,
133 src, size, &g->gr.ctx_vars.ucode.gpccs.data); 131 src, size, &g->gr.ctx_vars.ucode.gpccs.data);
134 if (err) 132 if (err)
135 goto clean_up; 133 goto clean_up;
136 break; 134 break;
137 case NETLIST_REGIONID_GPCCS_UCODE_INST: 135 case NETLIST_REGIONID_GPCCS_UCODE_INST:
138 gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST"); 136 nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST");
139 err = gr_gk20a_alloc_load_netlist_u32(g, 137 err = gr_gk20a_alloc_load_netlist_u32(g,
140 src, size, &g->gr.ctx_vars.ucode.gpccs.inst); 138 src, size, &g->gr.ctx_vars.ucode.gpccs.inst);
141 if (err) 139 if (err)
142 goto clean_up; 140 goto clean_up;
143 break; 141 break;
144 case NETLIST_REGIONID_SW_BUNDLE_INIT: 142 case NETLIST_REGIONID_SW_BUNDLE_INIT:
145 gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT"); 143 nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT");
146 err = gr_gk20a_alloc_load_netlist_av(g, 144 err = gr_gk20a_alloc_load_netlist_av(g,
147 src, size, &g->gr.ctx_vars.sw_bundle_init); 145 src, size, &g->gr.ctx_vars.sw_bundle_init);
148 if (err) 146 if (err)
149 goto clean_up; 147 goto clean_up;
150 break; 148 break;
151 case NETLIST_REGIONID_SW_METHOD_INIT: 149 case NETLIST_REGIONID_SW_METHOD_INIT:
152 gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT"); 150 nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT");
153 err = gr_gk20a_alloc_load_netlist_av(g, 151 err = gr_gk20a_alloc_load_netlist_av(g,
154 src, size, &g->gr.ctx_vars.sw_method_init); 152 src, size, &g->gr.ctx_vars.sw_method_init);
155 if (err) 153 if (err)
156 goto clean_up; 154 goto clean_up;
157 break; 155 break;
158 case NETLIST_REGIONID_SW_CTX_LOAD: 156 case NETLIST_REGIONID_SW_CTX_LOAD:
159 gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD"); 157 nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD");
160 err = gr_gk20a_alloc_load_netlist_aiv(g, 158 err = gr_gk20a_alloc_load_netlist_aiv(g,
161 src, size, &g->gr.ctx_vars.sw_ctx_load); 159 src, size, &g->gr.ctx_vars.sw_ctx_load);
162 if (err) 160 if (err)
163 goto clean_up; 161 goto clean_up;
164 break; 162 break;
165 case NETLIST_REGIONID_SW_NON_CTX_LOAD: 163 case NETLIST_REGIONID_SW_NON_CTX_LOAD:
166 gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD"); 164 nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD");
167 err = gr_gk20a_alloc_load_netlist_av(g, 165 err = gr_gk20a_alloc_load_netlist_av(g,
168 src, size, &g->gr.ctx_vars.sw_non_ctx_load); 166 src, size, &g->gr.ctx_vars.sw_non_ctx_load);
169 if (err) 167 if (err)
170 goto clean_up; 168 goto clean_up;
171 break; 169 break;
172 case NETLIST_REGIONID_SWVEIDBUNDLEINIT: 170 case NETLIST_REGIONID_SWVEIDBUNDLEINIT:
173 gk20a_dbg_info( 171 nvgpu_log_info(g,
174 "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT"); 172 "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT");
175 err = gr_gk20a_alloc_load_netlist_av(g, 173 err = gr_gk20a_alloc_load_netlist_av(g,
176 src, size, 174 src, size,
@@ -179,56 +177,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
179 goto clean_up; 177 goto clean_up;
180 break; 178 break;
181 case NETLIST_REGIONID_CTXREG_SYS: 179 case NETLIST_REGIONID_CTXREG_SYS:
182 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS"); 180 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS");
183 err = gr_gk20a_alloc_load_netlist_aiv(g, 181 err = gr_gk20a_alloc_load_netlist_aiv(g,
184 src, size, &g->gr.ctx_vars.ctxsw_regs.sys); 182 src, size, &g->gr.ctx_vars.ctxsw_regs.sys);
185 if (err) 183 if (err)
186 goto clean_up; 184 goto clean_up;
187 break; 185 break;
188 case NETLIST_REGIONID_CTXREG_GPC: 186 case NETLIST_REGIONID_CTXREG_GPC:
189 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC"); 187 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC");
190 err = gr_gk20a_alloc_load_netlist_aiv(g, 188 err = gr_gk20a_alloc_load_netlist_aiv(g,
191 src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); 189 src, size, &g->gr.ctx_vars.ctxsw_regs.gpc);
192 if (err) 190 if (err)
193 goto clean_up; 191 goto clean_up;
194 break; 192 break;
195 case NETLIST_REGIONID_CTXREG_TPC: 193 case NETLIST_REGIONID_CTXREG_TPC:
196 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC"); 194 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC");
197 err = gr_gk20a_alloc_load_netlist_aiv(g, 195 err = gr_gk20a_alloc_load_netlist_aiv(g,
198 src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); 196 src, size, &g->gr.ctx_vars.ctxsw_regs.tpc);
199 if (err) 197 if (err)
200 goto clean_up; 198 goto clean_up;
201 break; 199 break;
202 case NETLIST_REGIONID_CTXREG_ZCULL_GPC: 200 case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
203 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC"); 201 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC");
204 err = gr_gk20a_alloc_load_netlist_aiv(g, 202 err = gr_gk20a_alloc_load_netlist_aiv(g,
205 src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); 203 src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
206 if (err) 204 if (err)
207 goto clean_up; 205 goto clean_up;
208 break; 206 break;
209 case NETLIST_REGIONID_CTXREG_PPC: 207 case NETLIST_REGIONID_CTXREG_PPC:
210 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC"); 208 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC");
211 err = gr_gk20a_alloc_load_netlist_aiv(g, 209 err = gr_gk20a_alloc_load_netlist_aiv(g,
212 src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); 210 src, size, &g->gr.ctx_vars.ctxsw_regs.ppc);
213 if (err) 211 if (err)
214 goto clean_up; 212 goto clean_up;
215 break; 213 break;
216 case NETLIST_REGIONID_CTXREG_PM_SYS: 214 case NETLIST_REGIONID_CTXREG_PM_SYS:
217 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS"); 215 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS");
218 err = gr_gk20a_alloc_load_netlist_aiv(g, 216 err = gr_gk20a_alloc_load_netlist_aiv(g,
219 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); 217 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
220 if (err) 218 if (err)
221 goto clean_up; 219 goto clean_up;
222 break; 220 break;
223 case NETLIST_REGIONID_CTXREG_PM_GPC: 221 case NETLIST_REGIONID_CTXREG_PM_GPC:
224 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC"); 222 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC");
225 err = gr_gk20a_alloc_load_netlist_aiv(g, 223 err = gr_gk20a_alloc_load_netlist_aiv(g,
226 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); 224 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
227 if (err) 225 if (err)
228 goto clean_up; 226 goto clean_up;
229 break; 227 break;
230 case NETLIST_REGIONID_CTXREG_PM_TPC: 228 case NETLIST_REGIONID_CTXREG_PM_TPC:
231 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC"); 229 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC");
232 err = gr_gk20a_alloc_load_netlist_aiv(g, 230 err = gr_gk20a_alloc_load_netlist_aiv(g,
233 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); 231 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
234 if (err) 232 if (err)
@@ -236,110 +234,110 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
236 break; 234 break;
237 case NETLIST_REGIONID_BUFFER_SIZE: 235 case NETLIST_REGIONID_BUFFER_SIZE:
238 g->gr.ctx_vars.buffer_size = *src; 236 g->gr.ctx_vars.buffer_size = *src;
239 gk20a_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d", 237 nvgpu_log_info(g, "NETLIST_REGIONID_BUFFER_SIZE : %d",
240 g->gr.ctx_vars.buffer_size); 238 g->gr.ctx_vars.buffer_size);
241 break; 239 break;
242 case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX: 240 case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX:
243 g->gr.ctx_vars.regs_base_index = *src; 241 g->gr.ctx_vars.regs_base_index = *src;
244 gk20a_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", 242 nvgpu_log_info(g, "NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u",
245 g->gr.ctx_vars.regs_base_index); 243 g->gr.ctx_vars.regs_base_index);
246 break; 244 break;
247 case NETLIST_REGIONID_MAJORV: 245 case NETLIST_REGIONID_MAJORV:
248 major_v = *src; 246 major_v = *src;
249 gk20a_dbg_info("NETLIST_REGIONID_MAJORV : %d", 247 nvgpu_log_info(g, "NETLIST_REGIONID_MAJORV : %d",
250 major_v); 248 major_v);
251 break; 249 break;
252 case NETLIST_REGIONID_NETLIST_NUM: 250 case NETLIST_REGIONID_NETLIST_NUM:
253 netlist_num = *src; 251 netlist_num = *src;
254 gk20a_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d", 252 nvgpu_log_info(g, "NETLIST_REGIONID_NETLIST_NUM : %d",
255 netlist_num); 253 netlist_num);
256 break; 254 break;
257 case NETLIST_REGIONID_CTXREG_PMPPC: 255 case NETLIST_REGIONID_CTXREG_PMPPC:
258 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC"); 256 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC");
259 err = gr_gk20a_alloc_load_netlist_aiv(g, 257 err = gr_gk20a_alloc_load_netlist_aiv(g,
260 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); 258 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc);
261 if (err) 259 if (err)
262 goto clean_up; 260 goto clean_up;
263 break; 261 break;
264 case NETLIST_REGIONID_NVPERF_CTXREG_SYS: 262 case NETLIST_REGIONID_NVPERF_CTXREG_SYS:
265 gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_SYS"); 263 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS");
266 err = gr_gk20a_alloc_load_netlist_aiv(g, 264 err = gr_gk20a_alloc_load_netlist_aiv(g,
267 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys); 265 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys);
268 if (err) 266 if (err)
269 goto clean_up; 267 goto clean_up;
270 break; 268 break;
271 case NETLIST_REGIONID_NVPERF_FBP_CTXREGS: 269 case NETLIST_REGIONID_NVPERF_FBP_CTXREGS:
272 gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); 270 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS");
273 err = gr_gk20a_alloc_load_netlist_aiv(g, 271 err = gr_gk20a_alloc_load_netlist_aiv(g,
274 src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); 272 src, size, &g->gr.ctx_vars.ctxsw_regs.fbp);
275 if (err) 273 if (err)
276 goto clean_up; 274 goto clean_up;
277 break; 275 break;
278 case NETLIST_REGIONID_NVPERF_CTXREG_GPC: 276 case NETLIST_REGIONID_NVPERF_CTXREG_GPC:
279 gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_GPC"); 277 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC");
280 err = gr_gk20a_alloc_load_netlist_aiv(g, 278 err = gr_gk20a_alloc_load_netlist_aiv(g,
281 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); 279 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc);
282 if (err) 280 if (err)
283 goto clean_up; 281 goto clean_up;
284 break; 282 break;
285 case NETLIST_REGIONID_NVPERF_FBP_ROUTER: 283 case NETLIST_REGIONID_NVPERF_FBP_ROUTER:
286 gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_ROUTER"); 284 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER");
287 err = gr_gk20a_alloc_load_netlist_aiv(g, 285 err = gr_gk20a_alloc_load_netlist_aiv(g,
288 src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); 286 src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router);
289 if (err) 287 if (err)
290 goto clean_up; 288 goto clean_up;
291 break; 289 break;
292 case NETLIST_REGIONID_NVPERF_GPC_ROUTER: 290 case NETLIST_REGIONID_NVPERF_GPC_ROUTER:
293 gk20a_dbg_info("NETLIST_REGIONID_NVPERF_GPC_ROUTER"); 291 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER");
294 err = gr_gk20a_alloc_load_netlist_aiv(g, 292 err = gr_gk20a_alloc_load_netlist_aiv(g,
295 src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); 293 src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router);
296 if (err) 294 if (err)
297 goto clean_up; 295 goto clean_up;
298 break; 296 break;
299 case NETLIST_REGIONID_CTXREG_PMLTC: 297 case NETLIST_REGIONID_CTXREG_PMLTC:
300 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMLTC"); 298 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC");
301 err = gr_gk20a_alloc_load_netlist_aiv(g, 299 err = gr_gk20a_alloc_load_netlist_aiv(g,
302 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); 300 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc);
303 if (err) 301 if (err)
304 goto clean_up; 302 goto clean_up;
305 break; 303 break;
306 case NETLIST_REGIONID_CTXREG_PMFBPA: 304 case NETLIST_REGIONID_CTXREG_PMFBPA:
307 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMFBPA"); 305 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA");
308 err = gr_gk20a_alloc_load_netlist_aiv(g, 306 err = gr_gk20a_alloc_load_netlist_aiv(g,
309 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); 307 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa);
310 if (err) 308 if (err)
311 goto clean_up; 309 goto clean_up;
312 break; 310 break;
313 case NETLIST_REGIONID_NVPERF_SYS_ROUTER: 311 case NETLIST_REGIONID_NVPERF_SYS_ROUTER:
314 gk20a_dbg_info("NETLIST_REGIONID_NVPERF_SYS_ROUTER"); 312 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER");
315 err = gr_gk20a_alloc_load_netlist_aiv(g, 313 err = gr_gk20a_alloc_load_netlist_aiv(g,
316 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); 314 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router);
317 if (err) 315 if (err)
318 goto clean_up; 316 goto clean_up;
319 break; 317 break;
320 case NETLIST_REGIONID_NVPERF_PMA: 318 case NETLIST_REGIONID_NVPERF_PMA:
321 gk20a_dbg_info("NETLIST_REGIONID_NVPERF_PMA"); 319 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA");
322 err = gr_gk20a_alloc_load_netlist_aiv(g, 320 err = gr_gk20a_alloc_load_netlist_aiv(g,
323 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); 321 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma);
324 if (err) 322 if (err)
325 goto clean_up; 323 goto clean_up;
326 break; 324 break;
327 case NETLIST_REGIONID_CTXREG_PMROP: 325 case NETLIST_REGIONID_CTXREG_PMROP:
328 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMROP"); 326 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP");
329 err = gr_gk20a_alloc_load_netlist_aiv(g, 327 err = gr_gk20a_alloc_load_netlist_aiv(g,
330 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); 328 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop);
331 if (err) 329 if (err)
332 goto clean_up; 330 goto clean_up;
333 break; 331 break;
334 case NETLIST_REGIONID_CTXREG_PMUCGPC: 332 case NETLIST_REGIONID_CTXREG_PMUCGPC:
335 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMUCGPC"); 333 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC");
336 err = gr_gk20a_alloc_load_netlist_aiv(g, 334 err = gr_gk20a_alloc_load_netlist_aiv(g,
337 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); 335 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc);
338 if (err) 336 if (err)
339 goto clean_up; 337 goto clean_up;
340 break; 338 break;
341 case NETLIST_REGIONID_CTXREG_ETPC: 339 case NETLIST_REGIONID_CTXREG_ETPC:
342 gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ETPC"); 340 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC");
343 err = gr_gk20a_alloc_load_netlist_aiv(g, 341 err = gr_gk20a_alloc_load_netlist_aiv(g,
344 src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); 342 src, size, &g->gr.ctx_vars.ctxsw_regs.etpc);
345 if (err) 343 if (err)
@@ -347,13 +345,13 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
347 break; 345 break;
348 346
349 default: 347 default:
350 gk20a_dbg_info("unrecognized region %d skipped", i); 348 nvgpu_log_info(g, "unrecognized region %d skipped", i);
351 break; 349 break;
352 } 350 }
353 } 351 }
354 352
355 if (net != NETLIST_FINAL && major_v != major_v_hw) { 353 if (net != NETLIST_FINAL && major_v != major_v_hw) {
356 gk20a_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x", 354 nvgpu_log_info(g, "skip %s: major_v 0x%08x doesn't match hw 0x%08x",
357 name, major_v, major_v_hw); 355 name, major_v, major_v_hw);
358 goto clean_up; 356 goto clean_up;
359 } 357 }
@@ -362,7 +360,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
362 g->gr.netlist = net; 360 g->gr.netlist = net;
363 361
364 nvgpu_release_firmware(g, netlist_fw); 362 nvgpu_release_firmware(g, netlist_fw);
365 gk20a_dbg_fn("done"); 363 nvgpu_log_fn(g, "done");
366 goto done; 364 goto done;
367 365
368clean_up: 366clean_up:
@@ -403,7 +401,7 @@ clean_up:
403 401
404done: 402done:
405 if (g->gr.ctx_vars.valid) { 403 if (g->gr.ctx_vars.valid) {
406 gk20a_dbg_info("netlist image %s loaded", name); 404 nvgpu_log_info(g, "netlist image %s loaded", name);
407 return 0; 405 return 0;
408 } else { 406 } else {
409 nvgpu_err(g, "failed to load netlist image!!"); 407 nvgpu_err(g, "failed to load netlist image!!");
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
index 9674e2d6..01c7ed3c 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
@@ -1,9 +1,7 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/gr_ctx_sim_gk20a.c
3 *
4 * GK20A Graphics Context for Simulation 2 * GK20A Graphics Context for Simulation
5 * 3 *
6 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
7 * 5 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -35,7 +33,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
35 int err = 0; 33 int err = 0;
36 u32 i, temp; 34 u32 i, temp;
37 35
38 gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, 36 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info,
39 "querying grctx info from chiplib"); 37 "querying grctx info from chiplib");
40 38
41 g->gr.ctx_vars.dynamic = true; 39 g->gr.ctx_vars.dynamic = true;
@@ -250,7 +248,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
250 i, &l[i].value); 248 i, &l[i].value);
251 } 249 }
252 250
253 gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); 251 nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC");
254 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) { 252 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) {
255 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l; 253 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l;
256 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR", 254 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR",
@@ -259,7 +257,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
259 i, &l[i].index); 257 i, &l[i].index);
260 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE", 258 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE",
261 i, &l[i].value); 259 i, &l[i].value);
262 gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, 260 nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn,
263 "addr:0x%#08x index:0x%08x value:0x%08x", 261 "addr:0x%#08x index:0x%08x value:0x%08x",
264 l[i].addr, l[i].index, l[i].value); 262 l[i].addr, l[i].index, l[i].value);
265 } 263 }
@@ -269,7 +267,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
269 g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0, 267 g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0,
270 &g->gr.ctx_vars.regs_base_index); 268 &g->gr.ctx_vars.regs_base_index);
271 269
272 gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); 270 nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib");
273 return 0; 271 return 0;
274fail: 272fail:
275 nvgpu_err(g, "failed querying grctx info from chiplib"); 273 nvgpu_err(g, "failed querying grctx info from chiplib");
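
The hunks above and below all follow one pattern: logging calls that relied on an implicit global context (gk20a_dbg, gk20a_dbg_info, gk20a_dbg_fn) become calls that take the struct gk20a pointer as their first argument (nvgpu_log, nvgpu_log_info, nvgpu_log_fn). A minimal, self-contained sketch of such a device-scoped logging helper follows; the helper name nvgpu_log_sketch, the struct fields, and the flag values are illustrative assumptions rather than the real nvgpu implementation, and only the call shape (the gk20a pointer first, then a gpu_dbg_* mask, then a printf-style format string) is taken from the patch itself.

#include <stdarg.h>
#include <stdio.h>

struct gk20a {
	const char *name;       /* illustrative field, not the real struct layout */
	unsigned int log_mask;  /* illustrative per-device debug mask */
};

#define gpu_dbg_info (1u << 0)  /* illustrative flag values */
#define gpu_dbg_fn   (1u << 1)

static void nvgpu_log_sketch(struct gk20a *g, unsigned int mask,
			     const char *fmt, ...)
{
	va_list args;

	/* Passing g explicitly allows filtering per device instance. */
	if ((g->log_mask & mask) == 0u)
		return;

	fprintf(stderr, "%s: ", g->name);
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
}

int main(void)
{
	struct gk20a g = { .name = "gk20a", .log_mask = gpu_dbg_info };

	/* printed: the info mask is enabled for this device */
	nvgpu_log_sketch(&g, gpu_dbg_info, "netlist image %s loaded", "fw");
	/* suppressed: gpu_dbg_fn is not set in g.log_mask */
	nvgpu_log_sketch(&g, gpu_dbg_fn, "done");
	return 0;
}

With the device pointer in hand, the mask check can be made against a per-GPU setting instead of a single module-wide debug level, which is the practical benefit of piping g into every log call in this patch.
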
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 86111321..00f26650 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -122,7 +122,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
122 122
123 *ctx_id = nvgpu_mem_rd(g, mem, 123 *ctx_id = nvgpu_mem_rd(g, mem,
124 ctxsw_prog_main_image_context_id_o()); 124 ctxsw_prog_main_image_context_id_o());
125 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); 125 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id);
126 126
127 nvgpu_mem_end(g, mem); 127 nvgpu_mem_end(g, mem);
128 128
@@ -220,7 +220,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
220 const u32 *ucode_u32_data; 220 const u32 *ucode_u32_data;
221 u32 checksum; 221 u32 checksum;
222 222
223 gk20a_dbg_fn(""); 223 nvgpu_log_fn(g, " ");
224 224
225 gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) | 225 gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) |
226 gr_gpccs_dmemc_blk_f(0) | 226 gr_gpccs_dmemc_blk_f(0) |
@@ -245,7 +245,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
245 gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); 245 gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]);
246 checksum += ucode_u32_data[i]; 246 checksum += ucode_u32_data[i];
247 } 247 }
248 gk20a_dbg_fn("done"); 248 nvgpu_log_fn(g, "done");
249} 249}
250 250
251static void gr_gk20a_load_falcon_imem(struct gk20a *g) 251static void gr_gk20a_load_falcon_imem(struct gk20a *g)
@@ -255,7 +255,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
255 u32 tag, i, pad_start, pad_end; 255 u32 tag, i, pad_start, pad_end;
256 u32 checksum; 256 u32 checksum;
257 257
258 gk20a_dbg_fn(""); 258 nvgpu_log_fn(g, " ");
259 259
260 cfg = gk20a_readl(g, gr_fecs_cfg_r()); 260 cfg = gk20a_readl(g, gr_fecs_cfg_r());
261 fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); 261 fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg);
@@ -343,7 +343,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
343 bool ctx_status_invalid; 343 bool ctx_status_invalid;
344 struct nvgpu_timeout timeout; 344 struct nvgpu_timeout timeout;
345 345
346 gk20a_dbg_fn(""); 346 nvgpu_log_fn(g, " ");
347 347
348 gr_engine_id = gk20a_fifo_get_gr_engine_id(g); 348 gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
349 349
@@ -372,7 +372,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
372 372
373 if (!gr_enabled || ctx_status_invalid 373 if (!gr_enabled || ctx_status_invalid
374 || (!gr_busy && !ctxsw_active)) { 374 || (!gr_busy && !ctxsw_active)) {
375 gk20a_dbg_fn("done"); 375 nvgpu_log_fn(g, "done");
376 return 0; 376 return 0;
377 } 377 }
378 378
@@ -398,7 +398,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
398 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) 398 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
399 return 0; 399 return 0;
400 400
401 gk20a_dbg_fn(""); 401 nvgpu_log_fn(g, " ");
402 402
403 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); 403 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
404 404
@@ -406,7 +406,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
406 val = gk20a_readl(g, gr_status_r()); 406 val = gk20a_readl(g, gr_status_r());
407 407
408 if (!gr_status_fe_method_lower_v(val)) { 408 if (!gr_status_fe_method_lower_v(val)) {
409 gk20a_dbg_fn("done"); 409 nvgpu_log_fn(g, "done");
410 return 0; 410 return 0;
411 } 411 }
412 412
@@ -430,7 +430,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
430 u32 check = WAIT_UCODE_LOOP; 430 u32 check = WAIT_UCODE_LOOP;
431 u32 reg; 431 u32 reg;
432 432
433 gk20a_dbg_fn(""); 433 nvgpu_log_fn(g, " ");
434 434
435 if (sleepduringwait) 435 if (sleepduringwait)
436 delay = GR_IDLE_CHECK_DEFAULT; 436 delay = GR_IDLE_CHECK_DEFAULT;
@@ -532,7 +532,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
532 return -1; 532 return -1;
533 } 533 }
534 534
535 gk20a_dbg_fn("done"); 535 nvgpu_log_fn(g, "done");
536 return 0; 536 return 0;
537} 537}
538 538
@@ -618,7 +618,7 @@ int gr_gk20a_disable_ctxsw(struct gk20a *g)
618{ 618{
619 int err = 0; 619 int err = 0;
620 620
621 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 621 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
622 622
623 nvgpu_mutex_acquire(&g->ctxsw_disable_lock); 623 nvgpu_mutex_acquire(&g->ctxsw_disable_lock);
624 g->ctxsw_disable_count++; 624 g->ctxsw_disable_count++;
@@ -635,7 +635,7 @@ int gr_gk20a_enable_ctxsw(struct gk20a *g)
635{ 635{
636 int err = 0; 636 int err = 0;
637 637
638 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 638 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
639 639
640 nvgpu_mutex_acquire(&g->ctxsw_disable_lock); 640 nvgpu_mutex_acquire(&g->ctxsw_disable_lock);
641 g->ctxsw_disable_count--; 641 g->ctxsw_disable_count--;
@@ -669,7 +669,7 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
669 u32 addr_lo; 669 u32 addr_lo;
670 u32 addr_hi; 670 u32 addr_hi;
671 671
672 gk20a_dbg_fn(""); 672 nvgpu_log_fn(c->g, " ");
673 673
674 addr_lo = u64_lo32(gpu_va) >> 12; 674 addr_lo = u64_lo32(gpu_va) >> 12;
675 addr_hi = u64_hi32(gpu_va); 675 addr_hi = u64_hi32(gpu_va);
@@ -775,7 +775,7 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
775 u32 data = fecs_current_ctx_data(g, &c->inst_block); 775 u32 data = fecs_current_ctx_data(g, &c->inst_block);
776 u32 ret; 776 u32 ret;
777 777
778 gk20a_dbg_info("bind channel %d inst ptr 0x%08x", 778 nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x",
779 c->chid, inst_base_ptr); 779 c->chid, inst_base_ptr);
780 780
781 ret = gr_gk20a_submit_fecs_method_op(g, 781 ret = gr_gk20a_submit_fecs_method_op(g,
@@ -823,7 +823,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
823 struct nvgpu_mem *ctxheader = &ctx->mem; 823 struct nvgpu_mem *ctxheader = &ctx->mem;
824 int ret = 0; 824 int ret = 0;
825 825
826 gk20a_dbg_fn(""); 826 nvgpu_log_fn(g, " ");
827 827
828 tsg = tsg_gk20a_from_ch(c); 828 tsg = tsg_gk20a_from_ch(c);
829 if (!tsg) 829 if (!tsg)
@@ -905,7 +905,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
905 u64 addr; 905 u64 addr;
906 u32 size; 906 u32 size;
907 907
908 gk20a_dbg_fn(""); 908 nvgpu_log_fn(g, " ");
909 909
910 tsg = tsg_gk20a_from_ch(c); 910 tsg = tsg_gk20a_from_ch(c);
911 if (!tsg) 911 if (!tsg)
@@ -931,7 +931,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
931 if (size == g->ops.gr.pagepool_default_size(g)) 931 if (size == g->ops.gr.pagepool_default_size(g))
932 size = gr_scc_pagepool_total_pages_hwmax_v(); 932 size = gr_scc_pagepool_total_pages_hwmax_v();
933 933
934 gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d", 934 nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d",
935 addr, size); 935 addr, size);
936 936
937 g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch); 937 g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch);
@@ -944,7 +944,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
944 944
945 size = gr->bundle_cb_default_size; 945 size = gr->bundle_cb_default_size;
946 946
947 gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d", 947 nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %d",
948 addr, size); 948 addr, size);
949 949
950 g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch); 950 g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);
@@ -955,7 +955,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
955 (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) << 955 (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) <<
956 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); 956 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
957 957
958 gk20a_dbg_info("attrib cb addr : 0x%016llx", addr); 958 nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr);
959 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch); 959 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch);
960 g->ops.gr.commit_global_cb_manager(g, c, patch); 960 g->ops.gr.commit_global_cb_manager(g, c, patch);
961 961
@@ -976,7 +976,7 @@ int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c)
976 u32 pe_vaf; 976 u32 pe_vaf;
977 u32 pe_vsc_vpc; 977 u32 pe_vsc_vpc;
978 978
979 gk20a_dbg_fn(""); 979 nvgpu_log_fn(g, " ");
980 980
981 gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r()); 981 gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r());
982 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); 982 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
@@ -1036,7 +1036,7 @@ int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
1036 if (!gr->map_tiles) 1036 if (!gr->map_tiles)
1037 return -1; 1037 return -1;
1038 1038
1039 gk20a_dbg_fn(""); 1039 nvgpu_log_fn(g, " ");
1040 1040
1041 gk20a_writel(g, gr_crstr_map_table_cfg_r(), 1041 gk20a_writel(g, gr_crstr_map_table_cfg_r(),
1042 gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) | 1042 gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) |
@@ -1219,7 +1219,7 @@ int gr_gk20a_init_fs_state(struct gk20a *g)
1219 u32 reg_index; 1219 u32 reg_index;
1220 int err; 1220 int err;
1221 1221
1222 gk20a_dbg_fn(""); 1222 nvgpu_log_fn(g, " ");
1223 1223
1224 if (g->ops.gr.init_sm_id_table) { 1224 if (g->ops.gr.init_sm_id_table) {
1225 err = g->ops.gr.init_sm_id_table(g); 1225 err = g->ops.gr.init_sm_id_table(g);
@@ -1302,7 +1302,7 @@ int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
1302 struct gk20a *g = c->g; 1302 struct gk20a *g = c->g;
1303 int ret; 1303 int ret;
1304 1304
1305 gk20a_dbg_fn(""); 1305 nvgpu_log_fn(g, " ");
1306 1306
1307 ret = gr_gk20a_submit_fecs_method_op(g, 1307 ret = gr_gk20a_submit_fecs_method_op(g,
1308 (struct fecs_method_op_gk20a) { 1308 (struct fecs_method_op_gk20a) {
@@ -1411,7 +1411,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
1411 struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init; 1411 struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
1412 u32 last_method_data = 0; 1412 u32 last_method_data = 0;
1413 1413
1414 gk20a_dbg_fn(""); 1414 nvgpu_log_fn(g, " ");
1415 1415
1416 tsg = tsg_gk20a_from_ch(c); 1416 tsg = tsg_gk20a_from_ch(c);
1417 if (!tsg) 1417 if (!tsg)
@@ -1647,7 +1647,7 @@ clean_up:
1647 if (err) 1647 if (err)
1648 nvgpu_err(g, "fail"); 1648 nvgpu_err(g, "fail");
1649 else 1649 else
1650 gk20a_dbg_fn("done"); 1650 nvgpu_log_fn(g, "done");
1651 1651
1652 nvgpu_mem_end(g, gold_mem); 1652 nvgpu_mem_end(g, gold_mem);
1653 nvgpu_mem_end(g, gr_mem); 1653 nvgpu_mem_end(g, gr_mem);
@@ -1666,7 +1666,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
1666 u32 data; 1666 u32 data;
1667 int ret; 1667 int ret;
1668 1668
1669 gk20a_dbg_fn(""); 1669 nvgpu_log_fn(g, " ");
1670 1670
1671 tsg = tsg_gk20a_from_ch(c); 1671 tsg = tsg_gk20a_from_ch(c);
1672 if (!tsg) 1672 if (!tsg)
@@ -1732,7 +1732,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1732 struct nvgpu_mem *ctxheader = &ctx->mem; 1732 struct nvgpu_mem *ctxheader = &ctx->mem;
1733 int ret; 1733 int ret;
1734 1734
1735 gk20a_dbg_fn(""); 1735 nvgpu_log_fn(g, " ");
1736 1736
1737 tsg = tsg_gk20a_from_ch(c); 1737 tsg = tsg_gk20a_from_ch(c);
1738 if (!tsg) 1738 if (!tsg)
@@ -1884,7 +1884,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
1884 int ret = 0; 1884 int ret = 0;
1885 struct nvgpu_mem *mem; 1885 struct nvgpu_mem *mem;
1886 1886
1887 gk20a_dbg_fn(""); 1887 nvgpu_log_fn(g, " ");
1888 1888
1889 tsg = tsg_gk20a_from_ch(c); 1889 tsg = tsg_gk20a_from_ch(c);
1890 if (!tsg) 1890 if (!tsg)
@@ -1991,7 +1991,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
1991 1991
1992static void gr_gk20a_start_falcon_ucode(struct gk20a *g) 1992static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
1993{ 1993{
1994 gk20a_dbg_fn(""); 1994 nvgpu_log_fn(g, " ");
1995 1995
1996 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), 1996 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0),
1997 gr_fecs_ctxsw_mailbox_clear_value_f(~0)); 1997 gr_fecs_ctxsw_mailbox_clear_value_f(~0));
@@ -2002,7 +2002,7 @@ static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
2002 gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1)); 2002 gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1));
2003 gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1)); 2003 gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1));
2004 2004
2005 gk20a_dbg_fn("done"); 2005 nvgpu_log_fn(g, "done");
2006} 2006}
2007 2007
2008static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) 2008static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
@@ -2392,7 +2392,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
2392{ 2392{
2393 int err; 2393 int err;
2394 2394
2395 gk20a_dbg_fn(""); 2395 nvgpu_log_fn(g, " ");
2396 2396
2397 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { 2397 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
2398 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), 2398 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
@@ -2419,7 +2419,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
2419 gr_gk20a_load_falcon_with_bootloader(g); 2419 gr_gk20a_load_falcon_with_bootloader(g);
2420 g->gr.skip_ucode_init = true; 2420 g->gr.skip_ucode_init = true;
2421 } 2421 }
2422 gk20a_dbg_fn("done"); 2422 nvgpu_log_fn(g, "done");
2423 return 0; 2423 return 0;
2424} 2424}
2425 2425
@@ -2427,7 +2427,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2427{ 2427{
2428 u32 ret; 2428 u32 ret;
2429 2429
2430 gk20a_dbg_fn(""); 2430 nvgpu_log_fn(g, " ");
2431 2431
2432 ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL, 2432 ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL,
2433 GR_IS_UCODE_OP_EQUAL, 2433 GR_IS_UCODE_OP_EQUAL,
@@ -2448,7 +2448,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2448 gk20a_writel(g, gr_fecs_method_push_r(), 2448 gk20a_writel(g, gr_fecs_method_push_r(),
2449 gr_fecs_method_push_adr_set_watchdog_timeout_f()); 2449 gr_fecs_method_push_adr_set_watchdog_timeout_f());
2450 2450
2451 gk20a_dbg_fn("done"); 2451 nvgpu_log_fn(g, "done");
2452 return 0; 2452 return 0;
2453} 2453}
2454 2454
@@ -2463,7 +2463,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2463 .cond.fail = GR_IS_UCODE_OP_SKIP, 2463 .cond.fail = GR_IS_UCODE_OP_SKIP,
2464 }; 2464 };
2465 2465
2466 gk20a_dbg_fn(""); 2466 nvgpu_log_fn(g, " ");
2467 /* query ctxsw image sizes, if golden context is not created */ 2467 /* query ctxsw image sizes, if golden context is not created */
2468 if (!g->gr.ctx_vars.golden_image_initialized) { 2468 if (!g->gr.ctx_vars.golden_image_initialized) {
2469 op.method.addr = 2469 op.method.addr =
@@ -2496,7 +2496,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2496 g->gr.ctx_vars.priv_access_map_size = 512 * 1024; 2496 g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
2497 } 2497 }
2498 2498
2499 gk20a_dbg_fn("done"); 2499 nvgpu_log_fn(g, "done");
2500 return 0; 2500 return 0;
2501} 2501}
2502 2502
@@ -2543,7 +2543,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
2543 } 2543 }
2544 } 2544 }
2545 2545
2546 gk20a_dbg_fn("done"); 2546 nvgpu_log_fn(g, "done");
2547} 2547}
2548 2548
2549static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) 2549static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
@@ -2557,11 +2557,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2557 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * 2557 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
2558 gr_scc_pagepool_total_pages_byte_granularity_v(); 2558 gr_scc_pagepool_total_pages_byte_granularity_v();
2559 2559
2560 gk20a_dbg_fn(""); 2560 nvgpu_log_fn(g, " ");
2561 2561
2562 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); 2562 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
2563 2563
2564 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); 2564 nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size);
2565 2565
2566 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR], 2566 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR],
2567 cb_buffer_size); 2567 cb_buffer_size);
@@ -2576,7 +2576,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2576 goto clean_up; 2576 goto clean_up;
2577 } 2577 }
2578 2578
2579 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); 2579 nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
2580 2580
2581 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL], 2581 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL],
2582 pagepool_buffer_size); 2582 pagepool_buffer_size);
@@ -2591,7 +2591,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2591 goto clean_up; 2591 goto clean_up;
2592 } 2592 }
2593 2593
2594 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); 2594 nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
2595 2595
2596 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE], 2596 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
2597 attr_buffer_size); 2597 attr_buffer_size);
@@ -2606,7 +2606,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2606 goto clean_up; 2606 goto clean_up;
2607 } 2607 }
2608 2608
2609 gk20a_dbg_info("golden_image_size : %d", 2609 nvgpu_log_info(g, "golden_image_size : %d",
2610 gr->ctx_vars.golden_image_size); 2610 gr->ctx_vars.golden_image_size);
2611 2611
2612 err = gk20a_gr_alloc_ctx_buffer(g, 2612 err = gk20a_gr_alloc_ctx_buffer(g,
@@ -2615,7 +2615,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2615 if (err) 2615 if (err)
2616 goto clean_up; 2616 goto clean_up;
2617 2617
2618 gk20a_dbg_info("priv_access_map_size : %d", 2618 nvgpu_log_info(g, "priv_access_map_size : %d",
2619 gr->ctx_vars.priv_access_map_size); 2619 gr->ctx_vars.priv_access_map_size);
2620 2620
2621 err = gk20a_gr_alloc_ctx_buffer(g, 2621 err = gk20a_gr_alloc_ctx_buffer(g,
@@ -2625,7 +2625,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2625 if (err) 2625 if (err)
2626 goto clean_up; 2626 goto clean_up;
2627 2627
2628 gk20a_dbg_fn("done"); 2628 nvgpu_log_fn(g, "done");
2629 return 0; 2629 return 0;
2630 2630
2631 clean_up: 2631 clean_up:
@@ -2643,7 +2643,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct gk20a *g,
2643 int *g_bfr_index = gr_ctx->global_ctx_buffer_index; 2643 int *g_bfr_index = gr_ctx->global_ctx_buffer_index;
2644 u32 i; 2644 u32 i;
2645 2645
2646 gk20a_dbg_fn(""); 2646 nvgpu_log_fn(g, " ");
2647 2647
2648 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { 2648 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
2649 if (g_bfr_index[i]) { 2649 if (g_bfr_index[i]) {
@@ -2679,7 +2679,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2679 struct nvgpu_mem *mem; 2679 struct nvgpu_mem *mem;
2680 u64 gpu_va; 2680 u64 gpu_va;
2681 2681
2682 gk20a_dbg_fn(""); 2682 nvgpu_log_fn(g, " ");
2683 2683
2684 tsg = tsg_gk20a_from_ch(c); 2684 tsg = tsg_gk20a_from_ch(c);
2685 if (!tsg) 2685 if (!tsg)
@@ -2780,7 +2780,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2780 struct gr_gk20a *gr = &g->gr; 2780 struct gr_gk20a *gr = &g->gr;
2781 int err = 0; 2781 int err = 0;
2782 2782
2783 gk20a_dbg_fn(""); 2783 nvgpu_log_fn(g, " ");
2784 2784
2785 if (gr->ctx_vars.buffer_size == 0) 2785 if (gr->ctx_vars.buffer_size == 0)
2786 return 0; 2786 return 0;
@@ -2835,7 +2835,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
2835void gr_gk20a_free_gr_ctx(struct gk20a *g, 2835void gr_gk20a_free_gr_ctx(struct gk20a *g,
2836 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) 2836 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx)
2837{ 2837{
2838 gk20a_dbg_fn(""); 2838 nvgpu_log_fn(g, " ");
2839 2839
2840 if (gr_ctx->mem.gpu_va) { 2840 if (gr_ctx->mem.gpu_va) {
2841 gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx); 2841 gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx);
@@ -2881,7 +2881,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
2881 u32 alloc_size; 2881 u32 alloc_size;
2882 int err = 0; 2882 int err = 0;
2883 2883
2884 gk20a_dbg_fn(""); 2884 nvgpu_log_fn(g, " ");
2885 2885
2886 tsg = tsg_gk20a_from_ch(c); 2886 tsg = tsg_gk20a_from_ch(c);
2887 if (!tsg) 2887 if (!tsg)
@@ -2899,7 +2899,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
2899 if (err) 2899 if (err)
2900 return err; 2900 return err;
2901 2901
2902 gk20a_dbg_fn("done"); 2902 nvgpu_log_fn(g, "done");
2903 return 0; 2903 return 0;
2904} 2904}
2905 2905
@@ -2909,7 +2909,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g,
2909{ 2909{
2910 struct patch_desc *patch_ctx = &gr_ctx->patch_ctx; 2910 struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;
2911 2911
2912 gk20a_dbg_fn(""); 2912 nvgpu_log_fn(g, " ");
2913 2913
2914 if (patch_ctx->mem.gpu_va) 2914 if (patch_ctx->mem.gpu_va)
2915 nvgpu_gmmu_unmap(vm, &patch_ctx->mem, 2915 nvgpu_gmmu_unmap(vm, &patch_ctx->mem,
@@ -2925,7 +2925,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g,
2925{ 2925{
2926 struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx; 2926 struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
2927 2927
2928 gk20a_dbg_fn(""); 2928 nvgpu_log_fn(g, " ");
2929 2929
2930 if (pm_ctx->mem.gpu_va) { 2930 if (pm_ctx->mem.gpu_va) {
2931 nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va); 2931 nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va);
@@ -2942,7 +2942,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
2942 struct tsg_gk20a *tsg = NULL; 2942 struct tsg_gk20a *tsg = NULL;
2943 int err = 0; 2943 int err = 0;
2944 2944
2945 gk20a_dbg_fn(""); 2945 nvgpu_log_fn(g, " ");
2946 2946
2947 /* an address space needs to have been bound at this point.*/ 2947 /* an address space needs to have been bound at this point.*/
2948 if (!gk20a_channel_as_bound(c) && !c->vm) { 2948 if (!gk20a_channel_as_bound(c) && !c->vm) {
@@ -3047,7 +3047,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
3047 } 3047 }
3048 } 3048 }
3049 3049
3050 gk20a_dbg_fn("done"); 3050 nvgpu_log_fn(g, "done");
3051 return 0; 3051 return 0;
3052out: 3052out:
3053 /* 1. gr_ctx, patch_ctx and global ctx buffer mapping 3053 /* 1. gr_ctx, patch_ctx and global ctx buffer mapping
@@ -3062,7 +3062,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
3062{ 3062{
3063 struct gk20a *g = gr->g; 3063 struct gk20a *g = gr->g;
3064 3064
3065 gk20a_dbg_fn(""); 3065 nvgpu_log_fn(g, " ");
3066 3066
3067 gr_gk20a_free_cyclestats_snapshot_data(g); 3067 gr_gk20a_free_cyclestats_snapshot_data(g);
3068 3068
@@ -3322,35 +3322,35 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3322 sm_per_tpc * sizeof(struct sm_info)); 3322 sm_per_tpc * sizeof(struct sm_info));
3323 gr->no_of_sm = 0; 3323 gr->no_of_sm = 0;
3324 3324
3325 gk20a_dbg_info("fbps: %d", gr->num_fbps); 3325 nvgpu_log_info(g, "fbps: %d", gr->num_fbps);
3326 gk20a_dbg_info("max_gpc_count: %d", gr->max_gpc_count); 3326 nvgpu_log_info(g, "max_gpc_count: %d", gr->max_gpc_count);
3327 gk20a_dbg_info("max_fbps_count: %d", gr->max_fbps_count); 3327 nvgpu_log_info(g, "max_fbps_count: %d", gr->max_fbps_count);
3328 gk20a_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); 3328 nvgpu_log_info(g, "max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count);
3329 gk20a_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); 3329 nvgpu_log_info(g, "max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count);
3330 gk20a_dbg_info("max_tpc_count: %d", gr->max_tpc_count); 3330 nvgpu_log_info(g, "max_tpc_count: %d", gr->max_tpc_count);
3331 gk20a_dbg_info("sys_count: %d", gr->sys_count); 3331 nvgpu_log_info(g, "sys_count: %d", gr->sys_count);
3332 gk20a_dbg_info("gpc_count: %d", gr->gpc_count); 3332 nvgpu_log_info(g, "gpc_count: %d", gr->gpc_count);
3333 gk20a_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc); 3333 nvgpu_log_info(g, "pe_count_per_gpc: %d", gr->pe_count_per_gpc);
3334 gk20a_dbg_info("tpc_count: %d", gr->tpc_count); 3334 nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count);
3335 gk20a_dbg_info("ppc_count: %d", gr->ppc_count); 3335 nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count);
3336 3336
3337 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3337 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3338 gk20a_dbg_info("gpc_tpc_count[%d] : %d", 3338 nvgpu_log_info(g, "gpc_tpc_count[%d] : %d",
3339 gpc_index, gr->gpc_tpc_count[gpc_index]); 3339 gpc_index, gr->gpc_tpc_count[gpc_index]);
3340 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3340 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3341 gk20a_dbg_info("gpc_zcb_count[%d] : %d", 3341 nvgpu_log_info(g, "gpc_zcb_count[%d] : %d",
3342 gpc_index, gr->gpc_zcb_count[gpc_index]); 3342 gpc_index, gr->gpc_zcb_count[gpc_index]);
3343 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3343 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3344 gk20a_dbg_info("gpc_ppc_count[%d] : %d", 3344 nvgpu_log_info(g, "gpc_ppc_count[%d] : %d",
3345 gpc_index, gr->gpc_ppc_count[gpc_index]); 3345 gpc_index, gr->gpc_ppc_count[gpc_index]);
3346 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3346 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3347 gk20a_dbg_info("gpc_skip_mask[%d] : %d", 3347 nvgpu_log_info(g, "gpc_skip_mask[%d] : %d",
3348 gpc_index, gr->gpc_skip_mask[gpc_index]); 3348 gpc_index, gr->gpc_skip_mask[gpc_index]);
3349 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3349 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
3350 for (pes_index = 0; 3350 for (pes_index = 0;
3351 pes_index < gr->pe_count_per_gpc; 3351 pes_index < gr->pe_count_per_gpc;
3352 pes_index++) 3352 pes_index++)
3353 gk20a_dbg_info("pes_tpc_count[%d][%d] : %d", 3353 nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d",
3354 pes_index, gpc_index, 3354 pes_index, gpc_index,
3355 gr->pes_tpc_count[pes_index][gpc_index]); 3355 gr->pes_tpc_count[pes_index][gpc_index]);
3356 3356
@@ -3358,7 +3358,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3358 for (pes_index = 0; 3358 for (pes_index = 0;
3359 pes_index < gr->pe_count_per_gpc; 3359 pes_index < gr->pe_count_per_gpc;
3360 pes_index++) 3360 pes_index++)
3361 gk20a_dbg_info("pes_tpc_mask[%d][%d] : %d", 3361 nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d",
3362 pes_index, gpc_index, 3362 pes_index, gpc_index,
3363 gr->pes_tpc_mask[pes_index][gpc_index]); 3363 gr->pes_tpc_mask[pes_index][gpc_index]);
3364 3364
@@ -3367,16 +3367,16 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3367 g->ops.gr.calc_global_ctx_buffer_size(g); 3367 g->ops.gr.calc_global_ctx_buffer_size(g);
3368 gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(); 3368 gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v();
3369 3369
3370 gk20a_dbg_info("bundle_cb_default_size: %d", 3370 nvgpu_log_info(g, "bundle_cb_default_size: %d",
3371 gr->bundle_cb_default_size); 3371 gr->bundle_cb_default_size);
3372 gk20a_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); 3372 nvgpu_log_info(g, "min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth);
3373 gk20a_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); 3373 nvgpu_log_info(g, "bundle_cb_token_limit: %d", gr->bundle_cb_token_limit);
3374 gk20a_dbg_info("attrib_cb_default_size: %d", 3374 nvgpu_log_info(g, "attrib_cb_default_size: %d",
3375 gr->attrib_cb_default_size); 3375 gr->attrib_cb_default_size);
3376 gk20a_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size); 3376 nvgpu_log_info(g, "attrib_cb_size: %d", gr->attrib_cb_size);
3377 gk20a_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size); 3377 nvgpu_log_info(g, "alpha_cb_default_size: %d", gr->alpha_cb_default_size);
3378 gk20a_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size); 3378 nvgpu_log_info(g, "alpha_cb_size: %d", gr->alpha_cb_size);
3379 gk20a_dbg_info("timeslice_mode: %d", gr->timeslice_mode); 3379 nvgpu_log_info(g, "timeslice_mode: %d", gr->timeslice_mode);
3380 3380
3381 return 0; 3381 return 0;
3382 3382
@@ -3582,7 +3582,7 @@ clean_up:
3582 if (ret) 3582 if (ret)
3583 nvgpu_err(g, "fail"); 3583 nvgpu_err(g, "fail");
3584 else 3584 else
3585 gk20a_dbg_fn("done"); 3585 nvgpu_log_fn(g, "done");
3586 3586
3587 return ret; 3587 return ret;
3588} 3588}
@@ -4094,7 +4094,7 @@ clean_up:
4094int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, 4094int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4095 struct zbc_entry *zbc_val) 4095 struct zbc_entry *zbc_val)
4096{ 4096{
4097 gk20a_dbg_fn(""); 4097 nvgpu_log_fn(g, " ");
4098 4098
4099 return gr_gk20a_elpg_protected_call(g, 4099 return gr_gk20a_elpg_protected_call(g,
4100 gr_gk20a_add_zbc(g, gr, zbc_val)); 4100 gr_gk20a_add_zbc(g, gr, zbc_val));
@@ -4197,10 +4197,10 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4197{ 4197{
4198 u32 val; 4198 u32 val;
4199 4199
4200 gk20a_dbg_fn(""); 4200 nvgpu_log_fn(g, " ");
4201 4201
4202 if (zcull_num_entries >= 8) { 4202 if (zcull_num_entries >= 8) {
4203 gk20a_dbg_fn("map0"); 4203 nvgpu_log_fn(g, "map0");
4204 val = 4204 val =
4205 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f( 4205 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f(
4206 zcull_map_tiles[0]) | 4206 zcull_map_tiles[0]) |
@@ -4223,7 +4223,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4223 } 4223 }
4224 4224
4225 if (zcull_num_entries >= 16) { 4225 if (zcull_num_entries >= 16) {
4226 gk20a_dbg_fn("map1"); 4226 nvgpu_log_fn(g, "map1");
4227 val = 4227 val =
4228 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f( 4228 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f(
4229 zcull_map_tiles[8]) | 4229 zcull_map_tiles[8]) |
@@ -4246,7 +4246,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4246 } 4246 }
4247 4247
4248 if (zcull_num_entries >= 24) { 4248 if (zcull_num_entries >= 24) {
4249 gk20a_dbg_fn("map2"); 4249 nvgpu_log_fn(g, "map2");
4250 val = 4250 val =
4251 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f( 4251 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f(
4252 zcull_map_tiles[16]) | 4252 zcull_map_tiles[16]) |
@@ -4269,7 +4269,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4269 } 4269 }
4270 4270
4271 if (zcull_num_entries >= 32) { 4271 if (zcull_num_entries >= 32) {
4272 gk20a_dbg_fn("map3"); 4272 nvgpu_log_fn(g, "map3");
4273 val = 4273 val =
4274 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f( 4274 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f(
4275 zcull_map_tiles[24]) | 4275 zcull_map_tiles[24]) |
@@ -4452,7 +4452,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
4452 u32 last_method_data = 0; 4452 u32 last_method_data = 0;
4453 u32 i, err; 4453 u32 i, err;
4454 4454
4455 gk20a_dbg_fn(""); 4455 nvgpu_log_fn(g, " ");
4456 4456
4457 /* init mmu debug buffer */ 4457 /* init mmu debug buffer */
4458 addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem); 4458 addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem);
@@ -4613,13 +4613,13 @@ restore_fe_go_idle:
4613 } 4613 }
4614 4614
4615out: 4615out:
4616 gk20a_dbg_fn("done"); 4616 nvgpu_log_fn(g, "done");
4617 return err; 4617 return err;
4618} 4618}
4619 4619
4620static void gr_gk20a_load_gating_prod(struct gk20a *g) 4620static void gr_gk20a_load_gating_prod(struct gk20a *g)
4621{ 4621{
4622 gk20a_dbg_fn(""); 4622 nvgpu_log_fn(g, " ");
4623 4623
4624 /* slcg prod values */ 4624 /* slcg prod values */
4625 if (g->ops.clock_gating.slcg_bus_load_gating_prod) 4625 if (g->ops.clock_gating.slcg_bus_load_gating_prod)
@@ -4657,7 +4657,7 @@ static void gr_gk20a_load_gating_prod(struct gk20a *g)
4657 if (g->ops.clock_gating.pg_gr_load_gating_prod) 4657 if (g->ops.clock_gating.pg_gr_load_gating_prod)
4658 g->ops.clock_gating.pg_gr_load_gating_prod(g, true); 4658 g->ops.clock_gating.pg_gr_load_gating_prod(g, true);
4659 4659
4660 gk20a_dbg_fn("done"); 4660 nvgpu_log_fn(g, "done");
4661} 4661}
4662 4662
4663static int gk20a_init_gr_prepare(struct gk20a *g) 4663static int gk20a_init_gr_prepare(struct gk20a *g)
@@ -4703,7 +4703,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
4703 bool fecs_scrubbing; 4703 bool fecs_scrubbing;
4704 bool gpccs_scrubbing; 4704 bool gpccs_scrubbing;
4705 4705
4706 gk20a_dbg_fn(""); 4706 nvgpu_log_fn(g, " ");
4707 4707
4708 nvgpu_timeout_init(g, &timeout, 4708 nvgpu_timeout_init(g, &timeout,
4709 CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / 4709 CTXSW_MEM_SCRUBBING_TIMEOUT_MAX /
@@ -4719,7 +4719,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
4719 gr_gpccs_dmactl_imem_scrubbing_m()); 4719 gr_gpccs_dmactl_imem_scrubbing_m());
4720 4720
4721 if (!fecs_scrubbing && !gpccs_scrubbing) { 4721 if (!fecs_scrubbing && !gpccs_scrubbing) {
4722 gk20a_dbg_fn("done"); 4722 nvgpu_log_fn(g, "done");
4723 return 0; 4723 return 0;
4724 } 4724 }
4725 4725
@@ -4746,7 +4746,7 @@ out:
4746 if (err) 4746 if (err)
4747 nvgpu_err(g, "fail"); 4747 nvgpu_err(g, "fail");
4748 else 4748 else
4749 gk20a_dbg_fn("done"); 4749 nvgpu_log_fn(g, "done");
4750 4750
4751 return err; 4751 return err;
4752} 4752}
@@ -4756,7 +4756,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
4756 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; 4756 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load;
4757 u32 i, err = 0; 4757 u32 i, err = 0;
4758 4758
4759 gk20a_dbg_fn(""); 4759 nvgpu_log_fn(g, " ");
4760 4760
4761 /* enable interrupts */ 4761 /* enable interrupts */
4762 gk20a_writel(g, gr_intr_r(), ~0); 4762 gk20a_writel(g, gr_intr_r(), ~0);
@@ -4780,7 +4780,7 @@ out:
4780 if (err) 4780 if (err)
4781 nvgpu_err(g, "fail"); 4781 nvgpu_err(g, "fail");
4782 else 4782 else
4783 gk20a_dbg_fn("done"); 4783 nvgpu_log_fn(g, "done");
4784 4784
4785 return 0; 4785 return 0;
4786} 4786}
@@ -4810,7 +4810,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
4810 map_bit = whitelist[w] >> 2; 4810 map_bit = whitelist[w] >> 2;
4811 map_byte = map_bit >> 3; 4811 map_byte = map_bit >> 3;
4812 map_shift = map_bit & 0x7; /* i.e. 0-7 */ 4812 map_shift = map_bit & 0x7; /* i.e. 0-7 */
4813 gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d", 4813 nvgpu_log_info(g, "access map addr:0x%x byte:0x%x bit:%d",
4814 whitelist[w], map_byte, map_shift); 4814 whitelist[w], map_byte, map_shift);
4815 x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32)); 4815 x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32));
4816 x |= 1 << ( 4816 x |= 1 << (
@@ -4828,10 +4828,10 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
4828 struct gr_gk20a *gr = &g->gr; 4828 struct gr_gk20a *gr = &g->gr;
4829 int err; 4829 int err;
4830 4830
4831 gk20a_dbg_fn(""); 4831 nvgpu_log_fn(g, " ");
4832 4832
4833 if (gr->sw_ready) { 4833 if (gr->sw_ready) {
4834 gk20a_dbg_fn("skip init"); 4834 nvgpu_log_fn(g, "skip init");
4835 return 0; 4835 return 0;
4836 } 4836 }
4837 4837
@@ -4888,7 +4888,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
4888 if (g->ops.gr.create_gr_sysfs) 4888 if (g->ops.gr.create_gr_sysfs)
4889 g->ops.gr.create_gr_sysfs(g); 4889 g->ops.gr.create_gr_sysfs(g);
4890 4890
4891 gk20a_dbg_fn("done"); 4891 nvgpu_log_fn(g, "done");
4892 return 0; 4892 return 0;
4893 4893
4894clean_up: 4894clean_up:
@@ -4906,7 +4906,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
4906 4906
4907 u32 size; 4907 u32 size;
4908 4908
4909 gk20a_dbg_fn(""); 4909 nvgpu_log_fn(g, " ");
4910 4910
4911 size = 0; 4911 size = 0;
4912 4912
@@ -4947,7 +4947,7 @@ int gk20a_init_gr_support(struct gk20a *g)
4947{ 4947{
4948 u32 err; 4948 u32 err;
4949 4949
4950 gk20a_dbg_fn(""); 4950 nvgpu_log_fn(g, " ");
4951 4951
4952 /* this is required before gr_gk20a_init_ctx_state */ 4952 /* this is required before gr_gk20a_init_ctx_state */
4953 nvgpu_mutex_init(&g->gr.fecs_mutex); 4953 nvgpu_mutex_init(&g->gr.fecs_mutex);
@@ -4999,7 +4999,7 @@ void gk20a_gr_wait_initialized(struct gk20a *g)
4999 4999
5000void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data) 5000void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data)
5001{ 5001{
5002 gk20a_dbg_fn(""); 5002 nvgpu_log_fn(g, " ");
5003 5003
5004 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { 5004 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
5005 gk20a_writel(g, 5005 gk20a_writel(g,
@@ -5046,7 +5046,7 @@ int gk20a_enable_gr_hw(struct gk20a *g)
5046{ 5046{
5047 int err; 5047 int err;
5048 5048
5049 gk20a_dbg_fn(""); 5049 nvgpu_log_fn(g, " ");
5050 5050
5051 err = gk20a_init_gr_prepare(g); 5051 err = gk20a_init_gr_prepare(g);
5052 if (err) 5052 if (err)
@@ -5056,7 +5056,7 @@ int gk20a_enable_gr_hw(struct gk20a *g)
5056 if (err) 5056 if (err)
5057 return err; 5057 return err;
5058 5058
5059 gk20a_dbg_fn("done"); 5059 nvgpu_log_fn(g, "done");
5060 5060
5061 return 0; 5061 return 0;
5062} 5062}
@@ -5163,7 +5163,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
5163static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, 5163static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
5164 struct gr_gk20a_isr_data *isr_data) 5164 struct gr_gk20a_isr_data *isr_data)
5165{ 5165{
5166 gk20a_dbg_fn(""); 5166 nvgpu_log_fn(g, " ");
5167 gk20a_gr_set_error_notifier(g, isr_data, 5167 gk20a_gr_set_error_notifier(g, isr_data,
5168 NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); 5168 NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT);
5169 nvgpu_err(g, 5169 nvgpu_err(g,
@@ -5174,7 +5174,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
5174static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, 5174static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
5175 struct gr_gk20a_isr_data *isr_data) 5175 struct gr_gk20a_isr_data *isr_data)
5176{ 5176{
5177 gk20a_dbg_fn(""); 5177 nvgpu_log_fn(g, " ");
5178 gk20a_gr_set_error_notifier(g, isr_data, 5178 gk20a_gr_set_error_notifier(g, isr_data,
5179 NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); 5179 NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY);
5180 /* This is an unrecoverable error, reset is needed */ 5180 /* This is an unrecoverable error, reset is needed */
@@ -5202,7 +5202,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
5202static int gk20a_gr_handle_illegal_class(struct gk20a *g, 5202static int gk20a_gr_handle_illegal_class(struct gk20a *g,
5203 struct gr_gk20a_isr_data *isr_data) 5203 struct gr_gk20a_isr_data *isr_data)
5204{ 5204{
5205 gk20a_dbg_fn(""); 5205 nvgpu_log_fn(g, " ");
5206 gk20a_gr_set_error_notifier(g, isr_data, 5206 gk20a_gr_set_error_notifier(g, isr_data,
5207 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); 5207 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
5208 nvgpu_err(g, 5208 nvgpu_err(g,
@@ -5243,7 +5243,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
5243{ 5243{
5244 u32 gr_class_error; 5244 u32 gr_class_error;
5245 5245
5246 gk20a_dbg_fn(""); 5246 nvgpu_log_fn(g, " ");
5247 5247
5248 gr_class_error = 5248 gr_class_error =
5249 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); 5249 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r()));
@@ -5274,7 +5274,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
5274static int gk20a_gr_handle_firmware_method(struct gk20a *g, 5274static int gk20a_gr_handle_firmware_method(struct gk20a *g,
5275 struct gr_gk20a_isr_data *isr_data) 5275 struct gr_gk20a_isr_data *isr_data)
5276{ 5276{
5277 gk20a_dbg_fn(""); 5277 nvgpu_log_fn(g, " ");
5278 5278
5279 gk20a_gr_set_error_notifier(g, isr_data, 5279 gk20a_gr_set_error_notifier(g, isr_data,
5280 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); 5280 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
@@ -5450,7 +5450,7 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g,
5450 } 5450 }
5451 nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); 5451 nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
5452#endif 5452#endif
5453 gk20a_dbg_fn(""); 5453 nvgpu_log_fn(g, " ");
5454 nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); 5454 nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
5455 return 0; 5455 return 0;
5456} 5456}
@@ -5543,7 +5543,7 @@ int gk20a_gr_lock_down_sm(struct gk20a *g,
5543 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 5543 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
5544 u32 dbgr_control0; 5544 u32 dbgr_control0;
5545 5545
5546 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5546 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5547 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); 5547 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm);
5548 5548
5549 /* assert stop trigger */ 5549 /* assert stop trigger */
@@ -5582,7 +5582,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5582 bool sm_debugger_attached; 5582 bool sm_debugger_attached;
5583 u32 global_esr, warp_esr, global_mask; 5583 u32 global_esr, warp_esr, global_mask;
5584 5584
5585 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 5585 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
5586 5586
5587 sm_debugger_attached = g->ops.gr.sm_debugger_attached(g); 5587 sm_debugger_attached = g->ops.gr.sm_debugger_attached(g);
5588 5588
@@ -5597,7 +5597,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5597 return -EFAULT; 5597 return -EFAULT;
5598 } 5598 }
5599 5599
5600 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5600 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5601 "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr); 5601 "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr);
5602 5602
5603 gr_gk20a_elpg_protected_call(g, 5603 gr_gk20a_elpg_protected_call(g,
@@ -5617,7 +5617,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5617 } 5617 }
5618 5618
5619 if (early_exit) { 5619 if (early_exit) {
5620 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5620 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5621 "returning early"); 5621 "returning early");
5622 return ret; 5622 return ret;
5623 } 5623 }
@@ -5640,13 +5640,13 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5640 gk20a_writel(g, 5640 gk20a_writel(g,
5641 gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset, 5641 gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset,
5642 tpc_exception_en); 5642 tpc_exception_en);
5643 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); 5643 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled");
5644 } 5644 }
5645 5645
5646 /* if a debugger is present and an error has occurred, do a warp sync */ 5646 /* if a debugger is present and an error has occurred, do a warp sync */
5647 if (!ignore_debugger && 5647 if (!ignore_debugger &&
5648 ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) { 5648 ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) {
5649 gk20a_dbg(gpu_dbg_intr, "warp sync needed"); 5649 nvgpu_log(g, gpu_dbg_intr, "warp sync needed");
5650 do_warp_sync = true; 5650 do_warp_sync = true;
5651 } 5651 }
5652 5652
@@ -5660,7 +5660,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
5660 } 5660 }
5661 5661
5662 if (ignore_debugger) 5662 if (ignore_debugger)
5663 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5663 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5664 "ignore_debugger set, skipping event posting"); 5664 "ignore_debugger set, skipping event posting");
5665 else 5665 else
5666 *post_event |= true; 5666 *post_event |= true;
@@ -5677,11 +5677,11 @@ int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
5677 u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; 5677 u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
5678 u32 esr; 5678 u32 esr;
5679 5679
5680 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 5680 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
5681 5681
5682 esr = gk20a_readl(g, 5682 esr = gk20a_readl(g,
5683 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); 5683 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset);
5684 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); 5684 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr);
5685 5685
5686 gk20a_writel(g, 5686 gk20a_writel(g,
5687 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset, 5687 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset,
@@ -5706,7 +5706,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5706 + offset); 5706 + offset);
5707 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 5707 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
5708 5708
5709 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5709 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5710 "GPC%d TPC%d: pending exception 0x%x", 5710 "GPC%d TPC%d: pending exception 0x%x",
5711 gpc, tpc, tpc_exception); 5711 gpc, tpc, tpc_exception);
5712 5712
@@ -5715,7 +5715,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5715 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) { 5715 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) {
5716 u32 esr_sm_sel, sm; 5716 u32 esr_sm_sel, sm;
5717 5717
5718 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5718 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5719 "GPC%d TPC%d: SM exception pending", gpc, tpc); 5719 "GPC%d TPC%d: SM exception pending", gpc, tpc);
5720 5720
5721 if (g->ops.gr.handle_tpc_sm_ecc_exception) 5721 if (g->ops.gr.handle_tpc_sm_ecc_exception)
@@ -5729,7 +5729,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5729 if (!(esr_sm_sel & (1 << sm))) 5729 if (!(esr_sm_sel & (1 << sm)))
5730 continue; 5730 continue;
5731 5731
5732 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5732 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5733 "GPC%d TPC%d: SM%d exception pending", 5733 "GPC%d TPC%d: SM%d exception pending",
5734 gpc, tpc, sm); 5734 gpc, tpc, sm);
5735 5735
@@ -5750,7 +5750,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
5750 /* check if a tex exception is pending */ 5750 /* check if a tex exception is pending */
5751 if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) == 5751 if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) ==
5752 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) { 5752 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) {
5753 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5753 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5754 "GPC%d TPC%d: TEX exception pending", gpc, tpc); 5754 "GPC%d TPC%d: TEX exception pending", gpc, tpc);
5755 ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event); 5755 ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event);
5756 } 5756 }
@@ -5771,13 +5771,13 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event,
5771 u32 exception1 = gk20a_readl(g, gr_exception1_r()); 5771 u32 exception1 = gk20a_readl(g, gr_exception1_r());
5772 u32 gpc_exception; 5772 u32 gpc_exception;
5773 5773
5774 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, ""); 5774 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, " ");
5775 5775
5776 for (gpc = 0; gpc < gr->gpc_count; gpc++) { 5776 for (gpc = 0; gpc < gr->gpc_count; gpc++) {
5777 if ((exception1 & (1 << gpc)) == 0) 5777 if ((exception1 & (1 << gpc)) == 0)
5778 continue; 5778 continue;
5779 5779
5780 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5780 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5781 "GPC%d exception pending", gpc); 5781 "GPC%d exception pending", gpc);
5782 5782
5783 gpc_offset = gk20a_gr_gpc_offset(g, gpc); 5783 gpc_offset = gk20a_gr_gpc_offset(g, gpc);
@@ -5791,7 +5791,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event,
5791 (1 << tpc)) == 0) 5791 (1 << tpc)) == 0)
5792 continue; 5792 continue;
5793 5793
5794 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5794 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5795 "GPC%d: TPC%d exception pending", gpc, tpc); 5795 "GPC%d: TPC%d exception pending", gpc, tpc);
5796 5796
5797 ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc, 5797 ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc,
@@ -5860,8 +5860,8 @@ int gk20a_gr_isr(struct gk20a *g)
5860 u32 gr_engine_id; 5860 u32 gr_engine_id;
5861 u32 global_esr = 0; 5861 u32 global_esr = 0;
5862 5862
5863 gk20a_dbg_fn(""); 5863 nvgpu_log_fn(g, " ");
5864 gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr); 5864 nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr);
5865 5865
5866 if (!gr_intr) 5866 if (!gr_intr)
5867 return 0; 5867 return 0;
@@ -5896,7 +5896,7 @@ int gk20a_gr_isr(struct gk20a *g)
5896 nvgpu_err(g, "ch id is INVALID 0xffffffff"); 5896 nvgpu_err(g, "ch id is INVALID 0xffffffff");
5897 } 5897 }
5898 5898
5899 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 5899 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
5900 "channel %d: addr 0x%08x, " 5900 "channel %d: addr 0x%08x, "
5901 "data 0x%08x 0x%08x," 5901 "data 0x%08x 0x%08x,"
5902 "ctx 0x%08x, offset 0x%08x, " 5902 "ctx 0x%08x, offset 0x%08x, "
@@ -5968,7 +5968,7 @@ int gk20a_gr_isr(struct gk20a *g)
5968 * register using set_falcon[4] */ 5968 * register using set_falcon[4] */
5969 if (gr_intr & gr_intr_firmware_method_pending_f()) { 5969 if (gr_intr & gr_intr_firmware_method_pending_f()) {
5970 need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data); 5970 need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data);
5971 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); 5971 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n");
5972 gk20a_writel(g, gr_intr_r(), 5972 gk20a_writel(g, gr_intr_r(),
5973 gr_intr_firmware_method_reset_f()); 5973 gr_intr_firmware_method_reset_f());
5974 gr_intr &= ~gr_intr_firmware_method_pending_f(); 5974 gr_intr &= ~gr_intr_firmware_method_pending_f();
@@ -5977,7 +5977,7 @@ int gk20a_gr_isr(struct gk20a *g)
5977 if (gr_intr & gr_intr_exception_pending_f()) { 5977 if (gr_intr & gr_intr_exception_pending_f()) {
5978 u32 exception = gk20a_readl(g, gr_exception_r()); 5978 u32 exception = gk20a_readl(g, gr_exception_r());
5979 5979
5980 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); 5980 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception);
5981 5981
5982 if (exception & gr_exception_fe_m()) { 5982 if (exception & gr_exception_fe_m()) {
5983 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); 5983 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r());
@@ -6057,7 +6057,7 @@ int gk20a_gr_isr(struct gk20a *g)
6057 if (exception & gr_exception_gpc_m() && need_reset == 0) { 6057 if (exception & gr_exception_gpc_m() && need_reset == 0) {
6058 bool post_event = false; 6058 bool post_event = false;
6059 6059
6060 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 6060 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
6061 "GPC exception pending"); 6061 "GPC exception pending");
6062 6062
6063 fault_ch = gk20a_fifo_channel_from_chid(g, 6063 fault_ch = gk20a_fifo_channel_from_chid(g,
@@ -6133,7 +6133,7 @@ int gk20a_gr_nonstall_isr(struct gk20a *g)
6133 int ops = 0; 6133 int ops = 0;
6134 u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r()); 6134 u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
6135 6135
6136 gk20a_dbg(gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); 6136 nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
6137 6137
6138 if (gr_intr & gr_intr_nonstall_trap_pending_f()) { 6138 if (gr_intr & gr_intr_nonstall_trap_pending_f()) {
6139 /* Clear the interrupt */ 6139 /* Clear the interrupt */
@@ -6201,7 +6201,7 @@ int gk20a_gr_suspend(struct gk20a *g)
6201{ 6201{
6202 u32 ret = 0; 6202 u32 ret = 0;
6203 6203
6204 gk20a_dbg_fn(""); 6204 nvgpu_log_fn(g, " ");
6205 6205
6206 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 6206 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
6207 GR_IDLE_CHECK_DEFAULT); 6207 GR_IDLE_CHECK_DEFAULT);
@@ -6227,7 +6227,7 @@ int gk20a_gr_suspend(struct gk20a *g)
6227 6227
6228 g->gr.initialized = false; 6228 g->gr.initialized = false;
6229 6229
6230 gk20a_dbg_fn("done"); 6230 nvgpu_log_fn(g, "done");
6231 return ret; 6231 return ret;
6232} 6232}
6233 6233
@@ -6250,7 +6250,7 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
6250{ 6250{
6251 u32 gpc_addr; 6251 u32 gpc_addr;
6252 6252
6253 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6253 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6254 6254
6255 /* setup defaults */ 6255 /* setup defaults */
6256 *addr_type = CTXSW_ADDR_TYPE_SYS; 6256 *addr_type = CTXSW_ADDR_TYPE_SYS;
@@ -6338,7 +6338,7 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
6338{ 6338{
6339 u32 ppc_num; 6339 u32 ppc_num;
6340 6340
6341 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6341 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6342 6342
6343 for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++) 6343 for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++)
6344 priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), 6344 priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr),
@@ -6369,12 +6369,12 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
6369 t = 0; 6369 t = 0;
6370 *num_registers = 0; 6370 *num_registers = 0;
6371 6371
6372 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6372 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6373 6373
6374 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, 6374 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
6375 &gpc_num, &tpc_num, &ppc_num, &be_num, 6375 &gpc_num, &tpc_num, &ppc_num, &be_num,
6376 &broadcast_flags); 6376 &broadcast_flags);
6377 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); 6377 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
6378 if (err) 6378 if (err)
6379 return err; 6379 return err;
6380 6380
@@ -6428,7 +6428,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
6428 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || 6428 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
6429 (addr_type == CTXSW_ADDR_TYPE_ETPC)) && 6429 (addr_type == CTXSW_ADDR_TYPE_ETPC)) &&
6430 g->ops.gr.egpc_etpc_priv_addr_table) { 6430 g->ops.gr.egpc_etpc_priv_addr_table) {
6431 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); 6431 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
6432 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, 6432 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num,
6433 broadcast_flags, priv_addr_table, &t); 6433 broadcast_flags, priv_addr_table, &t);
6434 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { 6434 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) {
@@ -6477,11 +6477,11 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6477 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * 6477 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count *
6478 sm_per_tpc; 6478 sm_per_tpc;
6479 6479
6480 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6480 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6481 6481
6482 /* implementation is crossed-up if either of these happen */ 6482 /* implementation is crossed-up if either of these happen */
6483 if (max_offsets > potential_offsets) { 6483 if (max_offsets > potential_offsets) {
6484 gk20a_dbg_fn("max_offsets > potential_offsets"); 6484 nvgpu_log_fn(g, "max_offsets > potential_offsets");
6485 return -EINVAL; 6485 return -EINVAL;
6486 } 6486 }
6487 6487
@@ -6490,7 +6490,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6490 6490
6491 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); 6491 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
6492 if (!priv_registers) { 6492 if (!priv_registers) {
6493 gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); 6493 nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets);
6494 err = PTR_ERR(priv_registers); 6494 err = PTR_ERR(priv_registers);
6495 goto cleanup; 6495 goto cleanup;
6496 } 6496 }
@@ -6502,7 +6502,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6502 &num_registers); 6502 &num_registers);
6503 6503
6504 if ((max_offsets > 1) && (num_registers > max_offsets)) { 6504 if ((max_offsets > 1) && (num_registers > max_offsets)) {
6505 gk20a_dbg_fn("max_offsets = %d, num_registers = %d", 6505 nvgpu_log_fn(g, "max_offsets = %d, num_registers = %d",
6506 max_offsets, num_registers); 6506 max_offsets, num_registers);
6507 err = -EINVAL; 6507 err = -EINVAL;
6508 goto cleanup; 6508 goto cleanup;
@@ -6512,7 +6512,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6512 num_registers = 1; 6512 num_registers = 1;
6513 6513
6514 if (!g->gr.ctx_vars.local_golden_image) { 6514 if (!g->gr.ctx_vars.local_golden_image) {
6515 gk20a_dbg_fn("no context switch header info to work with"); 6515 nvgpu_log_fn(g, "no context switch header info to work with");
6516 err = -EINVAL; 6516 err = -EINVAL;
6517 goto cleanup; 6517 goto cleanup;
6518 } 6518 }
@@ -6525,7 +6525,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
6525 g->gr.ctx_vars.golden_image_size, 6525 g->gr.ctx_vars.golden_image_size,
6526 &priv_offset); 6526 &priv_offset);
6527 if (err) { 6527 if (err) {
6528 gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", 6528 nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x",
6529 addr); /*, grPriRegStr(addr)));*/ 6529 addr); /*, grPriRegStr(addr)));*/
6530 goto cleanup; 6530 goto cleanup;
6531 } 6531 }
@@ -6558,7 +6558,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6558 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * 6558 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count *
6559 sm_per_tpc; 6559 sm_per_tpc;
6560 6560
6561 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6561 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6562 6562
6563 /* implementation is crossed-up if either of these happen */ 6563 /* implementation is crossed-up if either of these happen */
6564 if (max_offsets > potential_offsets) 6564 if (max_offsets > potential_offsets)
@@ -6569,7 +6569,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6569 6569
6570 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); 6570 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
6571 if (!priv_registers) { 6571 if (!priv_registers) {
6572 gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); 6572 nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets);
6573 return -ENOMEM; 6573 return -ENOMEM;
6574 } 6574 }
6575 memset(offsets, 0, sizeof(u32) * max_offsets); 6575 memset(offsets, 0, sizeof(u32) * max_offsets);
@@ -6588,7 +6588,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6588 num_registers = 1; 6588 num_registers = 1;
6589 6589
6590 if (!g->gr.ctx_vars.local_golden_image) { 6590 if (!g->gr.ctx_vars.local_golden_image) {
6591 gk20a_dbg_fn("no context switch header info to work with"); 6591 nvgpu_log_fn(g, "no context switch header info to work with");
6592 err = -EINVAL; 6592 err = -EINVAL;
6593 goto cleanup; 6593 goto cleanup;
6594 } 6594 }
@@ -6598,7 +6598,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
6598 priv_registers[i], 6598 priv_registers[i],
6599 &priv_offset); 6599 &priv_offset);
6600 if (err) { 6600 if (err) {
6601 gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", 6601 nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x",
6602 addr); /*, grPriRegStr(addr)));*/ 6602 addr); /*, grPriRegStr(addr)));*/
6603 goto cleanup; 6603 goto cleanup;
6604 } 6604 }
@@ -6684,7 +6684,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
6684 g->ops.gr.init_sm_dsm_reg_info(); 6684 g->ops.gr.init_sm_dsm_reg_info();
6685 g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs); 6685 g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs);
6686 6686
6687 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6687 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6688 6688
6689 for (reg = 0; reg < num_ovr_perf_regs; reg++) { 6689 for (reg = 0; reg < num_ovr_perf_regs; reg++) {
6690 for (gpc = 0; gpc < num_gpc; gpc++) { 6690 for (gpc = 0; gpc < num_gpc; gpc++) {
@@ -6754,13 +6754,11 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
6754static inline bool check_main_image_header_magic(u8 *context) 6754static inline bool check_main_image_header_magic(u8 *context)
6755{ 6755{
6756 u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o()); 6756 u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o());
6757 gk20a_dbg(gpu_dbg_gpu_dbg, "main image magic=0x%x", magic);
6758 return magic == ctxsw_prog_main_image_magic_value_v_value_v(); 6757 return magic == ctxsw_prog_main_image_magic_value_v_value_v();
6759} 6758}
6760static inline bool check_local_header_magic(u8 *context) 6759static inline bool check_local_header_magic(u8 *context)
6761{ 6760{
6762 u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o()); 6761 u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o());
6763 gk20a_dbg(gpu_dbg_gpu_dbg, "local magic=0x%x", magic);
6764 return magic == ctxsw_prog_local_magic_value_v_value_v(); 6762 return magic == ctxsw_prog_local_magic_value_v_value_v();
6765 6763
6766} 6764}
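In the two magic-check helpers above, the per-call debug prints are simply dropped: each helper receives only a raw u8 *context pointer, so there is no struct gk20a to hand to nvgpu_log(). A minimal sketch of the hypothetical alternative, threading g through the helper so the print could be kept (not what the patch does):

    /* hypothetical alternative: pass g into the helper to keep the debug print */
    static inline bool check_main_image_header_magic(struct gk20a *g, u8 *context)
    {
            u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o());

            nvgpu_log(g, gpu_dbg_gpu_dbg, "main image magic=0x%x", magic);
            return magic == ctxsw_prog_main_image_magic_value_v_value_v();
    }

Every caller would then also need g in scope and an extra argument at each call site.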
@@ -6823,14 +6821,14 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6823 else 6821 else
6824 return -EINVAL; 6822 return -EINVAL;
6825 6823
6826 gk20a_dbg_info(" gpc = %d tpc = %d", 6824 nvgpu_log_info(g, " gpc = %d tpc = %d",
6827 gpc_num, tpc_num); 6825 gpc_num, tpc_num);
6828 } else if ((g->ops.gr.is_etpc_addr) && 6826 } else if ((g->ops.gr.is_etpc_addr) &&
6829 g->ops.gr.is_etpc_addr(g, addr)) { 6827 g->ops.gr.is_etpc_addr(g, addr)) {
6830 g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); 6828 g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num);
6831 gpc_base = g->ops.gr.get_egpc_base(g); 6829 gpc_base = g->ops.gr.get_egpc_base(g);
6832 } else { 6830 } else {
6833 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 6831 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
6834 "does not exist in extended region"); 6832 "does not exist in extended region");
6835 return -EINVAL; 6833 return -EINVAL;
6836 } 6834 }
@@ -6857,7 +6855,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6857 data32 = *(u32 *)(context + ctxsw_prog_main_extended_buffer_ctl_o()); 6855 data32 = *(u32 *)(context + ctxsw_prog_main_extended_buffer_ctl_o());
6858 ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32); 6856 ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32);
6859 if (0 == ext_priv_size) { 6857 if (0 == ext_priv_size) {
6860 gk20a_dbg_info(" No extended memory in context buffer"); 6858 nvgpu_log_info(g, " No extended memory in context buffer");
6861 return -EINVAL; 6859 return -EINVAL;
6862 } 6860 }
6863 ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32); 6861 ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32);
@@ -6891,7 +6889,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6891 if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) { 6889 if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) {
6892 sm_dsm_perf_reg_id = i; 6890 sm_dsm_perf_reg_id = i;
6893 6891
6894 gk20a_dbg_info("register match: 0x%08x", 6892 nvgpu_log_info(g, "register match: 0x%08x",
6895 sm_dsm_perf_regs[i]); 6893 sm_dsm_perf_regs[i]);
6896 6894
6897 chk_addr = (gpc_base + gpc_stride * gpc_num) + 6895 chk_addr = (gpc_base + gpc_stride * gpc_num) +
@@ -6921,7 +6919,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
6921 (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) { 6919 (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) {
6922 sm_dsm_perf_ctrl_reg_id = i; 6920 sm_dsm_perf_ctrl_reg_id = i;
6923 6921
6924 gk20a_dbg_info("register match: 0x%08x", 6922 nvgpu_log_info(g, "register match: 0x%08x",
6925 sm_dsm_perf_ctrl_regs[i]); 6923 sm_dsm_perf_ctrl_regs[i]);
6926 6924
6927 chk_addr = (gpc_base + gpc_stride * gpc_num) + 6925 chk_addr = (gpc_base + gpc_stride * gpc_num) +
@@ -7032,7 +7030,7 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g,
7032 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE); 7030 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE);
7033 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); 7031 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
7034 7032
7035 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); 7033 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
7036 7034
7037 if (!g->gr.ctx_vars.valid) 7035 if (!g->gr.ctx_vars.valid)
7038 return -EINVAL; 7036 return -EINVAL;
@@ -7215,12 +7213,12 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7215 u8 *context; 7213 u8 *context;
7216 u32 offset_to_segment; 7214 u32 offset_to_segment;
7217 7215
7218 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 7216 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
7219 7217
7220 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, 7218 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
7221 &gpc_num, &tpc_num, &ppc_num, &be_num, 7219 &gpc_num, &tpc_num, &ppc_num, &be_num,
7222 &broadcast_flags); 7220 &broadcast_flags);
7223 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7221 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7224 "addr_type = %d, broadcast_flags: %08x", 7222 "addr_type = %d, broadcast_flags: %08x",
7225 addr_type, broadcast_flags); 7223 addr_type, broadcast_flags);
7226 if (err) 7224 if (err)
@@ -7243,7 +7241,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7243 } 7241 }
7244 data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); 7242 data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o());
7245 sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); 7243 sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32);
7246 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); 7244 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset);
7247 7245
7248 /* If found in Ext buffer, ok. 7246 /* If found in Ext buffer, ok.
7249 * If it failed and we expected to find it there (quad offset) 7247 * If it failed and we expected to find it there (quad offset)
@@ -7253,7 +7251,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7253 addr, is_quad, quad, context_buffer, 7251 addr, is_quad, quad, context_buffer,
7254 context_buffer_size, priv_offset); 7252 context_buffer_size, priv_offset);
7255 if (!err || (err && is_quad)) { 7253 if (!err || (err && is_quad)) {
7256 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7254 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7257 "err = %d, is_quad = %s", 7255 "err = %d, is_quad = %s",
7258 err, is_quad ? "true" : false); 7256 err, is_quad ? "true" : false);
7259 return err; 7257 return err;
@@ -7357,7 +7355,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7357 num_tpcs) << 2); 7355 num_tpcs) << 2);
7358 } 7356 }
7359 } else { 7357 } else {
7360 gk20a_dbg_fn("Unknown address type."); 7358 nvgpu_log_fn(g, "Unknown address type.");
7361 return -EINVAL; 7359 return -EINVAL;
7362 } 7360 }
7363 err = gr_gk20a_process_context_buffer_priv_segment(g, 7361 err = gr_gk20a_process_context_buffer_priv_segment(g,
@@ -7668,7 +7666,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
7668 u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps; 7666 u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps;
7669 7667
7670 if (hwpm_ctxsw_buffer_size == 0) { 7668 if (hwpm_ctxsw_buffer_size == 0) {
7671 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7669 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7672 "no PM Ctxsw buffer memory in context buffer"); 7670 "no PM Ctxsw buffer memory in context buffer");
7673 return -EINVAL; 7671 return -EINVAL;
7674 } 7672 }
@@ -7760,10 +7758,10 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
7760 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map; 7758 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map;
7761 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count; 7759 g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count;
7762 7760
7763 gk20a_dbg_info("Reg Addr => HWPM Ctxt switch buffer offset"); 7761 nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset");
7764 7762
7765 for (i = 0; i < count; i++) 7763 for (i = 0; i < count; i++)
7766 gk20a_dbg_info("%08x => %08x", map[i].addr, map[i].offset); 7764 nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset);
7767 7765
7768 return 0; 7766 return 0;
7769cleanup: 7767cleanup:
@@ -7785,7 +7783,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g,
7785 u32 count; 7783 u32 count;
7786 struct ctxsw_buf_offset_map_entry *map, *result, map_key; 7784 struct ctxsw_buf_offset_map_entry *map, *result, map_key;
7787 7785
7788 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 7786 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
7789 7787
7790 /* Create map of pri address and pm offset if necessary */ 7788 /* Create map of pri address and pm offset if necessary */
7791 if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { 7789 if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) {
@@ -7831,7 +7829,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
7831 curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx, 7829 curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx,
7832 &curr_gr_tsgid); 7830 &curr_gr_tsgid);
7833 7831
7834 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 7832 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
7835 "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" 7833 "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d"
7836 " ch->chid=%d", 7834 " ch->chid=%d",
7837 curr_ch ? curr_ch->chid : -1, 7835 curr_ch ? curr_ch->chid : -1,
@@ -7873,7 +7871,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7873 u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops}; 7871 u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops};
7874 int err = 0, pass; 7872 int err = 0, pass;
7875 7873
7876 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", 7874 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
7877 num_ctx_wr_ops, num_ctx_rd_ops); 7875 num_ctx_wr_ops, num_ctx_rd_ops);
7878 7876
7879 tsg = tsg_gk20a_from_ch(ch); 7877 tsg = tsg_gk20a_from_ch(ch);
@@ -7906,7 +7904,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7906 v |= ctx_ops[i].value_lo; 7904 v |= ctx_ops[i].value_lo;
7907 gk20a_writel(g, offset, v); 7905 gk20a_writel(g, offset, v);
7908 7906
7909 gk20a_dbg(gpu_dbg_gpu_dbg, 7907 nvgpu_log(g, gpu_dbg_gpu_dbg,
7910 "direct wr: offset=0x%x v=0x%x", 7908 "direct wr: offset=0x%x v=0x%x",
7911 offset, v); 7909 offset, v);
7912 7910
@@ -7916,7 +7914,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7916 v |= ctx_ops[i].value_hi; 7914 v |= ctx_ops[i].value_hi;
7917 gk20a_writel(g, offset + 4, v); 7915 gk20a_writel(g, offset + 4, v);
7918 7916
7919 gk20a_dbg(gpu_dbg_gpu_dbg, 7917 nvgpu_log(g, gpu_dbg_gpu_dbg,
7920 "direct wr: offset=0x%x v=0x%x", 7918 "direct wr: offset=0x%x v=0x%x",
7921 offset + 4, v); 7919 offset + 4, v);
7922 } 7920 }
@@ -7925,7 +7923,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7925 ctx_ops[i].value_lo = 7923 ctx_ops[i].value_lo =
7926 gk20a_readl(g, offset); 7924 gk20a_readl(g, offset);
7927 7925
7928 gk20a_dbg(gpu_dbg_gpu_dbg, 7926 nvgpu_log(g, gpu_dbg_gpu_dbg,
7929 "direct rd: offset=0x%x v=0x%x", 7927 "direct rd: offset=0x%x v=0x%x",
7930 offset, ctx_ops[i].value_lo); 7928 offset, ctx_ops[i].value_lo);
7931 7929
@@ -7933,7 +7931,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
7933 ctx_ops[i].value_hi = 7931 ctx_ops[i].value_hi =
7934 gk20a_readl(g, offset + 4); 7932 gk20a_readl(g, offset + 4);
7935 7933
7936 gk20a_dbg(gpu_dbg_gpu_dbg, 7934 nvgpu_log(g, gpu_dbg_gpu_dbg,
7937 "direct rd: offset=0x%x v=0x%x", 7935 "direct rd: offset=0x%x v=0x%x",
7938 offset, ctx_ops[i].value_lo); 7936 offset, ctx_ops[i].value_lo);
7939 } else 7937 } else
@@ -8001,7 +7999,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8001 offsets, offset_addrs, 7999 offsets, offset_addrs,
8002 &num_offsets); 8000 &num_offsets);
8003 if (err) { 8001 if (err) {
8004 gk20a_dbg(gpu_dbg_gpu_dbg, 8002 nvgpu_log(g, gpu_dbg_gpu_dbg,
8005 "ctx op invalid offset: offset=0x%x", 8003 "ctx op invalid offset: offset=0x%x",
8006 ctx_ops[i].offset); 8004 ctx_ops[i].offset);
8007 ctx_ops[i].status = 8005 ctx_ops[i].status =
@@ -8044,7 +8042,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8044 v |= ctx_ops[i].value_lo; 8042 v |= ctx_ops[i].value_lo;
8045 nvgpu_mem_wr(g, current_mem, offsets[j], v); 8043 nvgpu_mem_wr(g, current_mem, offsets[j], v);
8046 8044
8047 gk20a_dbg(gpu_dbg_gpu_dbg, 8045 nvgpu_log(g, gpu_dbg_gpu_dbg,
8048 "context wr: offset=0x%x v=0x%x", 8046 "context wr: offset=0x%x v=0x%x",
8049 offsets[j], v); 8047 offsets[j], v);
8050 8048
@@ -8054,7 +8052,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8054 v |= ctx_ops[i].value_hi; 8052 v |= ctx_ops[i].value_hi;
8055 nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v); 8053 nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v);
8056 8054
8057 gk20a_dbg(gpu_dbg_gpu_dbg, 8055 nvgpu_log(g, gpu_dbg_gpu_dbg,
8058 "context wr: offset=0x%x v=0x%x", 8056 "context wr: offset=0x%x v=0x%x",
8059 offsets[j] + 4, v); 8057 offsets[j] + 4, v);
8060 } 8058 }
@@ -8068,14 +8066,14 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8068 ctx_ops[i].value_lo = 8066 ctx_ops[i].value_lo =
8069 nvgpu_mem_rd(g, current_mem, offsets[0]); 8067 nvgpu_mem_rd(g, current_mem, offsets[0]);
8070 8068
8071 gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", 8069 nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
8072 offsets[0], ctx_ops[i].value_lo); 8070 offsets[0], ctx_ops[i].value_lo);
8073 8071
8074 if (ctx_ops[i].op == REGOP(READ_64)) { 8072 if (ctx_ops[i].op == REGOP(READ_64)) {
8075 ctx_ops[i].value_hi = 8073 ctx_ops[i].value_hi =
8076 nvgpu_mem_rd(g, current_mem, offsets[0] + 4); 8074 nvgpu_mem_rd(g, current_mem, offsets[0] + 4);
8077 8075
8078 gk20a_dbg(gpu_dbg_gpu_dbg, 8076 nvgpu_log(g, gpu_dbg_gpu_dbg,
8079 "context rd: offset=0x%x v=0x%x", 8077 "context rd: offset=0x%x v=0x%x",
8080 offsets[0] + 4, ctx_ops[i].value_hi); 8078 offsets[0] + 4, ctx_ops[i].value_hi);
8081 } else 8079 } else
@@ -8121,7 +8119,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8121 8119
8122 ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch); 8120 ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch);
8123 8121
8124 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", 8122 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d",
8125 ch_is_curr_ctx); 8123 ch_is_curr_ctx);
8126 8124
8127 err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops, 8125 err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops,
@@ -8176,7 +8174,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
8176 struct nvgpu_timeout timeout; 8174 struct nvgpu_timeout timeout;
8177 u32 warp_esr; 8175 u32 warp_esr;
8178 8176
8179 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 8177 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
8180 "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm); 8178 "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm);
8181 8179
8182 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), 8180 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
@@ -8201,7 +8199,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
8201 ((global_esr & ~global_esr_mask) == 0); 8199 ((global_esr & ~global_esr_mask) == 0);
8202 8200
8203 if (locked_down || no_error_pending) { 8201 if (locked_down || no_error_pending) {
8204 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 8202 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
8205 "GPC%d TPC%d SM%d: locked down SM", 8203 "GPC%d TPC%d SM%d: locked down SM",
8206 gpc, tpc, sm); 8204 gpc, tpc, sm);
8207 return 0; 8205 return 0;
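The bulk of the gr_gk20a.c changes follow one mechanical pattern visible in the hunks above: gk20a_dbg(<mask>, fmt, ...) becomes nvgpu_log(g, <mask>, fmt, ...), gk20a_dbg_fn("") becomes nvgpu_log_fn(g, " "), and gk20a_dbg_info(...) becomes nvgpu_log_info(g, ...), where g is the struct gk20a already available in each function. A minimal before/after sketch of that pattern (illustrative only; the helper function and the header path are assumptions, not part of the patch):

    /* sketch only: <nvgpu/log.h> is the assumed home of the nvgpu_log*() helpers */
    #include <nvgpu/log.h>

    static void example_handle_intr(struct gk20a *g, u32 gr_intr)   /* hypothetical helper */
    {
            /*
             * old form, removed by this patch:
             *   gk20a_dbg_fn("");
             *   gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr);
             */
            nvgpu_log_fn(g, " ");
            nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr);
            nvgpu_log_fn(g, "done");
    }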
diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c
index 939567e7..1787f573 100644
--- a/drivers/gpu/nvgpu/gk20a/hal.c
+++ b/drivers/gpu/nvgpu/gk20a/hal.c
@@ -41,7 +41,7 @@ int gpu_init_hal(struct gk20a *g)
41 switch (ver) { 41 switch (ver) {
42 case GK20A_GPUID_GM20B: 42 case GK20A_GPUID_GM20B:
43 case GK20A_GPUID_GM20B_B: 43 case GK20A_GPUID_GM20B_B:
44 gk20a_dbg_info("gm20b detected"); 44 nvgpu_log_info(g, "gm20b detected");
45 if (gm20b_init_hal(g)) 45 if (gm20b_init_hal(g))
46 return -ENODEV; 46 return -ENODEV;
47 break; 47 break;
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
index 7fed410e..9473ad4f 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GK20A Master Control 2 * GK20A Master Control
3 * 3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@ void mc_gk20a_isr_stall(struct gk20a *g)
40 40
41 mc_intr_0 = g->ops.mc.intr_stall(g); 41 mc_intr_0 = g->ops.mc.intr_stall(g);
42 42
43 gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); 43 nvgpu_log(g, gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);
44 44
45 for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { 45 for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
46 active_engine_id = g->fifo.active_engines_list[engine_id_idx]; 46 active_engine_id = g->fifo.active_engines_list[engine_id_idx];
@@ -200,7 +200,7 @@ void gk20a_mc_disable(struct gk20a *g, u32 units)
200{ 200{
201 u32 pmc; 201 u32 pmc;
202 202
203 gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units); 203 nvgpu_log(g, gpu_dbg_info, "pmc disable: %08x\n", units);
204 204
205 nvgpu_spinlock_acquire(&g->mc_enable_lock); 205 nvgpu_spinlock_acquire(&g->mc_enable_lock);
206 pmc = gk20a_readl(g, mc_enable_r()); 206 pmc = gk20a_readl(g, mc_enable_r());
@@ -213,7 +213,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
213{ 213{
214 u32 pmc; 214 u32 pmc;
215 215
216 gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units); 216 nvgpu_log(g, gpu_dbg_info, "pmc enable: %08x\n", units);
217 217
218 nvgpu_spinlock_acquire(&g->mc_enable_lock); 218 nvgpu_spinlock_acquire(&g->mc_enable_lock);
219 pmc = gk20a_readl(g, mc_enable_r()); 219 pmc = gk20a_readl(g, mc_enable_r());
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 14876296..dfdcc3a4 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -91,7 +91,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
91 struct mm_gk20a *mm = &g->mm; 91 struct mm_gk20a *mm = &g->mm;
92 int err; 92 int err;
93 93
94 gk20a_dbg_fn(""); 94 nvgpu_log_fn(g, " ");
95 95
96 g->ops.fb.set_mmu_page_size(g); 96 g->ops.fb.set_mmu_page_size(g);
97 if (g->ops.fb.set_use_full_comp_tag_line) 97 if (g->ops.fb.set_use_full_comp_tag_line)
@@ -112,7 +112,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
112 if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) 112 if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
113 return -EBUSY; 113 return -EBUSY;
114 114
115 gk20a_dbg_fn("done"); 115 nvgpu_log_fn(g, "done");
116 return 0; 116 return 0;
117} 117}
118 118
@@ -336,7 +336,7 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
336{ 336{
337 int err = 0; 337 int err = 0;
338 338
339 gk20a_dbg_fn(""); 339 nvgpu_log_fn(ch->g, " ");
340 340
341 nvgpu_vm_get(vm); 341 nvgpu_vm_get(vm);
342 ch->vm = vm; 342 ch->vm = vm;
@@ -357,7 +357,7 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
357 u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); 357 u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
358 u32 pdb_addr_hi = u64_hi32(pdb_addr); 358 u32 pdb_addr_hi = u64_hi32(pdb_addr);
359 359
360 gk20a_dbg_info("pde pa=0x%llx", pdb_addr); 360 nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr);
361 361
362 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), 362 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
363 nvgpu_aperture_mask(g, vm->pdb.mem, 363 nvgpu_aperture_mask(g, vm->pdb.mem,
@@ -376,7 +376,7 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
376{ 376{
377 struct gk20a *g = gk20a_from_vm(vm); 377 struct gk20a *g = gk20a_from_vm(vm);
378 378
379 gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", 379 nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p",
380 nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); 380 nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va);
381 381
382 g->ops.mm.init_pdb(g, inst_block, vm); 382 g->ops.mm.init_pdb(g, inst_block, vm);
@@ -395,7 +395,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
395{ 395{
396 int err; 396 int err;
397 397
398 gk20a_dbg_fn(""); 398 nvgpu_log_fn(g, " ");
399 399
400 err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); 400 err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
401 if (err) { 401 if (err) {
@@ -403,7 +403,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
403 return err; 403 return err;
404 } 404 }
405 405
406 gk20a_dbg_fn("done"); 406 nvgpu_log_fn(g, "done");
407 return 0; 407 return 0;
408} 408}
409 409
@@ -415,7 +415,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
415 struct nvgpu_timeout timeout; 415 struct nvgpu_timeout timeout;
416 u32 retries; 416 u32 retries;
417 417
418 gk20a_dbg_fn(""); 418 nvgpu_log_fn(g, " ");
419 419
420 gk20a_busy_noresume(g); 420 gk20a_busy_noresume(g);
421 if (!g->power_on) { 421 if (!g->power_on) {
@@ -448,7 +448,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
448 flush_fb_flush_outstanding_true_v() || 448 flush_fb_flush_outstanding_true_v() ||
449 flush_fb_flush_pending_v(data) == 449 flush_fb_flush_pending_v(data) ==
450 flush_fb_flush_pending_busy_v()) { 450 flush_fb_flush_pending_busy_v()) {
451 gk20a_dbg_info("fb_flush 0x%x", data); 451 nvgpu_log_info(g, "fb_flush 0x%x", data);
452 nvgpu_udelay(5); 452 nvgpu_udelay(5);
453 } else 453 } else
454 break; 454 break;
@@ -494,7 +494,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
494 flush_l2_system_invalidate_outstanding_true_v() || 494 flush_l2_system_invalidate_outstanding_true_v() ||
495 flush_l2_system_invalidate_pending_v(data) == 495 flush_l2_system_invalidate_pending_v(data) ==
496 flush_l2_system_invalidate_pending_busy_v()) { 496 flush_l2_system_invalidate_pending_busy_v()) {
497 gk20a_dbg_info("l2_system_invalidate 0x%x", 497 nvgpu_log_info(g, "l2_system_invalidate 0x%x",
498 data); 498 data);
499 nvgpu_udelay(5); 499 nvgpu_udelay(5);
500 } else 500 } else
@@ -526,7 +526,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
526 struct nvgpu_timeout timeout; 526 struct nvgpu_timeout timeout;
527 u32 retries = 2000; 527 u32 retries = 2000;
528 528
529 gk20a_dbg_fn(""); 529 nvgpu_log_fn(g, " ");
530 530
531 gk20a_busy_noresume(g); 531 gk20a_busy_noresume(g);
532 if (!g->power_on) 532 if (!g->power_on)
@@ -553,7 +553,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
553 flush_l2_flush_dirty_outstanding_true_v() || 553 flush_l2_flush_dirty_outstanding_true_v() ||
554 flush_l2_flush_dirty_pending_v(data) == 554 flush_l2_flush_dirty_pending_v(data) ==
555 flush_l2_flush_dirty_pending_busy_v()) { 555 flush_l2_flush_dirty_pending_busy_v()) {
556 gk20a_dbg_info("l2_flush_dirty 0x%x", data); 556 nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
557 nvgpu_udelay(5); 557 nvgpu_udelay(5);
558 } else 558 } else
559 break; 559 break;
@@ -578,7 +578,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
578 struct nvgpu_timeout timeout; 578 struct nvgpu_timeout timeout;
579 u32 retries = 200; 579 u32 retries = 200;
580 580
581 gk20a_dbg_fn(""); 581 nvgpu_log_fn(g, " ");
582 582
583 gk20a_busy_noresume(g); 583 gk20a_busy_noresume(g);
584 if (!g->power_on) 584 if (!g->power_on)
@@ -602,7 +602,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
602 flush_l2_clean_comptags_outstanding_true_v() || 602 flush_l2_clean_comptags_outstanding_true_v() ||
603 flush_l2_clean_comptags_pending_v(data) == 603 flush_l2_clean_comptags_pending_v(data) ==
604 flush_l2_clean_comptags_pending_busy_v()) { 604 flush_l2_clean_comptags_pending_busy_v()) {
605 gk20a_dbg_info("l2_clean_comptags 0x%x", data); 605 nvgpu_log_info(g, "l2_clean_comptags 0x%x", data);
606 nvgpu_udelay(5); 606 nvgpu_udelay(5);
607 } else 607 } else
608 break; 608 break;
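Where a function does not take struct gk20a *g directly, the converted calls above recover it from an object that is in scope: gk20a_vm_bind_channel() dereferences ch->g inline, while gk20a_init_inst_block() already had g = gk20a_from_vm(vm). A sketch of both shapes (the surrounding function is hypothetical; the helpers are the ones used in the hunks above):

    static void example_log_vm_bind(struct vm_gk20a *vm, struct channel_gk20a *ch)
    {
            struct gk20a *g = gk20a_from_vm(vm);    /* derive g from the VM ... */

            nvgpu_log_fn(ch->g, " ");               /* ... or dereference the channel inline */
            nvgpu_log_info(g, "bound vm %p to channel %d", vm, ch->chid);
    }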
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 0531b387..400a49a3 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -39,8 +39,8 @@
39#include <nvgpu/hw/gk20a/hw_pwr_gk20a.h> 39#include <nvgpu/hw/gk20a/hw_pwr_gk20a.h>
40#include <nvgpu/hw/gk20a/hw_top_gk20a.h> 40#include <nvgpu/hw/gk20a/hw_top_gk20a.h>
41 41
42#define gk20a_dbg_pmu(fmt, arg...) \ 42#define gk20a_dbg_pmu(g, fmt, arg...) \
43 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 43 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
44 44
45bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) 45bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
46{ 46{
@@ -139,7 +139,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
139 u32 intr_mask; 139 u32 intr_mask;
140 u32 intr_dest; 140 u32 intr_dest;
141 141
142 gk20a_dbg_fn(""); 142 nvgpu_log_fn(g, " ");
143 143
144 g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true, 144 g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true,
145 mc_intr_mask_0_pmu_enabled_f()); 145 mc_intr_mask_0_pmu_enabled_f());
@@ -166,7 +166,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
166 mc_intr_mask_0_pmu_enabled_f()); 166 mc_intr_mask_0_pmu_enabled_f());
167 } 167 }
168 168
169 gk20a_dbg_fn("done"); 169 nvgpu_log_fn(g, "done");
170} 170}
171 171
172 172
@@ -179,7 +179,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
179 u64 addr_code, addr_data, addr_load; 179 u64 addr_code, addr_data, addr_load;
180 u32 i, blocks, addr_args; 180 u32 i, blocks, addr_args;
181 181
182 gk20a_dbg_fn(""); 182 nvgpu_log_fn(g, " ");
183 183
184 gk20a_writel(g, pwr_falcon_itfen_r(), 184 gk20a_writel(g, pwr_falcon_itfen_r(),
185 gk20a_readl(g, pwr_falcon_itfen_r()) | 185 gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -286,7 +286,7 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
286 286
287 if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) { 287 if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
288 BUG_ON(mutex->ref_cnt == 0); 288 BUG_ON(mutex->ref_cnt == 0);
289 gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token); 289 gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token);
290 mutex->ref_cnt++; 290 mutex->ref_cnt++;
291 return 0; 291 return 0;
292 } 292 }
@@ -313,12 +313,12 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
313 313
314 if (owner == data) { 314 if (owner == data) {
315 mutex->ref_cnt = 1; 315 mutex->ref_cnt = 1;
316 gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x", 316 gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x",
317 mutex->index, *token); 317 mutex->index, *token);
318 *token = owner; 318 *token = owner;
319 return 0; 319 return 0;
320 } else { 320 } else {
321 gk20a_dbg_info("fail to acquire mutex idx=0x%08x", 321 nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
322 mutex->index); 322 mutex->index);
323 323
324 data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); 324 data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
@@ -370,7 +370,7 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
370 pwr_pmu_mutex_id_release_value_f(owner)); 370 pwr_pmu_mutex_id_release_value_f(owner));
371 gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); 371 gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
372 372
373 gk20a_dbg_pmu("mutex released: id=%d, token=0x%x", 373 gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x",
374 mutex->index, *token); 374 mutex->index, *token);
375 375
376 return 0; 376 return 0;
@@ -475,7 +475,7 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g)
475 struct nvgpu_pmu *pmu = &g->pmu; 475 struct nvgpu_pmu *pmu = &g->pmu;
476 int err = 0; 476 int err = 0;
477 477
478 gk20a_dbg_fn(""); 478 nvgpu_log_fn(g, " ");
479 479
480 nvgpu_mutex_acquire(&pmu->isr_mutex); 480 nvgpu_mutex_acquire(&pmu->isr_mutex);
481 nvgpu_flcn_reset(pmu->flcn); 481 nvgpu_flcn_reset(pmu->flcn);
@@ -554,7 +554,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
554 void *param, u32 handle, u32 status) 554 void *param, u32 handle, u32 status)
555{ 555{
556 struct nvgpu_pmu *pmu = param; 556 struct nvgpu_pmu *pmu = param;
557 gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); 557 gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE");
558 pmu->zbc_save_done = 1; 558 pmu->zbc_save_done = 1;
559} 559}
560 560
@@ -575,7 +575,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
575 575
576 pmu->zbc_save_done = 0; 576 pmu->zbc_save_done = 0;
577 577
578 gk20a_dbg_pmu("cmd post ZBC_TABLE_UPDATE"); 578 gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE");
579 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 579 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
580 pmu_handle_zbc_msg, pmu, &seq, ~0); 580 pmu_handle_zbc_msg, pmu, &seq, ~0);
581 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), 581 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
@@ -587,18 +587,20 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
587int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu, 587int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
588 struct nv_pmu_therm_msg *msg) 588 struct nv_pmu_therm_msg *msg)
589{ 589{
590 gk20a_dbg_fn(""); 590 struct gk20a *g = gk20a_from_pmu(pmu);
591
592 nvgpu_log_fn(g, " ");
591 593
592 switch (msg->msg_type) { 594 switch (msg->msg_type) {
593 case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION: 595 case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION:
594 if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) 596 if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1))
595 nvgpu_clk_arb_send_thermal_alarm(pmu->g); 597 nvgpu_clk_arb_send_thermal_alarm(pmu->g);
596 else 598 else
597 gk20a_dbg_pmu("Unwanted/Unregistered thermal event received %d", 599 gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d",
598 msg->hw_slct_msg.mask); 600 msg->hw_slct_msg.mask);
599 break; 601 break;
600 default: 602 default:
601 gk20a_dbg_pmu("unkown therm event received %d", msg->msg_type); 603 gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type);
602 break; 604 break;
603 } 605 }
604 606
@@ -609,22 +611,22 @@ void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
609{ 611{
610 struct gk20a *g = gk20a_from_pmu(pmu); 612 struct gk20a *g = gk20a_from_pmu(pmu);
611 613
612 gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x", 614 gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x",
613 gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3))); 615 gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
614 gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x", 616 gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
615 gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3))); 617 gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3)));
616 gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x", 618 gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
617 gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3))); 619 gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3)));
618 gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x", 620 gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
619 gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0))); 621 gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0)));
620 gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x", 622 gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x",
621 gk20a_readl(g, pwr_pmu_pg_intren_r(0))); 623 gk20a_readl(g, pwr_pmu_pg_intren_r(0)));
622 624
623 gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x", 625 gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x",
624 gk20a_readl(g, pwr_pmu_idle_count_r(3))); 626 gk20a_readl(g, pwr_pmu_idle_count_r(3)));
625 gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x", 627 gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x",
626 gk20a_readl(g, pwr_pmu_idle_count_r(4))); 628 gk20a_readl(g, pwr_pmu_idle_count_r(4)));
627 gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x", 629 gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x",
628 gk20a_readl(g, pwr_pmu_idle_count_r(7))); 630 gk20a_readl(g, pwr_pmu_idle_count_r(7)));
629} 631}
630 632
@@ -693,7 +695,7 @@ void gk20a_pmu_isr(struct gk20a *g)
693 u32 intr, mask; 695 u32 intr, mask;
694 bool recheck = false; 696 bool recheck = false;
695 697
696 gk20a_dbg_fn(""); 698 nvgpu_log_fn(g, " ");
697 699
698 nvgpu_mutex_acquire(&pmu->isr_mutex); 700 nvgpu_mutex_acquire(&pmu->isr_mutex);
699 if (!pmu->isr_enabled) { 701 if (!pmu->isr_enabled) {
@@ -706,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g)
706 708
707 intr = gk20a_readl(g, pwr_falcon_irqstat_r()); 709 intr = gk20a_readl(g, pwr_falcon_irqstat_r());
708 710
709 gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr); 711 gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);
710 712
711 intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; 713 intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
712 if (!intr || pmu->pmu_state == PMU_STATE_OFF) { 714 if (!intr || pmu->pmu_state == PMU_STATE_OFF) {
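pmu_gk20a.c keeps its file-local convenience wrapper, but the macro now takes the device as its first argument (the redefinition sits at the top of this file's hunks) and every call site passes g; acr_gm20b.c further below does the same with gm20b_dbg_pmu. A usage sketch around the new wrapper (the caller is hypothetical):

    #define gk20a_dbg_pmu(g, fmt, arg...) \
            nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

    static void example_pmu_note(struct nvgpu_pmu *pmu)     /* hypothetical caller */
    {
            struct gk20a *g = gk20a_from_pmu(pmu);  /* as in nvgpu_pmu_handle_therm_event() above */

            gk20a_dbg_pmu(g, "pmu state %d", pmu->pmu_state);
    }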
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index a76e2580..8dde61a2 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -45,7 +45,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
45 bus_bar0_window_target_vid_mem_f()) | 45 bus_bar0_window_target_vid_mem_f()) |
46 bus_bar0_window_base_f(hi); 46 bus_bar0_window_base_f(hi);
47 47
48 gk20a_dbg(gpu_dbg_mem, 48 nvgpu_log(g, gpu_dbg_mem,
49 "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)", 49 "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)",
50 hi, lo, mem, sgl, bufbase, 50 hi, lo, mem, sgl, bufbase,
51 bufbase + nvgpu_sgt_get_phys(g, sgt, sgl), 51 bufbase + nvgpu_sgt_get_phys(g, sgt, sgl),
@@ -67,7 +67,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
67void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem, 67void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
68 struct nvgpu_sgl *sgl) 68 struct nvgpu_sgl *sgl)
69{ 69{
70 gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl); 70 nvgpu_log(g, gpu_dbg_mem, "end for %p,%p", mem, sgl);
71 71
72 nvgpu_spinlock_release(&g->mm.pramin_window_lock); 72 nvgpu_spinlock_release(&g->mm.pramin_window_lock);
73} 73}
diff --git a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
index ed5327cb..dea42b55 100644
--- a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GK20A priv ring 2 * GK20A priv ring
3 * 3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -66,11 +66,11 @@ void gk20a_priv_ring_isr(struct gk20a *g)
66 status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r()); 66 status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
67 status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r()); 67 status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
68 68
69 gk20a_dbg(gpu_dbg_intr, "ringmaster intr status0: 0x%08x," 69 nvgpu_log(g, gpu_dbg_intr, "ringmaster intr status0: 0x%08x,"
70 "status1: 0x%08x", status0, status1); 70 "status1: 0x%08x", status0, status1);
71 71
72 if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) { 72 if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) {
73 gk20a_dbg(gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", 73 nvgpu_log(g, gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x",
74 gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()), 74 gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()),
75 gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()), 75 gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()),
76 gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()), 76 gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()),
@@ -79,7 +79,7 @@ void gk20a_priv_ring_isr(struct gk20a *g)
79 79
80 for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { 80 for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
81 if (status1 & BIT(gpc)) { 81 if (status1 & BIT(gpc)) {
82 gk20a_dbg(gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc, 82 nvgpu_log(g, gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc,
83 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride), 83 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride),
84 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride), 84 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride),
85 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride), 85 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride),
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 60162f9d..5b9f973b 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Tegra GK20A GPU Debugger Driver Register Ops 2 * Tegra GK20A GPU Debugger Driver Register Ops
3 * 3 *
4 * Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -72,7 +72,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
72 bool skip_read_lo, skip_read_hi; 72 bool skip_read_lo, skip_read_hi;
73 bool ok; 73 bool ok;
74 74
75 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 75 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
76 76
77 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 77 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
78 78
@@ -108,7 +108,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
108 case REGOP(READ_32): 108 case REGOP(READ_32):
109 ops[i].value_hi = 0; 109 ops[i].value_hi = 0;
110 ops[i].value_lo = gk20a_readl(g, ops[i].offset); 110 ops[i].value_lo = gk20a_readl(g, ops[i].offset);
111 gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", 111 nvgpu_log(g, gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
112 ops[i].value_lo, ops[i].offset); 112 ops[i].value_lo, ops[i].offset);
113 113
114 break; 114 break;
@@ -118,7 +118,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
118 ops[i].value_hi = 118 ops[i].value_hi =
119 gk20a_readl(g, ops[i].offset + 4); 119 gk20a_readl(g, ops[i].offset + 4);
120 120
121 gk20a_dbg(gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x", 121 nvgpu_log(g, gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
122 ops[i].value_hi, ops[i].value_lo, 122 ops[i].value_hi, ops[i].value_lo,
123 ops[i].offset); 123 ops[i].offset);
124 break; 124 break;
@@ -157,12 +157,12 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
157 157
158 /* now update first 32bits */ 158 /* now update first 32bits */
159 gk20a_writel(g, ops[i].offset, data32_lo); 159 gk20a_writel(g, ops[i].offset, data32_lo);
160 gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", 160 nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
161 data32_lo, ops[i].offset); 161 data32_lo, ops[i].offset);
162 /* if desired, update second 32bits */ 162 /* if desired, update second 32bits */
163 if (ops[i].op == REGOP(WRITE_64)) { 163 if (ops[i].op == REGOP(WRITE_64)) {
164 gk20a_writel(g, ops[i].offset + 4, data32_hi); 164 gk20a_writel(g, ops[i].offset + 4, data32_hi);
165 gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", 165 nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
166 data32_hi, ops[i].offset + 4); 166 data32_hi, ops[i].offset + 4);
167 167
168 } 168 }
@@ -189,7 +189,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
189 } 189 }
190 190
191 clean_up: 191 clean_up:
192 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); 192 nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);
193 return err; 193 return err;
194 194
195} 195}
@@ -395,7 +395,7 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
395 } 395 }
396 } 396 }
397 397
398 gk20a_dbg(gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d", 398 nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d",
399 *ctx_wr_count, *ctx_rd_count); 399 *ctx_wr_count, *ctx_rd_count);
400 400
401 return ok; 401 return ok;
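Throughout these files the argument after g is a mask of debug categories (gpu_dbg_fn, gpu_dbg_intr, gpu_dbg_gpu_dbg, ...), OR-ed together when a message belongs to more than one. Purely as an illustration of that shape, and not nvgpu's actual implementation, a mask-gated logger could look like the following; "log_mask" is an assumed field name:

    /* illustrative only: a hypothetical mask-gated logger, not the real nvgpu_log() */
    static void example_log(struct gk20a *g, u32 mask, const char *fmt, ...)
    {
            va_list args;

            if ((g->log_mask & mask) == 0)  /* assumed field holding enabled categories */
                    return;

            va_start(args, fmt);
            vprintk(fmt, args);             /* kernel vprintf-style sink */
            va_end(args);
    }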
diff --git a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c
index de5d0f78..b08f3e0a 100644
--- a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GK20A Therm 2 * GK20A Therm
3 * 3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ int gk20a_init_therm_support(struct gk20a *g)
43{ 43{
44 u32 err; 44 u32 err;
45 45
46 gk20a_dbg_fn(""); 46 nvgpu_log_fn(g, " ");
47 47
48 err = gk20a_init_therm_reset_enable_hw(g); 48 err = gk20a_init_therm_reset_enable_hw(g);
49 if (err) 49 if (err)
@@ -73,7 +73,7 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g)
73 u32 active_engine_id = 0; 73 u32 active_engine_id = 0;
74 struct fifo_gk20a *f = &g->fifo; 74 struct fifo_gk20a *f = &g->fifo;
75 75
76 gk20a_dbg_fn(""); 76 nvgpu_log_fn(g, " ");
77 77
78 for (engine_id = 0; engine_id < f->num_engines; engine_id++) { 78 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
79 active_engine_id = f->active_engines_list[engine_id]; 79 active_engine_id = f->active_engines_list[engine_id];
@@ -104,6 +104,6 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g)
104 idle_filter &= ~therm_hubmmu_idle_filter_value_m(); 104 idle_filter &= ~therm_hubmmu_idle_filter_value_m();
105 gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); 105 gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
106 106
107 gk20a_dbg_fn("done"); 107 nvgpu_log_fn(g, "done");
108 return 0; 108 return 0;
109} 109}
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 05b8fc61..62763da3 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -107,7 +107,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
107int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, 107int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
108 struct channel_gk20a *ch) 108 struct channel_gk20a *ch)
109{ 109{
110 gk20a_dbg_fn(""); 110 struct gk20a *g = ch->g;
111
112 nvgpu_log_fn(g, " ");
111 113
112 /* check if channel is already bound to some TSG */ 114 /* check if channel is already bound to some TSG */
113 if (gk20a_is_channel_marked_as_tsg(ch)) { 115 if (gk20a_is_channel_marked_as_tsg(ch)) {
@@ -137,10 +139,10 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
137 139
138 nvgpu_ref_get(&tsg->refcount); 140 nvgpu_ref_get(&tsg->refcount);
139 141
140 gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n", 142 nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
141 tsg->tsgid, ch->chid); 143 tsg->tsgid, ch->chid);
142 144
143 gk20a_dbg_fn("done"); 145 nvgpu_log_fn(g, "done");
144 return 0; 146 return 0;
145} 147}
146 148
@@ -167,7 +169,7 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
167 nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); 169 nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
168 ch->tsgid = NVGPU_INVALID_TSG_ID; 170 ch->tsgid = NVGPU_INVALID_TSG_ID;
169 171
170 gk20a_dbg(gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", 172 nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
171 tsg->tsgid, ch->chid); 173 tsg->tsgid, ch->chid);
172 174
173 return 0; 175 return 0;
@@ -204,7 +206,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
204 struct gk20a *g = tsg->g; 206 struct gk20a *g = tsg->g;
205 int ret; 207 int ret;
206 208
207 gk20a_dbg(gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); 209 nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);
208 210
209 switch (level) { 211 switch (level) {
210 case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: 212 case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW:
@@ -227,7 +229,7 @@ int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
227{ 229{
228 struct gk20a *g = tsg->g; 230 struct gk20a *g = tsg->g;
229 231
230 gk20a_dbg(gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); 232 nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice);
231 233
232 return g->ops.fifo.tsg_set_timeslice(tsg, timeslice); 234 return g->ops.fifo.tsg_set_timeslice(tsg, timeslice);
233} 235}
@@ -300,7 +302,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
300 } 302 }
301 } 303 }
302 304
303 gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); 305 nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);
304 306
305 return tsg; 307 return tsg;
306 308
@@ -343,7 +345,7 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
343 345
344 tsg->runlist_id = ~0; 346 tsg->runlist_id = ~0;
345 347
346 gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); 348 nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
347} 349}
348 350
349struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) 351struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index cffe7199..615b6b46 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -42,8 +42,8 @@
42#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h> 42#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
43 43
44/*Defines*/ 44/*Defines*/
45#define gm20b_dbg_pmu(fmt, arg...) \ 45#define gm20b_dbg_pmu(g, fmt, arg...) \
46 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 46 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
47 47
48typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata); 48typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
49 49
@@ -101,16 +101,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
101 struct nvgpu_pmu *pmu = &g->pmu; 101 struct nvgpu_pmu *pmu = &g->pmu;
102 struct lsf_ucode_desc *lsf_desc; 102 struct lsf_ucode_desc *lsf_desc;
103 int err; 103 int err;
104 gm20b_dbg_pmu("requesting PMU ucode in GM20B\n"); 104 gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n");
105 pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0); 105 pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
106 if (!pmu_fw) { 106 if (!pmu_fw) {
107 nvgpu_err(g, "failed to load pmu ucode!!"); 107 nvgpu_err(g, "failed to load pmu ucode!!");
108 return -ENOENT; 108 return -ENOENT;
109 } 109 }
110 g->acr.pmu_fw = pmu_fw; 110 g->acr.pmu_fw = pmu_fw;
111 gm20b_dbg_pmu("Loaded PMU ucode in for blob preparation"); 111 gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
112 112
113 gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n"); 113 gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
114 pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0); 114 pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
115 if (!pmu_desc) { 115 if (!pmu_desc) {
116 nvgpu_err(g, "failed to load pmu ucode desc!!"); 116 nvgpu_err(g, "failed to load pmu ucode desc!!");
@@ -129,7 +129,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
129 129
130 err = nvgpu_init_pmu_fw_support(pmu); 130 err = nvgpu_init_pmu_fw_support(pmu);
131 if (err) { 131 if (err) {
132 gm20b_dbg_pmu("failed to set function pointers\n"); 132 gm20b_dbg_pmu(g, "failed to set function pointers\n");
133 goto release_sig; 133 goto release_sig;
134 } 134 }
135 135
@@ -148,7 +148,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
148 p_img->fw_ver = NULL; 148 p_img->fw_ver = NULL;
149 p_img->header = NULL; 149 p_img->header = NULL;
150 p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; 150 p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
151 gm20b_dbg_pmu("requesting PMU ucode in GM20B exit\n"); 151 gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
152 nvgpu_release_firmware(g, pmu_sig); 152 nvgpu_release_firmware(g, pmu_sig);
153 return 0; 153 return 0;
154release_sig: 154release_sig:
@@ -221,7 +221,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
221 p_img->fw_ver = NULL; 221 p_img->fw_ver = NULL;
222 p_img->header = NULL; 222 p_img->header = NULL;
223 p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; 223 p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
224 gm20b_dbg_pmu("fecs fw loaded\n"); 224 gm20b_dbg_pmu(g, "fecs fw loaded\n");
225 nvgpu_release_firmware(g, fecs_sig); 225 nvgpu_release_firmware(g, fecs_sig);
226 return 0; 226 return 0;
227free_lsf_desc: 227free_lsf_desc:
@@ -292,7 +292,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
292 p_img->fw_ver = NULL; 292 p_img->fw_ver = NULL;
293 p_img->header = NULL; 293 p_img->header = NULL;
294 p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; 294 p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
295 gm20b_dbg_pmu("gpccs fw loaded\n"); 295 gm20b_dbg_pmu(g, "gpccs fw loaded\n");
296 nvgpu_release_firmware(g, gpccs_sig); 296 nvgpu_release_firmware(g, gpccs_sig);
297 return 0; 297 return 0;
298free_lsf_desc: 298free_lsf_desc:
@@ -361,24 +361,24 @@ int prepare_ucode_blob(struct gk20a *g)
361 non WPR blob of ucodes*/ 361 non WPR blob of ucodes*/
362 err = nvgpu_init_pmu_fw_support(pmu); 362 err = nvgpu_init_pmu_fw_support(pmu);
363 if (err) { 363 if (err) {
364 gm20b_dbg_pmu("failed to set function pointers\n"); 364 gm20b_dbg_pmu(g, "failed to set function pointers\n");
365 return err; 365 return err;
366 } 366 }
367 return 0; 367 return 0;
368 } 368 }
369 plsfm = &lsfm_l; 369 plsfm = &lsfm_l;
370 memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr)); 370 memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
371 gm20b_dbg_pmu("fetching GMMU regs\n"); 371 gm20b_dbg_pmu(g, "fetching GMMU regs\n");
372 g->ops.fb.vpr_info_fetch(g); 372 g->ops.fb.vpr_info_fetch(g);
373 gr_gk20a_init_ctxsw_ucode(g); 373 gr_gk20a_init_ctxsw_ucode(g);
374 374
375 g->ops.pmu.get_wpr(g, &wpr_inf); 375 g->ops.pmu.get_wpr(g, &wpr_inf);
376 gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base); 376 gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
377 gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size); 377 gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size);
378 378
379 /* Discover all managed falcons*/ 379 /* Discover all managed falcons*/
380 err = lsfm_discover_ucode_images(g, plsfm); 380 err = lsfm_discover_ucode_images(g, plsfm);
381 gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); 381 gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
382 if (err) 382 if (err)
383 goto free_sgt; 383 goto free_sgt;
384 384
@@ -394,13 +394,13 @@ int prepare_ucode_blob(struct gk20a *g)
394 if (err) 394 if (err)
395 goto free_sgt; 395 goto free_sgt;
396 396
397 gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", 397 gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
398 plsfm->managed_flcn_cnt, plsfm->wpr_size); 398 plsfm->managed_flcn_cnt, plsfm->wpr_size);
399 lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); 399 lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
400 } else { 400 } else {
401 gm20b_dbg_pmu("LSFM is managing no falcons.\n"); 401 gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n");
402 } 402 }
403 gm20b_dbg_pmu("prepare ucode blob return 0\n"); 403 gm20b_dbg_pmu(g, "prepare ucode blob return 0\n");
404 free_acr_resources(g, plsfm); 404 free_acr_resources(g, plsfm);
405free_sgt: 405free_sgt:
406 return err; 406 return err;
@@ -444,13 +444,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
444 444
445 plsfm->managed_flcn_cnt++; 445 plsfm->managed_flcn_cnt++;
446 } else { 446 } else {
447 gm20b_dbg_pmu("id not managed %d\n", 447 gm20b_dbg_pmu(g, "id not managed %d\n",
448 ucode_img.lsf_desc->falcon_id); 448 ucode_img.lsf_desc->falcon_id);
449 } 449 }
450 450
451 /*Free any ucode image resources if not managing this falcon*/ 451 /*Free any ucode image resources if not managing this falcon*/
452 if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { 452 if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
453 gm20b_dbg_pmu("pmu is not LSFM managed\n"); 453 gm20b_dbg_pmu(g, "pmu is not LSFM managed\n");
454 lsfm_free_ucode_img_res(g, &ucode_img); 454 lsfm_free_ucode_img_res(g, &ucode_img);
455 } 455 }
456 456
@@ -481,7 +481,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
481 == 0) 481 == 0)
482 plsfm->managed_flcn_cnt++; 482 plsfm->managed_flcn_cnt++;
483 } else { 483 } else {
484 gm20b_dbg_pmu("not managed %d\n", 484 gm20b_dbg_pmu(g, "not managed %d\n",
485 ucode_img.lsf_desc->falcon_id); 485 ucode_img.lsf_desc->falcon_id);
486 lsfm_free_nonpmu_ucode_img_res(g, 486 lsfm_free_nonpmu_ucode_img_res(g,
487 &ucode_img); 487 &ucode_img);
@@ -489,7 +489,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
489 } 489 }
490 } else { 490 } else {
491 /* Consumed all available falcon objects */ 491 /* Consumed all available falcon objects */
492 gm20b_dbg_pmu("Done checking for ucodes %d\n", i); 492 gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i);
493 break; 493 break;
494 } 494 }
495 } 495 }
@@ -526,26 +526,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
526 addr_base = p_lsfm->lsb_header.ucode_off; 526 addr_base = p_lsfm->lsb_header.ucode_off;
527 g->ops.pmu.get_wpr(g, &wpr_inf); 527 g->ops.pmu.get_wpr(g, &wpr_inf);
528 addr_base += wpr_inf.wpr_base; 528 addr_base += wpr_inf.wpr_base;
529 gm20b_dbg_pmu("pmu loader cfg u32 addrbase %x\n", (u32)addr_base); 529 gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
530 /*From linux*/ 530 /*From linux*/
531 addr_code = u64_lo32((addr_base + 531 addr_code = u64_lo32((addr_base +
532 desc->app_start_offset + 532 desc->app_start_offset +
533 desc->app_resident_code_offset) >> 8); 533 desc->app_resident_code_offset) >> 8);
534 gm20b_dbg_pmu("app start %d app res code off %d\n", 534 gm20b_dbg_pmu(g, "app start %d app res code off %d\n",
535 desc->app_start_offset, desc->app_resident_code_offset); 535 desc->app_start_offset, desc->app_resident_code_offset);
536 addr_data = u64_lo32((addr_base + 536 addr_data = u64_lo32((addr_base +
537 desc->app_start_offset + 537 desc->app_start_offset +
538 desc->app_resident_data_offset) >> 8); 538 desc->app_resident_data_offset) >> 8);
539 gm20b_dbg_pmu("app res data offset%d\n", 539 gm20b_dbg_pmu(g, "app res data offset%d\n",
540 desc->app_resident_data_offset); 540 desc->app_resident_data_offset);
541 gm20b_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); 541 gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
542 542
543 addr_args = ((pwr_falcon_hwcfg_dmem_size_v( 543 addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
544 gk20a_readl(g, pwr_falcon_hwcfg_r()))) 544 gk20a_readl(g, pwr_falcon_hwcfg_r())))
545 << GK20A_PMU_DMEM_BLKSIZE2); 545 << GK20A_PMU_DMEM_BLKSIZE2);
546 addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); 546 addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
547 547
548 gm20b_dbg_pmu("addr_args %x\n", addr_args); 548 gm20b_dbg_pmu(g, "addr_args %x\n", addr_args);
549 549
550 /* Populate the loader_config state*/ 550 /* Populate the loader_config state*/
551 ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE; 551 ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -599,7 +599,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
599 g->ops.pmu.get_wpr(g, &wpr_inf); 599 g->ops.pmu.get_wpr(g, &wpr_inf);
600 addr_base += wpr_inf.wpr_base; 600 addr_base += wpr_inf.wpr_base;
601 601
602 gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, 602 gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
603 p_lsfm->wpr_header.falcon_id); 603 p_lsfm->wpr_header.falcon_id);
604 addr_code = u64_lo32((addr_base + 604 addr_code = u64_lo32((addr_base +
605 desc->app_start_offset + 605 desc->app_start_offset +
@@ -608,7 +608,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
608 desc->app_start_offset + 608 desc->app_start_offset +
609 desc->app_resident_data_offset) >> 8); 609 desc->app_resident_data_offset) >> 8);
610 610
611 gm20b_dbg_pmu("gen cfg %x u32 addrcode %x & data %x load offset %xID\n", 611 gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
612 (u32)addr_code, (u32)addr_data, desc->bootloader_start_offset, 612 (u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
613 p_lsfm->wpr_header.falcon_id); 613 p_lsfm->wpr_header.falcon_id);
614 614
@@ -631,7 +631,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
631 631
632 struct nvgpu_pmu *pmu = &g->pmu; 632 struct nvgpu_pmu *pmu = &g->pmu;
633 if (pnode->wpr_header.falcon_id != pmu->falcon_id) { 633 if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
634 gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n"); 634 gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
635 g->ops.pmu.flcn_populate_bl_dmem_desc(g, 635 g->ops.pmu.flcn_populate_bl_dmem_desc(g,
636 pnode, &pnode->bl_gen_desc_size, 636 pnode, &pnode->bl_gen_desc_size,
637 pnode->wpr_header.falcon_id); 637 pnode->wpr_header.falcon_id);
@@ -639,7 +639,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
639 } 639 }
640 640
641 if (pmu->pmu_mode & PMU_LSFM_MANAGED) { 641 if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
642 gm20b_dbg_pmu("pmu write flcn bl gen desc\n"); 642 gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
643 if (pnode->wpr_header.falcon_id == pmu->falcon_id) 643 if (pnode->wpr_header.falcon_id == pmu->falcon_id)
644 return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, 644 return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
645 &pnode->bl_gen_desc_size); 645 &pnode->bl_gen_desc_size);
@@ -672,46 +672,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
672 nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), 672 nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
673 &pnode->wpr_header, sizeof(pnode->wpr_header)); 673 &pnode->wpr_header, sizeof(pnode->wpr_header));
674 674
675 gm20b_dbg_pmu("wpr header"); 675 gm20b_dbg_pmu(g, "wpr header");
676 gm20b_dbg_pmu("falconid :%d", 676 gm20b_dbg_pmu(g, "falconid :%d",
677 pnode->wpr_header.falcon_id); 677 pnode->wpr_header.falcon_id);
678 gm20b_dbg_pmu("lsb_offset :%x", 678 gm20b_dbg_pmu(g, "lsb_offset :%x",
679 pnode->wpr_header.lsb_offset); 679 pnode->wpr_header.lsb_offset);
680 gm20b_dbg_pmu("bootstrap_owner :%d", 680 gm20b_dbg_pmu(g, "bootstrap_owner :%d",
681 pnode->wpr_header.bootstrap_owner); 681 pnode->wpr_header.bootstrap_owner);
682 gm20b_dbg_pmu("lazy_bootstrap :%d", 682 gm20b_dbg_pmu(g, "lazy_bootstrap :%d",
683 pnode->wpr_header.lazy_bootstrap); 683 pnode->wpr_header.lazy_bootstrap);
684 gm20b_dbg_pmu("status :%d", 684 gm20b_dbg_pmu(g, "status :%d",
685 pnode->wpr_header.status); 685 pnode->wpr_header.status);
686 686
687 /*Flush LSB header to memory*/ 687 /*Flush LSB header to memory*/
688 nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, 688 nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
689 &pnode->lsb_header, sizeof(pnode->lsb_header)); 689 &pnode->lsb_header, sizeof(pnode->lsb_header));
690 690
691 gm20b_dbg_pmu("lsb header"); 691 gm20b_dbg_pmu(g, "lsb header");
692 gm20b_dbg_pmu("ucode_off :%x", 692 gm20b_dbg_pmu(g, "ucode_off :%x",
693 pnode->lsb_header.ucode_off); 693 pnode->lsb_header.ucode_off);
694 gm20b_dbg_pmu("ucode_size :%x", 694 gm20b_dbg_pmu(g, "ucode_size :%x",
695 pnode->lsb_header.ucode_size); 695 pnode->lsb_header.ucode_size);
696 gm20b_dbg_pmu("data_size :%x", 696 gm20b_dbg_pmu(g, "data_size :%x",
697 pnode->lsb_header.data_size); 697 pnode->lsb_header.data_size);
698 gm20b_dbg_pmu("bl_code_size :%x", 698 gm20b_dbg_pmu(g, "bl_code_size :%x",
699 pnode->lsb_header.bl_code_size); 699 pnode->lsb_header.bl_code_size);
700 gm20b_dbg_pmu("bl_imem_off :%x", 700 gm20b_dbg_pmu(g, "bl_imem_off :%x",
701 pnode->lsb_header.bl_imem_off); 701 pnode->lsb_header.bl_imem_off);
702 gm20b_dbg_pmu("bl_data_off :%x", 702 gm20b_dbg_pmu(g, "bl_data_off :%x",
703 pnode->lsb_header.bl_data_off); 703 pnode->lsb_header.bl_data_off);
704 gm20b_dbg_pmu("bl_data_size :%x", 704 gm20b_dbg_pmu(g, "bl_data_size :%x",
705 pnode->lsb_header.bl_data_size); 705 pnode->lsb_header.bl_data_size);
706 gm20b_dbg_pmu("app_code_off :%x", 706 gm20b_dbg_pmu(g, "app_code_off :%x",
707 pnode->lsb_header.app_code_off); 707 pnode->lsb_header.app_code_off);
708 gm20b_dbg_pmu("app_code_size :%x", 708 gm20b_dbg_pmu(g, "app_code_size :%x",
709 pnode->lsb_header.app_code_size); 709 pnode->lsb_header.app_code_size);
710 gm20b_dbg_pmu("app_data_off :%x", 710 gm20b_dbg_pmu(g, "app_data_off :%x",
711 pnode->lsb_header.app_data_off); 711 pnode->lsb_header.app_data_off);
712 gm20b_dbg_pmu("app_data_size :%x", 712 gm20b_dbg_pmu(g, "app_data_size :%x",
713 pnode->lsb_header.app_data_size); 713 pnode->lsb_header.app_data_size);
714 gm20b_dbg_pmu("flags :%x", 714 gm20b_dbg_pmu(g, "flags :%x",
715 pnode->lsb_header.flags); 715 pnode->lsb_header.flags);
716 716
717 /*If this falcon has a boot loader and related args, 717 /*If this falcon has a boot loader and related args,
@@ -1028,7 +1028,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
1028 start = nvgpu_mem_get_addr(g, &acr->ucode_blob); 1028 start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
1029 size = acr->ucode_blob.size; 1029 size = acr->ucode_blob.size;
1030 1030
1031 gm20b_dbg_pmu(""); 1031 gm20b_dbg_pmu(g, " ");
1032 1032
1033 if (!acr_fw) { 1033 if (!acr_fw) {
1034 /*First time init case*/ 1034 /*First time init case*/
@@ -1141,14 +1141,14 @@ int acr_ucode_patch_sig(struct gk20a *g,
1141 unsigned int *p_patch_ind) 1141 unsigned int *p_patch_ind)
1142{ 1142{
1143 unsigned int i, *p_sig; 1143 unsigned int i, *p_sig;
1144 gm20b_dbg_pmu(""); 1144 gm20b_dbg_pmu(g, " ");
1145 1145
1146 if (!pmu_is_debug_mode_en(g)) { 1146 if (!pmu_is_debug_mode_en(g)) {
1147 p_sig = p_prod_sig; 1147 p_sig = p_prod_sig;
1148 gm20b_dbg_pmu("PRODUCTION MODE\n"); 1148 gm20b_dbg_pmu(g, "PRODUCTION MODE\n");
1149 } else { 1149 } else {
1150 p_sig = p_dbg_sig; 1150 p_sig = p_dbg_sig;
1151 gm20b_dbg_pmu("DEBUG MODE\n"); 1151 gm20b_dbg_pmu(g, "DEBUG MODE\n");
1152 } 1152 }
1153 1153
1154 /* Patching logic:*/ 1154 /* Patching logic:*/
@@ -1171,7 +1171,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
1171 struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; 1171 struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
1172 u32 dst; 1172 u32 dst;
1173 1173
1174 gk20a_dbg_fn(""); 1174 nvgpu_log_fn(g, " ");
1175 gk20a_writel(g, pwr_falcon_itfen_r(), 1175 gk20a_writel(g, pwr_falcon_itfen_r(),
1176 gk20a_readl(g, pwr_falcon_itfen_r()) | 1176 gk20a_readl(g, pwr_falcon_itfen_r()) |
1177 pwr_falcon_itfen_ctxen_enable_f()); 1177 pwr_falcon_itfen_ctxen_enable_f());
@@ -1193,7 +1193,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
1193 (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, 1193 (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
1194 pmu_bl_gm10x_desc->bl_start_tag); 1194 pmu_bl_gm10x_desc->bl_start_tag);
1195 1195
1196 gm20b_dbg_pmu("Before starting falcon with BL\n"); 1196 gm20b_dbg_pmu(g, "Before starting falcon with BL\n");
1197 1197
1198 virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; 1198 virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
1199 1199
@@ -1207,7 +1207,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
1207 struct nvgpu_pmu *pmu = &g->pmu; 1207 struct nvgpu_pmu *pmu = &g->pmu;
1208 int err = 0; 1208 int err = 0;
1209 1209
1210 gk20a_dbg_fn(""); 1210 nvgpu_log_fn(g, " ");
1211 1211
1212 nvgpu_mutex_acquire(&pmu->isr_mutex); 1212 nvgpu_mutex_acquire(&pmu->isr_mutex);
1213 nvgpu_flcn_reset(pmu->flcn); 1213 nvgpu_flcn_reset(pmu->flcn);
@@ -1279,7 +1279,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
1279 struct nvgpu_pmu *pmu = &g->pmu; 1279 struct nvgpu_pmu *pmu = &g->pmu;
1280 int err; 1280 int err;
1281 1281
1282 gk20a_dbg_fn(""); 1282 nvgpu_log_fn(g, " ");
1283 1283
1284 nvgpu_mutex_acquire(&pmu->isr_mutex); 1284 nvgpu_mutex_acquire(&pmu->isr_mutex);
1285 nvgpu_flcn_reset(pmu->flcn); 1285 nvgpu_flcn_reset(pmu->flcn);
@@ -1324,7 +1324,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
1324 struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw; 1324 struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw;
1325 struct hsflcn_bl_desc *pmu_bl_gm10x_desc; 1325 struct hsflcn_bl_desc *pmu_bl_gm10x_desc;
1326 u32 *pmu_bl_gm10x = NULL; 1326 u32 *pmu_bl_gm10x = NULL;
1327 gm20b_dbg_pmu(""); 1327 gm20b_dbg_pmu(g, " ");
1328 1328
1329 if (!hsbl_fw) { 1329 if (!hsbl_fw) {
1330 hsbl_fw = nvgpu_request_firmware(g, 1330 hsbl_fw = nvgpu_request_firmware(g,
@@ -1343,7 +1343,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
1343 bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size, 1343 bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size,
1344 256); 1344 256);
1345 acr->hsbl_ucode.size = bl_sz; 1345 acr->hsbl_ucode.size = bl_sz;
1346 gm20b_dbg_pmu("Executing Generic Bootloader\n"); 1346 gm20b_dbg_pmu(g, "Executing Generic Bootloader\n");
1347 1347
1348 /*TODO in code verify that enable PMU is done, 1348 /*TODO in code verify that enable PMU is done,
1349 scrubbing etc is done*/ 1349 scrubbing etc is done*/
@@ -1366,7 +1366,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
1366 } 1366 }
1367 1367
1368 nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz); 1368 nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
1369 gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n"); 1369 gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n");
1370 } 1370 }
1371 /* 1371 /*
1372 * Disable interrupts to avoid kernel hitting breakpoint due 1372 * Disable interrupts to avoid kernel hitting breakpoint due
@@ -1377,9 +1377,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
1377 gk20a_get_gr_idle_timeout(g))) 1377 gk20a_get_gr_idle_timeout(g)))
1378 goto err_unmap_bl; 1378 goto err_unmap_bl;
1379 1379
1380 gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, 1380 gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
1381 pwr_falcon_mmu_phys_sec_r())); 1381 pwr_falcon_mmu_phys_sec_r()));
1382 gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); 1382 gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
1383 1383
1384 g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size); 1384 g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size);
1385 1385
@@ -1396,10 +1396,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
1396 else 1396 else
1397 goto err_unmap_bl; 1397 goto err_unmap_bl;
1398 } 1398 }
1399 gm20b_dbg_pmu("after waiting for halt, err %x\n", err); 1399 gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
1400 gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, 1400 gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
1401 pwr_falcon_mmu_phys_sec_r())); 1401 pwr_falcon_mmu_phys_sec_r()));
1402 gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); 1402 gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
1403 start_gm20b_pmu(g); 1403 start_gm20b_pmu(g);
1404 return 0; 1404 return 0;
1405err_unmap_bl: 1405err_unmap_bl:
@@ -1430,7 +1430,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
1430 } 1430 }
1431 1431
1432 g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r()); 1432 g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
1433 gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); 1433 gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
1434 data = gk20a_readl(g, pwr_falcon_mailbox0_r()); 1434 data = gk20a_readl(g, pwr_falcon_mailbox0_r());
1435 if (data) { 1435 if (data) {
1436 nvgpu_err(g, "ACR boot failed, err %x", data); 1436 nvgpu_err(g, "ACR boot failed, err %x", data);
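The acr_gm20b.c hunks above all follow from the macro change at line 45 of that file: gm20b_dbg_pmu() now takes the struct gk20a pointer and forwards it to nvgpu_log(), so every call site gains a leading g argument. A minimal user-space sketch of that new shape follows; the struct gk20a body, the gpu_dbg_pmu value, the nvgpu_log() stub and main() are illustrative stand-ins rather than the driver's real definitions, and only the macro form is taken from the hunks above.

#include <stdarg.h>
#include <stdio.h>

struct gk20a { const char *name; };          /* stand-in; the real struct lives in the driver */
#define gpu_dbg_pmu (1U << 5)                /* placeholder bit, not the real mask value */

static void nvgpu_log(struct gk20a *g, unsigned int mask, const char *fmt, ...)
{
	va_list args;

	(void)mask;                          /* a real build would test the enabled log mask */
	printf("[%s] ", g->name);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

/* New macro shape from acr_gm20b.c: the caller supplies g explicitly. */
#define gm20b_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

int main(void)
{
	struct gk20a dev = { .name = "gm20b" };

	gm20b_dbg_pmu(&dev, "requesting PMU ucode in GM20B\n");
	gm20b_dbg_pmu(&dev, "wpr carveout base:%llx\n", 0x1000000ULL);
	return 0;
}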
diff --git a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
index cdd70d5b..ca2a40bf 100644
--- a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GM20B MMU 2 * GM20B MMU
3 * 3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
40 u64 iova = nvgpu_inst_block_addr(g, bar1_inst); 40 u64 iova = nvgpu_inst_block_addr(g, bar1_inst);
41 u32 ptr_v = (u32)(iova >> bus_bar1_block_ptr_shift_v()); 41 u32 ptr_v = (u32)(iova >> bus_bar1_block_ptr_shift_v());
42 42
43 gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v); 43 nvgpu_log_info(g, "bar1 inst block ptr: 0x%08x", ptr_v);
44 44
45 gk20a_writel(g, bus_bar1_block_r(), 45 gk20a_writel(g, bus_bar1_block_r(),
46 nvgpu_aperture_mask(g, bar1_inst, 46 nvgpu_aperture_mask(g, bar1_inst,
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index fa751ecc..fb89752a 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GM20B Clocks 2 * GM20B Clocks
3 * 3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -36,8 +36,8 @@
36#include <nvgpu/hw/gm20b/hw_therm_gm20b.h> 36#include <nvgpu/hw/gm20b/hw_therm_gm20b.h>
37#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h> 37#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
38 38
39#define gk20a_dbg_clk(fmt, arg...) \ 39#define gk20a_dbg_clk(g, fmt, arg...) \
40 gk20a_dbg(gpu_dbg_clk, fmt, ##arg) 40 nvgpu_log(g, gpu_dbg_clk, fmt, ##arg)
41 41
42#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */ 42#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
43#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */ 43#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
@@ -138,6 +138,7 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
138static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, 138static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
139 struct pll_parms *pll_params, u32 *target_freq, bool best_fit) 139 struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
140{ 140{
141 struct gk20a *g = clk->g;
141 u32 min_vco_f, max_vco_f; 142 u32 min_vco_f, max_vco_f;
142 u32 best_M, best_N; 143 u32 best_M, best_N;
143 u32 low_PL, high_PL, best_PL; 144 u32 low_PL, high_PL, best_PL;
@@ -149,7 +150,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
149 150
150 BUG_ON(target_freq == NULL); 151 BUG_ON(target_freq == NULL);
151 152
152 gk20a_dbg_fn("request target freq %d MHz", *target_freq); 153 nvgpu_log_fn(g, "request target freq %d MHz", *target_freq);
153 154
154 ref_clk_f = pll->clk_in; 155 ref_clk_f = pll->clk_in;
155 target_clk_f = *target_freq; 156 target_clk_f = *target_freq;
@@ -172,7 +173,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
172 low_PL = min(low_PL, pll_params->max_PL); 173 low_PL = min(low_PL, pll_params->max_PL);
173 low_PL = max(low_PL, pll_params->min_PL); 174 low_PL = max(low_PL, pll_params->min_PL);
174 175
175 gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)", 176 nvgpu_log_info(g, "low_PL %d(div%d), high_PL %d(div%d)",
176 low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL)); 177 low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL));
177 178
178 for (pl = low_PL; pl <= high_PL; pl++) { 179 for (pl = low_PL; pl <= high_PL; pl++) {
@@ -217,7 +218,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
217 goto found_match; 218 goto found_match;
218 } 219 }
219 220
220 gk20a_dbg_info("delta %d @ M %d, N %d, PL %d", 221 nvgpu_log_info(g, "delta %d @ M %d, N %d, PL %d",
221 delta, m, n, pl); 222 delta, m, n, pl);
222 } 223 }
223 } 224 }
@@ -229,7 +230,7 @@ found_match:
229 BUG_ON(best_delta == ~0U); 230 BUG_ON(best_delta == ~0U);
230 231
231 if (best_fit && best_delta != 0) 232 if (best_fit && best_delta != 0)
232 gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll", 233 gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll",
233 target_clk_f); 234 target_clk_f);
234 235
235 pll->M = best_M; 236 pll->M = best_M;
@@ -241,10 +242,10 @@ found_match:
241 242
242 *target_freq = pll->freq; 243 *target_freq = pll->freq;
243 244
244 gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)", 245 gk20a_dbg_clk(g, "actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
245 *target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL)); 246 *target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL));
246 247
247 gk20a_dbg_fn("done"); 248 nvgpu_log_fn(g, "done");
248 249
249 return 0; 250 return 0;
250} 251}
@@ -810,7 +811,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
810 if (gpll->mode == GPC_PLL_MODE_DVFS) { 811 if (gpll->mode == GPC_PLL_MODE_DVFS) {
811 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 812 gk20a_readl(g, trim_sys_gpcpll_cfg_r());
812 nvgpu_udelay(gpc_pll_params.na_lock_delay); 813 nvgpu_udelay(gpc_pll_params.na_lock_delay);
813 gk20a_dbg_clk("NA config_pll under bypass: %u (%u) kHz %d mV", 814 gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV",
814 gpll->freq, gpll->freq / 2, 815 gpll->freq, gpll->freq / 2,
815 (trim_sys_gpcpll_cfg3_dfs_testout_v( 816 (trim_sys_gpcpll_cfg3_dfs_testout_v(
816 gk20a_readl(g, trim_sys_gpcpll_cfg3_r())) 817 gk20a_readl(g, trim_sys_gpcpll_cfg3_r()))
@@ -843,7 +844,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
843 return -EBUSY; 844 return -EBUSY;
844 845
845pll_locked: 846pll_locked:
846 gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x", 847 gk20a_dbg_clk(g, "locked config_pll under bypass r=0x%x v=0x%x",
847 trim_sys_gpcpll_cfg_r(), cfg); 848 trim_sys_gpcpll_cfg_r(), cfg);
848 849
849 /* set SYNC_MODE for glitchless switch out of bypass */ 850 /* set SYNC_MODE for glitchless switch out of bypass */
@@ -878,7 +879,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
878 bool can_slide, pldiv_only; 879 bool can_slide, pldiv_only;
879 struct pll gpll; 880 struct pll gpll;
880 881
881 gk20a_dbg_fn(""); 882 nvgpu_log_fn(g, " ");
882 883
883 if (!nvgpu_platform_is_silicon(g)) 884 if (!nvgpu_platform_is_silicon(g))
884 return 0; 885 return 0;
@@ -1028,7 +1029,7 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
1028 gpll->N = nsafe; 1029 gpll->N = nsafe;
1029 clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs); 1030 clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs);
1030 1031
1031 gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", 1032 gk20a_dbg_clk(g, "safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
1032 gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL), 1033 gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL),
1033 gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff); 1034 gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff);
1034} 1035}
@@ -1103,7 +1104,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
1103 clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal); 1104 clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal);
1104 clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff); 1105 clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff);
1105 1106
1106 gk20a_dbg_clk("config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", 1107 gk20a_dbg_clk(g, "config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
1107 gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL, 1108 gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL,
1108 nvgpu_pl_to_div(gpll_new->PL), 1109 nvgpu_pl_to_div(gpll_new->PL),
1109 max(gpll_new->dvfs.mv, gpll_old->dvfs.mv), 1110 max(gpll_new->dvfs.mv, gpll_old->dvfs.mv),
@@ -1168,14 +1169,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
1168 unsigned long safe_rate; 1169 unsigned long safe_rate;
1169 int err; 1170 int err;
1170 1171
1171 gk20a_dbg_fn(""); 1172 nvgpu_log_fn(g, " ");
1172 1173
1173 err = nvgpu_mutex_init(&clk->clk_mutex); 1174 err = nvgpu_mutex_init(&clk->clk_mutex);
1174 if (err) 1175 if (err)
1175 return err; 1176 return err;
1176 1177
1177 if (clk->sw_ready) { 1178 if (clk->sw_ready) {
1178 gk20a_dbg_fn("skip init"); 1179 nvgpu_log_fn(g, "skip init");
1179 return 0; 1180 return 0;
1180 } 1181 }
1181 1182
@@ -1229,7 +1230,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
1229 1230
1230 clk->sw_ready = true; 1231 clk->sw_ready = true;
1231 1232
1232 gk20a_dbg_fn("done"); 1233 nvgpu_log_fn(g, "done");
1233 nvgpu_info(g, 1234 nvgpu_info(g,
1234 "GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)", 1235 "GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)",
1235 clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "", 1236 clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "",
@@ -1321,7 +1322,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
1321{ 1322{
1322 u32 data; 1323 u32 data;
1323 1324
1324 gk20a_dbg_fn(""); 1325 nvgpu_log_fn(g, " ");
1325 1326
1326 /* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */ 1327 /* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */
1327 data = gk20a_readl(g, trim_sys_gpc2clk_out_r()); 1328 data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
@@ -1394,7 +1395,7 @@ static int set_pll_freq(struct gk20a *g, int allow_slide)
1394 struct clk_gk20a *clk = &g->clk; 1395 struct clk_gk20a *clk = &g->clk;
1395 int err = 0; 1396 int err = 0;
1396 1397
1397 gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz", 1398 nvgpu_log_fn(g, "last freq: %dMHz, target freq %dMHz",
1398 clk->gpc_pll_last.freq, clk->gpc_pll.freq); 1399 clk->gpc_pll_last.freq, clk->gpc_pll.freq);
1399 1400
1400 /* If programming with dynamic sliding failed, re-try under bypass */ 1401 /* If programming with dynamic sliding failed, re-try under bypass */
@@ -1427,7 +1428,7 @@ int gm20b_init_clk_support(struct gk20a *g)
1427 struct clk_gk20a *clk = &g->clk; 1428 struct clk_gk20a *clk = &g->clk;
1428 u32 err; 1429 u32 err;
1429 1430
1430 gk20a_dbg_fn(""); 1431 nvgpu_log_fn(g, " ");
1431 1432
1432 nvgpu_mutex_acquire(&clk->clk_mutex); 1433 nvgpu_mutex_acquire(&clk->clk_mutex);
1433 clk->clk_hw_on = true; 1434 clk->clk_hw_on = true;
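The clk_gm20b.c hunks show the other recurring pattern in this change: functions that previously called gk20a_dbg_fn("") with no device argument now fetch struct gk20a from a context they already hold (clk->g here, c->g in gr_gm20b.c below) before calling nvgpu_log_fn(g, ...). A hedged stand-alone sketch of that pattern, with nvgpu_log_fn reduced to a printf shim purely for illustration:

#include <stdio.h>

struct gk20a { const char *name; };                 /* stand-in device handle */
struct clk_gk20a { struct gk20a *g; };              /* trimmed to the one field used here */

/* printf shim standing in for the driver's nvgpu_log_fn(g, fmt, ...) */
#define nvgpu_log_fn(g, fmt, arg...) \
	printf("[%s] %s:" fmt "\n", (g)->name, __func__, ##arg)

/* Mirrors clk_config_pll() above: derive g from the clk context, then log. */
static int clk_example(struct clk_gk20a *clk, unsigned int target_freq)
{
	struct gk20a *g = clk->g;

	nvgpu_log_fn(g, "request target freq %u MHz", target_freq);
	nvgpu_log_fn(g, " ");                       /* the " " placeholder used for bare traces */
	return 0;
}

int main(void)
{
	struct gk20a dev = { .name = "gm20b" };
	struct clk_gk20a clk = { .g = &dev };

	return clk_example(&clk, 921);
}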
diff --git a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
index 5bc6d452..b2a815fb 100644
--- a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
@@ -38,7 +38,7 @@
38 38
39void fb_gm20b_init_fs_state(struct gk20a *g) 39void fb_gm20b_init_fs_state(struct gk20a *g)
40{ 40{
41 gk20a_dbg_info("initialize gm20b fb"); 41 nvgpu_log_info(g, "initialize gm20b fb");
42 42
43 gk20a_writel(g, fb_fbhub_num_active_ltcs_r(), 43 gk20a_writel(g, fb_fbhub_num_active_ltcs_r(),
44 g->ltc_count); 44 g->ltc_count);
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 35a7a9e1..b73abeda 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -47,7 +47,7 @@ void channel_gm20b_bind(struct channel_gk20a *c)
47 u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block) 47 u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block)
48 >> ram_in_base_shift_v(); 48 >> ram_in_base_shift_v();
49 49
50 gk20a_dbg_info("bind channel %d inst ptr 0x%08x", 50 nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x",
51 c->chid, inst_ptr); 51 c->chid, inst_ptr);
52 52
53 53
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 1c966c22..331c3af9 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -47,7 +47,7 @@ void gr_gm20b_init_gpc_mmu(struct gk20a *g)
47{ 47{
48 u32 temp; 48 u32 temp;
49 49
50 gk20a_dbg_info("initialize gpc mmu"); 50 nvgpu_log_info(g, "initialize gpc mmu");
51 51
52 if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { 52 if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
53 /* Bypass MMU check for non-secure boot. For 53 /* Bypass MMU check for non-secure boot. For
@@ -168,7 +168,7 @@ void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
168 168
169 data = min_t(u32, data, g->gr.min_gpm_fifo_depth); 169 data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
170 170
171 gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", 171 nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d",
172 g->gr.bundle_cb_token_limit, data); 172 g->gr.bundle_cb_token_limit, data);
173 173
174 gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(), 174 gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -193,7 +193,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
193 u32 num_pes_per_gpc = nvgpu_get_litter_value(g, 193 u32 num_pes_per_gpc = nvgpu_get_litter_value(g,
194 GPU_LIT_NUM_PES_PER_GPC); 194 GPU_LIT_NUM_PES_PER_GPC);
195 195
196 gk20a_dbg_fn(""); 196 nvgpu_log_fn(g, " ");
197 197
198 tsg = tsg_gk20a_from_ch(c); 198 tsg = tsg_gk20a_from_ch(c);
199 if (!tsg) 199 if (!tsg)
@@ -280,20 +280,20 @@ void gr_gm20b_set_rd_coalesce(struct gk20a *g, u32 data)
280{ 280{
281 u32 val; 281 u32 val;
282 282
283 gk20a_dbg_fn(""); 283 nvgpu_log_fn(g, " ");
284 284
285 val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r()); 285 val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r());
286 val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(), 286 val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(),
287 gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data)); 287 gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data));
288 gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val); 288 gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val);
289 289
290 gk20a_dbg_fn("done"); 290 nvgpu_log_fn(g, "done");
291} 291}
292 292
293int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr, 293int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
294 u32 class_num, u32 offset, u32 data) 294 u32 class_num, u32 offset, u32 data)
295{ 295{
296 gk20a_dbg_fn(""); 296 nvgpu_log_fn(g, " ");
297 297
298 if (class_num == MAXWELL_COMPUTE_B) { 298 if (class_num == MAXWELL_COMPUTE_B) {
299 switch (offset << 2) { 299 switch (offset << 2) {
@@ -341,7 +341,7 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
341 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 341 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
342 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); 342 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
343 343
344 gk20a_dbg_fn(""); 344 nvgpu_log_fn(g, " ");
345 /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF) 345 /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
346 return; */ 346 return; */
347 347
@@ -390,7 +390,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
390 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 390 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
391 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); 391 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
392 392
393 gk20a_dbg_fn(""); 393 nvgpu_log_fn(g, " ");
394 394
395 if (cb_size > gr->attrib_cb_size) 395 if (cb_size > gr->attrib_cb_size)
396 cb_size = gr->attrib_cb_size; 396 cb_size = gr->attrib_cb_size;
@@ -665,7 +665,7 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
665{ 665{
666 int err = 0; 666 int err = 0;
667 667
668 gk20a_dbg_fn(""); 668 nvgpu_log_fn(g, " ");
669 669
670 err = gr_gk20a_init_fs_state(g); 670 err = gr_gk20a_init_fs_state(g);
671 if (err) 671 if (err)
@@ -762,7 +762,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
762 gr_fecs_falcon_hwcfg_r(); 762 gr_fecs_falcon_hwcfg_r();
763 u8 falcon_id_mask = 0; 763 u8 falcon_id_mask = 0;
764 764
765 gk20a_dbg_fn(""); 765 nvgpu_log_fn(g, " ");
766 766
767 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { 767 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
768 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), 768 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
@@ -829,7 +829,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
829 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff); 829 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff);
830 gk20a_writel(g, gr_fecs_cpuctl_alias_r(), 830 gk20a_writel(g, gr_fecs_cpuctl_alias_r(),
831 gr_fecs_cpuctl_startcpu_f(1)); 831 gr_fecs_cpuctl_startcpu_f(1));
832 gk20a_dbg_fn("done"); 832 nvgpu_log_fn(g, "done");
833 833
834 return 0; 834 return 0;
835} 835}
@@ -858,7 +858,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
858{ 858{
859 int err; 859 int err;
860 860
861 gk20a_dbg_fn(""); 861 nvgpu_log_fn(g, " ");
862 862
863 err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); 863 err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
864 if (err) 864 if (err)
@@ -867,7 +867,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
867 if (class == MAXWELL_COMPUTE_B) 867 if (class == MAXWELL_COMPUTE_B)
868 gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA; 868 gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
869 869
870 gk20a_dbg_fn("done"); 870 nvgpu_log_fn(g, "done");
871 871
872 return 0; 872 return 0;
873} 873}
@@ -881,7 +881,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
881 u32 cta_preempt_option = 881 u32 cta_preempt_option =
882 ctxsw_prog_main_image_preemption_options_control_cta_enabled_f(); 882 ctxsw_prog_main_image_preemption_options_control_cta_enabled_f();
883 883
884 gk20a_dbg_fn(""); 884 nvgpu_log_fn(g, " ");
885 885
886 tsg = tsg_gk20a_from_ch(c); 886 tsg = tsg_gk20a_from_ch(c);
887 if (!tsg) 887 if (!tsg)
@@ -889,13 +889,13 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
889 889
890 gr_ctx = &tsg->gr_ctx; 890 gr_ctx = &tsg->gr_ctx;
891 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { 891 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
892 gk20a_dbg_info("CTA: %x", cta_preempt_option); 892 nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
893 nvgpu_mem_wr(g, mem, 893 nvgpu_mem_wr(g, mem,
894 ctxsw_prog_main_image_preemption_options_o(), 894 ctxsw_prog_main_image_preemption_options_o(),
895 cta_preempt_option); 895 cta_preempt_option);
896 } 896 }
897 897
898 gk20a_dbg_fn("done"); 898 nvgpu_log_fn(g, "done");
899} 899}
900 900
901int gr_gm20b_dump_gr_status_regs(struct gk20a *g, 901int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
@@ -1044,7 +1044,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
1044 struct nvgpu_mem *mem; 1044 struct nvgpu_mem *mem;
1045 u32 v; 1045 u32 v;
1046 1046
1047 gk20a_dbg_fn(""); 1047 nvgpu_log_fn(c->g, " ");
1048 1048
1049 tsg = tsg_gk20a_from_ch(c); 1049 tsg = tsg_gk20a_from_ch(c);
1050 if (!tsg) 1050 if (!tsg)
@@ -1066,7 +1066,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
1066 1066
1067 nvgpu_mem_end(c->g, mem); 1067 nvgpu_mem_end(c->g, mem);
1068 1068
1069 gk20a_dbg_fn("done"); 1069 nvgpu_log_fn(c->g, "done");
1070 1070
1071 return 0; 1071 return 0;
1072} 1072}
@@ -1220,19 +1220,19 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
1220 1220
1221 /* Only for debug purpose */ 1221 /* Only for debug purpose */
1222 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { 1222 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
1223 gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", 1223 nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n",
1224 sm_id, w_state[sm_id].valid_warps[0]); 1224 sm_id, w_state[sm_id].valid_warps[0]);
1225 gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", 1225 nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n",
1226 sm_id, w_state[sm_id].valid_warps[1]); 1226 sm_id, w_state[sm_id].valid_warps[1]);
1227 1227
1228 gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", 1228 nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n",
1229 sm_id, w_state[sm_id].trapped_warps[0]); 1229 sm_id, w_state[sm_id].trapped_warps[0]);
1230 gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", 1230 nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n",
1231 sm_id, w_state[sm_id].trapped_warps[1]); 1231 sm_id, w_state[sm_id].trapped_warps[1]);
1232 1232
1233 gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", 1233 nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n",
1234 sm_id, w_state[sm_id].paused_warps[0]); 1234 sm_id, w_state[sm_id].paused_warps[0]);
1235 gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", 1235 nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n",
1236 sm_id, w_state[sm_id].paused_warps[1]); 1236 sm_id, w_state[sm_id].paused_warps[1]);
1237 } 1237 }
1238} 1238}
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index dcb65372..66cd49e7 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -61,7 +61,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
61 61
62 int err; 62 int err;
63 63
64 gk20a_dbg_fn(""); 64 nvgpu_log_fn(g, " ");
65 65
66 if (max_comptag_lines == 0U) 66 if (max_comptag_lines == 0U)
67 return 0; 67 return 0;
@@ -87,9 +87,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
87 if (max_comptag_lines > hw_max_comptag_lines) 87 if (max_comptag_lines > hw_max_comptag_lines)
88 max_comptag_lines = hw_max_comptag_lines; 88 max_comptag_lines = hw_max_comptag_lines;
89 89
90 gk20a_dbg_info("compbit backing store size : %d", 90 nvgpu_log_info(g, "compbit backing store size : %d",
91 compbit_backing_size); 91 compbit_backing_size);
92 gk20a_dbg_info("max comptag lines : %d", 92 nvgpu_log_info(g, "max comptag lines : %d",
93 max_comptag_lines); 93 max_comptag_lines);
94 94
95 err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); 95 err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
@@ -121,7 +121,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
121 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); 121 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
122 const u32 max_lines = 16384U; 122 const u32 max_lines = 16384U;
123 123
124 gk20a_dbg_fn(""); 124 nvgpu_log_fn(g, " ");
125 125
126 trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); 126 trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
127 127
@@ -134,7 +134,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
134 134
135 nvgpu_mutex_acquire(&g->mm.l2_op_lock); 135 nvgpu_mutex_acquire(&g->mm.l2_op_lock);
136 136
137 gk20a_dbg_info("clearing CBC lines %u..%u", min, iter_max); 137 nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
138 138
139 if (op == gk20a_cbc_op_clear) { 139 if (op == gk20a_cbc_op_clear) {
140 gk20a_writel( 140 gk20a_writel(
@@ -205,11 +205,11 @@ void gm20b_ltc_init_fs_state(struct gk20a *g)
205{ 205{
206 u32 reg; 206 u32 reg;
207 207
208 gk20a_dbg_info("initialize gm20b l2"); 208 nvgpu_log_info(g, "initialize gm20b l2");
209 209
210 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); 210 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
211 g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); 211 g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
212 gk20a_dbg_info("%d ltcs out of %d", g->ltc_count, g->max_ltc_count); 212 nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count);
213 213
214 gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(), 214 gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(),
215 g->ltc_count); 215 g->ltc_count);
@@ -459,7 +459,7 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
459 gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), 459 gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
460 compbit_base_post_divide); 460 compbit_base_post_divide);
461 461
462 gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, 462 nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
463 "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n", 463 "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
464 (u32)(compbit_store_iova >> 32), 464 (u32)(compbit_store_iova >> 32),
465 (u32)(compbit_store_iova & 0xffffffff), 465 (u32)(compbit_store_iova & 0xffffffff),
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 46cd1fc6..deca6686 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GM20B MMU 2 * GM20B MMU
3 * 3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -36,9 +36,9 @@ void gm20b_mm_set_big_page_size(struct gk20a *g,
36{ 36{
37 u32 val; 37 u32 val;
38 38
39 gk20a_dbg_fn(""); 39 nvgpu_log_fn(g, " ");
40 40
41 gk20a_dbg_info("big page size %d\n", size); 41 nvgpu_log_info(g, "big page size %d\n", size);
42 val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w()); 42 val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());
43 val &= ~ram_in_big_page_size_m(); 43 val &= ~ram_in_big_page_size_m();
44 44
@@ -48,7 +48,7 @@ void gm20b_mm_set_big_page_size(struct gk20a *g,
48 val |= ram_in_big_page_size_128kb_f(); 48 val |= ram_in_big_page_size_128kb_f();
49 49
50 nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val); 50 nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val);
51 gk20a_dbg_fn("done"); 51 nvgpu_log_fn(g, "done");
52} 52}
53 53
54u32 gm20b_mm_get_big_page_sizes(void) 54u32 gm20b_mm_get_big_page_sizes(void)
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 1c5fdce0..aa992c37 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GM20B PMU 2 * GM20B PMU
3 * 3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5* 5*
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -37,8 +37,8 @@
37#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h> 37#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
38#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h> 38#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
39 39
40#define gm20b_dbg_pmu(fmt, arg...) \ 40#define gm20b_dbg_pmu(g, fmt, arg...) \
41 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 41 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
42 42
43 43
44/* PROD settings for ELPG sequencing registers*/ 44/* PROD settings for ELPG sequencing registers*/
@@ -108,7 +108,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
108 u32 reg_writes; 108 u32 reg_writes;
109 u32 index; 109 u32 index;
110 110
111 gk20a_dbg_fn(""); 111 nvgpu_log_fn(g, " ");
112 112
113 if (g->elpg_enabled) { 113 if (g->elpg_enabled) {
114 reg_writes = ((sizeof(_pginitseq_gm20b) / 114 reg_writes = ((sizeof(_pginitseq_gm20b) /
@@ -120,20 +120,20 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
120 } 120 }
121 } 121 }
122 122
123 gk20a_dbg_fn("done"); 123 nvgpu_log_fn(g, "done");
124 return ret; 124 return ret;
125} 125}
126 126
127static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg, 127static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
128 void *param, u32 handle, u32 status) 128 void *param, u32 handle, u32 status)
129{ 129{
130 gk20a_dbg_fn(""); 130 nvgpu_log_fn(g, " ");
131 131
132 gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION"); 132 gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
133 133
134 if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) 134 if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS)
135 g->pmu_lsf_pmu_wpr_init_done = 1; 135 g->pmu_lsf_pmu_wpr_init_done = 1;
136 gk20a_dbg_fn("done"); 136 nvgpu_log_fn(g, "done");
137} 137}
138 138
139 139
@@ -143,7 +143,7 @@ int gm20b_pmu_init_acr(struct gk20a *g)
143 struct pmu_cmd cmd; 143 struct pmu_cmd cmd;
144 u32 seq; 144 u32 seq;
145 145
146 gk20a_dbg_fn(""); 146 nvgpu_log_fn(g, " ");
147 147
148 /* init ACR */ 148 /* init ACR */
149 memset(&cmd, 0, sizeof(struct pmu_cmd)); 149 memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -153,11 +153,11 @@ int gm20b_pmu_init_acr(struct gk20a *g)
153 cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION; 153 cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION;
154 cmd.cmd.acr.init_wpr.regionid = 0x01; 154 cmd.cmd.acr.init_wpr.regionid = 0x01;
155 cmd.cmd.acr.init_wpr.wproffset = 0x00; 155 cmd.cmd.acr.init_wpr.wproffset = 0x00;
156 gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION"); 156 gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION");
157 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 157 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
158 pmu_handle_acr_init_wpr_msg, pmu, &seq, ~0); 158 pmu_handle_acr_init_wpr_msg, pmu, &seq, ~0);
159 159
160 gk20a_dbg_fn("done"); 160 nvgpu_log_fn(g, "done");
161 return 0; 161 return 0;
162} 162}
163 163
@@ -165,14 +165,14 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
165 void *param, u32 handle, u32 status) 165 void *param, u32 handle, u32 status)
166{ 166{
167 167
168 gk20a_dbg_fn(""); 168 nvgpu_log_fn(g, " ");
169 169
170 170
171 gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON"); 171 gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
172 172
173 gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid); 173 gm20b_dbg_pmu(g, "response code = %x\n", msg->msg.acr.acrmsg.falconid);
174 g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid; 174 g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid;
175 gk20a_dbg_fn("done"); 175 nvgpu_log_fn(g, "done");
176} 176}
177 177
178static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms, 178static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
@@ -182,7 +182,7 @@ static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
182 u32 reg; 182 u32 reg;
183 struct nvgpu_timeout timeout; 183 struct nvgpu_timeout timeout;
184 184
185 gk20a_dbg_fn(""); 185 nvgpu_log_fn(g, " ");
186 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); 186 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
187 187
188 nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); 188 nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
@@ -203,9 +203,9 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
203 struct pmu_cmd cmd; 203 struct pmu_cmd cmd;
204 u32 seq; 204 u32 seq;
205 205
206 gk20a_dbg_fn(""); 206 nvgpu_log_fn(g, " ");
207 207
208 gm20b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); 208 gm20b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
209 if (g->pmu_lsf_pmu_wpr_init_done) { 209 if (g->pmu_lsf_pmu_wpr_init_done) {
210 /* send message to load FECS falcon */ 210 /* send message to load FECS falcon */
211 memset(&cmd, 0, sizeof(struct pmu_cmd)); 211 memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -216,13 +216,13 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
216 PMU_ACR_CMD_ID_BOOTSTRAP_FALCON; 216 PMU_ACR_CMD_ID_BOOTSTRAP_FALCON;
217 cmd.cmd.acr.bootstrap_falcon.flags = flags; 217 cmd.cmd.acr.bootstrap_falcon.flags = flags;
218 cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id; 218 cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id;
219 gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n", 219 gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n",
220 falcon_id); 220 falcon_id);
221 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 221 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
222 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); 222 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
223 } 223 }
224 224
225 gk20a_dbg_fn("done"); 225 nvgpu_log_fn(g, "done");
226 return; 226 return;
227} 227}
228 228
diff --git a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
index ce4d4fab..dfe977ff 100644
--- a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GM20B THERMAL 2 * GM20B THERMAL
3 * 3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ int gm20b_init_therm_setup_hw(struct gk20a *g)
32{ 32{
33 u32 v; 33 u32 v;
34 34
35 gk20a_dbg_fn(""); 35 nvgpu_log_fn(g, " ");
36 36
37 /* program NV_THERM registers */ 37 /* program NV_THERM registers */
38 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | 38 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 5ab8cfcc..61b443e0 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -43,8 +43,8 @@
43#include <nvgpu/hw/gp106/hw_pwr_gp106.h> 43#include <nvgpu/hw/gp106/hw_pwr_gp106.h>
44 44
45/*Defines*/ 45/*Defines*/
46#define gp106_dbg_pmu(fmt, arg...) \ 46#define gp106_dbg_pmu(g, fmt, arg...) \
47 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 47 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
48 48
49typedef int (*get_ucode_details)(struct gk20a *g, 49typedef int (*get_ucode_details)(struct gk20a *g,
50 struct flcn_ucode_img_v1 *udata); 50 struct flcn_ucode_img_v1 *udata);
@@ -113,7 +113,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
113 struct lsf_ucode_desc_v1 *lsf_desc; 113 struct lsf_ucode_desc_v1 *lsf_desc;
114 int err; 114 int err;
115 115
116 gp106_dbg_pmu("requesting PMU ucode in gp106\n"); 116 gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n");
117 pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 117 pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE,
118 NVGPU_REQUEST_FIRMWARE_NO_SOC); 118 NVGPU_REQUEST_FIRMWARE_NO_SOC);
119 if (!pmu_fw) { 119 if (!pmu_fw) {
@@ -121,9 +121,9 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
121 return -ENOENT; 121 return -ENOENT;
122 } 122 }
123 g->acr.pmu_fw = pmu_fw; 123 g->acr.pmu_fw = pmu_fw;
124 gp106_dbg_pmu("Loaded PMU ucode in for blob preparation"); 124 gp106_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
125 125
126 gp106_dbg_pmu("requesting PMU ucode desc in GM20B\n"); 126 gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
127 pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 127 pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC,
128 NVGPU_REQUEST_FIRMWARE_NO_SOC); 128 NVGPU_REQUEST_FIRMWARE_NO_SOC);
129 if (!pmu_desc) { 129 if (!pmu_desc) {
@@ -164,7 +164,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
164 p_img->fw_ver = NULL; 164 p_img->fw_ver = NULL;
165 p_img->header = NULL; 165 p_img->header = NULL;
166 p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; 166 p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
167 gp106_dbg_pmu("requesting PMU ucode in GM20B exit\n"); 167 gp106_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
168 168
169 nvgpu_release_firmware(g, pmu_sig); 169 nvgpu_release_firmware(g, pmu_sig);
170 return 0; 170 return 0;
@@ -262,7 +262,7 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
262 p_img->fw_ver = NULL; 262 p_img->fw_ver = NULL;
263 p_img->header = NULL; 263 p_img->header = NULL;
264 p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; 264 p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
265 gp106_dbg_pmu("fecs fw loaded\n"); 265 gp106_dbg_pmu(g, "fecs fw loaded\n");
266 nvgpu_release_firmware(g, fecs_sig); 266 nvgpu_release_firmware(g, fecs_sig);
267 return 0; 267 return 0;
268free_lsf_desc: 268free_lsf_desc:
@@ -358,7 +358,7 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
358 p_img->fw_ver = NULL; 358 p_img->fw_ver = NULL;
359 p_img->header = NULL; 359 p_img->header = NULL;
360 p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; 360 p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
361 gp106_dbg_pmu("gpccs fw loaded\n"); 361 gp106_dbg_pmu(g, "gpccs fw loaded\n");
362 nvgpu_release_firmware(g, gpccs_sig); 362 nvgpu_release_firmware(g, gpccs_sig);
363 return 0; 363 return 0;
364free_lsf_desc: 364free_lsf_desc:
@@ -381,7 +381,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
381 non WPR blob of ucodes*/ 381 non WPR blob of ucodes*/
382 err = nvgpu_init_pmu_fw_support(pmu); 382 err = nvgpu_init_pmu_fw_support(pmu);
383 if (err) { 383 if (err) {
384 gp106_dbg_pmu("failed to set function pointers\n"); 384 gp106_dbg_pmu(g, "failed to set function pointers\n");
385 return err; 385 return err;
386 } 386 }
387 return 0; 387 return 0;
@@ -391,12 +391,12 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
391 gr_gk20a_init_ctxsw_ucode(g); 391 gr_gk20a_init_ctxsw_ucode(g);
392 392
393 g->ops.pmu.get_wpr(g, &wpr_inf); 393 g->ops.pmu.get_wpr(g, &wpr_inf);
394 gp106_dbg_pmu("wpr carveout base:%llx\n", (wpr_inf.wpr_base)); 394 gp106_dbg_pmu(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
395 gp106_dbg_pmu("wpr carveout size :%x\n", (u32)wpr_inf.size); 395 gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size);
396 396
397 /* Discover all managed falcons*/ 397 /* Discover all managed falcons*/
398 err = lsfm_discover_ucode_images(g, plsfm); 398 err = lsfm_discover_ucode_images(g, plsfm);
399 gp106_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); 399 gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
400 if (err) 400 if (err)
401 goto exit_err; 401 goto exit_err;
402 402
@@ -412,14 +412,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
412 if (err) 412 if (err)
413 goto exit_err; 413 goto exit_err;
414 414
415 gp106_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", 415 gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
416 plsfm->managed_flcn_cnt, plsfm->wpr_size); 416 plsfm->managed_flcn_cnt, plsfm->wpr_size);
417 417
418 lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); 418 lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
419 } else { 419 } else {
420 gp106_dbg_pmu("LSFM is managing no falcons.\n"); 420 gp106_dbg_pmu(g, "LSFM is managing no falcons.\n");
421 } 421 }
422 gp106_dbg_pmu("prepare ucode blob return 0\n"); 422 gp106_dbg_pmu(g, "prepare ucode blob return 0\n");
423 free_acr_resources(g, plsfm); 423 free_acr_resources(g, plsfm);
424 424
425 exit_err: 425 exit_err:
@@ -465,14 +465,14 @@ int lsfm_discover_ucode_images(struct gk20a *g,
465 465
466 plsfm->managed_flcn_cnt++; 466 plsfm->managed_flcn_cnt++;
467 } else { 467 } else {
468 gp106_dbg_pmu("id not managed %d\n", 468 gp106_dbg_pmu(g, "id not managed %d\n",
469 ucode_img.lsf_desc->falcon_id); 469 ucode_img.lsf_desc->falcon_id);
470 } 470 }
471 } 471 }
472 472
473 /*Free any ucode image resources if not managing this falcon*/ 473 /*Free any ucode image resources if not managing this falcon*/
474 if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { 474 if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
475 gp106_dbg_pmu("pmu is not LSFM managed\n"); 475 gp106_dbg_pmu(g, "pmu is not LSFM managed\n");
476 lsfm_free_ucode_img_res(g, &ucode_img); 476 lsfm_free_ucode_img_res(g, &ucode_img);
477 } 477 }
478 478
@@ -503,7 +503,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
503 == 0) 503 == 0)
504 plsfm->managed_flcn_cnt++; 504 plsfm->managed_flcn_cnt++;
505 } else { 505 } else {
506 gp106_dbg_pmu("not managed %d\n", 506 gp106_dbg_pmu(g, "not managed %d\n",
507 ucode_img.lsf_desc->falcon_id); 507 ucode_img.lsf_desc->falcon_id);
508 lsfm_free_nonpmu_ucode_img_res(g, 508 lsfm_free_nonpmu_ucode_img_res(g,
509 &ucode_img); 509 &ucode_img);
@@ -511,7 +511,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
511 } 511 }
512 } else { 512 } else {
513 /* Consumed all available falcon objects */ 513 /* Consumed all available falcon objects */
514 gp106_dbg_pmu("Done checking for ucodes %d\n", i); 514 gp106_dbg_pmu(g, "Done checking for ucodes %d\n", i);
515 break; 515 break;
516 } 516 }
517 } 517 }
@@ -549,19 +549,19 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
549 g->ops.pmu.get_wpr(g, &wpr_inf); 549 g->ops.pmu.get_wpr(g, &wpr_inf);
550 addr_base += (wpr_inf.wpr_base); 550 addr_base += (wpr_inf.wpr_base);
551 551
552 gp106_dbg_pmu("pmu loader cfg addrbase 0x%llx\n", addr_base); 552 gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base);
553 /*From linux*/ 553 /*From linux*/
554 addr_code = addr_base + 554 addr_code = addr_base +
555 desc->app_start_offset + 555 desc->app_start_offset +
556 desc->app_resident_code_offset; 556 desc->app_resident_code_offset;
557 gp106_dbg_pmu("app start %d app res code off %d\n", 557 gp106_dbg_pmu(g, "app start %d app res code off %d\n",
558 desc->app_start_offset, desc->app_resident_code_offset); 558 desc->app_start_offset, desc->app_resident_code_offset);
559 addr_data = addr_base + 559 addr_data = addr_base +
560 desc->app_start_offset + 560 desc->app_start_offset +
561 desc->app_resident_data_offset; 561 desc->app_resident_data_offset;
562 gp106_dbg_pmu("app res data offset%d\n", 562 gp106_dbg_pmu(g, "app res data offset%d\n",
563 desc->app_resident_data_offset); 563 desc->app_resident_data_offset);
564 gp106_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); 564 gp106_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
565 565
566 addr_args = ((pwr_falcon_hwcfg_dmem_size_v( 566 addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
567 gk20a_readl(g, pwr_falcon_hwcfg_r()))) 567 gk20a_readl(g, pwr_falcon_hwcfg_r())))
@@ -569,7 +569,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
569 569
570 addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); 570 addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
571 571
572 gp106_dbg_pmu("addr_args %x\n", addr_args); 572 gp106_dbg_pmu(g, "addr_args %x\n", addr_args);
573 573
574 /* Populate the LOADER_CONFIG state */ 574 /* Populate the LOADER_CONFIG state */
575 memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1)); 575 memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1));
@@ -621,8 +621,8 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
621 g->ops.pmu.get_wpr(g, &wpr_inf); 621 g->ops.pmu.get_wpr(g, &wpr_inf);
622 addr_base += wpr_inf.wpr_base; 622 addr_base += wpr_inf.wpr_base;
623 623
624 gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id); 624 gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id);
625 gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base); 625 gp106_dbg_pmu(g, "gen loader cfg addrbase %llx ", addr_base);
626 addr_code = addr_base + 626 addr_code = addr_base +
627 desc->app_start_offset + 627 desc->app_start_offset +
628 desc->app_resident_code_offset; 628 desc->app_resident_code_offset;
@@ -630,7 +630,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
630 desc->app_start_offset + 630 desc->app_start_offset +
631 desc->app_resident_data_offset; 631 desc->app_resident_data_offset;
632 632
633 gp106_dbg_pmu("gen cfg addrcode %llx data %llx load offset %x", 633 gp106_dbg_pmu(g, "gen cfg addrcode %llx data %llx load offset %x",
634 addr_code, addr_data, desc->bootloader_start_offset); 634 addr_code, addr_data, desc->bootloader_start_offset);
635 635
636 /* Populate the LOADER_CONFIG state */ 636 /* Populate the LOADER_CONFIG state */
@@ -653,7 +653,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
653 653
654 struct nvgpu_pmu *pmu = &g->pmu; 654 struct nvgpu_pmu *pmu = &g->pmu;
655 if (pnode->wpr_header.falcon_id != pmu->falcon_id) { 655 if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
656 gp106_dbg_pmu("non pmu. write flcn bl gen desc\n"); 656 gp106_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
657 g->ops.pmu.flcn_populate_bl_dmem_desc(g, 657 g->ops.pmu.flcn_populate_bl_dmem_desc(g,
658 pnode, &pnode->bl_gen_desc_size, 658 pnode, &pnode->bl_gen_desc_size,
659 pnode->wpr_header.falcon_id); 659 pnode->wpr_header.falcon_id);
@@ -661,7 +661,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
661 } 661 }
662 662
663 if (pmu->pmu_mode & PMU_LSFM_MANAGED) { 663 if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
664 gp106_dbg_pmu("pmu write flcn bl gen desc\n"); 664 gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
665 if (pnode->wpr_header.falcon_id == pmu->falcon_id) 665 if (pnode->wpr_header.falcon_id == pmu->falcon_id)
666 return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, 666 return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
667 &pnode->bl_gen_desc_size); 667 &pnode->bl_gen_desc_size);
@@ -694,46 +694,46 @@ void lsfm_init_wpr_contents(struct gk20a *g,
694 nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), 694 nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
695 &pnode->wpr_header, sizeof(pnode->wpr_header)); 695 &pnode->wpr_header, sizeof(pnode->wpr_header));
696 696
697 gp106_dbg_pmu("wpr header"); 697 gp106_dbg_pmu(g, "wpr header");
698 gp106_dbg_pmu("falconid :%d", 698 gp106_dbg_pmu(g, "falconid :%d",
699 pnode->wpr_header.falcon_id); 699 pnode->wpr_header.falcon_id);
700 gp106_dbg_pmu("lsb_offset :%x", 700 gp106_dbg_pmu(g, "lsb_offset :%x",
701 pnode->wpr_header.lsb_offset); 701 pnode->wpr_header.lsb_offset);
702 gp106_dbg_pmu("bootstrap_owner :%d", 702 gp106_dbg_pmu(g, "bootstrap_owner :%d",
703 pnode->wpr_header.bootstrap_owner); 703 pnode->wpr_header.bootstrap_owner);
704 gp106_dbg_pmu("lazy_bootstrap :%d", 704 gp106_dbg_pmu(g, "lazy_bootstrap :%d",
705 pnode->wpr_header.lazy_bootstrap); 705 pnode->wpr_header.lazy_bootstrap);
706 gp106_dbg_pmu("status :%d", 706 gp106_dbg_pmu(g, "status :%d",
707 pnode->wpr_header.status); 707 pnode->wpr_header.status);
708 708
709 /*Flush LSB header to memory*/ 709 /*Flush LSB header to memory*/
710 nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, 710 nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
711 &pnode->lsb_header, sizeof(pnode->lsb_header)); 711 &pnode->lsb_header, sizeof(pnode->lsb_header));
712 712
713 gp106_dbg_pmu("lsb header"); 713 gp106_dbg_pmu(g, "lsb header");
714 gp106_dbg_pmu("ucode_off :%x", 714 gp106_dbg_pmu(g, "ucode_off :%x",
715 pnode->lsb_header.ucode_off); 715 pnode->lsb_header.ucode_off);
716 gp106_dbg_pmu("ucode_size :%x", 716 gp106_dbg_pmu(g, "ucode_size :%x",
717 pnode->lsb_header.ucode_size); 717 pnode->lsb_header.ucode_size);
718 gp106_dbg_pmu("data_size :%x", 718 gp106_dbg_pmu(g, "data_size :%x",
719 pnode->lsb_header.data_size); 719 pnode->lsb_header.data_size);
720 gp106_dbg_pmu("bl_code_size :%x", 720 gp106_dbg_pmu(g, "bl_code_size :%x",
721 pnode->lsb_header.bl_code_size); 721 pnode->lsb_header.bl_code_size);
722 gp106_dbg_pmu("bl_imem_off :%x", 722 gp106_dbg_pmu(g, "bl_imem_off :%x",
723 pnode->lsb_header.bl_imem_off); 723 pnode->lsb_header.bl_imem_off);
724 gp106_dbg_pmu("bl_data_off :%x", 724 gp106_dbg_pmu(g, "bl_data_off :%x",
725 pnode->lsb_header.bl_data_off); 725 pnode->lsb_header.bl_data_off);
726 gp106_dbg_pmu("bl_data_size :%x", 726 gp106_dbg_pmu(g, "bl_data_size :%x",
727 pnode->lsb_header.bl_data_size); 727 pnode->lsb_header.bl_data_size);
728 gp106_dbg_pmu("app_code_off :%x", 728 gp106_dbg_pmu(g, "app_code_off :%x",
729 pnode->lsb_header.app_code_off); 729 pnode->lsb_header.app_code_off);
730 gp106_dbg_pmu("app_code_size :%x", 730 gp106_dbg_pmu(g, "app_code_size :%x",
731 pnode->lsb_header.app_code_size); 731 pnode->lsb_header.app_code_size);
732 gp106_dbg_pmu("app_data_off :%x", 732 gp106_dbg_pmu(g, "app_data_off :%x",
733 pnode->lsb_header.app_data_off); 733 pnode->lsb_header.app_data_off);
734 gp106_dbg_pmu("app_data_size :%x", 734 gp106_dbg_pmu(g, "app_data_size :%x",
735 pnode->lsb_header.app_data_size); 735 pnode->lsb_header.app_data_size);
736 gp106_dbg_pmu("flags :%x", 736 gp106_dbg_pmu(g, "flags :%x",
737 pnode->lsb_header.flags); 737 pnode->lsb_header.flags);
738 738
739 /*If this falcon has a boot loader and related args, 739 /*If this falcon has a boot loader and related args,
@@ -1049,7 +1049,7 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g)
1049 u32 *acr_ucode_data_t210_load; 1049 u32 *acr_ucode_data_t210_load;
1050 struct wpr_carveout_info wpr_inf; 1050 struct wpr_carveout_info wpr_inf;
1051 1051
1052 gp106_dbg_pmu(""); 1052 gp106_dbg_pmu(g, " ");
1053 1053
1054 if (!acr_fw) { 1054 if (!acr_fw) {
1055 /*First time init case*/ 1055 /*First time init case*/
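
Note on the acr_gp106.c hunks above: nearly every change in this file follows from the two-line macro update near the top of the file. A minimal consolidation of that pattern, assembled only from the hunks shown here (no definitions beyond what the diff itself contains are assumed):

    /* before: the PMU debug macro needed no device pointer */
    #define gp106_dbg_pmu(fmt, arg...) \
            gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

    /* after: nvgpu_log() requires struct gk20a, so the macro gains a
     * 'g' parameter and every call site passes it through explicitly */
    #define gp106_dbg_pmu(g, fmt, arg...) \
            nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

    /* a typical converted call site, e.g. in pmu_ucode_details() */
    gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n");

The remaining hunks in this file are the mechanical call-site updates that follow from that signature change.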
diff --git a/drivers/gpu/nvgpu/gp106/bios_gp106.c b/drivers/gpu/nvgpu/gp106/bios_gp106.c
index 8511d3c2..3363aeba 100644
--- a/drivers/gpu/nvgpu/gp106/bios_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/bios_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -56,13 +56,13 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port)
56 u32 *src_u32 = (u32 *)src; 56 u32 *src_u32 = (u32 *)src;
57 u32 blk; 57 u32 blk;
58 58
59 gk20a_dbg_info("upload %d bytes to %x", size, dst); 59 nvgpu_log_info(g, "upload %d bytes to %x", size, dst);
60 60
61 words = DIV_ROUND_UP(size, 4); 61 words = DIV_ROUND_UP(size, 4);
62 62
63 blk = dst >> 8; 63 blk = dst >> 8;
64 64
65 gk20a_dbg_info("upload %d words to %x blk %d", 65 nvgpu_log_info(g, "upload %d words to %x blk %d",
66 words, dst, blk); 66 words, dst, blk);
67 gk20a_writel(g, pwr_falcon_dmemc_r(port), 67 gk20a_writel(g, pwr_falcon_dmemc_r(port),
68 pwr_falcon_dmemc_offs_f(dst >> 2) | 68 pwr_falcon_dmemc_offs_f(dst >> 2) |
@@ -79,7 +79,7 @@ static int gp106_bios_devinit(struct gk20a *g)
79 int devinit_completed; 79 int devinit_completed;
80 struct nvgpu_timeout timeout; 80 struct nvgpu_timeout timeout;
81 81
82 gk20a_dbg_fn(""); 82 nvgpu_log_fn(g, " ");
83 83
84 if (nvgpu_flcn_reset(g->pmu.flcn)) { 84 if (nvgpu_flcn_reset(g->pmu.flcn)) {
85 err = -ETIMEDOUT; 85 err = -ETIMEDOUT;
@@ -128,7 +128,7 @@ static int gp106_bios_devinit(struct gk20a *g)
128 gk20a_get_gr_idle_timeout(g)); 128 gk20a_get_gr_idle_timeout(g));
129 129
130out: 130out:
131 gk20a_dbg_fn("done"); 131 nvgpu_log_fn(g, "done");
132 return err; 132 return err;
133} 133}
134 134
@@ -146,7 +146,7 @@ static int gp106_bios_preos(struct gk20a *g)
146{ 146{
147 int err = 0; 147 int err = 0;
148 148
149 gk20a_dbg_fn(""); 149 nvgpu_log_fn(g, " ");
150 150
151 if (nvgpu_flcn_reset(g->pmu.flcn)) { 151 if (nvgpu_flcn_reset(g->pmu.flcn)) {
152 err = -ETIMEDOUT; 152 err = -ETIMEDOUT;
@@ -177,7 +177,7 @@ static int gp106_bios_preos(struct gk20a *g)
177 gk20a_get_gr_idle_timeout(g)); 177 gk20a_get_gr_idle_timeout(g));
178 178
179out: 179out:
180 gk20a_dbg_fn("done"); 180 nvgpu_log_fn(g, "done");
181 return err; 181 return err;
182} 182}
183 183
@@ -186,12 +186,12 @@ int gp106_bios_init(struct gk20a *g)
186 unsigned int i; 186 unsigned int i;
187 int err; 187 int err;
188 188
189 gk20a_dbg_fn(""); 189 nvgpu_log_fn(g, " ");
190 190
191 if (g->bios_is_init) 191 if (g->bios_is_init)
192 return 0; 192 return 0;
193 193
194 gk20a_dbg_info("reading bios from EEPROM"); 194 nvgpu_log_info(g, "reading bios from EEPROM");
195 g->bios.size = BIOS_SIZE; 195 g->bios.size = BIOS_SIZE;
196 g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE); 196 g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE);
197 if (!g->bios.data) 197 if (!g->bios.data)
@@ -218,7 +218,7 @@ int gp106_bios_init(struct gk20a *g)
218 goto free_firmware; 218 goto free_firmware;
219 } 219 }
220 220
221 gk20a_dbg_fn("done"); 221 nvgpu_log_fn(g, "done");
222 222
223 err = gp106_bios_devinit(g); 223 err = gp106_bios_devinit(g);
224 if (err) { 224 if (err) {
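
Note on the bios_gp106.c hunks above: this file shows the other recurring substitution in the series. Function-entry traces and plain info messages switch APIs and pick up the device pointer, and where the old trace passed an empty string the new one passes a single space. A sketch assembled from the hunks above (the nvgpu_log_fn/nvgpu_log_info definitions themselves are not part of this diff):

    /* before */
    gk20a_dbg_fn("");
    gk20a_dbg_info("reading bios from EEPROM");

    /* after: 'g' is already in scope in every converted function;
     * bare function-entry traces use " " as the format string */
    nvgpu_log_fn(g, " ");
    nvgpu_log_info(g, "reading bios from EEPROM");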
diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c
index 9a94a7b9..d19baac5 100644
--- a/drivers/gpu/nvgpu/gp106/clk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c
@@ -36,9 +36,6 @@
36 36
37#include <nvgpu/hw/gp106/hw_trim_gp106.h> 37#include <nvgpu/hw/gp106/hw_trim_gp106.h>
38 38
39#define gk20a_dbg_clk(fmt, arg...) \
40 gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
41
42#ifdef CONFIG_DEBUG_FS 39#ifdef CONFIG_DEBUG_FS
43static int clk_gp106_debugfs_init(struct gk20a *g); 40static int clk_gp106_debugfs_init(struct gk20a *g);
44#endif 41#endif
@@ -82,7 +79,7 @@ int gp106_init_clk_support(struct gk20a *g)
82 struct clk_gk20a *clk = &g->clk; 79 struct clk_gk20a *clk = &g->clk;
83 u32 err = 0; 80 u32 err = 0;
84 81
85 gk20a_dbg_fn(""); 82 nvgpu_log_fn(g, " ");
86 83
87 err = nvgpu_mutex_init(&clk->clk_mutex); 84 err = nvgpu_mutex_init(&clk->clk_mutex);
88 if (err) 85 if (err)
@@ -374,7 +371,7 @@ static int clk_gp106_debugfs_init(struct gk20a *g)
374 d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root, 371 d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root,
375 g, &gpc_cfc_fops); 372 g, &gpc_cfc_fops);
376 373
377 gk20a_dbg(gpu_dbg_info, "g=%p", g); 374 nvgpu_log(g, gpu_dbg_info, "g=%p", g);
378 375
379 for (i = 0; i < g->clk.namemap_num; i++) { 376 for (i = 0; i < g->clk.namemap_num; i++) {
380 if (g->clk.clk_namemap[i].is_enable) { 377 if (g->clk.clk_namemap[i].is_enable) {
diff --git a/drivers/gpu/nvgpu/gp106/fb_gp106.c b/drivers/gpu/nvgpu/gp106/fb_gp106.c
index 34e9ee30..2bf97f61 100644
--- a/drivers/gpu/nvgpu/gp106/fb_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/fb_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -39,7 +39,7 @@ void gp106_fb_reset(struct gk20a *g)
39 do { 39 do {
40 u32 w = gk20a_readl(g, fb_niso_scrub_status_r()); 40 u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
41 if (fb_niso_scrub_status_flag_v(w)) { 41 if (fb_niso_scrub_status_flag_v(w)) {
42 gk20a_dbg_fn("done"); 42 nvgpu_log_fn(g, "done");
43 break; 43 break;
44 } 44 }
45 nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT); 45 nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c
index 1bd24b45..2e5f29ee 100644
--- a/drivers/gpu/nvgpu/gp106/gr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c
@@ -58,7 +58,7 @@ bool gr_gp106_is_valid_class(struct gk20a *g, u32 class_num)
58 default: 58 default:
59 break; 59 break;
60 } 60 }
61 gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); 61 nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
62 return valid; 62 return valid;
63} 63}
64 64
@@ -75,7 +75,7 @@ static void gr_gp106_set_go_idle_timeout(struct gk20a *g, u32 data)
75int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr, 75int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr,
76 u32 class_num, u32 offset, u32 data) 76 u32 class_num, u32 offset, u32 data)
77{ 77{
78 gk20a_dbg_fn(""); 78 nvgpu_log_fn(g, " ");
79 79
80 if (class_num == PASCAL_COMPUTE_B) { 80 if (class_num == PASCAL_COMPUTE_B) {
81 switch (offset << 2) { 81 switch (offset << 2) {
@@ -177,9 +177,9 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
177 g->gr.max_tpc_count; 177 g->gr.max_tpc_count;
178 attrib_cb_size = ALIGN(attrib_cb_size, 128); 178 attrib_cb_size = ALIGN(attrib_cb_size, 128);
179 179
180 gk20a_dbg_info("gfxp context spill_size=%d", spill_size); 180 nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
181 gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); 181 nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
182 gk20a_dbg_info("gfxp context attrib_cb_size=%d", 182 nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
183 attrib_cb_size); 183 attrib_cb_size);
184 184
185 err = gr_gp10b_alloc_buffer(vm, 185 err = gr_gp10b_alloc_buffer(vm,
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index 82cc36aa..6d3154e3 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -765,7 +765,7 @@ int gp106_init_hal(struct gk20a *g)
765{ 765{
766 struct gpu_ops *gops = &g->ops; 766 struct gpu_ops *gops = &g->ops;
767 767
768 gk20a_dbg_fn(""); 768 nvgpu_log_fn(g, " ");
769 769
770 gops->bios = gp106_ops.bios; 770 gops->bios = gp106_ops.bios;
771 gops->ltc = gp106_ops.ltc; 771 gops->ltc = gp106_ops.ltc;
@@ -828,7 +828,7 @@ int gp106_init_hal(struct gk20a *g)
828 828
829 g->name = "gp10x"; 829 g->name = "gp10x";
830 830
831 gk20a_dbg_fn("done"); 831 nvgpu_log_fn(g, "done");
832 832
833 return 0; 833 return 0;
834} 834}
diff --git a/drivers/gpu/nvgpu/gp106/mclk_gp106.c b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
index 44f0b1d9..bfb66e6e 100644
--- a/drivers/gpu/nvgpu/gp106/mclk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -2998,7 +2998,7 @@ static void mclk_seq_pmucmdhandler(struct gk20a *g, struct pmu_msg *_msg,
2998 struct nv_pmu_seq_msg_run_script *seq_msg; 2998 struct nv_pmu_seq_msg_run_script *seq_msg;
2999 u32 msg_status = 0; 2999 u32 msg_status = 0;
3000 3000
3001 gk20a_dbg_info(""); 3001 nvgpu_log_info(g, " ");
3002 3002
3003 if (status != 0) { 3003 if (status != 0) {
3004 nvgpu_err(g, "mclk seq_script cmd aborted"); 3004 nvgpu_err(g, "mclk seq_script cmd aborted");
@@ -3041,7 +3041,7 @@ static int mclk_get_memclk_table(struct gk20a *g)
3041 u8 *mem_entry_ptr = NULL; 3041 u8 *mem_entry_ptr = NULL;
3042 int index; 3042 int index;
3043 3043
3044 gk20a_dbg_info(""); 3044 nvgpu_log_info(g, " ");
3045 3045
3046 mem_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 3046 mem_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
3047 g->bios.perf_token, 3047 g->bios.perf_token,
@@ -3213,7 +3213,7 @@ int gp106_mclk_init(struct gk20a *g)
3213 u32 index; 3213 u32 index;
3214 struct memory_config *m; 3214 struct memory_config *m;
3215 3215
3216 gk20a_dbg_fn(""); 3216 nvgpu_log_fn(g, " ");
3217 3217
3218 mclk = &g->clk_pmu.clk_mclk; 3218 mclk = &g->clk_pmu.clk_mclk;
3219 3219
@@ -3316,7 +3316,7 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
3316#endif 3316#endif
3317 u32 speed; 3317 u32 speed;
3318 3318
3319 gk20a_dbg_info(""); 3319 nvgpu_log_info(g, " ");
3320 3320
3321 memset(&payload, 0, sizeof(struct pmu_payload)); 3321 memset(&payload, 0, sizeof(struct pmu_payload));
3322 3322
@@ -3508,7 +3508,7 @@ static int mclk_debugfs_init(struct gk20a *g)
3508 struct dentry *gpu_root = l->debugfs; 3508 struct dentry *gpu_root = l->debugfs;
3509 struct dentry *d; 3509 struct dentry *d;
3510 3510
3511 gk20a_dbg(gpu_dbg_info, "g=%p", g); 3511 nvgpu_log(g, gpu_dbg_info, "g=%p", g);
3512 3512
3513 d = debugfs_create_file( 3513 d = debugfs_create_file(
3514 "mclk_speed_set", 3514 "mclk_speed_set",
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index d4041905..2a52dd4e 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -98,14 +98,14 @@ u32 gp106_pmu_pg_engines_list(struct gk20a *g)
98static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, 98static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
99 void *param, u32 handle, u32 status) 99 void *param, u32 handle, u32 status)
100{ 100{
101 gk20a_dbg_fn(""); 101 nvgpu_log_fn(g, " ");
102 102
103 if (status != 0) { 103 if (status != 0) {
104 nvgpu_err(g, "PG PARAM cmd aborted"); 104 nvgpu_err(g, "PG PARAM cmd aborted");
105 return; 105 return;
106 } 106 }
107 107
108 gp106_dbg_pmu("PG PARAM is acknowledged from PMU %x", 108 gp106_dbg_pmu(g, "PG PARAM is acknowledged from PMU %x",
109 msg->msg.pg.msg_type); 109 msg->msg.pg.msg_type);
110} 110}
111 111
@@ -135,7 +135,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
135 cmd.cmd.pg.gr_init_param.featuremask = 135 cmd.cmd.pg.gr_init_param.featuremask =
136 NVGPU_PMU_GR_FEATURE_MASK_RPPG; 136 NVGPU_PMU_GR_FEATURE_MASK_RPPG;
137 137
138 gp106_dbg_pmu("cmd post GR PMU_PG_CMD_ID_PG_PARAM"); 138 gp106_dbg_pmu(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM");
139 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 139 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
140 pmu_handle_param_msg, pmu, &seq, ~0); 140 pmu_handle_param_msg, pmu, &seq, ~0);
141 } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { 141 } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
@@ -152,7 +152,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
152 NVGPU_PMU_MS_FEATURE_MASK_RPPG | 152 NVGPU_PMU_MS_FEATURE_MASK_RPPG |
153 NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING; 153 NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;
154 154
155 gp106_dbg_pmu("cmd post MS PMU_PG_CMD_ID_PG_PARAM"); 155 gp106_dbg_pmu(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM");
156 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 156 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
157 pmu_handle_param_msg, pmu, &seq, ~0); 157 pmu_handle_param_msg, pmu, &seq, ~0);
158 } 158 }
@@ -240,9 +240,9 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
240 struct pmu_cmd cmd; 240 struct pmu_cmd cmd;
241 u32 seq; 241 u32 seq;
242 242
243 gk20a_dbg_fn(""); 243 nvgpu_log_fn(g, " ");
244 244
245 gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); 245 gp106_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
246 if (g->pmu_lsf_pmu_wpr_init_done) { 246 if (g->pmu_lsf_pmu_wpr_init_done) {
247 /* send message to load FECS falcon */ 247 /* send message to load FECS falcon */
248 memset(&cmd, 0, sizeof(struct pmu_cmd)); 248 memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -258,13 +258,13 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
258 cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0; 258 cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0;
259 cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0; 259 cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0;
260 260
261 gp106_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", 261 gp106_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
262 falconidmask); 262 falconidmask);
263 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 263 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
264 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); 264 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
265 } 265 }
266 266
267 gk20a_dbg_fn("done"); 267 nvgpu_log_fn(g, "done");
268} 268}
269 269
270int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask) 270int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
index bd640869..361f6e8b 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,8 +23,8 @@
23#ifndef __PMU_GP106_H_ 23#ifndef __PMU_GP106_H_
24#define __PMU_GP106_H_ 24#define __PMU_GP106_H_
25 25
26#define gp106_dbg_pmu(fmt, arg...) \ 26#define gp106_dbg_pmu(g, fmt, arg...) \
27 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 27 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
28 28
29struct gk20a; 29struct gk20a;
30 30
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index 08c7f84a..1c959022 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -31,8 +31,8 @@
31#include <nvgpu/hw/gp106/hw_psec_gp106.h> 31#include <nvgpu/hw/gp106/hw_psec_gp106.h>
32 32
33/*Defines*/ 33/*Defines*/
34#define gm20b_dbg_pmu(fmt, arg...) \ 34#define gm20b_dbg_pmu(g, fmt, arg...) \
35 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 35 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
36 36
37int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout) 37int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
38{ 38{
@@ -56,7 +56,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
56 } 56 }
57 57
58 g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r()); 58 g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r());
59 gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); 59 gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
60 data = gk20a_readl(g, psec_falcon_mailbox0_r()); 60 data = gk20a_readl(g, psec_falcon_mailbox0_r());
61 if (data) { 61 if (data) {
62 nvgpu_err(g, "ACR boot failed, err %x", data); 62 nvgpu_err(g, "ACR boot failed, err %x", data);
@@ -87,7 +87,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
87 u32 data = 0; 87 u32 data = 0;
88 u32 dst; 88 u32 dst;
89 89
90 gk20a_dbg_fn(""); 90 nvgpu_log_fn(g, " ");
91 91
92 /* SEC2 Config */ 92 /* SEC2 Config */
93 gk20a_writel(g, psec_falcon_itfen_r(), 93 gk20a_writel(g, psec_falcon_itfen_r(),
@@ -123,7 +123,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
123 (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, 123 (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
124 pmu_bl_gm10x_desc->bl_start_tag); 124 pmu_bl_gm10x_desc->bl_start_tag);
125 125
126 gm20b_dbg_pmu("Before starting falcon with BL\n"); 126 gm20b_dbg_pmu(g, "Before starting falcon with BL\n");
127 127
128 gk20a_writel(g, psec_falcon_mailbox0_r(), 0xDEADA5A5); 128 gk20a_writel(g, psec_falcon_mailbox0_r(), 0xDEADA5A5);
129 129
diff --git a/drivers/gpu/nvgpu/gp106/therm_gp106.c b/drivers/gpu/nvgpu/gp106/therm_gp106.c
index 64d602cf..b3862abe 100644
--- a/drivers/gpu/nvgpu/gp106/therm_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/therm_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -97,7 +97,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
97 u32 active_engine_id = 0; 97 u32 active_engine_id = 0;
98 struct fifo_gk20a *f = &g->fifo; 98 struct fifo_gk20a *f = &g->fifo;
99 99
100 gk20a_dbg_fn(""); 100 nvgpu_log_fn(g, " ");
101 101
102 for (engine_id = 0; engine_id < f->num_engines; engine_id++) { 102 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
103 active_engine_id = f->active_engines_list[engine_id]; 103 active_engine_id = f->active_engines_list[engine_id];
@@ -124,7 +124,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g)
124 idle_filter &= ~therm_hubmmu_idle_filter_value_m(); 124 idle_filter &= ~therm_hubmmu_idle_filter_value_m();
125 gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); 125 gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
126 126
127 gk20a_dbg_fn("done"); 127 nvgpu_log_fn(g, "done");
128 return 0; 128 return 0;
129} 129}
130 130
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.c b/drivers/gpu/nvgpu/gp106/xve_gp106.c
index 9becd0f2..e77ea5c1 100644
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/xve_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -204,19 +204,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
204 int attempts = 10, err_status = 0; 204 int attempts = 10, err_status = 0;
205 205
206 g->ops.xve.get_speed(g, &current_link_speed); 206 g->ops.xve.get_speed(g, &current_link_speed);
207 xv_sc_dbg(PRE_CHANGE, "Executing PCIe link change."); 207 xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");
208 xv_sc_dbg(PRE_CHANGE, " Current speed: %s", 208 xv_sc_dbg(g, PRE_CHANGE, " Current speed: %s",
209 xve_speed_to_str(current_link_speed)); 209 xve_speed_to_str(current_link_speed));
210 xv_sc_dbg(PRE_CHANGE, " Next speed: %s", 210 xv_sc_dbg(g, PRE_CHANGE, " Next speed: %s",
211 xve_speed_to_str(next_link_speed)); 211 xve_speed_to_str(next_link_speed));
212 xv_sc_dbg(PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x", 212 xv_sc_dbg(g, PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x",
213 gk20a_readl(g, xp_pl_link_config_r(0))); 213 gk20a_readl(g, xp_pl_link_config_r(0)));
214 214
215 xv_sc_dbg(DISABLE_ASPM, "Disabling ASPM..."); 215 xv_sc_dbg(g, DISABLE_ASPM, "Disabling ASPM...");
216 disable_aspm_gp106(g); 216 disable_aspm_gp106(g);
217 xv_sc_dbg(DISABLE_ASPM, " Done!"); 217 xv_sc_dbg(g, DISABLE_ASPM, " Done!");
218 218
219 xv_sc_dbg(DL_SAFE_MODE, "Putting DL in safe mode..."); 219 xv_sc_dbg(g, DL_SAFE_MODE, "Putting DL in safe mode...");
220 saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0)); 220 saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0));
221 221
222 /* 222 /*
@@ -225,12 +225,12 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
225 dl_mgr = saved_dl_mgr; 225 dl_mgr = saved_dl_mgr;
226 dl_mgr |= xp_dl_mgr_safe_timing_f(1); 226 dl_mgr |= xp_dl_mgr_safe_timing_f(1);
227 gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr); 227 gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
228 xv_sc_dbg(DL_SAFE_MODE, " Done!"); 228 xv_sc_dbg(g, DL_SAFE_MODE, " Done!");
229 229
230 nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS, 230 nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
231 NVGPU_TIMER_CPU_TIMER); 231 NVGPU_TIMER_CPU_TIMER);
232 232
233 xv_sc_dbg(CHECK_LINK, "Checking for link idle..."); 233 xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
234 do { 234 do {
235 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0)); 235 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
236 if ((xp_pl_link_config_ltssm_status_f(pl_link_config) == 236 if ((xp_pl_link_config_ltssm_status_f(pl_link_config) ==
@@ -245,9 +245,9 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
245 goto done; 245 goto done;
246 } 246 }
247 247
248 xv_sc_dbg(CHECK_LINK, " Done"); 248 xv_sc_dbg(g, CHECK_LINK, " Done");
249 249
250 xv_sc_dbg(LINK_SETTINGS, "Preparing next link settings"); 250 xv_sc_dbg(g, LINK_SETTINGS, "Preparing next link settings");
251 pl_link_config &= ~xp_pl_link_config_max_link_rate_m(); 251 pl_link_config &= ~xp_pl_link_config_max_link_rate_m();
252 switch (next_link_speed) { 252 switch (next_link_speed) {
253 case GPU_XVE_SPEED_2P5: 253 case GPU_XVE_SPEED_2P5:
@@ -297,10 +297,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
297 else 297 else
298 BUG(); 298 BUG();
299 299
300 xv_sc_dbg(LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config); 300 xv_sc_dbg(g, LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config);
301 xv_sc_dbg(LINK_SETTINGS, " Done"); 301 xv_sc_dbg(g, LINK_SETTINGS, " Done");
302 302
303 xv_sc_dbg(EXEC_CHANGE, "Running link speed change..."); 303 xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");
304 304
305 nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS, 305 nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
306 NVGPU_TIMER_CPU_TIMER); 306 NVGPU_TIMER_CPU_TIMER);
@@ -316,7 +316,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
316 goto done; 316 goto done;
317 } 317 }
318 318
319 xv_sc_dbg(EXEC_CHANGE, " Wrote PL_LINK_CONFIG."); 319 xv_sc_dbg(g, EXEC_CHANGE, " Wrote PL_LINK_CONFIG.");
320 320
321 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0)); 321 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
322 322
@@ -326,7 +326,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
326 xp_pl_link_config_ltssm_directive_f( 326 xp_pl_link_config_ltssm_directive_f(
327 xp_pl_link_config_ltssm_directive_change_speed_v())); 327 xp_pl_link_config_ltssm_directive_change_speed_v()));
328 328
329 xv_sc_dbg(EXEC_CHANGE, " Executing change (0x%08x)!", 329 xv_sc_dbg(g, EXEC_CHANGE, " Executing change (0x%08x)!",
330 pl_link_config); 330 pl_link_config);
331 gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config); 331 gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
332 332
@@ -348,11 +348,11 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
348 348
349 if (nvgpu_timeout_peek_expired(&timeout)) { 349 if (nvgpu_timeout_peek_expired(&timeout)) {
350 err_status = -ETIMEDOUT; 350 err_status = -ETIMEDOUT;
351 xv_sc_dbg(EXEC_CHANGE, " timeout; pl_link_config = 0x%x", 351 xv_sc_dbg(g, EXEC_CHANGE, " timeout; pl_link_config = 0x%x",
352 pl_link_config); 352 pl_link_config);
353 } 353 }
354 354
355 xv_sc_dbg(EXEC_CHANGE, " Change done... Checking status"); 355 xv_sc_dbg(g, EXEC_CHANGE, " Change done... Checking status");
356 356
357 if (pl_link_config == 0xffffffff) { 357 if (pl_link_config == 0xffffffff) {
358 WARN(1, "GPU fell of PCI bus!?"); 358 WARN(1, "GPU fell of PCI bus!?");
@@ -366,19 +366,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
366 366
367 link_control_status = 367 link_control_status =
368 g->ops.xve.xve_readl(g, xve_link_control_status_r()); 368 g->ops.xve.xve_readl(g, xve_link_control_status_r());
369 xv_sc_dbg(EXEC_CHANGE, " target %d vs current %d", 369 xv_sc_dbg(g, EXEC_CHANGE, " target %d vs current %d",
370 link_speed_setting, 370 link_speed_setting,
371 xve_link_control_status_link_speed_v(link_control_status)); 371 xve_link_control_status_link_speed_v(link_control_status));
372 372
373 if (err_status == -ETIMEDOUT) { 373 if (err_status == -ETIMEDOUT) {
374 xv_sc_dbg(EXEC_CHANGE, " Oops timed out?"); 374 xv_sc_dbg(g, EXEC_CHANGE, " Oops timed out?");
375 break; 375 break;
376 } 376 }
377 } while (attempts-- > 0 && 377 } while (attempts-- > 0 &&
378 link_speed_setting != 378 link_speed_setting !=
379 xve_link_control_status_link_speed_v(link_control_status)); 379 xve_link_control_status_link_speed_v(link_control_status));
380 380
381 xv_sc_dbg(EXEC_VERIF, "Verifying speed change..."); 381 xv_sc_dbg(g, EXEC_VERIF, "Verifying speed change...");
382 382
383 /* 383 /*
384 * Check that the new link speed is actually active. If we failed to 384 * Check that the new link speed is actually active. If we failed to
@@ -390,10 +390,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
390 if (link_speed_setting != new_link_speed) { 390 if (link_speed_setting != new_link_speed) {
391 u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0)); 391 u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0));
392 392
393 xv_sc_dbg(EXEC_VERIF, " Current and target speeds mismatch!"); 393 xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds mismatch!");
394 xv_sc_dbg(EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x", 394 xv_sc_dbg(g, EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x",
395 g->ops.xve.xve_readl(g, xve_link_control_status_r())); 395 g->ops.xve.xve_readl(g, xve_link_control_status_r()));
396 xv_sc_dbg(EXEC_VERIF, " Link speed is %s - should be %s", 396 xv_sc_dbg(g, EXEC_VERIF, " Link speed is %s - should be %s",
397 xve_speed_to_str(new_link_speed), 397 xve_speed_to_str(new_link_speed),
398 xve_speed_to_str(link_speed_setting)); 398 xve_speed_to_str(link_speed_setting));
399 399
@@ -417,19 +417,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
417 gk20a_writel(g, xp_pl_link_config_r(0), link_config); 417 gk20a_writel(g, xp_pl_link_config_r(0), link_config);
418 err_status = -ENODEV; 418 err_status = -ENODEV;
419 } else { 419 } else {
420 xv_sc_dbg(EXEC_VERIF, " Current and target speeds match!"); 420 xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds match!");
421 err_status = 0; 421 err_status = 0;
422 } 422 }
423 423
424done: 424done:
425 /* Restore safe timings. */ 425 /* Restore safe timings. */
426 xv_sc_dbg(CLEANUP, "Restoring saved DL settings..."); 426 xv_sc_dbg(g, CLEANUP, "Restoring saved DL settings...");
427 gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr); 427 gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr);
428 xv_sc_dbg(CLEANUP, " Done"); 428 xv_sc_dbg(g, CLEANUP, " Done");
429 429
430 xv_sc_dbg(CLEANUP, "Re-enabling ASPM settings..."); 430 xv_sc_dbg(g, CLEANUP, "Re-enabling ASPM settings...");
431 enable_aspm_gp106(g); 431 enable_aspm_gp106(g);
432 xv_sc_dbg(CLEANUP, " Done"); 432 xv_sc_dbg(g, CLEANUP, " Done");
433 433
434 return err_status; 434 return err_status;
435} 435}
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.h b/drivers/gpu/nvgpu/gp106/xve_gp106.h
index d48b0991..e0be35ac 100644
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/xve_gp106.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -49,11 +49,11 @@ enum xv_speed_change_steps {
49 CLEANUP 49 CLEANUP
50}; 50};
51 51
52#define xv_dbg(fmt, args...) \ 52#define xv_dbg(g, fmt, args...) \
53 gk20a_dbg(gpu_dbg_xv, fmt, ##args) 53 nvgpu_log(g, gpu_dbg_xv, fmt, ##args)
54 54
55#define xv_sc_dbg(step, fmt, args...) \ 55#define xv_sc_dbg(g, step, fmt, args...) \
56 xv_dbg("[%d] %15s | " fmt, step, __stringify(step), ##args) 56 xv_dbg(g, "[%d] %15s | " fmt, step, __stringify(step), ##args)
57 57
58void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val); 58void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val);
59u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg); 59u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg);
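
Note on the xve_gp106.h hunk above: the two-level xv_dbg()/xv_sc_dbg() macros now thread the GPU pointer down to nvgpu_log(), which is why every speed-change call site in xve_gp106.c gains a leading 'g' argument. Consolidated from the hunks above:

    #define xv_dbg(g, fmt, args...) \
            nvgpu_log(g, gpu_dbg_xv, fmt, ##args)

    #define xv_sc_dbg(g, step, fmt, args...) \
            xv_dbg(g, "[%d] %15s | " fmt, step, __stringify(step), ##args)

    /* e.g. in __do_xve_set_speed_gp106() */
    xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");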
diff --git a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c
index 86a2b751..e2ad1bd3 100644
--- a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Pascal GPU series Copy Engine. 2 * Pascal GPU series Copy Engine.
3 * 3 *
4 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -30,14 +30,14 @@
30 30
31static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr) 31static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr)
32{ 32{
33 gk20a_dbg(gpu_dbg_intr, "ce blocking pipe interrupt\n"); 33 nvgpu_log(g, gpu_dbg_intr, "ce blocking pipe interrupt\n");
34 34
35 return ce_intr_status_blockpipe_pending_f(); 35 return ce_intr_status_blockpipe_pending_f();
36} 36}
37 37
38static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr) 38static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr)
39{ 39{
40 gk20a_dbg(gpu_dbg_intr, "ce launch error interrupt\n"); 40 nvgpu_log(g, gpu_dbg_intr, "ce launch error interrupt\n");
41 41
42 return ce_intr_status_launcherr_pending_f(); 42 return ce_intr_status_launcherr_pending_f();
43} 43}
@@ -47,7 +47,7 @@ void gp10b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
47 u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); 47 u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id));
48 u32 clear_intr = 0; 48 u32 clear_intr = 0;
49 49
50 gk20a_dbg(gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); 50 nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id);
51 51
52 /* clear blocking interrupts: they exibit broken behavior */ 52 /* clear blocking interrupts: they exibit broken behavior */
53 if (ce_intr & ce_intr_status_blockpipe_pending_f()) 53 if (ce_intr & ce_intr_status_blockpipe_pending_f())
@@ -65,7 +65,7 @@ int gp10b_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
65 int ops = 0; 65 int ops = 0;
66 u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); 66 u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id));
67 67
68 gk20a_dbg(gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); 68 nvgpu_log(g, gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id);
69 69
70 if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) { 70 if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) {
71 gk20a_writel(g, ce_intr_status_r(inst_id), 71 gk20a_writel(g, ce_intr_status_r(inst_id),
diff --git a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c
index 511d565a..c477c77d 100644
--- a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GP10B GPU FECS traces 2 * GP10B GPU FECS traces
3 * 3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ int gp10b_fecs_trace_flush(struct gk20a *g)
43 }; 43 };
44 int err; 44 int err;
45 45
46 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); 46 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
47 47
48 err = gr_gk20a_elpg_protected_call(g, 48 err = gr_gk20a_elpg_protected_call(g,
49 gr_gk20a_submit_fecs_method_op(g, op, false)); 49 gr_gk20a_submit_fecs_method_op(g, op, false));
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index 66f3012f..fd4ec34e 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -43,7 +43,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
43{ 43{
44 u32 val; 44 u32 val;
45 45
46 gk20a_dbg_fn(""); 46 nvgpu_log_fn(g, " ");
47 47
48 val = nvgpu_mem_rd32(g, mem, 48 val = nvgpu_mem_rd32(g, mem,
49 ram_in_page_dir_base_fault_replay_tex_w()); 49 ram_in_page_dir_base_fault_replay_tex_w());
@@ -59,7 +59,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
59 nvgpu_mem_wr32(g, mem, 59 nvgpu_mem_wr32(g, mem,
60 ram_in_page_dir_base_fault_replay_gcc_w(), val); 60 ram_in_page_dir_base_fault_replay_gcc_w(), val);
61 61
62 gk20a_dbg_fn("done"); 62 nvgpu_log_fn(g, "done");
63} 63}
64 64
65int channel_gp10b_commit_userd(struct channel_gk20a *c) 65int channel_gp10b_commit_userd(struct channel_gk20a *c)
@@ -68,12 +68,12 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
68 u32 addr_hi; 68 u32 addr_hi;
69 struct gk20a *g = c->g; 69 struct gk20a *g = c->g;
70 70
71 gk20a_dbg_fn(""); 71 nvgpu_log_fn(g, " ");
72 72
73 addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); 73 addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
74 addr_hi = u64_hi32(c->userd_iova); 74 addr_hi = u64_hi32(c->userd_iova);
75 75
76 gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", 76 nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx",
77 c->chid, (u64)c->userd_iova); 77 c->chid, (u64)c->userd_iova);
78 78
79 nvgpu_mem_wr32(g, &c->inst_block, 79 nvgpu_mem_wr32(g, &c->inst_block,
@@ -98,7 +98,7 @@ int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
98 struct gk20a *g = c->g; 98 struct gk20a *g = c->g;
99 struct nvgpu_mem *mem = &c->inst_block; 99 struct nvgpu_mem *mem = &c->inst_block;
100 100
101 gk20a_dbg_fn(""); 101 nvgpu_log_fn(g, " ");
102 102
103 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); 103 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
104 104
@@ -167,8 +167,9 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
167{ 167{
168 u32 new_syncpt = 0, old_syncpt; 168 u32 new_syncpt = 0, old_syncpt;
169 u32 v; 169 u32 v;
170 struct gk20a *g = c->g;
170 171
171 gk20a_dbg_fn(""); 172 nvgpu_log_fn(g, " ");
172 173
173 v = nvgpu_mem_rd32(c->g, &c->inst_block, 174 v = nvgpu_mem_rd32(c->g, &c->inst_block,
174 ram_fc_allowed_syncpoints_w()); 175 ram_fc_allowed_syncpoints_w());
@@ -185,7 +186,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
185 186
186 v = pbdma_allowed_syncpoints_0_valid_f(1); 187 v = pbdma_allowed_syncpoints_0_valid_f(1);
187 188
188 gk20a_dbg_info("Channel %d, syncpt id %d\n", 189 nvgpu_log_info(g, "Channel %d, syncpt id %d\n",
189 c->chid, new_syncpt); 190 c->chid, new_syncpt);
190 191
191 v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt); 192 v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);
@@ -197,7 +198,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
197 /* enable channel */ 198 /* enable channel */
198 gk20a_enable_channel_tsg(c->g, c); 199 gk20a_enable_channel_tsg(c->g, c);
199 200
200 gk20a_dbg_fn("done"); 201 nvgpu_log_fn(g, "done");
201 202
202 return 0; 203 return 0;
203} 204}
@@ -207,7 +208,7 @@ int gp10b_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
207{ 208{
208 int ret = ENGINE_INVAL_GK20A; 209 int ret = ENGINE_INVAL_GK20A;
209 210
210 gk20a_dbg_info("engine type %d", engine_type); 211 nvgpu_log_info(g, "engine type %d", engine_type);
211 if (engine_type == top_device_info_type_enum_graphics_v()) 212 if (engine_type == top_device_info_type_enum_graphics_v())
212 ret = ENGINE_GR_GK20A; 213 ret = ENGINE_GR_GK20A;
213 else if (engine_type == top_device_info_type_enum_lce_v()) { 214 else if (engine_type == top_device_info_type_enum_lce_v()) {
@@ -229,13 +230,13 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
229 *pri_base = 230 *pri_base =
230 (top_device_info_data_pri_base_v(table_entry) 231 (top_device_info_data_pri_base_v(table_entry)
231 << top_device_info_data_pri_base_align_v()); 232 << top_device_info_data_pri_base_align_v());
232 gk20a_dbg_info("device info: pri_base: %d", *pri_base); 233 nvgpu_log_info(g, "device info: pri_base: %d", *pri_base);
233 } 234 }
234 if (fault_id && (top_device_info_data_fault_id_v(table_entry) == 235 if (fault_id && (top_device_info_data_fault_id_v(table_entry) ==
235 top_device_info_data_fault_id_valid_v())) { 236 top_device_info_data_fault_id_valid_v())) {
236 *fault_id = 237 *fault_id =
237 g->ops.fifo.device_info_fault_id(table_entry); 238 g->ops.fifo.device_info_fault_id(table_entry);
238 gk20a_dbg_info("device info: fault_id: %d", *fault_id); 239 nvgpu_log_info(g, "device info: fault_id: %d", *fault_id);
239 } 240 }
240 } else 241 } else
241 nvgpu_err(g, "unknown device_info_data %d", 242 nvgpu_err(g, "unknown device_info_data %d",
@@ -293,7 +294,7 @@ void gp10b_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
293 u32 fault_info; 294 u32 fault_info;
294 u32 addr_lo, addr_hi; 295 u32 addr_lo, addr_hi;
295 296
296 gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); 297 nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id);
297 298
298 memset(mmfault, 0, sizeof(*mmfault)); 299 memset(mmfault, 0, sizeof(*mmfault));
299 300
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 0178abbf..bc982d30 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -69,7 +69,7 @@ bool gr_gp10b_is_valid_class(struct gk20a *g, u32 class_num)
69 default: 69 default:
70 break; 70 break;
71 } 71 }
72 gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); 72 nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
73 return valid; 73 return valid;
74} 74}
75 75
@@ -169,7 +169,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
169 gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, 169 gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset,
170 0); 170 0);
171 if (lrf_ecc_sed_status) { 171 if (lrf_ecc_sed_status) {
172 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 172 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
173 "Single bit error detected in SM LRF!"); 173 "Single bit error detected in SM LRF!");
174 174
175 gr_gp10b_sm_lrf_ecc_overcount_war(1, 175 gr_gp10b_sm_lrf_ecc_overcount_war(1,
@@ -181,7 +181,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
181 lrf_single_count_delta; 181 lrf_single_count_delta;
182 } 182 }
183 if (lrf_ecc_ded_status) { 183 if (lrf_ecc_ded_status) {
184 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 184 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
185 "Double bit error detected in SM LRF!"); 185 "Double bit error detected in SM LRF!");
186 186
187 gr_gp10b_sm_lrf_ecc_overcount_war(0, 187 gr_gp10b_sm_lrf_ecc_overcount_war(0,
@@ -208,7 +208,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
208 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) { 208 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) {
209 u32 ecc_stats_reg_val; 209 u32 ecc_stats_reg_val;
210 210
211 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 211 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
212 "Single bit error detected in SM SHM!"); 212 "Single bit error detected in SM SHM!");
213 213
214 ecc_stats_reg_val = 214 ecc_stats_reg_val =
@@ -230,7 +230,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
230 gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) { 230 gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) {
231 u32 ecc_stats_reg_val; 231 u32 ecc_stats_reg_val;
232 232
233 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 233 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
234 "Double bit error detected in SM SHM!"); 234 "Double bit error detected in SM SHM!");
235 235
236 ecc_stats_reg_val = 236 ecc_stats_reg_val =
@@ -260,14 +260,14 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
260 u32 esr; 260 u32 esr;
261 u32 ecc_stats_reg_val; 261 u32 ecc_stats_reg_val;
262 262
263 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 263 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
264 264
265 esr = gk20a_readl(g, 265 esr = gk20a_readl(g,
266 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); 266 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset);
267 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); 267 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr);
268 268
269 if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) { 269 if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) {
270 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 270 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
271 "Single bit error detected in TEX!"); 271 "Single bit error detected in TEX!");
272 272
273 /* Pipe 0 counters */ 273 /* Pipe 0 counters */
@@ -323,7 +323,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
323 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f()); 323 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f());
324 } 324 }
325 if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) { 325 if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) {
326 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 326 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
327 "Double bit error detected in TEX!"); 327 "Double bit error detected in TEX!");
328 328
329 /* Pipe 0 counters */ 329 /* Pipe 0 counters */
@@ -403,7 +403,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
403 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); 403 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
404 u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); 404 u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC);
405 405
406 gk20a_dbg_fn(""); 406 nvgpu_log_fn(g, " ");
407 407
408 tsg = tsg_gk20a_from_ch(c); 408 tsg = tsg_gk20a_from_ch(c);
409 if (!tsg) 409 if (!tsg)
@@ -660,21 +660,21 @@ static void gr_gp10b_set_coalesce_buffer_size(struct gk20a *g, u32 data)
660{ 660{
661 u32 val; 661 u32 val;
662 662
663 gk20a_dbg_fn(""); 663 nvgpu_log_fn(g, " ");
664 664
665 val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); 665 val = gk20a_readl(g, gr_gpcs_tc_debug0_r());
666 val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), 666 val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(),
667 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); 667 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data));
668 gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); 668 gk20a_writel(g, gr_gpcs_tc_debug0_r(), val);
669 669
670 gk20a_dbg_fn("done"); 670 nvgpu_log_fn(g, "done");
671} 671}
672 672
673void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data) 673void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data)
674{ 674{
675 u32 val; 675 u32 val;
676 676
677 gk20a_dbg_fn(""); 677 nvgpu_log_fn(g, " ");
678 678
679 val = gk20a_readl(g, gr_bes_crop_debug3_r()); 679 val = gk20a_readl(g, gr_bes_crop_debug3_r());
680 if ((data & 1)) { 680 if ((data & 1)) {
@@ -722,7 +722,7 @@ void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data)
722int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr, 722int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr,
723 u32 class_num, u32 offset, u32 data) 723 u32 class_num, u32 offset, u32 data)
724{ 724{
725 gk20a_dbg_fn(""); 725 nvgpu_log_fn(g, " ");
726 726
727 if (class_num == PASCAL_COMPUTE_A) { 727 if (class_num == PASCAL_COMPUTE_A) {
728 switch (offset << 2) { 728 switch (offset << 2) {
@@ -800,7 +800,7 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
800 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 800 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
801 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); 801 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
802 802
803 gk20a_dbg_fn(""); 803 nvgpu_log_fn(g, " ");
804 804
805 if (alpha_cb_size > gr->alpha_cb_size) 805 if (alpha_cb_size > gr->alpha_cb_size)
806 alpha_cb_size = gr->alpha_cb_size; 806 alpha_cb_size = gr->alpha_cb_size;
@@ -853,7 +853,7 @@ void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data)
853 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 853 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
854 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); 854 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
855 855
856 gk20a_dbg_fn(""); 856 nvgpu_log_fn(g, " ");
857 857
858 if (cb_size_steady > gr->attrib_cb_size) 858 if (cb_size_steady > gr->attrib_cb_size)
859 cb_size_steady = gr->attrib_cb_size; 859 cb_size_steady = gr->attrib_cb_size;
@@ -923,7 +923,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
923 }; 923 };
924 int err; 924 int err;
925 925
926 gk20a_dbg_fn(""); 926 nvgpu_log_fn(g, " ");
927 927
928 err = gr_gk20a_init_ctx_state(g); 928 err = gr_gk20a_init_ctx_state(g);
929 if (err) 929 if (err)
@@ -940,10 +940,10 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
940 } 940 }
941 } 941 }
942 942
943 gk20a_dbg_info("preempt image size: %u", 943 nvgpu_log_info(g, "preempt image size: %u",
944 g->gr.ctx_vars.preempt_image_size); 944 g->gr.ctx_vars.preempt_image_size);
945 945
946 gk20a_dbg_fn("done"); 946 nvgpu_log_fn(g, "done");
947 947
948 return 0; 948 return 0;
949} 949}
@@ -952,8 +952,9 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
952 struct nvgpu_mem *mem) 952 struct nvgpu_mem *mem)
953{ 953{
954 int err; 954 int err;
955 struct gk20a *g = gk20a_from_vm(vm);
955 956
956 gk20a_dbg_fn(""); 957 nvgpu_log_fn(g, " ");
957 958
958 err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); 959 err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
959 if (err) 960 if (err)
@@ -1029,9 +1030,9 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
1029 g->gr.max_tpc_count; 1030 g->gr.max_tpc_count;
1030 attrib_cb_size = ALIGN(attrib_cb_size, 128); 1031 attrib_cb_size = ALIGN(attrib_cb_size, 128);
1031 1032
1032 gk20a_dbg_info("gfxp context spill_size=%d", spill_size); 1033 nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
1033 gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); 1034 nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
1034 gk20a_dbg_info("gfxp context attrib_cb_size=%d", 1035 nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
1035 attrib_cb_size); 1036 attrib_cb_size);
1036 1037
1037 err = gr_gp10b_alloc_buffer(vm, 1038 err = gr_gp10b_alloc_buffer(vm,
@@ -1112,7 +1113,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
1112 u32 graphics_preempt_mode = 0; 1113 u32 graphics_preempt_mode = 0;
1113 u32 compute_preempt_mode = 0; 1114 u32 compute_preempt_mode = 0;
1114 1115
1115 gk20a_dbg_fn(""); 1116 nvgpu_log_fn(g, " ");
1116 1117
1117 err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); 1118 err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
1118 if (err) 1119 if (err)
@@ -1137,7 +1138,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
1137 goto fail_free_gk20a_ctx; 1138 goto fail_free_gk20a_ctx;
1138 } 1139 }
1139 1140
1140 gk20a_dbg_fn("done"); 1141 nvgpu_log_fn(g, "done");
1141 1142
1142 return 0; 1143 return 0;
1143 1144
@@ -1215,7 +1216,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1215 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); 1216 ctxsw_prog_main_image_compute_preemption_options_control_cta_f();
1216 int err; 1217 int err;
1217 1218
1218 gk20a_dbg_fn(""); 1219 nvgpu_log_fn(g, " ");
1219 1220
1220 tsg = tsg_gk20a_from_ch(c); 1221 tsg = tsg_gk20a_from_ch(c);
1221 if (!tsg) 1222 if (!tsg)
@@ -1224,21 +1225,21 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1224 gr_ctx = &tsg->gr_ctx; 1225 gr_ctx = &tsg->gr_ctx;
1225 1226
1226 if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { 1227 if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
1227 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); 1228 nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
1228 nvgpu_mem_wr(g, mem, 1229 nvgpu_mem_wr(g, mem,
1229 ctxsw_prog_main_image_graphics_preemption_options_o(), 1230 ctxsw_prog_main_image_graphics_preemption_options_o(),
1230 gfxp_preempt_option); 1231 gfxp_preempt_option);
1231 } 1232 }
1232 1233
1233 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { 1234 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
1234 gk20a_dbg_info("CILP: %x", cilp_preempt_option); 1235 nvgpu_log_info(g, "CILP: %x", cilp_preempt_option);
1235 nvgpu_mem_wr(g, mem, 1236 nvgpu_mem_wr(g, mem,
1236 ctxsw_prog_main_image_compute_preemption_options_o(), 1237 ctxsw_prog_main_image_compute_preemption_options_o(),
1237 cilp_preempt_option); 1238 cilp_preempt_option);
1238 } 1239 }
1239 1240
1240 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { 1241 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
1241 gk20a_dbg_info("CTA: %x", cta_preempt_option); 1242 nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
1242 nvgpu_mem_wr(g, mem, 1243 nvgpu_mem_wr(g, mem,
1243 ctxsw_prog_main_image_compute_preemption_options_o(), 1244 ctxsw_prog_main_image_compute_preemption_options_o(),
1244 cta_preempt_option); 1245 cta_preempt_option);
@@ -1269,7 +1270,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1269 (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << 1270 (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) <<
1270 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); 1271 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
1271 1272
1272 gk20a_dbg_info("attrib cb addr : 0x%016x", addr); 1273 nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
1273 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); 1274 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
1274 1275
1275 addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> 1276 addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
@@ -1315,7 +1316,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1315 } 1316 }
1316 1317
1317out: 1318out:
1318 gk20a_dbg_fn("done"); 1319 nvgpu_log_fn(g, "done");
1319} 1320}
1320 1321
1321int gr_gp10b_dump_gr_status_regs(struct gk20a *g, 1322int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
@@ -1475,7 +1476,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
1475 u32 activity0, activity1, activity2, activity4; 1476 u32 activity0, activity1, activity2, activity4;
1476 struct nvgpu_timeout timeout; 1477 struct nvgpu_timeout timeout;
1477 1478
1478 gk20a_dbg_fn(""); 1479 nvgpu_log_fn(g, " ");
1479 1480
1480 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); 1481 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
1481 1482
@@ -1500,7 +1501,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
1500 gr_activity_empty_or_preempted(activity4)); 1501 gr_activity_empty_or_preempted(activity4));
1501 1502
1502 if (!gr_enabled || (!gr_busy && !ctxsw_active)) { 1503 if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
1503 gk20a_dbg_fn("done"); 1504 nvgpu_log_fn(g, "done");
1504 return 0; 1505 return 0;
1505 } 1506 }
1506 1507
@@ -1569,7 +1570,7 @@ void gr_gp10b_commit_global_bundle_cb(struct gk20a *g,
1569 1570
1570 data = min_t(u32, data, g->gr.min_gpm_fifo_depth); 1571 data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
1571 1572
1572 gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", 1573 nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d",
1573 g->gr.bundle_cb_token_limit, data); 1574 g->gr.bundle_cb_token_limit, data);
1574 1575
1575 gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(), 1576 gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -1626,7 +1627,7 @@ int gr_gp10b_init_fs_state(struct gk20a *g)
1626{ 1627{
1627 u32 data; 1628 u32 data;
1628 1629
1629 gk20a_dbg_fn(""); 1630 nvgpu_log_fn(g, " ");
1630 1631
1631 data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); 1632 data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
1632 data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), 1633 data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
@@ -1705,7 +1706,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
1705{ 1706{
1706 int ret = 0; 1707 int ret = 0;
1707 1708
1708 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); 1709 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
1709 1710
1710 ret = gk20a_disable_channel_tsg(g, fault_ch); 1711 ret = gk20a_disable_channel_tsg(g, fault_ch);
1711 if (ret) { 1712 if (ret) {
@@ -1721,18 +1722,18 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
1721 return ret; 1722 return ret;
1722 } 1723 }
1723 1724
1724 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); 1725 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist");
1725 1726
1726 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1727 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1727 "CILP: tsgid: 0x%x", fault_ch->tsgid); 1728 "CILP: tsgid: 0x%x", fault_ch->tsgid);
1728 1729
1729 if (gk20a_is_channel_marked_as_tsg(fault_ch)) { 1730 if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
1730 gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true); 1731 gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true);
1731 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1732 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1732 "CILP: preempted tsg"); 1733 "CILP: preempted tsg");
1733 } else { 1734 } else {
1734 gk20a_fifo_issue_preempt(g, fault_ch->chid, false); 1735 gk20a_fifo_issue_preempt(g, fault_ch->chid, false);
1735 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1736 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1736 "CILP: preempted channel"); 1737 "CILP: preempted channel");
1737 } 1738 }
1738 1739
@@ -1746,7 +1747,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
1746 struct tsg_gk20a *tsg; 1747 struct tsg_gk20a *tsg;
1747 struct nvgpu_gr_ctx *gr_ctx; 1748 struct nvgpu_gr_ctx *gr_ctx;
1748 1749
1749 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); 1750 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
1750 1751
1751 tsg = tsg_gk20a_from_ch(fault_ch); 1752 tsg = tsg_gk20a_from_ch(fault_ch);
1752 if (!tsg) 1753 if (!tsg)
@@ -1755,7 +1756,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
1755 gr_ctx = &tsg->gr_ctx; 1756 gr_ctx = &tsg->gr_ctx;
1756 1757
1757 if (gr_ctx->cilp_preempt_pending) { 1758 if (gr_ctx->cilp_preempt_pending) {
1758 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1759 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1759 "CILP is already pending for chid %d", 1760 "CILP is already pending for chid %d",
1760 fault_ch->chid); 1761 fault_ch->chid);
1761 return 0; 1762 return 0;
@@ -1763,7 +1764,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
1763 1764
1764 /* get ctx_id from the ucode image */ 1765 /* get ctx_id from the ucode image */
1765 if (!gr_ctx->ctx_id_valid) { 1766 if (!gr_ctx->ctx_id_valid) {
1766 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1767 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1767 "CILP: looking up ctx id"); 1768 "CILP: looking up ctx id");
1768 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id); 1769 ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id);
1769 if (ret) { 1770 if (ret) {
@@ -1773,7 +1774,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
1773 gr_ctx->ctx_id_valid = true; 1774 gr_ctx->ctx_id_valid = true;
1774 } 1775 }
1775 1776
1776 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1777 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1777 "CILP: ctx id is 0x%x", gr_ctx->ctx_id); 1778 "CILP: ctx id is 0x%x", gr_ctx->ctx_id);
1778 1779
1779 /* send ucode method to set ctxsw interrupt */ 1780 /* send ucode method to set ctxsw interrupt */
@@ -1795,10 +1796,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
1795 return ret; 1796 return ret;
1796 } 1797 }
1797 1798
1798 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1799 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1799 "CILP: enabled ctxsw completion interrupt"); 1800 "CILP: enabled ctxsw completion interrupt");
1800 1801
1801 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1802 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1802 "CILP: disabling channel %d", 1803 "CILP: disabling channel %d",
1803 fault_ch->chid); 1804 fault_ch->chid);
1804 1805
@@ -1826,7 +1827,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
1826 struct tsg_gk20a *tsg; 1827 struct tsg_gk20a *tsg;
1827 struct nvgpu_gr_ctx *gr_ctx; 1828 struct nvgpu_gr_ctx *gr_ctx;
1828 1829
1829 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); 1830 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
1830 1831
1831 tsg = tsg_gk20a_from_ch(fault_ch); 1832 tsg = tsg_gk20a_from_ch(fault_ch);
1832 if (!tsg) 1833 if (!tsg)
@@ -1837,7 +1838,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
1837 /* The ucode is self-clearing, so all we need to do here is 1838 /* The ucode is self-clearing, so all we need to do here is
1838 to clear cilp_preempt_pending. */ 1839 to clear cilp_preempt_pending. */
1839 if (!gr_ctx->cilp_preempt_pending) { 1840 if (!gr_ctx->cilp_preempt_pending) {
1840 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 1841 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
1841 "CILP is already cleared for chid %d\n", 1842 "CILP is already cleared for chid %d\n",
1842 fault_ch->chid); 1843 fault_ch->chid);
1843 return 0; 1844 return 0;
@@ -1878,7 +1879,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
1878 NVGPU_PREEMPTION_MODE_COMPUTE_CILP); 1879 NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
1879 } 1880 }
1880 1881
1881 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", 1882 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n",
1882 gpc, tpc, global_esr); 1883 gpc, tpc, global_esr);
1883 1884
1884 if (cilp_enabled && sm_debugger_attached) { 1885 if (cilp_enabled && sm_debugger_attached) {
@@ -1900,19 +1901,19 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
1900 if (warp_esr != 0 || (global_esr & global_mask) != 0) { 1901 if (warp_esr != 0 || (global_esr & global_mask) != 0) {
1901 *ignore_debugger = true; 1902 *ignore_debugger = true;
1902 1903
1903 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 1904 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
1904 "CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n", 1905 "CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n",
1905 gpc, tpc); 1906 gpc, tpc);
1906 1907
1907 if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { 1908 if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) {
1908 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 1909 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
1909 "CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n", 1910 "CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n",
1910 gpc, tpc); 1911 gpc, tpc);
1911 g->ops.gr.suspend_all_sms(g, global_mask, false); 1912 g->ops.gr.suspend_all_sms(g, global_mask, false);
1912 1913
1913 gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); 1914 gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch);
1914 } else { 1915 } else {
1915 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 1916 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
1916 "CILP: STOP_TRIGGER from gpc %d tpc %d\n", 1917 "CILP: STOP_TRIGGER from gpc %d tpc %d\n",
1917 gpc, tpc); 1918 gpc, tpc);
1918 g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true); 1919 g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true);
@@ -1923,11 +1924,11 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
1923 gpc, tpc, sm); 1924 gpc, tpc, sm);
1924 g->ops.gr.clear_sm_hww(g, 1925 g->ops.gr.clear_sm_hww(g,
1925 gpc, tpc, sm, global_esr_copy); 1926 gpc, tpc, sm, global_esr_copy);
1926 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 1927 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
1927 "CILP: HWWs cleared for gpc %d tpc %d\n", 1928 "CILP: HWWs cleared for gpc %d tpc %d\n",
1928 gpc, tpc); 1929 gpc, tpc);
1929 1930
1930 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); 1931 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
1931 ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); 1932 ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
1932 if (ret) { 1933 if (ret) {
1933 nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); 1934 nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
@@ -1936,7 +1937,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
1936 1937
1937 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); 1938 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);
1938 if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { 1939 if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) {
1939 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 1940 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
1940 "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", 1941 "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n",
1941 gpc, tpc); 1942 gpc, tpc);
1942 dbgr_control0 = set_field(dbgr_control0, 1943 dbgr_control0 = set_field(dbgr_control0,
@@ -1945,13 +1946,13 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
1945 gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); 1946 gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0);
1946 } 1947 }
1947 1948
1948 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 1949 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
1949 "CILP: resume for gpc %d tpc %d\n", 1950 "CILP: resume for gpc %d tpc %d\n",
1950 gpc, tpc); 1951 gpc, tpc);
1951 g->ops.gr.resume_single_sm(g, gpc, tpc, sm); 1952 g->ops.gr.resume_single_sm(g, gpc, tpc, sm);
1952 1953
1953 *ignore_debugger = true; 1954 *ignore_debugger = true;
1954 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); 1955 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc);
1955 } 1956 }
1956 1957
1957 *early_exit = true; 1958 *early_exit = true;
@@ -1999,14 +2000,14 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
1999 int ret = 0; 2000 int ret = 0;
2000 struct tsg_gk20a *tsg; 2001 struct tsg_gk20a *tsg;
2001 2002
2002 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); 2003 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
2003 2004
2004 /* 2005 /*
2005 * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR) 2006 * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR)
2006 * indicates that a CILP ctxsw save has finished 2007 * indicates that a CILP ctxsw save has finished
2007 */ 2008 */
2008 if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) { 2009 if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) {
2009 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 2010 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
2010 "CILP: ctxsw save completed!\n"); 2011 "CILP: ctxsw save completed!\n");
2011 2012
2012 /* now clear the interrupt */ 2013 /* now clear the interrupt */
@@ -2162,7 +2163,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
2162 struct nvgpu_gr_ctx *gr_ctx; 2163 struct nvgpu_gr_ctx *gr_ctx;
2163 struct nvgpu_timeout timeout; 2164 struct nvgpu_timeout timeout;
2164 2165
2165 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, 2166 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
2166 "CILP preempt pending, waiting %lu msecs for preemption", 2167 "CILP preempt pending, waiting %lu msecs for preemption",
2167 gk20a_get_gr_idle_timeout(g)); 2168 gk20a_get_gr_idle_timeout(g));
2168 2169
@@ -2285,7 +2286,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
2285 2286
2286 if (g->ops.gr.set_ctxsw_preemption_mode) { 2287 if (g->ops.gr.set_ctxsw_preemption_mode) {
2287 2288
2288 gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " 2289 nvgpu_log(g, gpu_dbg_sched, "chid=%d tsgid=%d pid=%d "
2289 "graphics_preempt=%d compute_preempt=%d", 2290 "graphics_preempt=%d compute_preempt=%d",
2290 ch->chid, 2291 ch->chid,
2291 ch->tsgid, 2292 ch->tsgid,
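
Note on the gr_gp10b.c hunks above: the conversion is mechanical — every gk20a_dbg*() call becomes an nvgpu_log*() call whose first argument is the struct gk20a pointer, function entry/exit markers now pass a single-space or "done" message, and several debug domains are OR'd into one mask. A minimal sketch of the resulting call-site shape, using only the logging signatures visible in the diff (the function name and message text here are illustrative, not part of the patch):

    static void example_handle_exception(struct gk20a *g, u32 gpc, u32 tpc)
    {
            nvgpu_log_fn(g, " ");                   /* function entry marker */

            /* several debug domains combined into a single mask */
            nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
                      "exception on gpc %d tpc %d", gpc, tpc);

            nvgpu_log_info(g, "gpc=%d tpc=%d", gpc, tpc);

            nvgpu_log_fn(g, "done");                /* function exit marker */
    }
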
diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
index 71764a7c..f74ca8f3 100644
--- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
@@ -41,7 +41,7 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g)
41 u32 tmp; 41 u32 tmp;
42 int ret; 42 int ret;
43 43
44 gk20a_dbg_fn(""); 44 nvgpu_log_fn(g, " ");
45 45
46 tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r()); 46 tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());
47 47
@@ -49,9 +49,9 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g)
49 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 * 49 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 *
50 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp); 50 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp);
51 51
52 gk20a_dbg(gpu_dbg_info, "L2 size: %d\n", ret); 52 nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret);
53 53
54 gk20a_dbg_fn("done"); 54 nvgpu_log_fn(g, "done");
55 55
56 return ret; 56 return ret;
57} 57}
@@ -83,7 +83,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
83 83
84 int err; 84 int err;
85 85
86 gk20a_dbg_fn(""); 86 nvgpu_log_fn(g, " ");
87 87
88 if (max_comptag_lines == 0U) 88 if (max_comptag_lines == 0U)
89 return 0; 89 return 0;
@@ -109,11 +109,11 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
109 /* must be a multiple of 64KB */ 109 /* must be a multiple of 64KB */
110 compbit_backing_size = roundup(compbit_backing_size, 64*1024); 110 compbit_backing_size = roundup(compbit_backing_size, 64*1024);
111 111
112 gk20a_dbg_info("compbit backing store size : %d", 112 nvgpu_log_info(g, "compbit backing store size : %d",
113 compbit_backing_size); 113 compbit_backing_size);
114 gk20a_dbg_info("max comptag lines : %d", 114 nvgpu_log_info(g, "max comptag lines : %d",
115 max_comptag_lines); 115 max_comptag_lines);
116 gk20a_dbg_info("gobs_per_comptagline_per_slice: %d", 116 nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d",
117 gobs_per_comptagline_per_slice); 117 gobs_per_comptagline_per_slice);
118 118
119 err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); 119 err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
index dde12854..5969e45d 100644
--- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
@@ -87,7 +87,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
87 87
88 mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); 88 mc_intr_0 = gk20a_readl(g, mc_intr_r(0));
89 89
90 gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); 90 nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0);
91 91
92 for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { 92 for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
93 active_engine_id = g->fifo.active_engines_list[engine_id_idx]; 93 active_engine_id = g->fifo.active_engines_list[engine_id_idx];
@@ -126,7 +126,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
126 g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) 126 g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0))
127 g->ops.nvlink.isr(g); 127 g->ops.nvlink.isr(g);
128 128
129 gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); 129 nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
130 130
131} 131}
132 132
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 978b6f50..811697c3 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GP10B MMU 2 * GP10B MMU
3 * 3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -53,7 +53,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g)
53 struct nvgpu_mem *inst_block = &mm->bar1.inst_block; 53 struct nvgpu_mem *inst_block = &mm->bar1.inst_block;
54 int err = 0; 54 int err = 0;
55 55
56 gk20a_dbg_fn(""); 56 nvgpu_log_fn(g, " ");
57 57
58 g->ops.fb.set_mmu_page_size(g); 58 g->ops.fb.set_mmu_page_size(g);
59 59
@@ -73,7 +73,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g)
73 73
74 err = gp10b_replayable_pagefault_buffer_init(g); 74 err = gp10b_replayable_pagefault_buffer_init(g);
75 75
76 gk20a_dbg_fn("done"); 76 nvgpu_log_fn(g, "done");
77 return err; 77 return err;
78 78
79} 79}
@@ -87,7 +87,7 @@ int gp10b_init_bar2_vm(struct gk20a *g)
87 87
88 /* BAR2 aperture size is 32MB */ 88 /* BAR2 aperture size is 32MB */
89 mm->bar2.aperture_size = 32 << 20; 89 mm->bar2.aperture_size = 32 << 20;
90 gk20a_dbg_info("bar2 vm size = 0x%x", mm->bar2.aperture_size); 90 nvgpu_log_info(g, "bar2 vm size = 0x%x", mm->bar2.aperture_size);
91 91
92 mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K, 92 mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
93 mm->bar2.aperture_size - SZ_4K, 93 mm->bar2.aperture_size - SZ_4K,
@@ -115,12 +115,12 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g)
115 struct nvgpu_mem *inst_block = &mm->bar2.inst_block; 115 struct nvgpu_mem *inst_block = &mm->bar2.inst_block;
116 u64 inst_pa = nvgpu_inst_block_addr(g, inst_block); 116 u64 inst_pa = nvgpu_inst_block_addr(g, inst_block);
117 117
118 gk20a_dbg_fn(""); 118 nvgpu_log_fn(g, " ");
119 119
120 g->ops.fb.set_mmu_page_size(g); 120 g->ops.fb.set_mmu_page_size(g);
121 121
122 inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v()); 122 inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v());
123 gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa); 123 nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", (u32)inst_pa);
124 124
125 gk20a_writel(g, bus_bar2_block_r(), 125 gk20a_writel(g, bus_bar2_block_r(),
126 nvgpu_aperture_mask(g, inst_block, 126 nvgpu_aperture_mask(g, inst_block,
@@ -130,7 +130,7 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g)
130 bus_bar2_block_mode_virtual_f() | 130 bus_bar2_block_mode_virtual_f() |
131 bus_bar2_block_ptr_f(inst_pa)); 131 bus_bar2_block_ptr_f(inst_pa));
132 132
133 gk20a_dbg_fn("done"); 133 nvgpu_log_fn(g, "done");
134 return 0; 134 return 0;
135} 135}
136 136
@@ -433,7 +433,7 @@ void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
433 u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); 433 u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
434 u32 pdb_addr_hi = u64_hi32(pdb_addr); 434 u32 pdb_addr_hi = u64_hi32(pdb_addr);
435 435
436 gk20a_dbg_info("pde pa=0x%llx", pdb_addr); 436 nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr);
437 437
438 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), 438 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
439 nvgpu_aperture_mask(g, vm->pdb.mem, 439 nvgpu_aperture_mask(g, vm->pdb.mem,
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index c94d580a..ca111725 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -37,8 +37,8 @@
37#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h> 37#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
38#include <nvgpu/hw/gp10b/hw_fuse_gp10b.h> 38#include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>
39 39
40#define gp10b_dbg_pmu(fmt, arg...) \ 40#define gp10b_dbg_pmu(g, fmt, arg...) \
41 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 41 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
42 42
43/* PROD settings for ELPG sequencing registers*/ 43/* PROD settings for ELPG sequencing registers*/
44static struct pg_init_sequence_list _pginitseq_gp10b[] = { 44static struct pg_init_sequence_list _pginitseq_gp10b[] = {
@@ -147,9 +147,9 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
147 struct pmu_cmd cmd; 147 struct pmu_cmd cmd;
148 u32 seq; 148 u32 seq;
149 149
150 gk20a_dbg_fn(""); 150 nvgpu_log_fn(g, " ");
151 151
152 gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); 152 gp10b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
153 if (g->pmu_lsf_pmu_wpr_init_done) { 153 if (g->pmu_lsf_pmu_wpr_init_done) {
154 /* send message to load FECS falcon */ 154 /* send message to load FECS falcon */
155 memset(&cmd, 0, sizeof(struct pmu_cmd)); 155 memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -164,13 +164,13 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
164 cmd.cmd.acr.boot_falcons.usevamask = 0; 164 cmd.cmd.acr.boot_falcons.usevamask = 0;
165 cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0; 165 cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0;
166 cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0; 166 cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0;
167 gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", 167 gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
168 falconidmask); 168 falconidmask);
169 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 169 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
170 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); 170 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
171 } 171 }
172 172
173 gk20a_dbg_fn("done"); 173 nvgpu_log_fn(g, "done");
174 return; 174 return;
175} 175}
176 176
@@ -209,7 +209,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
209static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, 209static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
210 void *param, u32 handle, u32 status) 210 void *param, u32 handle, u32 status)
211{ 211{
212 gk20a_dbg_fn(""); 212 nvgpu_log_fn(g, " ");
213 213
214 if (status != 0) { 214 if (status != 0) {
215 nvgpu_err(g, "GR PARAM cmd aborted"); 215 nvgpu_err(g, "GR PARAM cmd aborted");
@@ -217,7 +217,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
217 return; 217 return;
218 } 218 }
219 219
220 gp10b_dbg_pmu("GR PARAM is acknowledged from PMU %x \n", 220 gp10b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x \n",
221 msg->msg.pg.msg_type); 221 msg->msg.pg.msg_type);
222 222
223 return; 223 return;
@@ -243,7 +243,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
243 cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = 243 cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor =
244 g->ldiv_slowdown_factor; 244 g->ldiv_slowdown_factor;
245 245
246 gp10b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM "); 246 gp10b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM ");
247 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 247 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
248 pmu_handle_gr_param_msg, pmu, &seq, ~0); 248 pmu_handle_gr_param_msg, pmu, &seq, ~0);
249 249
@@ -276,7 +276,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g)
276 u32 reg_writes; 276 u32 reg_writes;
277 u32 index; 277 u32 index;
278 278
279 gk20a_dbg_fn(""); 279 nvgpu_log_fn(g, " ");
280 280
281 if (g->elpg_enabled) { 281 if (g->elpg_enabled) {
282 reg_writes = ((sizeof(_pginitseq_gp10b) / 282 reg_writes = ((sizeof(_pginitseq_gp10b) /
@@ -288,7 +288,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g)
288 } 288 }
289 } 289 }
290 290
291 gk20a_dbg_fn("done"); 291 nvgpu_log_fn(g, "done");
292 return ret; 292 return ret;
293} 293}
294 294
@@ -305,7 +305,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g)
305 struct nvgpu_pmu *pmu = &g->pmu; 305 struct nvgpu_pmu *pmu = &g->pmu;
306 int err; 306 int err;
307 307
308 gk20a_dbg_fn(""); 308 nvgpu_log_fn(g, " ");
309 309
310 nvgpu_mutex_acquire(&pmu->isr_mutex); 310 nvgpu_mutex_acquire(&pmu->isr_mutex);
311 nvgpu_flcn_reset(pmu->flcn); 311 nvgpu_flcn_reset(pmu->flcn);
@@ -333,7 +333,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g)
333 if (err) 333 if (err)
334 return err; 334 return err;
335 335
336 gk20a_dbg_fn("done"); 336 nvgpu_log_fn(g, "done");
337 return 0; 337 return 0;
338 338
339} 339}
diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
index 385bebbd..3f089545 100644
--- a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GP10B RPFB 2 * GP10B RPFB
3 * 3 *
4 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -42,7 +42,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
42 size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE * 42 size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE *
43 fifo_replay_fault_buffer_size_hw_entries_v(); 43 fifo_replay_fault_buffer_size_hw_entries_v();
44 44
45 gk20a_dbg_fn(""); 45 nvgpu_log_fn(g, " ");
46 46
47 if (!g->mm.bar2_desc.gpu_va) { 47 if (!g->mm.bar2_desc.gpu_va) {
48 err = nvgpu_dma_alloc_map_sys(vm, rbfb_size, 48 err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
@@ -60,7 +60,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
60 gk20a_writel(g, fifo_replay_fault_buffer_lo_r(), 60 gk20a_writel(g, fifo_replay_fault_buffer_lo_r(),
61 fifo_replay_fault_buffer_lo_base_f(addr_lo) | 61 fifo_replay_fault_buffer_lo_base_f(addr_lo) |
62 fifo_replay_fault_buffer_lo_enable_true_v()); 62 fifo_replay_fault_buffer_lo_enable_true_v());
63 gk20a_dbg_fn("done"); 63 nvgpu_log_fn(g, "done");
64 return 0; 64 return 0;
65} 65}
66 66
@@ -75,14 +75,14 @@ u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
75{ 75{
76 u32 get_idx = 0; 76 u32 get_idx = 0;
77 77
78 gk20a_dbg_fn(""); 78 nvgpu_log_fn(g, " ");
79 79
80 get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r()); 80 get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r());
81 81
82 if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) 82 if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
83 nvgpu_err(g, "Error in replayable fault buffer"); 83 nvgpu_err(g, "Error in replayable fault buffer");
84 84
85 gk20a_dbg_fn("done"); 85 nvgpu_log_fn(g, "done");
86 return get_idx; 86 return get_idx;
87} 87}
88 88
@@ -90,13 +90,13 @@ u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g)
90{ 90{
91 u32 put_idx = 0; 91 u32 put_idx = 0;
92 92
93 gk20a_dbg_fn(""); 93 nvgpu_log_fn(g, " ");
94 put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r()); 94 put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r());
95 95
96 if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) 96 if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
97 nvgpu_err(g, "Error in UVM"); 97 nvgpu_err(g, "Error in UVM");
98 98
99 gk20a_dbg_fn("done"); 99 nvgpu_log_fn(g, "done");
100 return put_idx; 100 return put_idx;
101} 101}
102 102
diff --git a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c
index c69bd0bb..4f1de559 100644
--- a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GP10B Therm 2 * GP10B Therm
3 * 3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ int gp10b_init_therm_setup_hw(struct gk20a *g)
33{ 33{
34 u32 v; 34 u32 v;
35 35
36 gk20a_dbg_fn(""); 36 nvgpu_log_fn(g, " ");
37 37
38 /* program NV_THERM registers */ 38 /* program NV_THERM registers */
39 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | 39 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
@@ -96,7 +96,7 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g)
96 u32 active_engine_id = 0; 96 u32 active_engine_id = 0;
97 struct fifo_gk20a *f = &g->fifo; 97 struct fifo_gk20a *f = &g->fifo;
98 98
99 gk20a_dbg_fn(""); 99 nvgpu_log_fn(g, " ");
100 100
101 for (engine_id = 0; engine_id < f->num_engines; engine_id++) { 101 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
102 active_engine_id = f->active_engines_list[engine_id]; 102 active_engine_id = f->active_engines_list[engine_id];
@@ -130,6 +130,6 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g)
130 idle_filter &= ~therm_hubmmu_idle_filter_value_m(); 130 idle_filter &= ~therm_hubmmu_idle_filter_value_m();
131 gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); 131 gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
132 132
133 gk20a_dbg_fn("done"); 133 nvgpu_log_fn(g, "done");
134 return 0; 134 return 0;
135} 135}
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
index 7ca8c703..673cb7f2 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -43,8 +43,8 @@
43#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h> 43#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
44 44
45/*Defines*/ 45/*Defines*/
46#define gv11b_dbg_pmu(fmt, arg...) \ 46#define gv11b_dbg_pmu(g, fmt, arg...) \
47 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 47 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
48 48
49static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value) 49static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
50{ 50{
@@ -60,7 +60,7 @@ int gv11b_alloc_blob_space(struct gk20a *g,
60{ 60{
61 int err; 61 int err;
62 62
63 gv11b_dbg_pmu("alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS"); 63 gv11b_dbg_pmu(g, "alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS");
64 err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS, 64 err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
65 size, mem); 65 size, mem);
66 66
@@ -87,10 +87,10 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
87 start = nvgpu_mem_get_addr(g, &acr->ucode_blob); 87 start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
88 size = acr->ucode_blob.size; 88 size = acr->ucode_blob.size;
89 89
90 gv11b_dbg_pmu("acr ucode blob start %llx\n", start); 90 gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start);
91 gv11b_dbg_pmu("acr ucode blob size %x\n", size); 91 gv11b_dbg_pmu(g, "acr ucode blob size %x\n", size);
92 92
93 gv11b_dbg_pmu(""); 93 gv11b_dbg_pmu(g, " ");
94 94
95 if (!acr_fw) { 95 if (!acr_fw) {
96 /*First time init case*/ 96 /*First time init case*/
@@ -110,17 +110,17 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
110 acr->fw_hdr->hdr_offset); 110 acr->fw_hdr->hdr_offset);
111 img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256); 111 img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);
112 112
113 gv11b_dbg_pmu("sig dbg offset %u\n", 113 gv11b_dbg_pmu(g, "sig dbg offset %u\n",
114 acr->fw_hdr->sig_dbg_offset); 114 acr->fw_hdr->sig_dbg_offset);
115 gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size); 115 gv11b_dbg_pmu(g, "sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
116 gv11b_dbg_pmu("sig prod offset %u\n", 116 gv11b_dbg_pmu(g, "sig prod offset %u\n",
117 acr->fw_hdr->sig_prod_offset); 117 acr->fw_hdr->sig_prod_offset);
118 gv11b_dbg_pmu("sig prod size %u\n", 118 gv11b_dbg_pmu(g, "sig prod size %u\n",
119 acr->fw_hdr->sig_prod_size); 119 acr->fw_hdr->sig_prod_size);
120 gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc); 120 gv11b_dbg_pmu(g, "patch loc %u\n", acr->fw_hdr->patch_loc);
121 gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig); 121 gv11b_dbg_pmu(g, "patch sig %u\n", acr->fw_hdr->patch_sig);
122 gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset); 122 gv11b_dbg_pmu(g, "header offset %u\n", acr->fw_hdr->hdr_offset);
123 gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size); 123 gv11b_dbg_pmu(g, "header size %u\n", acr->fw_hdr->hdr_size);
124 124
125 /* Lets patch the signatures first.. */ 125 /* Lets patch the signatures first.. */
126 if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load, 126 if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
@@ -144,7 +144,7 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
144 } 144 }
145 145
146 for (index = 0; index < 9; index++) 146 for (index = 0; index < 9; index++)
147 gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n", 147 gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n",
148 acr_ucode_header_t210_load[index]); 148 acr_ucode_header_t210_load[index]);
149 149
150 acr_dmem = (u64 *) 150 acr_dmem = (u64 *)
@@ -212,7 +212,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
212 struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; 212 struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
213 u32 dst; 213 u32 dst;
214 214
215 gk20a_dbg_fn(""); 215 nvgpu_log_fn(g, " ");
216 216
217 gk20a_writel(g, pwr_falcon_itfen_r(), 217 gk20a_writel(g, pwr_falcon_itfen_r(),
218 gk20a_readl(g, pwr_falcon_itfen_r()) | 218 gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -237,7 +237,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
237 (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, 237 (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
238 pmu_bl_gm10x_desc->bl_start_tag); 238 pmu_bl_gm10x_desc->bl_start_tag);
239 239
240 gv11b_dbg_pmu("Before starting falcon with BL\n"); 240 gv11b_dbg_pmu(g, "Before starting falcon with BL\n");
241 241
242 virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; 242 virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
243 243
@@ -281,7 +281,7 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g,
281 struct nvgpu_pmu *pmu = &g->pmu; 281 struct nvgpu_pmu *pmu = &g->pmu;
282 int err; 282 int err;
283 283
284 gk20a_dbg_fn(""); 284 nvgpu_log_fn(g, " ");
285 285
286 nvgpu_mutex_acquire(&pmu->isr_mutex); 286 nvgpu_mutex_acquire(&pmu->isr_mutex);
287 nvgpu_flcn_reset(pmu->flcn); 287 nvgpu_flcn_reset(pmu->flcn);
diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
index bb7c37bd..b4e2cb79 100644
--- a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GV11B Cycle stats snapshots support 2 * GV11B Cycle stats snapshots support
3 * 3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -148,7 +148,7 @@ int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
148 perf_pmasys_mem_block_target_lfb_f())); 148 perf_pmasys_mem_block_target_lfb_f()));
149 149
150 150
151 gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); 151 nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n");
152 152
153 return 0; 153 return 0;
154 154
@@ -186,7 +186,7 @@ void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr)
186 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); 186 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
187 data->hw_snapshot = NULL; 187 data->hw_snapshot = NULL;
188 188
189 gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); 189 nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");
190} 190}
191 191
192int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, 192int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending,
diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
index db09016c..5dea7654 100644
--- a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
@@ -57,7 +57,7 @@ int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
57 u32 inst_pa_page; 57 u32 inst_pa_page;
58 int err; 58 int err;
59 59
60 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 60 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
61 err = gk20a_busy(g); 61 err = gk20a_busy(g);
62 if (err) { 62 if (err) {
63 nvgpu_err(g, "failed to poweron"); 63 nvgpu_err(g, "failed to poweron");
@@ -100,7 +100,7 @@ int gv11b_perfbuf_disable_locked(struct gk20a *g)
100{ 100{
101 int err; 101 int err;
102 102
103 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 103 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
104 err = gk20a_busy(g); 104 err = gk20a_busy(g);
105 if (err) { 105 if (err) {
106 nvgpu_err(g, "failed to poweron"); 106 nvgpu_err(g, "failed to poweron");
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index 30a2bca2..8bbde5c3 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -1427,7 +1427,7 @@ static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
1427 u32 reg_val; 1427 u32 reg_val;
1428 struct nvgpu_timeout timeout; 1428 struct nvgpu_timeout timeout;
1429 1429
1430 gk20a_dbg_fn(""); 1430 nvgpu_log_fn(g, " ");
1431 1431
1432 nvgpu_mutex_acquire(&g->mm.tlb_lock); 1432 nvgpu_mutex_acquire(&g->mm.tlb_lock);
1433 1433
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 11b393e5..932e7626 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -60,7 +60,7 @@
60 60
61void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) 61void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
62{ 62{
63 63 struct gk20a *g = tsg->g;
64 u32 runlist_entry_0 = ram_rl_entry_type_tsg_v(); 64 u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();
65 65
66 if (tsg->timeslice_timeout) 66 if (tsg->timeslice_timeout)
@@ -79,7 +79,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
79 runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid); 79 runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid);
80 runlist[3] = 0; 80 runlist[3] = 0;
81 81
82 gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n", 82 nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
83 runlist[0], runlist[1], runlist[2], runlist[3]); 83 runlist[0], runlist[1], runlist[2], runlist[3]);
84 84
85} 85}
@@ -119,7 +119,7 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
119 ram_rl_entry_chid_f(c->chid); 119 ram_rl_entry_chid_f(c->chid);
120 runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi); 120 runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);
121 121
122 gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", 122 nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
123 runlist[0], runlist[1], runlist[2], runlist[3]); 123 runlist[0], runlist[1], runlist[2], runlist[3]);
124} 124}
125 125
@@ -139,7 +139,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
139 struct nvgpu_mem *mem = &c->inst_block; 139 struct nvgpu_mem *mem = &c->inst_block;
140 u32 data; 140 u32 data;
141 141
142 gk20a_dbg_fn(""); 142 nvgpu_log_fn(g, " ");
143 143
144 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); 144 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
145 145
@@ -211,10 +211,11 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
211 211
212void gv11b_ring_channel_doorbell(struct channel_gk20a *c) 212void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
213{ 213{
214 struct fifo_gk20a *f = &c->g->fifo; 214 struct gk20a *g = c->g;
215 struct fifo_gk20a *f = &g->fifo;
215 u32 hw_chid = f->channel_base + c->chid; 216 u32 hw_chid = f->channel_base + c->chid;
216 217
217 gk20a_dbg_info("channel ring door bell %d\n", c->chid); 218 nvgpu_log_info(g, "channel ring door bell %d\n", c->chid);
218 219
219 nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(), 220 nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(),
220 usermode_notify_channel_pending_id_f(hw_chid)); 221 usermode_notify_channel_pending_id_f(hw_chid));
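
Where a function previously had no struct gk20a pointer in scope, the patch fetches one from whatever object is at hand before logging: gk20a_from_vm(vm) in gr_gp10b.c above, tsg->g and c->g in this file. A small sketch of that shape (the function name and message are illustrative only):

    static void example_log_from_channel(struct channel_gk20a *c)
    {
            struct gk20a *g = c->g;           /* the logger needs g */
            struct fifo_gk20a *f = &g->fifo;  /* existing lookups reuse it */

            nvgpu_log_info(g, "channel %d, base %u", c->chid, f->channel_base);
    }
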
@@ -256,7 +257,7 @@ void channel_gv11b_unbind(struct channel_gk20a *ch)
256{ 257{
257 struct gk20a *g = ch->g; 258 struct gk20a *g = ch->g;
258 259
259 gk20a_dbg_fn(""); 260 nvgpu_log_fn(g, " ");
260 261
261 if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) { 262 if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
262 gk20a_writel(g, ccsr_channel_inst_r(ch->chid), 263 gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
@@ -729,7 +730,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
729 func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id, 730 func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id,
730 timeout_rc_type); 731 timeout_rc_type);
731 if (func_ret != 0) { 732 if (func_ret != 0) {
732 gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id); 733 nvgpu_log_info(g, "preempt timeout pbdma %d", pbdma_id);
733 ret |= func_ret; 734 ret |= func_ret;
734 } 735 }
735 } 736 }
@@ -743,7 +744,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
743 timeout_rc_type); 744 timeout_rc_type);
744 745
745 if (func_ret != 0) { 746 if (func_ret != 0) {
746 gk20a_dbg_info("preempt timeout engine %d", act_eng_id); 747 nvgpu_log_info(g, "preempt timeout engine %d", act_eng_id);
747 ret |= func_ret; 748 ret |= func_ret;
748 } 749 }
749 } 750 }
@@ -812,10 +813,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
812 u32 mutex_ret = 0; 813 u32 mutex_ret = 0;
813 u32 runlist_id; 814 u32 runlist_id;
814 815
815 gk20a_dbg_fn("%d", tsgid); 816 nvgpu_log_fn(g, "%d", tsgid);
816 817
817 runlist_id = f->tsg[tsgid].runlist_id; 818 runlist_id = f->tsg[tsgid].runlist_id;
818 gk20a_dbg_fn("runlist_id %d", runlist_id); 819 nvgpu_log_fn(g, "runlist_id %d", runlist_id);
819 820
820 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); 821 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
821 822
@@ -839,7 +840,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
839 u32 mutex_ret = 0; 840 u32 mutex_ret = 0;
840 u32 runlist_id; 841 u32 runlist_id;
841 842
842 gk20a_dbg_fn(""); 843 nvgpu_log_fn(g, " ");
843 844
844 for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) { 845 for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
845 if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id)) 846 if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
@@ -910,11 +911,11 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
910 return -EINVAL; 911 return -EINVAL;
911 912
912 if (runlist_id >= g->fifo.max_runlists) { 913 if (runlist_id >= g->fifo.max_runlists) {
913 gk20a_dbg_info("runlist_id = %d", runlist_id); 914 nvgpu_log_info(g, "runlist_id = %d", runlist_id);
914 return -EINVAL; 915 return -EINVAL;
915 } 916 }
916 917
917 gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id); 918 nvgpu_log_fn(g, "preempt id = %d, runlist_id = %d", id, runlist_id);
918 919
919 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); 920 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
920 921
@@ -1155,7 +1156,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
1155 unsigned int i; 1156 unsigned int i;
1156 u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); 1157 u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
1157 1158
1158 gk20a_dbg_fn(""); 1159 nvgpu_log_fn(g, " ");
1159 1160
1160 /* enable pmc pfifo */ 1161 /* enable pmc pfifo */
1161 g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); 1162 g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
@@ -1208,11 +1209,11 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
1208 gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF); 1209 gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
1209 1210
1210 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); 1211 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
1211 gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); 1212 nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
1212 gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); 1213 gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
1213 1214
1214 intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i)); 1215 intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
1215 gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall); 1216 nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
1216 gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall); 1217 gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
1217 } 1218 }
1218 1219
@@ -1246,12 +1247,12 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
1246 /* clear and enable pfifo interrupt */ 1247 /* clear and enable pfifo interrupt */
1247 gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); 1248 gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
1248 mask = gv11b_fifo_intr_0_en_mask(g); 1249 mask = gv11b_fifo_intr_0_en_mask(g);
1249 gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); 1250 nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
1250 gk20a_writel(g, fifo_intr_en_0_r(), mask); 1251 gk20a_writel(g, fifo_intr_en_0_r(), mask);
1251 gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); 1252 nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
1252 gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); 1253 gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
1253 1254
1254 gk20a_dbg_fn("done"); 1255 nvgpu_log_fn(g, "done");
1255 1256
1256 return 0; 1257 return 0;
1257} 1258}
@@ -1350,7 +1351,7 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
1350 1351
1351 tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info); 1352 tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
1352 } 1353 }
1353 gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid); 1354 nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid);
1354 1355
1355 /* 1356 /*
1356 * STATUS indicates whether the context request ack was eventually 1357 * STATUS indicates whether the context request ack was eventually
@@ -1391,14 +1392,14 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
1391 if (*info_status == 1392 if (*info_status ==
1392 fifo_intr_ctxsw_timeout_info_status_ack_received_v()) { 1393 fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {
1393 1394
1394 gk20a_dbg_info("ctxsw timeout info : ack received"); 1395 nvgpu_log_info(g, "ctxsw timeout info : ack received");
1395 /* no need to recover */ 1396 /* no need to recover */
1396 tsgid = FIFO_INVAL_TSG_ID; 1397 tsgid = FIFO_INVAL_TSG_ID;
1397 1398
1398 } else if (*info_status == 1399 } else if (*info_status ==
1399 fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) { 1400 fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {
1400 1401
1401 gk20a_dbg_info("ctxsw timeout info : dropped timeout"); 1402 nvgpu_log_info(g, "ctxsw timeout info : dropped timeout");
1402 /* no need to recover */ 1403 /* no need to recover */
1403 tsgid = FIFO_INVAL_TSG_ID; 1404 tsgid = FIFO_INVAL_TSG_ID;
1404 1405
@@ -1429,7 +1430,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
1429 timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r()); 1430 timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
1430 timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val); 1431 timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val);
1431 1432
1432 gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val); 1433 nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val);
1433 1434
1434 for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { 1435 for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
1435 active_eng_id = g->fifo.active_engines_list[engine_id]; 1436 active_eng_id = g->fifo.active_engines_list[engine_id];
@@ -1469,7 +1470,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
1469 true, true, verbose, 1470 true, true, verbose,
1470 RC_TYPE_CTXSW_TIMEOUT); 1471 RC_TYPE_CTXSW_TIMEOUT);
1471 } else { 1472 } else {
1472 gk20a_dbg_info( 1473 nvgpu_log_info(g,
1473 "fifo is waiting for ctx switch: " 1474 "fifo is waiting for ctx switch: "
1474 "for %d ms, %s=%d", ms, "tsg", tsgid); 1475 "for %d ms, %s=%d", ms, "tsg", tsgid);
1475 } 1476 }
@@ -1490,7 +1491,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
1490 pbdma_intr_0, handled, error_notifier); 1491 pbdma_intr_0, handled, error_notifier);
1491 1492
1492 if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { 1493 if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) {
1493 gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d", 1494 nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d",
1494 pbdma_id); 1495 pbdma_id);
1495 gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); 1496 gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
1496 *handled |= pbdma_intr_0_clear_faulted_error_pending_f(); 1497 *handled |= pbdma_intr_0_clear_faulted_error_pending_f();
@@ -1498,7 +1499,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
1498 } 1499 }
1499 1500
1500 if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { 1501 if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) {
1501 gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d", 1502 nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d",
1502 pbdma_id); 1503 pbdma_id);
1503 *handled |= pbdma_intr_0_eng_reset_pending_f(); 1504 *handled |= pbdma_intr_0_eng_reset_pending_f();
1504 rc_type = RC_TYPE_PBDMA_FAULT; 1505 rc_type = RC_TYPE_PBDMA_FAULT;
@@ -1545,7 +1546,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
1545 return RC_TYPE_NO_RC; 1546 return RC_TYPE_NO_RC;
1546 1547
1547 if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) { 1548 if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) {
1548 gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", 1549 nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
1549 pbdma_id); 1550 pbdma_id);
1550 nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ", 1551 nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ",
1551 pbdma_id, pbdma_intr_1); 1552 pbdma_id, pbdma_intr_1);
@@ -1753,7 +1754,7 @@ void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
1753 u64 gpu_va = gpu_va_base + 1754 u64 gpu_va = gpu_va_base +
1754 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id); 1755 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id);
1755 1756
1756 gk20a_dbg_fn(""); 1757 nvgpu_log_fn(g, " ");
1757 1758
1758 off = cmd->off + off; 1759 off = cmd->off + off;
1759 1760
@@ -1792,7 +1793,7 @@ void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
1792{ 1793{
1793 u32 off = cmd->off; 1794 u32 off = cmd->off;
1794 1795
1795 gk20a_dbg_fn(""); 1796 nvgpu_log_fn(g, " ");
1796 1797
1797 /* semaphore_a */ 1798 /* semaphore_a */
1798 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); 1799 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
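
The fifo_gv11b.c hunks above all follow the same mechanical conversion: gk20a_dbg_fn()/gk20a_dbg_info() calls gain an explicit struct gk20a pointer and become nvgpu_log_fn()/nvgpu_log_info(), with the previously empty format string replaced by " ". A minimal before/after sketch of the pattern — example_preempt_log_*() is a hypothetical helper, not a function in this file, and the single-space format simply mirrors the converted calls (assumed here to be what the new logger expects instead of an empty string):

    /* Before: old-style macros, no gk20a pointer at the call site. */
    static void example_preempt_log_old(struct gk20a *g, u32 runlist_id)
    {
            gk20a_dbg_fn("");
            gk20a_dbg_info("runlist_id = %d", runlist_id);
    }

    /* After: g is passed explicitly; an empty format becomes " ". */
    static void example_preempt_log_new(struct gk20a *g, u32 runlist_id)
    {
            nvgpu_log_fn(g, " ");
            nvgpu_log_info(g, "runlist_id = %d", runlist_id);
    }
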
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index 52e442f3..536d9dcb 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -96,7 +96,7 @@ bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num)
96 default: 96 default:
97 break; 97 break;
98 } 98 }
99 gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); 99 nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
100 return valid; 100 return valid;
101} 101}
102 102
@@ -190,7 +190,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
190 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status); 190 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status);
191 191
192 if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) { 192 if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) {
193 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 193 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
194 "corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", 194 "corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
195 l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow); 195 l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow);
196 196
@@ -205,7 +205,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
205 0); 205 0);
206 } 206 }
207 if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) { 207 if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) {
208 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 208 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
209 "Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", 209 "Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
210 l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow); 210 l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow);
211 211
@@ -282,7 +282,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
282 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status); 282 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status);
283 283
284 if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) { 284 if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) {
285 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 285 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
286 "corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", 286 "corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
287 lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow); 287 lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow);
288 288
@@ -297,7 +297,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
297 0); 297 0);
298 } 298 }
299 if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) { 299 if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) {
300 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 300 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
301 "Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", 301 "Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
302 lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow); 302 lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow);
303 303
@@ -441,7 +441,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
441 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status); 441 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status);
442 442
443 if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) { 443 if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) {
444 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 444 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
445 "corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", 445 "corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
446 cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow); 446 cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow);
447 447
@@ -456,7 +456,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
456 0); 456 0);
457 } 457 }
458 if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) { 458 if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) {
459 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 459 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
460 "Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", 460 "Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
461 cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow); 461 cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow);
462 462
@@ -521,7 +521,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
521 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status); 521 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status);
522 522
523 if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) { 523 if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) {
524 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 524 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
525 "corrected error (SBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]", 525 "corrected error (SBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
526 l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow); 526 l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow);
527 527
@@ -536,7 +536,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
536 0); 536 0);
537 } 537 }
538 if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) { 538 if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) {
539 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 539 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
540 "Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]", 540 "Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
541 l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow); 541 l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow);
542 542
@@ -605,7 +605,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
605 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status); 605 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status);
606 606
607 if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) { 607 if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) {
608 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 608 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
609 "corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", 609 "corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
610 icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow); 610 icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow);
611 611
@@ -620,7 +620,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
620 0); 620 0);
621 } 621 }
622 if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) { 622 if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) {
623 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 623 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
624 "Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", 624 "Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
625 icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow); 625 icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow);
626 626
@@ -1129,14 +1129,14 @@ static void gr_gv11b_set_coalesce_buffer_size(struct gk20a *g, u32 data)
1129{ 1129{
1130 u32 val; 1130 u32 val;
1131 1131
1132 gk20a_dbg_fn(""); 1132 nvgpu_log_fn(g, " ");
1133 1133
1134 val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); 1134 val = gk20a_readl(g, gr_gpcs_tc_debug0_r());
1135 val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), 1135 val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(),
1136 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); 1136 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data));
1137 gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); 1137 gk20a_writel(g, gr_gpcs_tc_debug0_r(), val);
1138 1138
1139 gk20a_dbg_fn("done"); 1139 nvgpu_log_fn(g, "done");
1140} 1140}
1141 1141
1142static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data) 1142static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
@@ -1144,7 +1144,7 @@ static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
1144 u32 val; 1144 u32 val;
1145 bool flag; 1145 bool flag;
1146 1146
1147 gk20a_dbg_fn(""); 1147 nvgpu_log_fn(g, " ");
1148 1148
1149 val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r()); 1149 val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r());
1150 flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 1 : 0; 1150 flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 1 : 0;
@@ -1190,7 +1190,7 @@ static void gr_gv11b_set_skedcheck(struct gk20a *g, u32 data)
1190 1190
1191static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data) 1191static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data)
1192{ 1192{
1193 gk20a_dbg_fn(""); 1193 nvgpu_log_fn(g, " ");
1194 1194
1195 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { 1195 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
1196 gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(), 1196 gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(),
@@ -1224,7 +1224,7 @@ static void gr_gv11b_set_shader_cut_collector(struct gk20a *g, u32 data)
1224int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr, 1224int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
1225 u32 class_num, u32 offset, u32 data) 1225 u32 class_num, u32 offset, u32 data)
1226{ 1226{
1227 gk20a_dbg_fn(""); 1227 nvgpu_log_fn(g, " ");
1228 1228
1229 if (class_num == VOLTA_COMPUTE_A) { 1229 if (class_num == VOLTA_COMPUTE_A) {
1230 switch (offset << 2) { 1230 switch (offset << 2) {
@@ -1315,7 +1315,7 @@ void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
1315 u32 pd_ab_max_output; 1315 u32 pd_ab_max_output;
1316 u32 alpha_cb_size = data * 4; 1316 u32 alpha_cb_size = data * 4;
1317 1317
1318 gk20a_dbg_fn(""); 1318 nvgpu_log_fn(g, " ");
1319 1319
1320 if (alpha_cb_size > gr->alpha_cb_size) 1320 if (alpha_cb_size > gr->alpha_cb_size)
1321 alpha_cb_size = gr->alpha_cb_size; 1321 alpha_cb_size = gr->alpha_cb_size;
@@ -1360,7 +1360,7 @@ void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data)
1360 u32 gpc_index, ppc_index, stride, val; 1360 u32 gpc_index, ppc_index, stride, val;
1361 u32 cb_size_steady = data * 4, cb_size; 1361 u32 cb_size_steady = data * 4, cb_size;
1362 1362
1363 gk20a_dbg_fn(""); 1363 nvgpu_log_fn(g, " ");
1364 1364
1365 if (cb_size_steady > gr->attrib_cb_size) 1365 if (cb_size_steady > gr->attrib_cb_size)
1366 cb_size_steady = gr->attrib_cb_size; 1366 cb_size_steady = gr->attrib_cb_size;
@@ -1423,8 +1423,9 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
1423 struct nvgpu_mem *mem) 1423 struct nvgpu_mem *mem)
1424{ 1424{
1425 int err; 1425 int err;
1426 struct gk20a *g = gk20a_from_vm(vm);
1426 1427
1427 gk20a_dbg_fn(""); 1428 nvgpu_log_fn(g, " ");
1428 1429
1429 err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); 1430 err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
1430 if (err) 1431 if (err)
@@ -1500,9 +1501,9 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
1500 g->gr.max_tpc_count; 1501 g->gr.max_tpc_count;
1501 attrib_cb_size = ALIGN(attrib_cb_size, 128); 1502 attrib_cb_size = ALIGN(attrib_cb_size, 128);
1502 1503
1503 gk20a_dbg_info("gfxp context spill_size=%d", spill_size); 1504 nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
1504 gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); 1505 nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
1505 gk20a_dbg_info("gfxp context attrib_cb_size=%d", 1506 nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
1506 attrib_cb_size); 1507 attrib_cb_size);
1507 1508
1508 err = gr_gp10b_alloc_buffer(vm, 1509 err = gr_gp10b_alloc_buffer(vm,
@@ -1590,7 +1591,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1590 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); 1591 ctxsw_prog_main_image_compute_preemption_options_control_cta_f();
1591 int err; 1592 int err;
1592 1593
1593 gk20a_dbg_fn(""); 1594 nvgpu_log_fn(g, " ");
1594 1595
1595 tsg = tsg_gk20a_from_ch(c); 1596 tsg = tsg_gk20a_from_ch(c);
1596 if (!tsg) 1597 if (!tsg)
@@ -1600,7 +1601,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1600 1601
1601 if (gr_ctx->graphics_preempt_mode == 1602 if (gr_ctx->graphics_preempt_mode ==
1602 NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { 1603 NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
1603 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); 1604 nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
1604 nvgpu_mem_wr(g, mem, 1605 nvgpu_mem_wr(g, mem,
1605 ctxsw_prog_main_image_graphics_preemption_options_o(), 1606 ctxsw_prog_main_image_graphics_preemption_options_o(),
1606 gfxp_preempt_option); 1607 gfxp_preempt_option);
@@ -1608,7 +1609,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1608 1609
1609 if (gr_ctx->compute_preempt_mode == 1610 if (gr_ctx->compute_preempt_mode ==
1610 NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { 1611 NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
1611 gk20a_dbg_info("CILP: %x", cilp_preempt_option); 1612 nvgpu_log_info(g, "CILP: %x", cilp_preempt_option);
1612 nvgpu_mem_wr(g, mem, 1613 nvgpu_mem_wr(g, mem,
1613 ctxsw_prog_main_image_compute_preemption_options_o(), 1614 ctxsw_prog_main_image_compute_preemption_options_o(),
1614 cilp_preempt_option); 1615 cilp_preempt_option);
@@ -1616,7 +1617,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1616 1617
1617 if (gr_ctx->compute_preempt_mode == 1618 if (gr_ctx->compute_preempt_mode ==
1618 NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { 1619 NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
1619 gk20a_dbg_info("CTA: %x", cta_preempt_option); 1620 nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
1620 nvgpu_mem_wr(g, mem, 1621 nvgpu_mem_wr(g, mem,
1621 ctxsw_prog_main_image_compute_preemption_options_o(), 1622 ctxsw_prog_main_image_compute_preemption_options_o(),
1622 cta_preempt_option); 1623 cta_preempt_option);
@@ -1647,7 +1648,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1647 (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << 1648 (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) <<
1648 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); 1649 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
1649 1650
1650 gk20a_dbg_info("attrib cb addr : 0x%016x", addr); 1651 nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
1651 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); 1652 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
1652 1653
1653 addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> 1654 addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
@@ -1698,7 +1699,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1698 } 1699 }
1699 1700
1700out: 1701out:
1701 gk20a_dbg_fn("done"); 1702 nvgpu_log_fn(g, "done");
1702} 1703}
1703static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g, 1704static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g,
1704 struct gk20a_debug_output *o, 1705 struct gk20a_debug_output *o,
@@ -1949,7 +1950,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
1949 u32 activity0, activity1, activity2, activity4; 1950 u32 activity0, activity1, activity2, activity4;
1950 struct nvgpu_timeout timeout; 1951 struct nvgpu_timeout timeout;
1951 1952
1952 gk20a_dbg_fn(""); 1953 nvgpu_log_fn(g, " ");
1953 1954
1954 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); 1955 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
1955 1956
@@ -1974,7 +1975,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
1974 gr_activity_empty_or_preempted(activity4)); 1975 gr_activity_empty_or_preempted(activity4));
1975 1976
1976 if (!gr_enabled || (!gr_busy && !ctxsw_active)) { 1977 if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
1977 gk20a_dbg_fn("done"); 1978 nvgpu_log_fn(g, "done");
1978 return 0; 1979 return 0;
1979 } 1980 }
1980 1981
@@ -2191,7 +2192,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2191 NVGPU_PREEMPTION_MODE_COMPUTE_CILP); 2192 NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
2192 } 2193 }
2193 2194
2194 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2195 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2195 "SM Exception received on gpc %d tpc %d sm %d = 0x%08x", 2196 "SM Exception received on gpc %d tpc %d sm %d = 0x%08x",
2196 gpc, tpc, sm, global_esr); 2197 gpc, tpc, sm, global_esr);
2197 2198
@@ -2210,13 +2211,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2210 if (warp_esr != 0 || (global_esr & global_mask) != 0) { 2211 if (warp_esr != 0 || (global_esr & global_mask) != 0) {
2211 *ignore_debugger = true; 2212 *ignore_debugger = true;
2212 2213
2213 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2214 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2214 "CILP: starting wait for LOCKED_DOWN on " 2215 "CILP: starting wait for LOCKED_DOWN on "
2215 "gpc %d tpc %d sm %d", 2216 "gpc %d tpc %d sm %d",
2216 gpc, tpc, sm); 2217 gpc, tpc, sm);
2217 2218
2218 if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { 2219 if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) {
2219 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2220 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2220 "CILP: Broadcasting STOP_TRIGGER from " 2221 "CILP: Broadcasting STOP_TRIGGER from "
2221 "gpc %d tpc %d sm %d", 2222 "gpc %d tpc %d sm %d",
2222 gpc, tpc, sm); 2223 gpc, tpc, sm);
@@ -2225,7 +2226,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2225 2226
2226 gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); 2227 gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch);
2227 } else { 2228 } else {
2228 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2229 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2229 "CILP: STOP_TRIGGER from " 2230 "CILP: STOP_TRIGGER from "
2230 "gpc %d tpc %d sm %d", 2231 "gpc %d tpc %d sm %d",
2231 gpc, tpc, sm); 2232 gpc, tpc, sm);
@@ -2238,12 +2239,12 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2238 gpc, tpc, sm); 2239 gpc, tpc, sm);
2239 g->ops.gr.clear_sm_hww(g, 2240 g->ops.gr.clear_sm_hww(g,
2240 gpc, tpc, sm, global_esr_copy); 2241 gpc, tpc, sm, global_esr_copy);
2241 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2242 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2242 "CILP: HWWs cleared for " 2243 "CILP: HWWs cleared for "
2243 "gpc %d tpc %d sm %d", 2244 "gpc %d tpc %d sm %d",
2244 gpc, tpc, sm); 2245 gpc, tpc, sm);
2245 2246
2246 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); 2247 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
2247 ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); 2248 ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
2248 if (ret) { 2249 if (ret) {
2249 nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); 2250 nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
@@ -2252,7 +2253,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2252 2253
2253 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset); 2254 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
2254 if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) { 2255 if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) {
2255 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2256 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2256 "CILP: clearing SINGLE_STEP_MODE " 2257 "CILP: clearing SINGLE_STEP_MODE "
2257 "before resume for gpc %d tpc %d sm %d", 2258 "before resume for gpc %d tpc %d sm %d",
2258 gpc, tpc, sm); 2259 gpc, tpc, sm);
@@ -2262,13 +2263,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2262 gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0); 2263 gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0);
2263 } 2264 }
2264 2265
2265 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2266 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2266 "CILP: resume for gpc %d tpc %d sm %d", 2267 "CILP: resume for gpc %d tpc %d sm %d",
2267 gpc, tpc, sm); 2268 gpc, tpc, sm);
2268 g->ops.gr.resume_single_sm(g, gpc, tpc, sm); 2269 g->ops.gr.resume_single_sm(g, gpc, tpc, sm);
2269 2270
2270 *ignore_debugger = true; 2271 *ignore_debugger = true;
2271 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2272 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2272 "CILP: All done on gpc %d, tpc %d sm %d", 2273 "CILP: All done on gpc %d, tpc %d sm %d",
2273 gpc, tpc, sm); 2274 gpc, tpc, sm);
2274 } 2275 }
@@ -2388,7 +2389,7 @@ int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
2388 GPU_LIT_NUM_TPC_PER_GPC); 2389 GPU_LIT_NUM_TPC_PER_GPC);
2389 u32 num_tpcs = num_gpcs * num_tpc_per_gpc; 2390 u32 num_tpcs = num_gpcs * num_tpc_per_gpc;
2390 2391
2391 gk20a_dbg_fn(""); 2392 nvgpu_log_fn(g, " ");
2392 2393
2393 if (!gr->map_tiles) 2394 if (!gr->map_tiles)
2394 return -1; 2395 return -1;
@@ -2535,7 +2536,7 @@ void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
2535{ 2536{
2536 u32 val, i, j; 2537 u32 val, i, j;
2537 2538
2538 gk20a_dbg_fn(""); 2539 nvgpu_log_fn(g, " ");
2539 2540
2540 for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) { 2541 for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) {
2541 val = 2542 val =
@@ -2666,8 +2667,9 @@ int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
2666 u32 addr_hi; 2667 u32 addr_hi;
2667 struct ctx_header_desc *ctx; 2668 struct ctx_header_desc *ctx;
2668 int err; 2669 int err;
2670 struct gk20a *g = c->g;
2669 2671
2670 gk20a_dbg_fn(""); 2672 nvgpu_log_fn(g, " ");
2671 2673
2672 err = gv11b_alloc_subctx_header(c); 2674 err = gv11b_alloc_subctx_header(c);
2673 if (err) 2675 if (err)
@@ -2704,7 +2706,7 @@ int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c)
2704 u32 pe_vaf; 2706 u32 pe_vaf;
2705 u32 pe_vsc_vpc; 2707 u32 pe_vsc_vpc;
2706 2708
2707 gk20a_dbg_fn(""); 2709 nvgpu_log_fn(g, " ");
2708 2710
2709 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); 2711 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
2710 ds_debug = gk20a_readl(g, gr_ds_debug_r()); 2712 ds_debug = gk20a_readl(g, gr_ds_debug_r());
@@ -2814,7 +2816,7 @@ void gr_gv11b_load_tpc_mask(struct gk20a *g)
2814 } 2816 }
2815 } 2817 }
2816 2818
2817 gk20a_dbg_info("pes_tpc_mask %u\n", pes_tpc_mask); 2819 nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask);
2818 fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc); 2820 fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc);
2819 if (g->tpc_fs_mask_user && 2821 if (g->tpc_fs_mask_user &&
2820 g->tpc_fs_mask_user != fuse_tpc_mask && 2822 g->tpc_fs_mask_user != fuse_tpc_mask &&
@@ -2860,7 +2862,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g)
2860 u32 ver = g->params.gpu_arch + g->params.gpu_impl; 2862 u32 ver = g->params.gpu_arch + g->params.gpu_impl;
2861 u32 rev = g->params.gpu_rev; 2863 u32 rev = g->params.gpu_rev;
2862 2864
2863 gk20a_dbg_fn(""); 2865 nvgpu_log_fn(g, " ");
2864 2866
2865 data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); 2867 data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
2866 data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), 2868 data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
@@ -2928,14 +2930,14 @@ void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
2928 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 2930 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
2929 2931
2930 reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset); 2932 reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset);
2931 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2933 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2932 "sm tpc esr sm sel reg val: 0x%x", reg_val); 2934 "sm tpc esr sm sel reg val: 0x%x", reg_val);
2933 *esr_sm_sel = 0; 2935 *esr_sm_sel = 0;
2934 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val)) 2936 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val))
2935 *esr_sm_sel = 1; 2937 *esr_sm_sel = 1;
2936 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val)) 2938 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val))
2937 *esr_sm_sel |= 1 << 1; 2939 *esr_sm_sel |= 1 << 1;
2938 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2940 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2939 "esr_sm_sel bitmask: 0x%x", *esr_sm_sel); 2941 "esr_sm_sel bitmask: 0x%x", *esr_sm_sel);
2940} 2942}
2941 2943
@@ -2954,7 +2956,7 @@ int gv11b_gr_sm_trigger_suspend(struct gk20a *g)
2954 gk20a_writel(g, 2956 gk20a_writel(g,
2955 gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0); 2957 gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);
2956 2958
2957 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 2959 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
2958 "stop trigger enable: broadcast dbgr_control0: 0x%x ", 2960 "stop trigger enable: broadcast dbgr_control0: 0x%x ",
2959 dbgr_control0); 2961 dbgr_control0);
2960 2962
@@ -3012,19 +3014,19 @@ void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
3012 3014
3013 /* Only for debug purpose */ 3015 /* Only for debug purpose */
3014 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { 3016 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
3015 gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", 3017 nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n",
3016 sm_id, w_state[sm_id].valid_warps[0]); 3018 sm_id, w_state[sm_id].valid_warps[0]);
3017 gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", 3019 nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n",
3018 sm_id, w_state[sm_id].valid_warps[1]); 3020 sm_id, w_state[sm_id].valid_warps[1]);
3019 3021
3020 gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", 3022 nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n",
3021 sm_id, w_state[sm_id].trapped_warps[0]); 3023 sm_id, w_state[sm_id].trapped_warps[0]);
3022 gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", 3024 nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n",
3023 sm_id, w_state[sm_id].trapped_warps[1]); 3025 sm_id, w_state[sm_id].trapped_warps[1]);
3024 3026
3025 gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", 3027 nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n",
3026 sm_id, w_state[sm_id].paused_warps[0]); 3028 sm_id, w_state[sm_id].paused_warps[0]);
3027 gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", 3029 nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n",
3028 sm_id, w_state[sm_id].paused_warps[1]); 3030 sm_id, w_state[sm_id].paused_warps[1]);
3029 } 3031 }
3030} 3032}
@@ -3257,7 +3259,7 @@ bool gv11b_gr_sm_debugger_attached(struct gk20a *g)
3257 */ 3259 */
3258 debugger_mode = 3260 debugger_mode =
3259 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0); 3261 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0);
3260 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3262 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3261 "SM Debugger Mode: %d", debugger_mode); 3263 "SM Debugger Mode: %d", debugger_mode);
3262 if (debugger_mode == 3264 if (debugger_mode ==
3263 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v()) 3265 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v())
@@ -3576,7 +3578,7 @@ static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g,
3576 dbgr_status0, dbgr_control0, warps_valid, 3578 dbgr_status0, dbgr_control0, warps_valid,
3577 warps_paused, warps_trapped); 3579 warps_paused, warps_trapped);
3578 else 3580 else
3579 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3581 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3580 "STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx " 3582 "STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx "
3581 "PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n", 3583 "PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
3582 dbgr_status0, dbgr_control0, warps_valid, 3584 dbgr_status0, dbgr_control0, warps_valid,
@@ -3598,7 +3600,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
3598 gk20a_gr_tpc_offset(g, tpc) + 3600 gk20a_gr_tpc_offset(g, tpc) +
3599 gv11b_gr_sm_offset(g, sm); 3601 gv11b_gr_sm_offset(g, sm);
3600 3602
3601 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3603 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3602 "GPC%d TPC%d: locking down SM%d", gpc, tpc, sm); 3604 "GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);
3603 3605
3604 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), 3606 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
@@ -3642,7 +3644,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
3642 } 3644 }
3643 3645
3644 if (locked_down || no_error_pending) { 3646 if (locked_down || no_error_pending) {
3645 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3647 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3646 "GPC%d TPC%d: locked down SM%d", gpc, tpc, sm); 3648 "GPC%d TPC%d: locked down SM%d", gpc, tpc, sm);
3647 return 0; 3649 return 0;
3648 } 3650 }
@@ -3677,7 +3679,7 @@ int gv11b_gr_lock_down_sm(struct gk20a *g,
3677 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) + 3679 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
3678 gv11b_gr_sm_offset(g, sm); 3680 gv11b_gr_sm_offset(g, sm);
3679 3681
3680 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3682 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3681 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); 3683 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm);
3682 3684
3683 /* assert stop trigger */ 3685 /* assert stop trigger */
@@ -3699,13 +3701,13 @@ void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
3699 3701
3700 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, 3702 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
3701 global_esr); 3703 global_esr);
3702 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 3704 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
3703 "Cleared HWW global esr, current reg val: 0x%x", 3705 "Cleared HWW global esr, current reg val: 0x%x",
3704 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + 3706 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() +
3705 offset)); 3707 offset));
3706 3708
3707 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0); 3709 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0);
3708 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 3710 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
3709 "Cleared HWW warp esr, current reg val: 0x%x", 3711 "Cleared HWW warp esr, current reg val: 0x%x",
3710 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + 3712 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() +
3711 offset)); 3713 offset));
@@ -4440,7 +4442,7 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr,
4440{ 4442{
4441 u32 gpc_addr; 4443 u32 gpc_addr;
4442 4444
4443 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 4445 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
4444 4446
4445 /* setup defaults */ 4447 /* setup defaults */
4446 *addr_type = CTXSW_ADDR_TYPE_SYS; 4448 *addr_type = CTXSW_ADDR_TYPE_SYS;
@@ -4591,12 +4593,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
4591 t = 0; 4593 t = 0;
4592 *num_registers = 0; 4594 *num_registers = 0;
4593 4595
4594 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 4596 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
4595 4597
4596 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, 4598 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
4597 &gpc_num, &tpc_num, &ppc_num, &be_num, 4599 &gpc_num, &tpc_num, &ppc_num, &be_num,
4598 &broadcast_flags); 4600 &broadcast_flags);
4599 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); 4601 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
4600 if (err) 4602 if (err)
4601 return err; 4603 return err;
4602 4604
@@ -4690,7 +4692,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
4690 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || 4692 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
4691 (addr_type == CTXSW_ADDR_TYPE_ETPC)) && 4693 (addr_type == CTXSW_ADDR_TYPE_ETPC)) &&
4692 g->ops.gr.egpc_etpc_priv_addr_table) { 4694 g->ops.gr.egpc_etpc_priv_addr_table) {
4693 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); 4695 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
4694 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, 4696 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num,
4695 broadcast_flags, priv_addr_table, &t); 4697 broadcast_flags, priv_addr_table, &t);
4696 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { 4698 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) {
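
Two further patterns show up in the gr_gv11b.c hunks. First, functions that previously had no struct gk20a pointer in scope derive one before logging, as in gr_gv11b_alloc_buffer() (gk20a_from_vm(vm)) and gr_gv11b_commit_inst() (c->g). Second, calls that carried an explicit debug mask keep that mask unchanged; only the g argument is new. A sketch of both patterns — example_vm_op() and example_channel_op() are hypothetical illustrations, not functions from this file:

    /* Deriving g from the objects already at hand before logging. */
    static int example_vm_op(struct vm_gk20a *vm)
    {
            struct gk20a *g = gk20a_from_vm(vm);

            nvgpu_log_fn(g, " ");
            return 0;
    }

    static int example_channel_op(struct channel_gk20a *c, u64 gpu_va)
    {
            struct gk20a *g = c->g;

            /* Explicit debug masks are preserved; only g is added. */
            nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "gpu_va = 0x%llx", gpu_va);
            return 0;
    }
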
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
index 9f6d176e..9f9ff337 100644
--- a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
@@ -56,7 +56,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
56 u32 ltc_intr; 56 u32 ltc_intr;
57 u32 reg; 57 u32 reg;
58 58
59 gk20a_dbg_info("initialize gv11b l2"); 59 nvgpu_log_info(g, "initialize gv11b l2");
60 60
61 g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | 61 g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
62 mc_enable_l2_enabled_f()); 62 mc_enable_l2_enabled_f());
@@ -67,7 +67,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
67 67
68 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); 68 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
69 g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); 69 g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
70 gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count); 70 nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count);
71 71
72 /* Disable LTC interrupts */ 72 /* Disable LTC interrupts */
73 reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); 73 reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index b46ecb0a..f4084ad6 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -54,7 +54,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
54{ 54{
55 struct gk20a *g = gk20a_from_vm(vm); 55 struct gk20a *g = gk20a_from_vm(vm);
56 56
57 gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", 57 nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p",
58 nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); 58 nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va);
59 59
60 g->ops.mm.init_pdb(g, inst_block, vm); 60 g->ops.mm.init_pdb(g, inst_block, vm);
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
index c1b519d0..3f0e2f22 100644
--- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
@@ -37,8 +37,8 @@
37 37
38#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h> 38#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
39 39
40#define gv11b_dbg_pmu(fmt, arg...) \ 40#define gv11b_dbg_pmu(g, fmt, arg...) \
41 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 41 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
42 42
43#define ALIGN_4KB 12 43#define ALIGN_4KB 12
44 44
@@ -121,7 +121,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g)
121 u32 reg_writes; 121 u32 reg_writes;
122 u32 index; 122 u32 index;
123 123
124 gk20a_dbg_fn(""); 124 nvgpu_log_fn(g, " ");
125 125
126 if (g->elpg_enabled) { 126 if (g->elpg_enabled) {
127 reg_writes = ((sizeof(_pginitseq_gv11b) / 127 reg_writes = ((sizeof(_pginitseq_gv11b) /
@@ -133,7 +133,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g)
133 } 133 }
134 } 134 }
135 135
136 gk20a_dbg_fn("done"); 136 nvgpu_log_fn(g, "done");
137 return ret; 137 return ret;
138} 138}
139 139
@@ -187,7 +187,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
187 u64 addr_code_hi, addr_data_hi; 187 u64 addr_code_hi, addr_data_hi;
188 u32 i, blocks, addr_args; 188 u32 i, blocks, addr_args;
189 189
190 gk20a_dbg_fn(""); 190 nvgpu_log_fn(g, " ");
191 191
192 gk20a_writel(g, pwr_falcon_itfen_r(), 192 gk20a_writel(g, pwr_falcon_itfen_r(),
193 gk20a_readl(g, pwr_falcon_itfen_r()) | 193 gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -407,28 +407,28 @@ u32 gv11b_pmu_get_irqdest(struct gk20a *g)
407static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, 407static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
408 void *param, u32 handle, u32 status) 408 void *param, u32 handle, u32 status)
409{ 409{
410 gk20a_dbg_fn(""); 410 nvgpu_log_fn(g, " ");
411 411
412 if (status != 0) { 412 if (status != 0) {
413 nvgpu_err(g, "Sub-feature mask update cmd aborted\n"); 413 nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
414 return; 414 return;
415 } 415 }
416 416
417 gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n", 417 gv11b_dbg_pmu(g, "sub-feature mask update is acknowledged from PMU %x\n",
418 msg->msg.pg.msg_type); 418 msg->msg.pg.msg_type);
419} 419}
420 420
421static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, 421static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
422 void *param, u32 handle, u32 status) 422 void *param, u32 handle, u32 status)
423{ 423{
424 gk20a_dbg_fn(""); 424 nvgpu_log_fn(g, " ");
425 425
426 if (status != 0) { 426 if (status != 0) {
427 nvgpu_err(g, "GR PARAM cmd aborted\n"); 427 nvgpu_err(g, "GR PARAM cmd aborted\n");
428 return; 428 return;
429 } 429 }
430 430
431 gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n", 431 gv11b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x\n",
432 msg->msg.pg.msg_type); 432 msg->msg.pg.msg_type);
433} 433}
434 434
@@ -450,7 +450,7 @@ int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
450 cmd.cmd.pg.gr_init_param_v1.featuremask = 450 cmd.cmd.pg.gr_init_param_v1.featuremask =
451 NVGPU_PMU_GR_FEATURE_MASK_ALL; 451 NVGPU_PMU_GR_FEATURE_MASK_ALL;
452 452
453 gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); 453 gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
454 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 454 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
455 pmu_handle_pg_param_msg, pmu, &seq, ~0); 455 pmu_handle_pg_param_msg, pmu, &seq, ~0);
456 456
@@ -488,7 +488,7 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
488 NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC | 488 NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC |
489 NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG; 489 NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG;
490 490
491 gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); 491 gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
492 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 492 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
493 pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0); 493 pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0);
494 } else 494 } else
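
The pmu_gv11b.c hunk above also rewrites the file-local wrapper so the device pointer is threaded through it, which is why every gv11b_dbg_pmu() call site gains g as its first argument. Restated outside the diff for readability (taken from the hunk, not new code):

    /*
     * Wrapper after the change; previously it was
     *   #define gv11b_dbg_pmu(fmt, arg...) gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
     * and call sites did not pass g.
     */
    #define gv11b_dbg_pmu(g, fmt, arg...) \
            nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

    /* Updated call site, as in gv11b_pg_gr_init() above: */
    gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
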
diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
index 067c464b..961ab5c0 100644
--- a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
@@ -34,7 +34,7 @@ int gv11b_init_therm_setup_hw(struct gk20a *g)
34{ 34{
35 u32 v; 35 u32 v;
36 36
37 gk20a_dbg_fn(""); 37 nvgpu_log_fn(g, " ");
38 38
39 /* program NV_THERM registers */ 39 /* program NV_THERM registers */
40 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | 40 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
@@ -108,7 +108,7 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g)
108 if (nvgpu_platform_is_simulation(g)) 108 if (nvgpu_platform_is_simulation(g))
109 return 0; 109 return 0;
110 110
111 gk20a_dbg_info("init clock/power gate reg"); 111 nvgpu_log_info(g, "init clock/power gate reg");
112 112
113 for (engine_id = 0; engine_id < f->num_engines; engine_id++) { 113 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
114 active_engine_id = f->active_engines_list[engine_id]; 114 active_engine_id = f->active_engines_list[engine_id];
diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.c b/drivers/gpu/nvgpu/lpwr/lpwr.c
index c470f330..751965e6 100644
--- a/drivers/gpu/nvgpu/lpwr/lpwr.c
+++ b/drivers/gpu/nvgpu/lpwr/lpwr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -185,7 +185,7 @@ u32 nvgpu_lpwr_pg_setup(struct gk20a *g)
185{ 185{
186 u32 err = 0; 186 u32 err = 0;
187 187
188 gk20a_dbg_fn(""); 188 nvgpu_log_fn(g, " ");
189 189
190 err = get_lpwr_gr_table(g); 190 err = get_lpwr_gr_table(g);
191 if (err) 191 if (err)
@@ -206,7 +206,7 @@ static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g,
206{ 206{
207 u32 *ack_status = param; 207 u32 *ack_status = param;
208 208
209 gk20a_dbg_fn(""); 209 nvgpu_log_fn(g, " ");
210 210
211 if (status != 0) { 211 if (status != 0) {
212 nvgpu_err(g, "LWPR PARAM cmd aborted"); 212 nvgpu_err(g, "LWPR PARAM cmd aborted");
@@ -227,7 +227,7 @@ int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate)
227 struct clk_set_info *pstate_info; 227 struct clk_set_info *pstate_info;
228 u32 ack_status = 0; 228 u32 ack_status = 0;
229 229
230 gk20a_dbg_fn(""); 230 nvgpu_log_fn(g, " ");
231 231
232 pstate_info = pstate_get_clk_set_info(g, pstate, 232 pstate_info = pstate_get_clk_set_info(g, pstate,
233 clkwhich_mclk); 233 clkwhich_mclk);
@@ -308,7 +308,7 @@ u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
308 struct pstate *pstate = pstate_find(g, pstate_num); 308 struct pstate *pstate = pstate_find(g, pstate_num);
309 u32 ms_idx; 309 u32 ms_idx;
310 310
311 gk20a_dbg_fn(""); 311 nvgpu_log_fn(g, " ");
312 312
313 if (!pstate) 313 if (!pstate)
314 return 0; 314 return 0;
@@ -329,7 +329,7 @@ u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
329 struct pstate *pstate = pstate_find(g, pstate_num); 329 struct pstate *pstate = pstate_find(g, pstate_num);
330 u32 idx; 330 u32 idx;
331 331
332 gk20a_dbg_fn(""); 332 nvgpu_log_fn(g, " ");
333 333
334 if (!pstate) 334 if (!pstate)
335 return 0; 335 return 0;
@@ -350,7 +350,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
350 u32 is_rppg_supported = 0; 350 u32 is_rppg_supported = 0;
351 u32 present_pstate = 0; 351 u32 present_pstate = 0;
352 352
353 gk20a_dbg_fn(""); 353 nvgpu_log_fn(g, " ");
354 354
355 if (pstate_lock) 355 if (pstate_lock)
356 nvgpu_clk_arb_pstate_change_lock(g, true); 356 nvgpu_clk_arb_pstate_change_lock(g, true);
@@ -387,7 +387,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
387 u32 is_rppg_supported = 0; 387 u32 is_rppg_supported = 0;
388 u32 present_pstate = 0; 388 u32 present_pstate = 0;
389 389
390 gk20a_dbg_fn(""); 390 nvgpu_log_fn(g, " ");
391 391
392 if (pstate_lock) 392 if (pstate_lock)
393 nvgpu_clk_arb_pstate_change_lock(g, true); 393 nvgpu_clk_arb_pstate_change_lock(g, true);
@@ -417,6 +417,6 @@ exit_unlock:
417 if (pstate_lock) 417 if (pstate_lock)
418 nvgpu_clk_arb_pstate_change_lock(g, false); 418 nvgpu_clk_arb_pstate_change_lock(g, false);
419 419
420 gk20a_dbg_fn("done"); 420 nvgpu_log_fn(g, "done");
421 return status; 421 return status;
422} 422}
diff --git a/drivers/gpu/nvgpu/perf/perf.c b/drivers/gpu/nvgpu/perf/perf.c
index 55e67b15..bf63e1ea 100644
--- a/drivers/gpu/nvgpu/perf/perf.c
+++ b/drivers/gpu/nvgpu/perf/perf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,7 @@ static void perfrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
37 struct perfrpc_pmucmdhandler_params *phandlerparams = 37 struct perfrpc_pmucmdhandler_params *phandlerparams =
38 (struct perfrpc_pmucmdhandler_params *)param; 38 (struct perfrpc_pmucmdhandler_params *)param;
39 39
40 gk20a_dbg_info(""); 40 nvgpu_log_info(g, " ");
41 41
42 if (msg->msg.perf.msg_type != NV_PMU_PERF_MSG_ID_RPC) { 42 if (msg->msg.perf.msg_type != NV_PMU_PERF_MSG_ID_RPC) {
43 nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x", 43 nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x",
@@ -53,7 +53,7 @@ static int pmu_handle_perf_event(struct gk20a *g, void *pmu_msg)
53{ 53{
54 struct nv_pmu_perf_msg *msg = (struct nv_pmu_perf_msg *)pmu_msg; 54 struct nv_pmu_perf_msg *msg = (struct nv_pmu_perf_msg *)pmu_msg;
55 55
56 gk20a_dbg_fn(""); 56 nvgpu_log_fn(g, " ");
57 switch (msg->msg_type) { 57 switch (msg->msg_type) {
58 case NV_PMU_PERF_MSG_ID_VFE_CALLBACK: 58 case NV_PMU_PERF_MSG_ID_VFE_CALLBACK:
59 nvgpu_clk_arb_schedule_vf_table_update(g); 59 nvgpu_clk_arb_schedule_vf_table_update(g);
diff --git a/drivers/gpu/nvgpu/perf/vfe_equ.c b/drivers/gpu/nvgpu/perf/vfe_equ.c
index 2493061e..8321d98d 100644
--- a/drivers/gpu/nvgpu/perf/vfe_equ.c
+++ b/drivers/gpu/nvgpu/perf/vfe_equ.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -59,7 +59,7 @@ static u32 _vfe_equs_pmudata_instget(struct gk20a *g,
59 struct nv_pmu_perf_vfe_equ_boardobj_grp_set *pgrp_set = 59 struct nv_pmu_perf_vfe_equ_boardobj_grp_set *pgrp_set =
60 (struct nv_pmu_perf_vfe_equ_boardobj_grp_set *)pmuboardobjgrp; 60 (struct nv_pmu_perf_vfe_equ_boardobj_grp_set *)pmuboardobjgrp;
61 61
62 gk20a_dbg_info(""); 62 nvgpu_log_info(g, " ");
63 63
64 /* check whether pmuboardobjgrp has a valid boardobj in index */ 64 /* check whether pmuboardobjgrp has a valid boardobj in index */
65 if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS) 65 if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS)
@@ -67,7 +67,7 @@ static u32 _vfe_equs_pmudata_instget(struct gk20a *g,
67 67
68 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 68 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
69 &pgrp_set->objects[idx].data.board_obj; 69 &pgrp_set->objects[idx].data.board_obj;
70 gk20a_dbg_info(" Done"); 70 nvgpu_log_info(g, " Done");
71 return 0; 71 return 0;
72} 72}
73 73
@@ -77,7 +77,7 @@ u32 vfe_equ_sw_setup(struct gk20a *g)
77 struct boardobjgrp *pboardobjgrp = NULL; 77 struct boardobjgrp *pboardobjgrp = NULL;
78 struct vfe_equs *pvfeequobjs; 78 struct vfe_equs *pvfeequobjs;
79 79
80 gk20a_dbg_info(""); 80 nvgpu_log_info(g, " ");
81 81
82 status = boardobjgrpconstruct_e255(g, &g->perf_pmu.vfe_equobjs.super); 82 status = boardobjgrpconstruct_e255(g, &g->perf_pmu.vfe_equobjs.super);
83 if (status) { 83 if (status) {
@@ -109,7 +109,7 @@ u32 vfe_equ_sw_setup(struct gk20a *g)
109 goto done; 109 goto done;
110 110
111done: 111done:
112 gk20a_dbg_info(" done status %x", status); 112 nvgpu_log_info(g, " done status %x", status);
113 return status; 113 return status;
114} 114}
115 115
@@ -118,7 +118,7 @@ u32 vfe_equ_pmu_setup(struct gk20a *g)
118 u32 status; 118 u32 status;
119 struct boardobjgrp *pboardobjgrp = NULL; 119 struct boardobjgrp *pboardobjgrp = NULL;
120 120
121 gk20a_dbg_info(""); 121 nvgpu_log_info(g, " ");
122 122
123 pboardobjgrp = &g->perf_pmu.vfe_equobjs.super.super; 123 pboardobjgrp = &g->perf_pmu.vfe_equobjs.super.super;
124 124
@@ -127,7 +127,7 @@ u32 vfe_equ_pmu_setup(struct gk20a *g)
127 127
128 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 128 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
129 129
130 gk20a_dbg_info("Done"); 130 nvgpu_log_info(g, "Done");
131 return status; 131 return status;
132} 132}
133 133
@@ -152,7 +152,7 @@ static u32 devinit_get_vfe_equ_table(struct gk20a *g,
152 struct vfe_equ_quadratic quadratic; 152 struct vfe_equ_quadratic quadratic;
153 } equ_data; 153 } equ_data;
154 154
155 gk20a_dbg_info(""); 155 nvgpu_log_info(g, " ");
156 156
157 vfeequs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 157 vfeequs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
158 g->bios.perf_token, 158 g->bios.perf_token,
@@ -325,7 +325,7 @@ static u32 devinit_get_vfe_equ_table(struct gk20a *g,
325 } 325 }
326 } 326 }
327done: 327done:
328 gk20a_dbg_info(" done status %x", status); 328 nvgpu_log_info(g, " done status %x", status);
329 return status; 329 return status;
330} 330}
331 331
@@ -337,7 +337,7 @@ static u32 _vfe_equ_pmudatainit_super(struct gk20a *g,
337 struct vfe_equ *pvfe_equ; 337 struct vfe_equ *pvfe_equ;
338 struct nv_pmu_vfe_equ *pset; 338 struct nv_pmu_vfe_equ *pset;
339 339
340 gk20a_dbg_info(""); 340 nvgpu_log_info(g, " ");
341 341
342 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 342 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
343 if (status != 0) 343 if (status != 0)
@@ -392,7 +392,7 @@ static u32 _vfe_equ_pmudatainit_compare(struct gk20a *g,
392 struct vfe_equ_compare *pvfe_equ_compare; 392 struct vfe_equ_compare *pvfe_equ_compare;
393 struct nv_pmu_vfe_equ_compare *pset; 393 struct nv_pmu_vfe_equ_compare *pset;
394 394
395 gk20a_dbg_info(""); 395 nvgpu_log_info(g, " ");
396 396
397 status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata); 397 status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata);
398 if (status != 0) 398 if (status != 0)
@@ -451,7 +451,7 @@ static u32 _vfe_equ_pmudatainit_minmax(struct gk20a *g,
451 struct vfe_equ_minmax *pvfe_equ_minmax; 451 struct vfe_equ_minmax *pvfe_equ_minmax;
452 struct nv_pmu_vfe_equ_minmax *pset; 452 struct nv_pmu_vfe_equ_minmax *pset;
453 453
454 gk20a_dbg_info(""); 454 nvgpu_log_info(g, " ");
455 455
456 status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata); 456 status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata);
457 if (status != 0) 457 if (status != 0)
@@ -507,7 +507,7 @@ static u32 _vfe_equ_pmudatainit_quadratic(struct gk20a *g,
507 struct nv_pmu_vfe_equ_quadratic *pset; 507 struct nv_pmu_vfe_equ_quadratic *pset;
508 u32 i; 508 u32 i;
509 509
510 gk20a_dbg_info(""); 510 nvgpu_log_info(g, " ");
511 511
512 status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata); 512 status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata);
513 if (status != 0) 513 if (status != 0)
@@ -558,7 +558,7 @@ static struct vfe_equ *construct_vfe_equ(struct gk20a *g, void *pargs)
558 struct boardobj *board_obj_ptr = NULL; 558 struct boardobj *board_obj_ptr = NULL;
559 u32 status; 559 u32 status;
560 560
561 gk20a_dbg_info(""); 561 nvgpu_log_info(g, " ");
562 562
563 switch (BOARDOBJ_GET_TYPE(pargs)) { 563 switch (BOARDOBJ_GET_TYPE(pargs)) {
564 case CTRL_PERF_VFE_EQU_TYPE_COMPARE: 564 case CTRL_PERF_VFE_EQU_TYPE_COMPARE:
@@ -584,7 +584,7 @@ static struct vfe_equ *construct_vfe_equ(struct gk20a *g, void *pargs)
584 if (status) 584 if (status)
585 return NULL; 585 return NULL;
586 586
587 gk20a_dbg_info(" Done"); 587 nvgpu_log_info(g, " Done");
588 588
589 return (struct vfe_equ *)board_obj_ptr; 589 return (struct vfe_equ *)board_obj_ptr;
590} 590}
diff --git a/drivers/gpu/nvgpu/perf/vfe_var.c b/drivers/gpu/nvgpu/perf/vfe_var.c
index a44c39ad..8b95230e 100644
--- a/drivers/gpu/nvgpu/perf/vfe_var.c
+++ b/drivers/gpu/nvgpu/perf/vfe_var.c
@@ -69,7 +69,7 @@ static u32 _vfe_vars_pmudata_instget(struct gk20a *g,
69 (struct nv_pmu_perf_vfe_var_boardobj_grp_set *) 69 (struct nv_pmu_perf_vfe_var_boardobj_grp_set *)
70 pmuboardobjgrp; 70 pmuboardobjgrp;
71 71
72 gk20a_dbg_info(""); 72 nvgpu_log_info(g, " ");
73 73
74 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 74 /*check whether pmuboardobjgrp has a valid boardobj in index*/
75 if (idx >= CTRL_BOARDOBJGRP_E32_MAX_OBJECTS) 75 if (idx >= CTRL_BOARDOBJGRP_E32_MAX_OBJECTS)
@@ -78,7 +78,7 @@ static u32 _vfe_vars_pmudata_instget(struct gk20a *g,
78 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 78 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
79 &pgrp_set->objects[idx].data.board_obj; 79 &pgrp_set->objects[idx].data.board_obj;
80 80
81 gk20a_dbg_info(" Done"); 81 nvgpu_log_info(g, " Done");
82 return 0; 82 return 0;
83} 83}
84 84
@@ -105,7 +105,7 @@ u32 vfe_var_sw_setup(struct gk20a *g)
105 struct boardobjgrp *pboardobjgrp = NULL; 105 struct boardobjgrp *pboardobjgrp = NULL;
106 struct vfe_vars *pvfevarobjs; 106 struct vfe_vars *pvfevarobjs;
107 107
108 gk20a_dbg_info(""); 108 nvgpu_log_info(g, " ");
109 109
110 status = boardobjgrpconstruct_e32(g, &g->perf_pmu.vfe_varobjs.super); 110 status = boardobjgrpconstruct_e32(g, &g->perf_pmu.vfe_varobjs.super);
111 if (status) { 111 if (status) {
@@ -148,7 +148,7 @@ u32 vfe_var_sw_setup(struct gk20a *g)
148 } 148 }
149 149
150done: 150done:
151 gk20a_dbg_info(" done status %x", status); 151 nvgpu_log_info(g, " done status %x", status);
152 return status; 152 return status;
153} 153}
154 154
@@ -157,7 +157,7 @@ u32 vfe_var_pmu_setup(struct gk20a *g)
157 u32 status; 157 u32 status;
158 struct boardobjgrp *pboardobjgrp = NULL; 158 struct boardobjgrp *pboardobjgrp = NULL;
159 159
160 gk20a_dbg_info(""); 160 nvgpu_log_info(g, " ");
161 161
162 pboardobjgrp = &g->perf_pmu.vfe_varobjs.super.super; 162 pboardobjgrp = &g->perf_pmu.vfe_varobjs.super.super;
163 163
@@ -166,7 +166,7 @@ u32 vfe_var_pmu_setup(struct gk20a *g)
166 166
167 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 167 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
168 168
169 gk20a_dbg_info("Done"); 169 nvgpu_log_info(g, "Done");
170 return status; 170 return status;
171} 171}
172 172
@@ -301,7 +301,7 @@ static u32 _vfe_var_pmudatainit_super(struct gk20a *g,
301 struct vfe_var *pvfe_var; 301 struct vfe_var *pvfe_var;
302 struct nv_pmu_vfe_var *pset; 302 struct nv_pmu_vfe_var *pset;
303 303
304 gk20a_dbg_info(""); 304 nvgpu_log_info(g, " ");
305 305
306 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 306 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
307 if (status != 0) 307 if (status != 0)
@@ -329,7 +329,7 @@ static u32 vfe_var_construct_super(struct gk20a *g,
329 struct vfe_var *ptmpvar = (struct vfe_var *)pargs; 329 struct vfe_var *ptmpvar = (struct vfe_var *)pargs;
330 u32 status = 0; 330 u32 status = 0;
331 331
332 gk20a_dbg_info(""); 332 nvgpu_log_info(g, " ");
333 333
334 status = boardobj_construct_super(g, ppboardobj, size, pargs); 334 status = boardobj_construct_super(g, ppboardobj, size, pargs);
335 if (status) 335 if (status)
@@ -345,7 +345,7 @@ static u32 vfe_var_construct_super(struct gk20a *g,
345 pvfevar->b_is_dynamic_valid = false; 345 pvfevar->b_is_dynamic_valid = false;
346 status = boardobjgrpmask_e32_init(&pvfevar->mask_dependent_vars, NULL); 346 status = boardobjgrpmask_e32_init(&pvfevar->mask_dependent_vars, NULL);
347 status = boardobjgrpmask_e255_init(&pvfevar->mask_dependent_equs, NULL); 347 status = boardobjgrpmask_e255_init(&pvfevar->mask_dependent_equs, NULL);
348 gk20a_dbg_info(""); 348 nvgpu_log_info(g, " ");
349 349
350 return status; 350 return status;
351} 351}
@@ -356,7 +356,7 @@ static u32 _vfe_var_pmudatainit_derived(struct gk20a *g,
356{ 356{
357 u32 status = 0; 357 u32 status = 0;
358 358
359 gk20a_dbg_info(""); 359 nvgpu_log_info(g, " ");
360 360
361 status = _vfe_var_pmudatainit_super(g, board_obj_ptr, ppmudata); 361 status = _vfe_var_pmudatainit_super(g, board_obj_ptr, ppmudata);
362 362
@@ -392,7 +392,7 @@ static u32 _vfe_var_pmudatainit_derived_product(struct gk20a *g,
392 struct vfe_var_derived_product *pvfe_var_derived_product; 392 struct vfe_var_derived_product *pvfe_var_derived_product;
393 struct nv_pmu_vfe_var_derived_product *pset; 393 struct nv_pmu_vfe_var_derived_product *pset;
394 394
395 gk20a_dbg_info(""); 395 nvgpu_log_info(g, " ");
396 396
397 status = _vfe_var_pmudatainit_derived(g, board_obj_ptr, ppmudata); 397 status = _vfe_var_pmudatainit_derived(g, board_obj_ptr, ppmudata);
398 if (status != 0) 398 if (status != 0)
@@ -446,7 +446,7 @@ static u32 _vfe_var_pmudatainit_derived_sum(struct gk20a *g,
446 struct vfe_var_derived_sum *pvfe_var_derived_sum; 446 struct vfe_var_derived_sum *pvfe_var_derived_sum;
447 struct nv_pmu_vfe_var_derived_sum *pset; 447 struct nv_pmu_vfe_var_derived_sum *pset;
448 448
449 gk20a_dbg_info(""); 449 nvgpu_log_info(g, " ");
450 450
451 status = _vfe_var_pmudatainit_derived(g, board_obj_ptr, ppmudata); 451 status = _vfe_var_pmudatainit_derived(g, board_obj_ptr, ppmudata);
452 if (status != 0) 452 if (status != 0)
@@ -498,7 +498,7 @@ static u32 _vfe_var_pmudatainit_single(struct gk20a *g,
498 struct vfe_var_single *pvfe_var_single; 498 struct vfe_var_single *pvfe_var_single;
499 struct nv_pmu_vfe_var_single *pset; 499 struct nv_pmu_vfe_var_single *pset;
500 500
501 gk20a_dbg_info(""); 501 nvgpu_log_info(g, " ");
502 502
503 status = _vfe_var_pmudatainit_super(g, board_obj_ptr, ppmudata); 503 status = _vfe_var_pmudatainit_super(g, board_obj_ptr, ppmudata);
504 if (status != 0) 504 if (status != 0)
@@ -520,7 +520,7 @@ static u32 _vfe_var_pmudatainit_single_frequency(struct gk20a *g,
520{ 520{
521 u32 status = 0; 521 u32 status = 0;
522 522
523 gk20a_dbg_info(""); 523 nvgpu_log_info(g, " ");
524 524
525 status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata); 525 status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata);
526 526
@@ -535,7 +535,7 @@ static u32 vfe_var_construct_single_frequency(struct gk20a *g,
535 struct vfe_var_single_frequency *pvfevar; 535 struct vfe_var_single_frequency *pvfevar;
536 u32 status = 0; 536 u32 status = 0;
537 537
538 gk20a_dbg_info(""); 538 nvgpu_log_info(g, " ");
539 539
540 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_PERF_VFE_VAR_TYPE_SINGLE_FREQUENCY) 540 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_PERF_VFE_VAR_TYPE_SINGLE_FREQUENCY)
541 return -EINVAL; 541 return -EINVAL;
@@ -553,7 +553,7 @@ static u32 vfe_var_construct_single_frequency(struct gk20a *g,
553 pvfevar->super.super.b_is_dynamic = false; 553 pvfevar->super.super.b_is_dynamic = false;
554 pvfevar->super.super.b_is_dynamic_valid = true; 554 pvfevar->super.super.b_is_dynamic_valid = true;
555 555
556 gk20a_dbg_info("Done"); 556 nvgpu_log_info(g, "Done");
557 return status; 557 return status;
558} 558}
559 559
@@ -563,7 +563,7 @@ static u32 _vfe_var_pmudatainit_single_sensed(struct gk20a *g,
563{ 563{
564 u32 status = 0; 564 u32 status = 0;
565 565
566 gk20a_dbg_info(""); 566 nvgpu_log_info(g, " ");
567 567
568 status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata); 568 status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata);
569 569
@@ -578,7 +578,7 @@ static u32 _vfe_var_pmudatainit_single_sensed_fuse(struct gk20a *g,
578 struct vfe_var_single_sensed_fuse *pvfe_var_single_sensed_fuse; 578 struct vfe_var_single_sensed_fuse *pvfe_var_single_sensed_fuse;
579 struct nv_pmu_vfe_var_single_sensed_fuse *pset; 579 struct nv_pmu_vfe_var_single_sensed_fuse *pset;
580 580
581 gk20a_dbg_info(""); 581 nvgpu_log_info(g, " ");
582 582
583 status = _vfe_var_pmudatainit_single_sensed(g, board_obj_ptr, ppmudata); 583 status = _vfe_var_pmudatainit_single_sensed(g, board_obj_ptr, ppmudata);
584 if (status != 0) 584 if (status != 0)
@@ -614,7 +614,7 @@ static u32 vfe_var_construct_single_sensed(struct gk20a *g,
614 614
615 u32 status = 0; 615 u32 status = 0;
616 616
617 gk20a_dbg_info(" "); 617 nvgpu_log_info(g, " ");
618 618
619 ptmpobj->type_mask |= BIT(CTRL_PERF_VFE_VAR_TYPE_SINGLE_SENSED); 619 ptmpobj->type_mask |= BIT(CTRL_PERF_VFE_VAR_TYPE_SINGLE_SENSED);
620 status = vfe_var_construct_single(g, ppboardobj, size, pargs); 620 status = vfe_var_construct_single(g, ppboardobj, size, pargs);
@@ -626,7 +626,7 @@ static u32 vfe_var_construct_single_sensed(struct gk20a *g,
626 pvfevar->super.super.super.pmudatainit = 626 pvfevar->super.super.super.pmudatainit =
627 _vfe_var_pmudatainit_single_sensed; 627 _vfe_var_pmudatainit_single_sensed;
628 628
629 gk20a_dbg_info("Done"); 629 nvgpu_log_info(g, "Done");
630 630
631 return status; 631 return status;
632} 632}
@@ -641,7 +641,7 @@ static u32 vfe_var_construct_single_sensed_fuse(struct gk20a *g,
641 (struct vfe_var_single_sensed_fuse *)pargs; 641 (struct vfe_var_single_sensed_fuse *)pargs;
642 u32 status = 0; 642 u32 status = 0;
643 643
644 gk20a_dbg_info(""); 644 nvgpu_log_info(g, " ");
645 645
646 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_PERF_VFE_VAR_TYPE_SINGLE_SENSED_FUSE) 646 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_PERF_VFE_VAR_TYPE_SINGLE_SENSED_FUSE)
647 return -EINVAL; 647 return -EINVAL;
@@ -704,7 +704,7 @@ static u32 _vfe_var_pmudatainit_single_sensed_temp(struct gk20a *g,
704 struct vfe_var_single_sensed_temp *pvfe_var_single_sensed_temp; 704 struct vfe_var_single_sensed_temp *pvfe_var_single_sensed_temp;
705 struct nv_pmu_vfe_var_single_sensed_temp *pset; 705 struct nv_pmu_vfe_var_single_sensed_temp *pset;
706 706
707 gk20a_dbg_info(""); 707 nvgpu_log_info(g, " ");
708 708
709 status = _vfe_var_pmudatainit_single_sensed(g, board_obj_ptr, ppmudata); 709 status = _vfe_var_pmudatainit_single_sensed(g, board_obj_ptr, ppmudata);
710 if (status != 0) 710 if (status != 0)
@@ -769,7 +769,7 @@ static u32 _vfe_var_pmudatainit_single_voltage(struct gk20a *g,
769{ 769{
770 u32 status = 0; 770 u32 status = 0;
771 771
772 gk20a_dbg_info(""); 772 nvgpu_log_info(g, " ");
773 773
774 status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata); 774 status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata);
775 775
@@ -808,7 +808,7 @@ static struct vfe_var *construct_vfe_var(struct gk20a *g, void *pargs)
808 struct boardobj *board_obj_ptr = NULL; 808 struct boardobj *board_obj_ptr = NULL;
809 u32 status; 809 u32 status;
810 810
811 gk20a_dbg_info(""); 811 nvgpu_log_info(g, " ");
812 switch (BOARDOBJ_GET_TYPE(pargs)) { 812 switch (BOARDOBJ_GET_TYPE(pargs)) {
813 case CTRL_PERF_VFE_VAR_TYPE_DERIVED_PRODUCT: 813 case CTRL_PERF_VFE_VAR_TYPE_DERIVED_PRODUCT:
814 status = vfe_var_construct_derived_product(g, &board_obj_ptr, 814 status = vfe_var_construct_derived_product(g, &board_obj_ptr,
@@ -850,7 +850,7 @@ static struct vfe_var *construct_vfe_var(struct gk20a *g, void *pargs)
850 if (status) 850 if (status)
851 return NULL; 851 return NULL;
852 852
853 gk20a_dbg_info("done"); 853 nvgpu_log_info(g, "done");
854 854
855 return (struct vfe_var *)board_obj_ptr; 855 return (struct vfe_var *)board_obj_ptr;
856} 856}
@@ -877,7 +877,7 @@ static u32 devinit_get_vfe_var_table(struct gk20a *g,
877 struct vfe_var_single_sensed_temp single_sensed_temp; 877 struct vfe_var_single_sensed_temp single_sensed_temp;
878 } var_data; 878 } var_data;
879 879
880 gk20a_dbg_info(""); 880 nvgpu_log_info(g, " ");
881 881
882 vfevars_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 882 vfevars_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
883 g->bios.perf_token, 883 g->bios.perf_token,
@@ -1031,7 +1031,7 @@ static u32 devinit_get_vfe_var_table(struct gk20a *g,
1031 } 1031 }
1032 pvfevarobjs->polling_periodms = vfevars_tbl_header.polling_periodms; 1032 pvfevarobjs->polling_periodms = vfevars_tbl_header.polling_periodms;
1033done: 1033done:
1034 gk20a_dbg_info("done status %x", status); 1034 nvgpu_log_info(g, "done status %x", status);
1035 return status; 1035 return status;
1036} 1036}
1037 1037
@@ -1043,7 +1043,7 @@ static u32 vfe_var_construct_single(struct gk20a *g,
1043 struct vfe_var_single *pvfevar; 1043 struct vfe_var_single *pvfevar;
1044 u32 status = 0; 1044 u32 status = 0;
1045 1045
1046 gk20a_dbg_info(""); 1046 nvgpu_log_info(g, " ");
1047 1047
1048 ptmpobj->type_mask |= BIT(CTRL_PERF_VFE_VAR_TYPE_SINGLE); 1048 ptmpobj->type_mask |= BIT(CTRL_PERF_VFE_VAR_TYPE_SINGLE);
1049 status = vfe_var_construct_super(g, ppboardobj, size, pargs); 1049 status = vfe_var_construct_super(g, ppboardobj, size, pargs);
@@ -1058,6 +1058,6 @@ static u32 vfe_var_construct_single(struct gk20a *g,
1058 pvfevar->override_type = CTRL_PERF_VFE_VAR_SINGLE_OVERRIDE_TYPE_NONE; 1058 pvfevar->override_type = CTRL_PERF_VFE_VAR_SINGLE_OVERRIDE_TYPE_NONE;
1059 pvfevar->override_value = 0; 1059 pvfevar->override_value = 0;
1060 1060
1061 gk20a_dbg_info("Done"); 1061 nvgpu_log_info(g, "Done");
1062 return status; 1062 return status;
1063} 1063}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.c b/drivers/gpu/nvgpu/pmgr/pwrdev.c
index 7f4ab716..da51ac4b 100644
--- a/drivers/gpu/nvgpu/pmgr/pwrdev.c
+++ b/drivers/gpu/nvgpu/pmgr/pwrdev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@ static u32 _pwr_device_pmudata_instget(struct gk20a *g,
36 struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice = 36 struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice =
37 (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp; 37 (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp;
38 38
39 gk20a_dbg_info(""); 39 nvgpu_log_info(g, " ");
40 40
41 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 41 /*check whether pmuboardobjgrp has a valid boardobj in index*/
42 if (((u32)BIT(idx) & 42 if (((u32)BIT(idx) &
@@ -46,7 +46,7 @@ static u32 _pwr_device_pmudata_instget(struct gk20a *g,
46 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 46 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
47 &ppmgrdevice->devices[idx].data.board_obj; 47 &ppmgrdevice->devices[idx].data.board_obj;
48 48
49 gk20a_dbg_info(" Done"); 49 nvgpu_log_info(g, " Done");
50 50
51 return 0; 51 return 0;
52} 52}
@@ -122,7 +122,7 @@ static struct boardobj *construct_pwr_device(struct gk20a *g,
122 pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx]; 122 pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx];
123 } 123 }
124 124
125 gk20a_dbg_info(" Done"); 125 nvgpu_log_info(g, " Done");
126 126
127 return board_obj_ptr; 127 return board_obj_ptr;
128} 128}
@@ -145,7 +145,7 @@ static u32 devinit_get_pwr_device_table(struct gk20a *g,
145 struct pwr_device_ina3221 ina3221; 145 struct pwr_device_ina3221 ina3221;
146 } pwr_device_data; 146 } pwr_device_data;
147 147
148 gk20a_dbg_info(""); 148 nvgpu_log_info(g, " ");
149 149
150 pwr_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 150 pwr_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
151 g->bios.perf_token, POWER_SENSORS_TABLE); 151 g->bios.perf_token, POWER_SENSORS_TABLE);
@@ -280,7 +280,7 @@ static u32 devinit_get_pwr_device_table(struct gk20a *g,
280 } 280 }
281 281
282done: 282done:
283 gk20a_dbg_info(" done status %x", status); 283 nvgpu_log_info(g, " done status %x", status);
284 return status; 284 return status;
285} 285}
286 286
@@ -310,6 +310,6 @@ u32 pmgr_device_sw_setup(struct gk20a *g)
310 goto done; 310 goto done;
311 311
312done: 312done:
313 gk20a_dbg_info(" done status %x", status); 313 nvgpu_log_info(g, " done status %x", status);
314 return status; 314 return status;
315} 315}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
index 00c930a6..5e0cc966 100644
--- a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
+++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@ static u32 _pwr_channel_pmudata_instget(struct gk20a *g,
36 struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel = 36 struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel =
37 (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp; 37 (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp;
38 38
39 gk20a_dbg_info(""); 39 nvgpu_log_info(g, " ");
40 40
41 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 41 /*check whether pmuboardobjgrp has a valid boardobj in index*/
42 if (((u32)BIT(idx) & 42 if (((u32)BIT(idx) &
@@ -49,7 +49,7 @@ static u32 _pwr_channel_pmudata_instget(struct gk20a *g,
49 /* handle Global/common data here as we need index */ 49 /* handle Global/common data here as we need index */
50 ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx; 50 ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx;
51 51
52 gk20a_dbg_info(" Done"); 52 nvgpu_log_info(g, " Done");
53 53
54 return 0; 54 return 0;
55} 55}
@@ -62,7 +62,7 @@ static u32 _pwr_channel_rels_pmudata_instget(struct gk20a *g,
62 struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels = 62 struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels =
63 (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp; 63 (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp;
64 64
65 gk20a_dbg_info(""); 65 nvgpu_log_info(g, " ");
66 66
67 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 67 /*check whether pmuboardobjgrp has a valid boardobj in index*/
68 if (((u32)BIT(idx) & 68 if (((u32)BIT(idx) &
@@ -72,7 +72,7 @@ static u32 _pwr_channel_rels_pmudata_instget(struct gk20a *g,
72 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 72 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
73 &ppmgrchrels->ch_rels[idx].data.board_obj; 73 &ppmgrchrels->ch_rels[idx].data.board_obj;
74 74
75 gk20a_dbg_info(" Done"); 75 nvgpu_log_info(g, " Done");
76 76
77 return 0; 77 return 0;
78} 78}
@@ -169,7 +169,7 @@ static struct boardobj *construct_pwr_topology(struct gk20a *g,
169 pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx; 169 pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx;
170 pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx; 170 pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx;
171 171
172 gk20a_dbg_info(" Done"); 172 nvgpu_log_info(g, " Done");
173 173
174 return board_obj_ptr; 174 return board_obj_ptr;
175} 175}
@@ -192,7 +192,7 @@ static u32 devinit_get_pwr_topology_table(struct gk20a *g,
192 struct pwr_channel_sensor sensor; 192 struct pwr_channel_sensor sensor;
193 } pwr_topology_data; 193 } pwr_topology_data;
194 194
195 gk20a_dbg_info(""); 195 nvgpu_log_info(g, " ");
196 196
197 pwr_topology_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 197 pwr_topology_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
198 g->bios.perf_token, POWER_TOPOLOGY_TABLE); 198 g->bios.perf_token, POWER_TOPOLOGY_TABLE);
@@ -292,7 +292,7 @@ static u32 devinit_get_pwr_topology_table(struct gk20a *g,
292 } 292 }
293 293
294done: 294done:
295 gk20a_dbg_info(" done status %x", status); 295 nvgpu_log_info(g, " done status %x", status);
296 return status; 296 return status;
297} 297}
298 298
@@ -365,6 +365,6 @@ u32 pmgr_monitor_sw_setup(struct gk20a *g)
365 } 365 }
366 366
367done: 367done:
368 gk20a_dbg_info(" done status %x", status); 368 nvgpu_log_info(g, " done status %x", status);
369 return status; 369 return status;
370} 370}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
index 2942268f..0d617f6a 100644
--- a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
+++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -270,7 +270,7 @@ static struct boardobj *construct_pwr_policy(struct gk20a *g,
270 pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr; 270 pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr;
271 pwrpolicy = (struct pwr_policy *)board_obj_ptr; 271 pwrpolicy = (struct pwr_policy *)board_obj_ptr;
272 272
273 gk20a_dbg_fn("min=%u rated=%u max=%u", 273 nvgpu_log_fn(g, "min=%u rated=%u max=%u",
274 pwrpolicyparams->limit_min, 274 pwrpolicyparams->limit_min,
275 pwrpolicyparams->limit_rated, 275 pwrpolicyparams->limit_rated,
276 pwrpolicyparams->limit_max); 276 pwrpolicyparams->limit_max);
@@ -358,7 +358,7 @@ static struct boardobj *construct_pwr_policy(struct gk20a *g,
358 pwrpolicyswthreshold->event_id = swthreshold->event_id; 358 pwrpolicyswthreshold->event_id = swthreshold->event_id;
359 } 359 }
360 360
361 gk20a_dbg_info(" Done"); 361 nvgpu_log_info(g, " Done");
362 362
363 return board_obj_ptr; 363 return board_obj_ptr;
364} 364}
@@ -527,7 +527,7 @@ static u32 devinit_get_pwr_policy_table(struct gk20a *g,
527 u32 hw_threshold_policy_index = 0; 527 u32 hw_threshold_policy_index = 0;
528 union pwr_policy_data_union pwr_policy_data; 528 union pwr_policy_data_union pwr_policy_data;
529 529
530 gk20a_dbg_info(""); 530 nvgpu_log_info(g, " ");
531 531
532 ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 532 ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
533 g->bios.perf_token, POWER_CAPPING_TABLE); 533 g->bios.perf_token, POWER_CAPPING_TABLE);
@@ -702,7 +702,7 @@ static u32 devinit_get_pwr_policy_table(struct gk20a *g,
702 } 702 }
703 703
704done: 704done:
705 gk20a_dbg_info(" done status %x", status); 705 nvgpu_log_info(g, " done status %x", status);
706 return status; 706 return status;
707} 707}
708 708
@@ -773,6 +773,6 @@ u32 pmgr_policy_sw_setup(struct gk20a *g)
773 g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false; 773 g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false;
774 774
775done: 775done:
776 gk20a_dbg_info(" done status %x", status); 776 nvgpu_log_info(g, " done status %x", status);
777 return status; 777 return status;
778} 778}
diff --git a/drivers/gpu/nvgpu/pstate/pstate.c b/drivers/gpu/nvgpu/pstate/pstate.c
index c3f34027..e61ec0f8 100644
--- a/drivers/gpu/nvgpu/pstate/pstate.c
+++ b/drivers/gpu/nvgpu/pstate/pstate.c
@@ -46,7 +46,7 @@ int gk20a_init_pstate_support(struct gk20a *g)
46{ 46{
47 u32 err; 47 u32 err;
48 48
49 gk20a_dbg_fn(""); 49 nvgpu_log_fn(g, " ");
50 50
51 err = volt_rail_sw_setup(g); 51 err = volt_rail_sw_setup(g);
52 if (err) 52 if (err)
@@ -114,7 +114,7 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
114{ 114{
115 u32 err; 115 u32 err;
116 116
117 gk20a_dbg_fn(""); 117 nvgpu_log_fn(g, " ");
118 118
119 if (g->ops.clk.mclk_init) { 119 if (g->ops.clk.mclk_init) {
120 err = g->ops.clk.mclk_init(g); 120 err = g->ops.clk.mclk_init(g);
@@ -269,7 +269,7 @@ static int parse_pstate_entry_5x(struct gk20a *g,
269 pstate->clklist.num_info = hdr->clock_entry_count; 269 pstate->clklist.num_info = hdr->clock_entry_count;
270 pstate->lpwr_entry_idx = entry->lpwr_entry_idx; 270 pstate->lpwr_entry_idx = entry->lpwr_entry_idx;
271 271
272 gk20a_dbg_info("pstate P%u", pstate->num); 272 nvgpu_log_info(g, "pstate P%u", pstate->num);
273 273
274 for (clkidx = 0; clkidx < hdr->clock_entry_count; clkidx++) { 274 for (clkidx = 0; clkidx < hdr->clock_entry_count; clkidx++) {
275 struct clk_set_info *pclksetinfo; 275 struct clk_set_info *pclksetinfo;
@@ -293,7 +293,7 @@ static int parse_pstate_entry_5x(struct gk20a *g,
293 BIOS_GET_FIELD(clk_entry->param1, 293 BIOS_GET_FIELD(clk_entry->param1,
294 VBIOS_PSTATE_5X_CLOCK_PROG_PARAM1_MAX_FREQ_MHZ); 294 VBIOS_PSTATE_5X_CLOCK_PROG_PARAM1_MAX_FREQ_MHZ);
295 295
296 gk20a_dbg_info( 296 nvgpu_log_info(g,
297 "clk_domain=%u nominal_mhz=%u min_mhz=%u max_mhz=%u", 297 "clk_domain=%u nominal_mhz=%u min_mhz=%u max_mhz=%u",
298 pclksetinfo->clkwhich, pclksetinfo->nominal_mhz, 298 pclksetinfo->clkwhich, pclksetinfo->nominal_mhz,
299 pclksetinfo->min_mhz, pclksetinfo->max_mhz); 299 pclksetinfo->min_mhz, pclksetinfo->max_mhz);
@@ -355,7 +355,7 @@ static int pstate_sw_setup(struct gk20a *g)
355 struct vbios_pstate_header_5x *hdr = NULL; 355 struct vbios_pstate_header_5x *hdr = NULL;
356 int err = 0; 356 int err = 0;
357 357
358 gk20a_dbg_fn(""); 358 nvgpu_log_fn(g, " ");
359 359
360 nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq); 360 nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq);
361 361
@@ -401,11 +401,11 @@ struct pstate *pstate_find(struct gk20a *g, u32 num)
401 struct pstate *pstate; 401 struct pstate *pstate;
402 u8 i; 402 u8 i;
403 403
404 gk20a_dbg_info("pstates = %p", pstates); 404 nvgpu_log_info(g, "pstates = %p", pstates);
405 405
406 BOARDOBJGRP_FOR_EACH(&pstates->super.super, 406 BOARDOBJGRP_FOR_EACH(&pstates->super.super,
407 struct pstate *, pstate, i) { 407 struct pstate *, pstate, i) {
408 gk20a_dbg_info("pstate=%p num=%u (looking for num=%u)", 408 nvgpu_log_info(g, "pstate=%p num=%u (looking for num=%u)",
409 pstate, pstate->num, num); 409 pstate, pstate->num, num);
410 if (pstate->num == num) 410 if (pstate->num == num)
411 return pstate; 411 return pstate;
@@ -420,7 +420,7 @@ struct clk_set_info *pstate_get_clk_set_info(struct gk20a *g,
420 struct clk_set_info *info; 420 struct clk_set_info *info;
421 u32 clkidx; 421 u32 clkidx;
422 422
423 gk20a_dbg_info("pstate = %p", pstate); 423 nvgpu_log_info(g, "pstate = %p", pstate);
424 424
425 if (!pstate) 425 if (!pstate)
426 return NULL; 426 return NULL;
diff --git a/drivers/gpu/nvgpu/therm/thrmchannel.c b/drivers/gpu/nvgpu/therm/thrmchannel.c
index f8be8430..de3de62a 100644
--- a/drivers/gpu/nvgpu/therm/thrmchannel.c
+++ b/drivers/gpu/nvgpu/therm/thrmchannel.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -90,7 +90,7 @@ static struct boardobj *construct_channel_device(struct gk20a *g,
90 pchannel_device->therm_dev_idx = therm_device->therm_dev_idx; 90 pchannel_device->therm_dev_idx = therm_device->therm_dev_idx;
91 pchannel_device->therm_dev_prov_idx = therm_device->therm_dev_prov_idx; 91 pchannel_device->therm_dev_prov_idx = therm_device->therm_dev_prov_idx;
92 92
93 gk20a_dbg_info(" Done"); 93 nvgpu_log_info(g, " Done");
94 94
95 return board_obj_ptr; 95 return board_obj_ptr;
96} 96}
@@ -104,7 +104,7 @@ static u32 _therm_channel_pmudata_instget(struct gk20a *g,
104 (struct nv_pmu_therm_therm_channel_boardobj_grp_set *) 104 (struct nv_pmu_therm_therm_channel_boardobj_grp_set *)
105 pmuboardobjgrp; 105 pmuboardobjgrp;
106 106
107 gk20a_dbg_info(""); 107 nvgpu_log_info(g, " ");
108 108
109 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 109 /*check whether pmuboardobjgrp has a valid boardobj in index*/
110 if (((u32)BIT(idx) & 110 if (((u32)BIT(idx) &
@@ -114,7 +114,7 @@ static u32 _therm_channel_pmudata_instget(struct gk20a *g,
114 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 114 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
115 &pgrp_set->objects[idx].data.board_obj; 115 &pgrp_set->objects[idx].data.board_obj;
116 116
117 gk20a_dbg_info(" Done"); 117 nvgpu_log_info(g, " Done");
118 118
119 return 0; 119 return 0;
120} 120}
@@ -137,7 +137,7 @@ static u32 devinit_get_therm_channel_table(struct gk20a *g,
137 struct therm_channel_device device; 137 struct therm_channel_device device;
138 } therm_channel_data; 138 } therm_channel_data;
139 139
140 gk20a_dbg_info(""); 140 nvgpu_log_info(g, " ");
141 141
142 therm_channel_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 142 therm_channel_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
143 g->bios.perf_token, THERMAL_CHANNEL_TABLE); 143 g->bios.perf_token, THERMAL_CHANNEL_TABLE);
@@ -206,7 +206,7 @@ static u32 devinit_get_therm_channel_table(struct gk20a *g,
206 } 206 }
207 207
208done: 208done:
209 gk20a_dbg_info(" done status %x", status); 209 nvgpu_log_info(g, " done status %x", status);
210 return status; 210 return status;
211} 211}
212 212
@@ -248,6 +248,6 @@ u32 therm_channel_sw_setup(struct gk20a *g)
248 } 248 }
249 249
250done: 250done:
251 gk20a_dbg_info(" done status %x", status); 251 nvgpu_log_info(g, " done status %x", status);
252 return status; 252 return status;
253} 253}
diff --git a/drivers/gpu/nvgpu/therm/thrmdev.c b/drivers/gpu/nvgpu/therm/thrmdev.c
index 1aff119c..7371ba25 100644
--- a/drivers/gpu/nvgpu/therm/thrmdev.c
+++ b/drivers/gpu/nvgpu/therm/thrmdev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -41,7 +41,7 @@ static struct boardobj *construct_therm_device(struct gk20a *g,
41 if (status) 41 if (status)
42 return NULL; 42 return NULL;
43 43
44 gk20a_dbg_info(" Done"); 44 nvgpu_log_info(g, " Done");
45 45
46 return board_obj_ptr; 46 return board_obj_ptr;
47} 47}
@@ -55,7 +55,7 @@ static u32 _therm_device_pmudata_instget(struct gk20a *g,
55 (struct nv_pmu_therm_therm_device_boardobj_grp_set *) 55 (struct nv_pmu_therm_therm_device_boardobj_grp_set *)
56 pmuboardobjgrp; 56 pmuboardobjgrp;
57 57
58 gk20a_dbg_info(""); 58 nvgpu_log_info(g, " ");
59 59
60 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 60 /*check whether pmuboardobjgrp has a valid boardobj in index*/
61 if (((u32)BIT(idx) & 61 if (((u32)BIT(idx) &
@@ -65,7 +65,7 @@ static u32 _therm_device_pmudata_instget(struct gk20a *g,
65 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 65 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
66 &pgrp_set->objects[idx].data; 66 &pgrp_set->objects[idx].data;
67 67
68 gk20a_dbg_info(" Done"); 68 nvgpu_log_info(g, " Done");
69 69
70 return 0; 70 return 0;
71} 71}
@@ -87,7 +87,7 @@ static u32 devinit_get_therm_device_table(struct gk20a *g,
87 struct therm_device therm_device; 87 struct therm_device therm_device;
88 } therm_device_data; 88 } therm_device_data;
89 89
90 gk20a_dbg_info(""); 90 nvgpu_log_info(g, " ");
91 91
92 therm_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 92 therm_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
93 g->bios.perf_token, THERMAL_DEVICE_TABLE); 93 g->bios.perf_token, THERMAL_DEVICE_TABLE);
@@ -153,7 +153,7 @@ static u32 devinit_get_therm_device_table(struct gk20a *g,
153 } 153 }
154 154
155done: 155done:
156 gk20a_dbg_info(" done status %x", status); 156 nvgpu_log_info(g, " done status %x", status);
157 return status; 157 return status;
158} 158}
159 159
@@ -195,6 +195,6 @@ u32 therm_device_sw_setup(struct gk20a *g)
195 } 195 }
196 196
197done: 197done:
198 gk20a_dbg_info(" done status %x", status); 198 nvgpu_log_info(g, " done status %x", status);
199 return status; 199 return status;
200} 200}
diff --git a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
index a552ad44..563c3a2b 100644
--- a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
@@ -30,7 +30,7 @@
30int vgpu_ce2_nonstall_isr(struct gk20a *g, 30int vgpu_ce2_nonstall_isr(struct gk20a *g,
31 struct tegra_vgpu_ce2_nonstall_intr_info *info) 31 struct tegra_vgpu_ce2_nonstall_intr_info *info)
32{ 32{
33 gk20a_dbg_fn(""); 33 nvgpu_log_fn(g, " ");
34 34
35 switch (info->type) { 35 switch (info->type) {
36 case TEGRA_VGPU_CE2_NONSTALL_INTR_NONBLOCKPIPE: 36 case TEGRA_VGPU_CE2_NONSTALL_INTR_NONBLOCKPIPE:
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
index 092954ed..2bb3b205 100644
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
@@ -42,8 +42,9 @@ int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
42 size_t oob_size, ops_size; 42 size_t oob_size, ops_size;
43 void *handle = NULL; 43 void *handle = NULL;
44 int err = 0; 44 int err = 0;
45 struct gk20a *g = dbg_s->g;
45 46
46 gk20a_dbg_fn(""); 47 nvgpu_log_fn(g, " ");
47 BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op)); 48 BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op));
48 49
49 handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(), 50 handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
@@ -82,8 +83,9 @@ int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerga
82 struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate; 83 struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate;
83 int err = 0; 84 int err = 0;
84 u32 mode; 85 u32 mode;
86 struct gk20a *g = dbg_s->g;
85 87
86 gk20a_dbg_fn(""); 88 nvgpu_log_fn(g, " ");
87 89
88 /* Just return if requested mode is the same as the session's mode */ 90 /* Just return if requested mode is the same as the session's mode */
89 if (disable_powergate) { 91 if (disable_powergate) {
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 3ea326b8..eb25cf3a 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -45,8 +45,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
45 struct tegra_vgpu_channel_config_params *p = 45 struct tegra_vgpu_channel_config_params *p =
46 &msg.params.channel_config; 46 &msg.params.channel_config;
47 int err; 47 int err;
48 struct gk20a *g = ch->g;
48 49
49 gk20a_dbg_info("bind channel %d", ch->chid); 50 nvgpu_log_info(g, "bind channel %d", ch->chid);
50 51
51 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND; 52 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
52 msg.handle = vgpu_get_handle(ch->g); 53 msg.handle = vgpu_get_handle(ch->g);
@@ -60,8 +61,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
60 61
61void vgpu_channel_unbind(struct channel_gk20a *ch) 62void vgpu_channel_unbind(struct channel_gk20a *ch)
62{ 63{
64 struct gk20a *g = ch->g;
63 65
64 gk20a_dbg_fn(""); 66 nvgpu_log_fn(g, " ");
65 67
66 if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) { 68 if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
67 struct tegra_vgpu_cmd_msg msg; 69 struct tegra_vgpu_cmd_msg msg;
@@ -84,7 +86,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
84 struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; 86 struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
85 int err; 87 int err;
86 88
87 gk20a_dbg_fn(""); 89 nvgpu_log_fn(g, " ");
88 90
89 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX; 91 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
90 msg.handle = vgpu_get_handle(g); 92 msg.handle = vgpu_get_handle(g);
@@ -97,7 +99,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
97 } 99 }
98 100
99 ch->virt_ctx = p->handle; 101 ch->virt_ctx = p->handle;
100 gk20a_dbg_fn("done"); 102 nvgpu_log_fn(g, "done");
101 return 0; 103 return 0;
102} 104}
103 105
@@ -107,7 +109,7 @@ void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
107 struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; 109 struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
108 int err; 110 int err;
109 111
110 gk20a_dbg_fn(""); 112 nvgpu_log_fn(g, " ");
111 113
112 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX; 114 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
113 msg.handle = vgpu_get_handle(g); 115 msg.handle = vgpu_get_handle(g);
@@ -122,8 +124,9 @@ void vgpu_channel_enable(struct channel_gk20a *ch)
122 struct tegra_vgpu_channel_config_params *p = 124 struct tegra_vgpu_channel_config_params *p =
123 &msg.params.channel_config; 125 &msg.params.channel_config;
124 int err; 126 int err;
127 struct gk20a *g = ch->g;
125 128
126 gk20a_dbg_fn(""); 129 nvgpu_log_fn(g, " ");
127 130
128 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE; 131 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
129 msg.handle = vgpu_get_handle(ch->g); 132 msg.handle = vgpu_get_handle(ch->g);
@@ -138,8 +141,9 @@ void vgpu_channel_disable(struct channel_gk20a *ch)
138 struct tegra_vgpu_channel_config_params *p = 141 struct tegra_vgpu_channel_config_params *p =
139 &msg.params.channel_config; 142 &msg.params.channel_config;
140 int err; 143 int err;
144 struct gk20a *g = ch->g;
141 145
142 gk20a_dbg_fn(""); 146 nvgpu_log_fn(g, " ");
143 147
144 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE; 148 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
145 msg.handle = vgpu_get_handle(ch->g); 149 msg.handle = vgpu_get_handle(ch->g);
@@ -155,8 +159,9 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
155 struct tegra_vgpu_cmd_msg msg; 159 struct tegra_vgpu_cmd_msg msg;
156 struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc; 160 struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
157 int err; 161 int err;
162 struct gk20a *g = ch->g;
158 163
159 gk20a_dbg_fn(""); 164 nvgpu_log_fn(g, " ");
160 165
161 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC; 166 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
162 msg.handle = vgpu_get_handle(ch->g); 167 msg.handle = vgpu_get_handle(ch->g);
@@ -175,8 +180,9 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
175 struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g); 180 struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
176 struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info; 181 struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
177 u32 i; 182 u32 i;
183 struct gk20a *g = f->g;
178 184
179 gk20a_dbg_fn(""); 185 nvgpu_log_fn(g, " ");
180 186
181 if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) { 187 if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
182 nvgpu_err(f->g, "num_engines %d larger than max %d", 188 nvgpu_err(f->g, "num_engines %d larger than max %d",
@@ -207,7 +213,7 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
207 f->active_engines_list[i] = engines->info[i].engine_id; 213 f->active_engines_list[i] = engines->info[i].engine_id;
208 } 214 }
209 215
210 gk20a_dbg_fn("done"); 216 nvgpu_log_fn(g, "done");
211 217
212 return 0; 218 return 0;
213} 219}
@@ -219,7 +225,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
219 u32 i; 225 u32 i;
220 u64 runlist_size; 226 u64 runlist_size;
221 227
222 gk20a_dbg_fn(""); 228 nvgpu_log_fn(g, " ");
223 229
224 f->max_runlists = g->ops.fifo.eng_runlist_base_size(); 230 f->max_runlists = g->ops.fifo.eng_runlist_base_size();
225 f->runlist_info = nvgpu_kzalloc(g, 231 f->runlist_info = nvgpu_kzalloc(g,
@@ -256,12 +262,12 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
256 runlist->cur_buffer = MAX_RUNLIST_BUFFERS; 262 runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
257 } 263 }
258 264
259 gk20a_dbg_fn("done"); 265 nvgpu_log_fn(g, "done");
260 return 0; 266 return 0;
261 267
262clean_up_runlist: 268clean_up_runlist:
263 gk20a_fifo_delete_runlist(f); 269 gk20a_fifo_delete_runlist(f);
264 gk20a_dbg_fn("fail"); 270 nvgpu_log_fn(g, "fail");
265 return -ENOMEM; 271 return -ENOMEM;
266} 272}
267 273
@@ -272,10 +278,10 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
272 unsigned int chid; 278 unsigned int chid;
273 int err = 0; 279 int err = 0;
274 280
275 gk20a_dbg_fn(""); 281 nvgpu_log_fn(g, " ");
276 282
277 if (f->sw_ready) { 283 if (f->sw_ready) {
278 gk20a_dbg_fn("skip init"); 284 nvgpu_log_fn(g, "skip init");
279 return 0; 285 return 0;
280 } 286 }
281 287
@@ -306,7 +312,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
306 f->userd.gpu_va = 0; 312 f->userd.gpu_va = 0;
307 } 313 }
308 314
309 gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va); 315 nvgpu_log(g, gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
310 316
311 f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel)); 317 f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
312 f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg)); 318 f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
@@ -350,11 +356,11 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
350 356
351 f->sw_ready = true; 357 f->sw_ready = true;
352 358
353 gk20a_dbg_fn("done"); 359 nvgpu_log_fn(g, "done");
354 return 0; 360 return 0;
355 361
356clean_up: 362clean_up:
357 gk20a_dbg_fn("fail"); 363 nvgpu_log_fn(g, "fail");
358 /* FIXME: unmap from bar1 */ 364 /* FIXME: unmap from bar1 */
359 nvgpu_dma_free(g, &f->userd); 365 nvgpu_dma_free(g, &f->userd);
360 366
@@ -374,7 +380,7 @@ clean_up:
374 380
375int vgpu_init_fifo_setup_hw(struct gk20a *g) 381int vgpu_init_fifo_setup_hw(struct gk20a *g)
376{ 382{
377 gk20a_dbg_fn(""); 383 nvgpu_log_fn(g, " ");
378 384
379 /* test write, read through bar1 @ userd region before 385 /* test write, read through bar1 @ userd region before
380 * turning on the snooping */ 386 * turning on the snooping */
@@ -385,7 +391,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
385 u32 bar1_vaddr = f->userd.gpu_va; 391 u32 bar1_vaddr = f->userd.gpu_va;
386 volatile u32 *cpu_vaddr = f->userd.cpu_va; 392 volatile u32 *cpu_vaddr = f->userd.cpu_va;
387 393
388 gk20a_dbg_info("test bar1 @ vaddr 0x%x", 394 nvgpu_log_info(g, "test bar1 @ vaddr 0x%x",
389 bar1_vaddr); 395 bar1_vaddr);
390 396
391 v = gk20a_bar1_readl(g, bar1_vaddr); 397 v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -415,7 +421,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
415 gk20a_bar1_writel(g, bar1_vaddr, v); 421 gk20a_bar1_writel(g, bar1_vaddr, v);
416 } 422 }
417 423
418 gk20a_dbg_fn("done"); 424 nvgpu_log_fn(g, "done");
419 425
420 return 0; 426 return 0;
421} 427}
@@ -424,7 +430,7 @@ int vgpu_init_fifo_support(struct gk20a *g)
424{ 430{
425 u32 err; 431 u32 err;
426 432
427 gk20a_dbg_fn(""); 433 nvgpu_log_fn(g, " ");
428 434
429 err = vgpu_init_fifo_setup_sw(g); 435 err = vgpu_init_fifo_setup_sw(g);
430 if (err) 436 if (err)
@@ -444,7 +450,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
444 &msg.params.channel_config; 450 &msg.params.channel_config;
445 int err; 451 int err;
446 452
447 gk20a_dbg_fn(""); 453 nvgpu_log_fn(g, " ");
448 454
449 if (!nvgpu_atomic_read(&ch->bound)) 455 if (!nvgpu_atomic_read(&ch->bound))
450 return 0; 456 return 0;
@@ -470,7 +476,7 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
470 &msg.params.tsg_preempt; 476 &msg.params.tsg_preempt;
471 int err; 477 int err;
472 478
473 gk20a_dbg_fn(""); 479 nvgpu_log_fn(g, " ");
474 480
475 msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT; 481 msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
476 msg.handle = vgpu_get_handle(g); 482 msg.handle = vgpu_get_handle(g);
@@ -533,7 +539,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
533 u16 *runlist_entry = NULL; 539 u16 *runlist_entry = NULL;
534 u32 count = 0; 540 u32 count = 0;
535 541
536 gk20a_dbg_fn(""); 542 nvgpu_log_fn(g, " ");
537 543
538 runlist = &f->runlist_info[runlist_id]; 544 runlist = &f->runlist_info[runlist_id];
539 545
@@ -558,7 +564,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
558 runlist_entry = runlist->mem[0].cpu_va; 564 runlist_entry = runlist->mem[0].cpu_va;
559 for_each_set_bit(cid, 565 for_each_set_bit(cid,
560 runlist->active_channels, f->num_channels) { 566 runlist->active_channels, f->num_channels) {
561 gk20a_dbg_info("add channel %d to runlist", cid); 567 nvgpu_log_info(g, "add channel %d to runlist", cid);
562 runlist_entry[0] = cid; 568 runlist_entry[0] = cid;
563 runlist_entry++; 569 runlist_entry++;
564 count++; 570 count++;
@@ -581,7 +587,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
581 struct fifo_gk20a *f = &g->fifo; 587 struct fifo_gk20a *f = &g->fifo;
582 u32 ret = 0; 588 u32 ret = 0;
583 589
584 gk20a_dbg_fn(""); 590 nvgpu_log_fn(g, " ");
585 591
586 runlist = &f->runlist_info[runlist_id]; 592 runlist = &f->runlist_info[runlist_id];
587 593
@@ -596,7 +602,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
596 602
597int vgpu_fifo_wait_engine_idle(struct gk20a *g) 603int vgpu_fifo_wait_engine_idle(struct gk20a *g)
598{ 604{
599 gk20a_dbg_fn(""); 605 nvgpu_log_fn(g, " ");
600 606
601 return 0; 607 return 0;
602} 608}
@@ -611,7 +617,7 @@ int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
611 &msg.params.tsg_interleave; 617 &msg.params.tsg_interleave;
612 int err; 618 int err;
613 619
614 gk20a_dbg_fn(""); 620 nvgpu_log_fn(g, " ");
615 621
616 msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE; 622 msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
617 msg.handle = vgpu_get_handle(g); 623 msg.handle = vgpu_get_handle(g);
@@ -633,7 +639,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
633 &msg.params.channel_config; 639 &msg.params.channel_config;
634 int err; 640 int err;
635 641
636 gk20a_dbg_fn(""); 642 nvgpu_log_fn(g, " ");
637 643
638 if (gk20a_is_channel_marked_as_tsg(ch)) { 644 if (gk20a_is_channel_marked_as_tsg(ch)) {
639 tsg = &g->fifo.tsg[ch->tsgid]; 645 tsg = &g->fifo.tsg[ch->tsgid];
@@ -716,7 +722,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
716 struct fifo_gk20a *f = &g->fifo; 722 struct fifo_gk20a *f = &g->fifo;
717 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]); 723 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
718 724
719 gk20a_dbg_fn(""); 725 nvgpu_log_fn(g, " ");
720 if (!ch) 726 if (!ch)
721 return 0; 727 return 0;
722 728
@@ -750,7 +756,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
750int vgpu_fifo_nonstall_isr(struct gk20a *g, 756int vgpu_fifo_nonstall_isr(struct gk20a *g,
751 struct tegra_vgpu_fifo_nonstall_intr_info *info) 757 struct tegra_vgpu_fifo_nonstall_intr_info *info)
752{ 758{
753 gk20a_dbg_fn(""); 759 nvgpu_log_fn(g, " ");
754 760
755 switch (info->type) { 761 switch (info->type) {
756 case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL: 762 case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL:
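One hunk in the fifo code uses the mask-filtered variant, where gk20a_dbg(gpu_dbg_map_v, ...) becomes nvgpu_log(g, gpu_dbg_map_v, ...). The following is a hedged sketch of how such a category-gated macro could behave; the mask bit, the log_mask field, and the filtering logic are illustrative stand-ins, not the driver's actual implementation.

/*
 * Standalone sketch of a category-gated log macro.  All definitions here
 * are illustrative assumptions; the real masks live in the nvgpu log header.
 */
#include <stdio.h>

#define gpu_dbg_map_v   (1u << 0)       /* illustrative category bit only */

struct gk20a {
        const char *name;
        unsigned int log_mask;          /* categories currently enabled */
};

#define nvgpu_log(g, category, fmt, ...)                                \
        do {                                                            \
                if ((g)->log_mask & (category))                         \
                        printf("[%s] " fmt "\n", (g)->name,             \
                               ##__VA_ARGS__);                          \
        } while (0)

int main(void)
{
        struct gk20a g = { .name = "gk20a", .log_mask = gpu_dbg_map_v };
        unsigned long long userd_gpu_va = 0x40000000ull;

        nvgpu_log(&g, gpu_dbg_map_v, "userd bar1 va = 0x%llx", userd_gpu_va);
        return 0;
}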
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index ab35dc67..86184336 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -43,7 +43,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
43 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 43 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
44 int err; 44 int err;
45 45
46 gk20a_dbg_fn(""); 46 nvgpu_log_fn(g, " ");
47 47
48 err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags); 48 err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
49 if (err) 49 if (err)
@@ -78,7 +78,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
78 } 78 }
79 } 79 }
80 80
81 gk20a_dbg_fn("done"); 81 nvgpu_log_fn(g, "done");
82 return err; 82 return err;
83 83
84fail: 84fail:
@@ -132,11 +132,11 @@ int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
132 132
133 attrib_cb_size = ALIGN(attrib_cb_size, 128); 133 attrib_cb_size = ALIGN(attrib_cb_size, 128);
134 134
135 gk20a_dbg_info("gfxp context preempt size=%d", 135 nvgpu_log_info(g, "gfxp context preempt size=%d",
136 g->gr.ctx_vars.preempt_image_size); 136 g->gr.ctx_vars.preempt_image_size);
137 gk20a_dbg_info("gfxp context spill size=%d", spill_size); 137 nvgpu_log_info(g, "gfxp context spill size=%d", spill_size);
138 gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size); 138 nvgpu_log_info(g, "gfxp context pagepool size=%d", pagepool_size);
139 gk20a_dbg_info("gfxp context attrib cb size=%d", 139 nvgpu_log_info(g, "gfxp context attrib cb size=%d",
140 attrib_cb_size); 140 attrib_cb_size);
141 141
142 err = gr_gp10b_alloc_buffer(vm, 142 err = gr_gp10b_alloc_buffer(vm,
@@ -293,7 +293,7 @@ int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
293 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 293 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
294 int err; 294 int err;
295 295
296 gk20a_dbg_fn(""); 296 nvgpu_log_fn(g, " ");
297 297
298 err = vgpu_gr_init_ctx_state(g); 298 err = vgpu_gr_init_ctx_state(g);
299 if (err) 299 if (err)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index e615c486..b8c4d2de 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -78,7 +78,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
78 u8 prot; 78 u8 prot;
79 struct nvgpu_sgl *sgl; 79 struct nvgpu_sgl *sgl;
80 80
81 gk20a_dbg_fn(""); 81 nvgpu_log_fn(g, " ");
82 82
83 /* FIXME: add support for sparse mappings */ 83 /* FIXME: add support for sparse mappings */
84 84
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 2f1280ac..1e633d5f 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -43,7 +43,7 @@ void vgpu_gr_detect_sm_arch(struct gk20a *g)
43{ 43{
44 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 44 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
45 45
46 gk20a_dbg_fn(""); 46 nvgpu_log_fn(g, " ");
47 47
48 g->params.sm_arch_sm_version = 48 g->params.sm_arch_sm_version =
49 priv->constants.sm_arch_sm_version; 49 priv->constants.sm_arch_sm_version;
@@ -58,8 +58,9 @@ int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
58 struct tegra_vgpu_cmd_msg msg; 58 struct tegra_vgpu_cmd_msg msg;
59 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; 59 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
60 int err; 60 int err;
61 struct gk20a *g = c->g;
61 62
62 gk20a_dbg_fn(""); 63 nvgpu_log_fn(g, " ");
63 64
64 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX; 65 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
65 msg.handle = vgpu_get_handle(c->g); 66 msg.handle = vgpu_get_handle(c->g);
@@ -76,7 +77,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
76 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; 77 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
77 int err; 78 int err;
78 79
79 gk20a_dbg_fn(""); 80 nvgpu_log_fn(g, " ");
80 81
81 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX; 82 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
82 msg.handle = vgpu_get_handle(g); 83 msg.handle = vgpu_get_handle(g);
@@ -94,7 +95,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
94 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; 95 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
95 int err; 96 int err;
96 97
97 gk20a_dbg_fn(""); 98 nvgpu_log_fn(g, " ");
98 99
99 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX; 100 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
100 msg.handle = vgpu_get_handle(g); 101 msg.handle = vgpu_get_handle(g);
@@ -109,7 +110,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
109 struct gr_gk20a *gr = &g->gr; 110 struct gr_gk20a *gr = &g->gr;
110 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 111 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
111 112
112 gk20a_dbg_fn(""); 113 nvgpu_log_fn(g, " ");
113 114
114 g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size; 115 g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
115 g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size; 116 g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
@@ -135,20 +136,20 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
135 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * 136 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
136 gr_scc_pagepool_total_pages_byte_granularity_v(); 137 gr_scc_pagepool_total_pages_byte_granularity_v();
137 138
138 gk20a_dbg_fn(""); 139 nvgpu_log_fn(g, " ");
139 140
140 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); 141 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
141 142
142 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); 143 nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size);
143 gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size; 144 gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
144 145
145 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); 146 nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
146 gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size; 147 gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
147 148
148 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); 149 nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
149 gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size; 150 gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
150 151
151 gk20a_dbg_info("priv access map size : %d", 152 nvgpu_log_info(g, "priv access map size : %d",
152 gr->ctx_vars.priv_access_map_size); 153 gr->ctx_vars.priv_access_map_size);
153 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size = 154 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
154 gr->ctx_vars.priv_access_map_size; 155 gr->ctx_vars.priv_access_map_size;
@@ -170,7 +171,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
170 u32 i; 171 u32 i;
171 int err; 172 int err;
172 173
173 gk20a_dbg_fn(""); 174 nvgpu_log_fn(g, " ");
174 175
175 tsg = tsg_gk20a_from_ch(c); 176 tsg = tsg_gk20a_from_ch(c);
176 if (!tsg) 177 if (!tsg)
@@ -249,8 +250,9 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
249 u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va; 250 u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
250 u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size; 251 u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
251 u32 i; 252 u32 i;
253 struct gk20a *g = tsg->g;
252 254
253 gk20a_dbg_fn(""); 255 nvgpu_log_fn(g, " ");
254 256
255 if (tsg->gr_ctx.global_ctx_buffer_mapped) { 257 if (tsg->gr_ctx.global_ctx_buffer_mapped) {
256 /* server will unmap on channel close */ 258 /* server will unmap on channel close */
@@ -279,7 +281,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
279 struct gr_gk20a *gr = &g->gr; 281 struct gr_gk20a *gr = &g->gr;
280 int err; 282 int err;
281 283
282 gk20a_dbg_fn(""); 284 nvgpu_log_fn(g, " ");
283 285
284 if (gr->ctx_vars.buffer_size == 0) 286 if (gr->ctx_vars.buffer_size == 0)
285 return 0; 287 return 0;
@@ -328,7 +330,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
328 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; 330 struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
329 int err; 331 int err;
330 332
331 gk20a_dbg_fn(""); 333 nvgpu_log_fn(g, " ");
332 334
333 tsg = tsg_gk20a_from_ch(c); 335 tsg = tsg_gk20a_from_ch(c);
334 if (!tsg) 336 if (!tsg)
@@ -359,8 +361,9 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
359static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg) 361static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
360{ 362{
361 struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx; 363 struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
364 struct gk20a *g = tsg->g;
362 365
363 gk20a_dbg_fn(""); 366 nvgpu_log_fn(g, " ");
364 367
365 if (patch_ctx->mem.gpu_va) { 368 if (patch_ctx->mem.gpu_va) {
366 /* server will free on channel close */ 369 /* server will free on channel close */
@@ -375,8 +378,9 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
375{ 378{
376 struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx; 379 struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx;
377 struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx; 380 struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
381 struct gk20a *g = tsg->g;
378 382
379 gk20a_dbg_fn(""); 383 nvgpu_log_fn(g, " ");
380 384
381 /* check if hwpm was ever initialized. If not, nothing to do */ 385 /* check if hwpm was ever initialized. If not, nothing to do */
382 if (pm_ctx->mem.gpu_va == 0) 386 if (pm_ctx->mem.gpu_va == 0)
@@ -394,7 +398,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
394{ 398{
395 struct tsg_gk20a *tsg; 399 struct tsg_gk20a *tsg;
396 400
397 gk20a_dbg_fn(""); 401 nvgpu_log_fn(g, " ");
398 402
399 if (gr_ctx->mem.gpu_va) { 403 if (gr_ctx->mem.gpu_va) {
400 struct tegra_vgpu_cmd_msg msg; 404 struct tegra_vgpu_cmd_msg msg;
@@ -477,7 +481,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
477 struct tsg_gk20a *tsg = NULL; 481 struct tsg_gk20a *tsg = NULL;
478 int err = 0; 482 int err = 0;
479 483
480 gk20a_dbg_fn(""); 484 nvgpu_log_fn(g, " ");
481 485
482 /* an address space needs to have been bound at this point.*/ 486 /* an address space needs to have been bound at this point.*/
483 if (!gk20a_channel_as_bound(c)) { 487 if (!gk20a_channel_as_bound(c)) {
@@ -577,7 +581,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
577 /* PM ctxt switch is off by default */ 581 /* PM ctxt switch is off by default */
578 gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f(); 582 gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
579 583
580 gk20a_dbg_fn("done"); 584 nvgpu_log_fn(g, "done");
581 return 0; 585 return 0;
582out: 586out:
583 /* 1. gr_ctx, patch_ctx and global ctx buffer mapping 587 /* 1. gr_ctx, patch_ctx and global ctx buffer mapping
@@ -595,7 +599,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
595 u32 sm_per_tpc; 599 u32 sm_per_tpc;
596 int err = -ENOMEM; 600 int err = -ENOMEM;
597 601
598 gk20a_dbg_fn(""); 602 nvgpu_log_fn(g, " ");
599 603
600 gr->max_gpc_count = priv->constants.max_gpc_count; 604 gr->max_gpc_count = priv->constants.max_gpc_count;
601 gr->gpc_count = priv->constants.gpc_count; 605 gr->gpc_count = priv->constants.gpc_count;
@@ -658,7 +662,7 @@ int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
658 struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind; 662 struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
659 int err; 663 int err;
660 664
661 gk20a_dbg_fn(""); 665 nvgpu_log_fn(g, " ");
662 666
663 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL; 667 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
664 msg.handle = vgpu_get_handle(g); 668 msg.handle = vgpu_get_handle(g);
@@ -677,7 +681,7 @@ int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
677 struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info; 681 struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
678 int err; 682 int err;
679 683
680 gk20a_dbg_fn(""); 684 nvgpu_log_fn(g, " ");
681 685
682 msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO; 686 msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
683 msg.handle = vgpu_get_handle(g); 687 msg.handle = vgpu_get_handle(g);
@@ -712,7 +716,7 @@ u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
712{ 716{
713 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 717 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
714 718
715 gk20a_dbg_fn(""); 719 nvgpu_log_fn(g, " ");
716 720
717 return priv->constants.num_fbps; 721 return priv->constants.num_fbps;
718} 722}
@@ -721,7 +725,7 @@ u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
721{ 725{
722 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 726 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
723 727
724 gk20a_dbg_fn(""); 728 nvgpu_log_fn(g, " ");
725 729
726 return priv->constants.fbp_en_mask; 730 return priv->constants.fbp_en_mask;
727} 731}
@@ -730,7 +734,7 @@ u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
730{ 734{
731 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 735 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
732 736
733 gk20a_dbg_fn(""); 737 nvgpu_log_fn(g, " ");
734 738
735 return priv->constants.ltc_per_fbp; 739 return priv->constants.ltc_per_fbp;
736} 740}
@@ -739,7 +743,7 @@ u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
739{ 743{
740 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 744 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
741 745
742 gk20a_dbg_fn(""); 746 nvgpu_log_fn(g, " ");
743 747
744 return priv->constants.max_lts_per_ltc; 748 return priv->constants.max_lts_per_ltc;
745} 749}
@@ -749,7 +753,7 @@ u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
749 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 753 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
750 u32 i, max_fbps_count = priv->constants.num_fbps; 754 u32 i, max_fbps_count = priv->constants.num_fbps;
751 755
752 gk20a_dbg_fn(""); 756 nvgpu_log_fn(g, " ");
753 757
754 if (g->gr.fbp_rop_l2_en_mask == NULL) { 758 if (g->gr.fbp_rop_l2_en_mask == NULL) {
755 g->gr.fbp_rop_l2_en_mask = 759 g->gr.fbp_rop_l2_en_mask =
@@ -772,7 +776,7 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
772 struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table; 776 struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
773 int err; 777 int err;
774 778
775 gk20a_dbg_fn(""); 779 nvgpu_log_fn(g, " ");
776 780
777 msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE; 781 msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
778 msg.handle = vgpu_get_handle(g); 782 msg.handle = vgpu_get_handle(g);
@@ -804,7 +808,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
804 &msg.params.zbc_query_table; 808 &msg.params.zbc_query_table;
805 int err; 809 int err;
806 810
807 gk20a_dbg_fn(""); 811 nvgpu_log_fn(g, " ");
808 812
809 msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE; 813 msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
810 msg.handle = vgpu_get_handle(g); 814 msg.handle = vgpu_get_handle(g);
@@ -840,7 +844,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
840 844
841static void vgpu_remove_gr_support(struct gr_gk20a *gr) 845static void vgpu_remove_gr_support(struct gr_gk20a *gr)
842{ 846{
843 gk20a_dbg_fn(""); 847 nvgpu_log_fn(gr->g, " ");
844 848
845 gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags); 849 gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags);
846 850
@@ -865,10 +869,10 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
865 struct gr_gk20a *gr = &g->gr; 869 struct gr_gk20a *gr = &g->gr;
866 int err; 870 int err;
867 871
868 gk20a_dbg_fn(""); 872 nvgpu_log_fn(g, " ");
869 873
870 if (gr->sw_ready) { 874 if (gr->sw_ready) {
871 gk20a_dbg_fn("skip init"); 875 nvgpu_log_fn(g, "skip init");
872 return 0; 876 return 0;
873 } 877 }
874 878
@@ -907,7 +911,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
907 gr->remove_support = vgpu_remove_gr_support; 911 gr->remove_support = vgpu_remove_gr_support;
908 gr->sw_ready = true; 912 gr->sw_ready = true;
909 913
910 gk20a_dbg_fn("done"); 914 nvgpu_log_fn(g, "done");
911 return 0; 915 return 0;
912 916
913clean_up: 917clean_up:
@@ -918,7 +922,7 @@ clean_up:
918 922
919int vgpu_init_gr_support(struct gk20a *g) 923int vgpu_init_gr_support(struct gk20a *g)
920{ 924{
921 gk20a_dbg_fn(""); 925 nvgpu_log_fn(g, " ");
922 926
923 return vgpu_gr_init_gr_setup_sw(g); 927 return vgpu_gr_init_gr_setup_sw(g);
924} 928}
@@ -928,7 +932,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
928 struct fifo_gk20a *f = &g->fifo; 932 struct fifo_gk20a *f = &g->fifo;
929 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]); 933 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
930 934
931 gk20a_dbg_fn(""); 935 nvgpu_log_fn(g, " ");
932 if (!ch) 936 if (!ch)
933 return 0; 937 return 0;
934 938
@@ -985,7 +989,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
985int vgpu_gr_nonstall_isr(struct gk20a *g, 989int vgpu_gr_nonstall_isr(struct gk20a *g,
986 struct tegra_vgpu_gr_nonstall_intr_info *info) 990 struct tegra_vgpu_gr_nonstall_intr_info *info)
987{ 991{
988 gk20a_dbg_fn(""); 992 nvgpu_log_fn(g, " ");
989 993
990 switch (info->type) { 994 switch (info->type) {
991 case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE: 995 case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE:
@@ -1006,7 +1010,7 @@ int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
1006 struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode; 1010 struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
1007 int err; 1011 int err;
1008 1012
1009 gk20a_dbg_fn(""); 1013 nvgpu_log_fn(g, " ");
1010 1014
1011 msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE; 1015 msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE;
1012 msg.handle = vgpu_get_handle(g); 1016 msg.handle = vgpu_get_handle(g);
@@ -1026,7 +1030,7 @@ int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
1026 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; 1030 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
1027 int err; 1031 int err;
1028 1032
1029 gk20a_dbg_fn(""); 1033 nvgpu_log_fn(g, " ");
1030 1034
1031 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE; 1035 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
1032 msg.handle = vgpu_get_handle(g); 1036 msg.handle = vgpu_get_handle(g);
@@ -1053,7 +1057,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
1053 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; 1057 struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
1054 int err; 1058 int err;
1055 1059
1056 gk20a_dbg_fn(""); 1060 nvgpu_log_fn(g, " ");
1057 1061
1058 tsg = tsg_gk20a_from_ch(ch); 1062 tsg = tsg_gk20a_from_ch(ch);
1059 if (!tsg) 1063 if (!tsg)
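
Note: the gr_vgpu.c hunks above all follow one pattern: gk20a_dbg_fn("") becomes nvgpu_log_fn(g, " "), gk20a_dbg_info(...) becomes nvgpu_log_info(g, ...), and functions that previously had no struct gk20a pointer in scope (the TSG teardown paths) gain a local taken from tsg->g before the first trace call. A condensed before/after sketch of that pattern, abridged from the hunks above and not compilable on its own:

	/* before: device-less debug macro */
	static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
	{
		struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;

		gk20a_dbg_fn("");
		/* ... */
	}

	/* after: nvgpu_log_fn() takes a struct gk20a *, so one is
	 * derived from the TSG before logging */
	static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
	{
		struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
		struct gk20a *g = tsg->g;

		nvgpu_log_fn(g, " ");
		/* ... */
	}

The same derivation appears below wherever only a TSG or channel is passed in (tsg->g, ch->g, gr->g).
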
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
index 933e8357..1bcd151a 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
@@ -30,7 +30,7 @@ int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g)
30{ 30{
31 int err; 31 int err;
32 32
33 gk20a_dbg_fn(""); 33 nvgpu_log_fn(g, " ");
34 34
35 err = vgpu_init_gpu_characteristics(g); 35 err = vgpu_init_gpu_characteristics(g);
36 if (err) { 36 if (err) {
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
index b249b5af..367c1299 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
@@ -33,8 +33,9 @@ int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
33 struct tegra_vgpu_tsg_bind_channel_ex_params *p = 33 struct tegra_vgpu_tsg_bind_channel_ex_params *p =
34 &msg.params.tsg_bind_channel_ex; 34 &msg.params.tsg_bind_channel_ex;
35 int err; 35 int err;
36 struct gk20a *g = tsg->g;
36 37
37 gk20a_dbg_fn(""); 38 nvgpu_log_fn(g, " ");
38 39
39 err = gk20a_tsg_bind_channel(tsg, ch); 40 err = gk20a_tsg_bind_channel(tsg, ch);
40 if (err) 41 if (err)
diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
index d451a1f2..f68c8454 100644
--- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
@@ -31,7 +31,7 @@ int vgpu_determine_L2_size_bytes(struct gk20a *g)
31{ 31{
32 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 32 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
33 33
34 gk20a_dbg_fn(""); 34 nvgpu_log_fn(g, " ");
35 35
36 return priv->constants.l2_size; 36 return priv->constants.l2_size;
37} 37}
@@ -42,7 +42,7 @@ int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
42 u32 max_comptag_lines = 0; 42 u32 max_comptag_lines = 0;
43 int err; 43 int err;
44 44
45 gk20a_dbg_fn(""); 45 nvgpu_log_fn(g, " ");
46 46
47 gr->cacheline_size = priv->constants.cacheline_size; 47 gr->cacheline_size = priv->constants.cacheline_size;
48 gr->comptags_per_cacheline = priv->constants.comptags_per_cacheline; 48 gr->comptags_per_cacheline = priv->constants.comptags_per_cacheline;
@@ -65,7 +65,7 @@ void vgpu_ltc_init_fs_state(struct gk20a *g)
65{ 65{
66 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 66 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
67 67
68 gk20a_dbg_fn(""); 68 nvgpu_log_fn(g, " ");
69 69
70 g->ltc_count = priv->constants.ltc_count; 70 g->ltc_count = priv->constants.ltc_count;
71} 71}
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 3e75cee3..b8eaa1db 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -40,10 +40,10 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g)
40{ 40{
41 struct mm_gk20a *mm = &g->mm; 41 struct mm_gk20a *mm = &g->mm;
42 42
43 gk20a_dbg_fn(""); 43 nvgpu_log_fn(g, " ");
44 44
45 if (mm->sw_ready) { 45 if (mm->sw_ready) {
46 gk20a_dbg_fn("skip init"); 46 nvgpu_log_fn(g, "skip init");
47 return 0; 47 return 0;
48 } 48 }
49 49
@@ -56,7 +56,7 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g)
56 mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; 56 mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
57 mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE; 57 mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;
58 58
59 gk20a_dbg_info("channel vm size: user %dMB kernel %dMB", 59 nvgpu_log_info(g, "channel vm size: user %dMB kernel %dMB",
60 (int)(mm->channel.user_size >> 20), 60 (int)(mm->channel.user_size >> 20),
61 (int)(mm->channel.kernel_size >> 20)); 61 (int)(mm->channel.kernel_size >> 20));
62 62
@@ -69,7 +69,7 @@ int vgpu_init_mm_support(struct gk20a *g)
69{ 69{
70 int err; 70 int err;
71 71
72 gk20a_dbg_fn(""); 72 nvgpu_log_fn(g, " ");
73 73
74 err = vgpu_init_mm_setup_sw(g); 74 err = vgpu_init_mm_setup_sw(g);
75 if (err) 75 if (err)
@@ -95,7 +95,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
95 struct tegra_vgpu_as_map_params *p = &msg.params.as_map; 95 struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
96 int err; 96 int err;
97 97
98 gk20a_dbg_fn(""); 98 nvgpu_log_fn(g, " ");
99 99
100 msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP; 100 msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
101 msg.handle = vgpu_get_handle(g); 101 msg.handle = vgpu_get_handle(g);
@@ -183,8 +183,9 @@ int vgpu_vm_bind_channel(struct vm_gk20a *vm,
183 struct tegra_vgpu_cmd_msg msg; 183 struct tegra_vgpu_cmd_msg msg;
184 struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share; 184 struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
185 int err; 185 int err;
186 struct gk20a *g = ch->g;
186 187
187 gk20a_dbg_fn(""); 188 nvgpu_log_fn(g, " ");
188 189
189 ch->vm = vm; 190 ch->vm = vm;
190 msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE; 191 msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
@@ -220,7 +221,7 @@ static void vgpu_cache_maint(u64 handle, u8 op)
220int vgpu_mm_fb_flush(struct gk20a *g) 221int vgpu_mm_fb_flush(struct gk20a *g)
221{ 222{
222 223
223 gk20a_dbg_fn(""); 224 nvgpu_log_fn(g, " ");
224 225
225 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH); 226 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
226 return 0; 227 return 0;
@@ -229,7 +230,7 @@ int vgpu_mm_fb_flush(struct gk20a *g)
229void vgpu_mm_l2_invalidate(struct gk20a *g) 230void vgpu_mm_l2_invalidate(struct gk20a *g)
230{ 231{
231 232
232 gk20a_dbg_fn(""); 233 nvgpu_log_fn(g, " ");
233 234
234 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV); 235 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
235} 236}
@@ -238,7 +239,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
238{ 239{
239 u8 op; 240 u8 op;
240 241
241 gk20a_dbg_fn(""); 242 nvgpu_log_fn(g, " ");
242 243
243 if (invalidate) 244 if (invalidate)
244 op = TEGRA_VGPU_L2_MAINT_FLUSH_INV; 245 op = TEGRA_VGPU_L2_MAINT_FLUSH_INV;
@@ -250,7 +251,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
250 251
251void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb) 252void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
252{ 253{
253 gk20a_dbg_fn(""); 254 nvgpu_log_fn(g, " ");
254 255
255 nvgpu_err(g, "call to RM server not supported"); 256 nvgpu_err(g, "call to RM server not supported");
256} 257}
@@ -261,7 +262,7 @@ void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
261 struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode; 262 struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
262 int err; 263 int err;
263 264
264 gk20a_dbg_fn(""); 265 nvgpu_log_fn(g, " ");
265 266
266 msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE; 267 msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
267 msg.handle = vgpu_get_handle(g); 268 msg.handle = vgpu_get_handle(g);
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index a6e493d0..7bb8f671 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -35,8 +35,9 @@ int vgpu_tsg_open(struct tsg_gk20a *tsg)
35 struct tegra_vgpu_tsg_open_rel_params *p = 35 struct tegra_vgpu_tsg_open_rel_params *p =
36 &msg.params.tsg_open; 36 &msg.params.tsg_open;
37 int err; 37 int err;
38 struct gk20a *g = tsg->g;
38 39
39 gk20a_dbg_fn(""); 40 nvgpu_log_fn(g, " ");
40 41
41 msg.cmd = TEGRA_VGPU_CMD_TSG_OPEN; 42 msg.cmd = TEGRA_VGPU_CMD_TSG_OPEN;
42 msg.handle = vgpu_get_handle(tsg->g); 43 msg.handle = vgpu_get_handle(tsg->g);
@@ -57,8 +58,9 @@ void vgpu_tsg_release(struct tsg_gk20a *tsg)
57 struct tegra_vgpu_tsg_open_rel_params *p = 58 struct tegra_vgpu_tsg_open_rel_params *p =
58 &msg.params.tsg_release; 59 &msg.params.tsg_release;
59 int err; 60 int err;
61 struct gk20a *g = tsg->g;
60 62
61 gk20a_dbg_fn(""); 63 nvgpu_log_fn(g, " ");
62 64
63 msg.cmd = TEGRA_VGPU_CMD_TSG_RELEASE; 65 msg.cmd = TEGRA_VGPU_CMD_TSG_RELEASE;
64 msg.handle = vgpu_get_handle(tsg->g); 66 msg.handle = vgpu_get_handle(tsg->g);
@@ -91,8 +93,9 @@ int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
91 struct tegra_vgpu_tsg_bind_unbind_channel_params *p = 93 struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
92 &msg.params.tsg_bind_unbind_channel; 94 &msg.params.tsg_bind_unbind_channel;
93 int err; 95 int err;
96 struct gk20a *g = ch->g;
94 97
95 gk20a_dbg_fn(""); 98 nvgpu_log_fn(g, " ");
96 99
97 err = gk20a_tsg_bind_channel(tsg, ch); 100 err = gk20a_tsg_bind_channel(tsg, ch);
98 if (err) 101 if (err)
@@ -120,8 +123,9 @@ int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
120 struct tegra_vgpu_tsg_bind_unbind_channel_params *p = 123 struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
121 &msg.params.tsg_bind_unbind_channel; 124 &msg.params.tsg_bind_unbind_channel;
122 int err; 125 int err;
126 struct gk20a *g = ch->g;
123 127
124 gk20a_dbg_fn(""); 128 nvgpu_log_fn(g, " ");
125 129
126 err = gk20a_fifo_tsg_unbind_channel(ch); 130 err = gk20a_fifo_tsg_unbind_channel(ch);
127 if (err) 131 if (err)
@@ -143,8 +147,9 @@ int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
143 struct tegra_vgpu_tsg_timeslice_params *p = 147 struct tegra_vgpu_tsg_timeslice_params *p =
144 &msg.params.tsg_timeslice; 148 &msg.params.tsg_timeslice;
145 int err; 149 int err;
150 struct gk20a *g = tsg->g;
146 151
147 gk20a_dbg_fn(""); 152 nvgpu_log_fn(g, " ");
148 153
149 msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE; 154 msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE;
150 msg.handle = vgpu_get_handle(tsg->g); 155 msg.handle = vgpu_get_handle(tsg->g);
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index 1e77cda9..17e80cd7 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -249,7 +249,7 @@ void vgpu_detect_chip(struct gk20a *g)
249 p->gpu_impl = priv->constants.impl; 249 p->gpu_impl = priv->constants.impl;
250 p->gpu_rev = priv->constants.rev; 250 p->gpu_rev = priv->constants.rev;
251 251
252 gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n", 252 nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n",
253 p->gpu_arch, 253 p->gpu_arch,
254 p->gpu_impl, 254 p->gpu_impl,
255 p->gpu_rev); 255 p->gpu_rev);
@@ -259,7 +259,7 @@ int vgpu_init_gpu_characteristics(struct gk20a *g)
259{ 259{
260 int err; 260 int err;
261 261
262 gk20a_dbg_fn(""); 262 nvgpu_log_fn(g, " ");
263 263
264 err = gk20a_init_gpu_characteristics(g); 264 err = gk20a_init_gpu_characteristics(g);
265 if (err) 265 if (err)
@@ -279,7 +279,7 @@ int vgpu_read_ptimer(struct gk20a *g, u64 *value)
279 struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer; 279 struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
280 int err; 280 int err;
281 281
282 gk20a_dbg_fn(""); 282 nvgpu_log_fn(g, " ");
283 283
284 msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER; 284 msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
285 msg.handle = vgpu_get_handle(g); 285 msg.handle = vgpu_get_handle(g);
@@ -304,7 +304,7 @@ int vgpu_get_timestamps_zipper(struct gk20a *g,
304 int err; 304 int err;
305 u32 i; 305 u32 i;
306 306
307 gk20a_dbg_fn(""); 307 nvgpu_log_fn(g, " ");
308 308
309 if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) { 309 if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
310 nvgpu_err(g, "count %u overflow", count); 310 nvgpu_err(g, "count %u overflow", count);
@@ -338,7 +338,7 @@ int vgpu_init_hal(struct gk20a *g)
338 338
339 switch (ver) { 339 switch (ver) {
340 case NVGPU_GPUID_GP10B: 340 case NVGPU_GPUID_GP10B:
341 gk20a_dbg_info("gp10b detected"); 341 nvgpu_log_info(g, "gp10b detected");
342 err = vgpu_gp10b_init_hal(g); 342 err = vgpu_gp10b_init_hal(g);
343 break; 343 break;
344 case NVGPU_GPUID_GV11B: 344 case NVGPU_GPUID_GV11B:
@@ -360,7 +360,7 @@ int vgpu_get_constants(struct gk20a *g)
360 struct vgpu_priv_data *priv = vgpu_get_priv_data(g); 360 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
361 int err; 361 int err;
362 362
363 gk20a_dbg_fn(""); 363 nvgpu_log_fn(g, " ");
364 364
365 msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS; 365 msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS;
366 msg.handle = vgpu_get_handle(g); 366 msg.handle = vgpu_get_handle(g);
diff --git a/drivers/gpu/nvgpu/volt/volt_dev.c b/drivers/gpu/nvgpu/volt/volt_dev.c
index 38df2105..d900b37b 100644
--- a/drivers/gpu/nvgpu/volt/volt_dev.c
+++ b/drivers/gpu/nvgpu/volt/volt_dev.c
@@ -408,7 +408,7 @@ static u32 _volt_device_devgrp_pmudata_instget(struct gk20a *g,
408 (struct nv_pmu_volt_volt_device_boardobj_grp_set *) 408 (struct nv_pmu_volt_volt_device_boardobj_grp_set *)
409 pmuboardobjgrp; 409 pmuboardobjgrp;
410 410
411 gk20a_dbg_info(""); 411 nvgpu_log_info(g, " ");
412 412
413 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 413 /*check whether pmuboardobjgrp has a valid boardobj in index*/
414 if (((u32)BIT(idx) & 414 if (((u32)BIT(idx) &
@@ -417,7 +417,7 @@ static u32 _volt_device_devgrp_pmudata_instget(struct gk20a *g,
417 417
418 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 418 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
419 &pgrp_set->objects[idx].data.board_obj; 419 &pgrp_set->objects[idx].data.board_obj;
420 gk20a_dbg_info("Done"); 420 nvgpu_log_info(g, "Done");
421 return 0; 421 return 0;
422} 422}
423 423
@@ -506,7 +506,7 @@ u32 volt_dev_pmu_setup(struct gk20a *g)
506 u32 status; 506 u32 status;
507 struct boardobjgrp *pboardobjgrp = NULL; 507 struct boardobjgrp *pboardobjgrp = NULL;
508 508
509 gk20a_dbg_info(""); 509 nvgpu_log_info(g, " ");
510 510
511 pboardobjgrp = &g->perf_pmu.volt.volt_dev_metadata.volt_devices.super; 511 pboardobjgrp = &g->perf_pmu.volt.volt_dev_metadata.volt_devices.super;
512 512
@@ -515,7 +515,7 @@ u32 volt_dev_pmu_setup(struct gk20a *g)
515 515
516 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 516 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
517 517
518 gk20a_dbg_info("Done"); 518 nvgpu_log_info(g, "Done");
519 return status; 519 return status;
520} 520}
521 521
@@ -526,7 +526,7 @@ u32 volt_dev_sw_setup(struct gk20a *g)
526 struct voltage_device *pvolt_device; 526 struct voltage_device *pvolt_device;
527 u8 i; 527 u8 i;
528 528
529 gk20a_dbg_info(""); 529 nvgpu_log_info(g, " ");
530 530
531 status = boardobjgrpconstruct_e32(g, 531 status = boardobjgrpconstruct_e32(g,
532 &g->perf_pmu.volt.volt_dev_metadata.volt_devices); 532 &g->perf_pmu.volt.volt_dev_metadata.volt_devices);
@@ -585,6 +585,6 @@ u32 volt_dev_sw_setup(struct gk20a *g)
585 } 585 }
586 586
587done: 587done:
588 gk20a_dbg_info(" done status %x", status); 588 nvgpu_log_info(g, " done status %x", status);
589 return status; 589 return status;
590} 590}
diff --git a/drivers/gpu/nvgpu/volt/volt_pmu.c b/drivers/gpu/nvgpu/volt/volt_pmu.c
index 07bff84a..bd9177ff 100644
--- a/drivers/gpu/nvgpu/volt/volt_pmu.c
+++ b/drivers/gpu/nvgpu/volt/volt_pmu.c
@@ -46,7 +46,7 @@ static void volt_rpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
46 struct volt_rpc_pmucmdhandler_params *phandlerparams = 46 struct volt_rpc_pmucmdhandler_params *phandlerparams =
47 (struct volt_rpc_pmucmdhandler_params *)param; 47 (struct volt_rpc_pmucmdhandler_params *)param;
48 48
49 gk20a_dbg_info(""); 49 nvgpu_log_info(g, " ");
50 50
51 if (msg->msg.volt.msg_type != NV_PMU_VOLT_MSG_ID_RPC) { 51 if (msg->msg.volt.msg_type != NV_PMU_VOLT_MSG_ID_RPC) {
52 nvgpu_err(g, "unsupported msg for VOLT RPC %x", 52 nvgpu_err(g, "unsupported msg for VOLT RPC %x",
diff --git a/drivers/gpu/nvgpu/volt/volt_policy.c b/drivers/gpu/nvgpu/volt/volt_policy.c
index a69c38bb..3783dc32 100644
--- a/drivers/gpu/nvgpu/volt/volt_policy.c
+++ b/drivers/gpu/nvgpu/volt/volt_policy.c
@@ -370,7 +370,7 @@ static u32 _volt_policy_devgrp_pmudata_instget(struct gk20a *g,
370 (struct nv_pmu_volt_volt_policy_boardobj_grp_set *) 370 (struct nv_pmu_volt_volt_policy_boardobj_grp_set *)
371 pmuboardobjgrp; 371 pmuboardobjgrp;
372 372
373 gk20a_dbg_info(""); 373 nvgpu_log_info(g, " ");
374 374
375 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 375 /*check whether pmuboardobjgrp has a valid boardobj in index*/
376 if (((u32)BIT(idx) & 376 if (((u32)BIT(idx) &
@@ -379,7 +379,7 @@ static u32 _volt_policy_devgrp_pmudata_instget(struct gk20a *g,
379 379
380 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 380 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
381 &pgrp_set->objects[idx].data.board_obj; 381 &pgrp_set->objects[idx].data.board_obj;
382 gk20a_dbg_info(" Done"); 382 nvgpu_log_info(g, " Done");
383 return 0; 383 return 0;
384} 384}
385 385
@@ -430,7 +430,7 @@ u32 volt_policy_pmu_setup(struct gk20a *g)
430 u32 status; 430 u32 status;
431 struct boardobjgrp *pboardobjgrp = NULL; 431 struct boardobjgrp *pboardobjgrp = NULL;
432 432
433 gk20a_dbg_info(""); 433 nvgpu_log_info(g, " ");
434 434
435 pboardobjgrp = 435 pboardobjgrp =
436 &g->perf_pmu.volt.volt_policy_metadata.volt_policies.super; 436 &g->perf_pmu.volt.volt_policy_metadata.volt_policies.super;
@@ -440,7 +440,7 @@ u32 volt_policy_pmu_setup(struct gk20a *g)
440 440
441 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 441 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
442 442
443 gk20a_dbg_info("Done"); 443 nvgpu_log_info(g, "Done");
444 return status; 444 return status;
445} 445}
446 446
@@ -449,7 +449,7 @@ u32 volt_policy_sw_setup(struct gk20a *g)
449 u32 status = 0; 449 u32 status = 0;
450 struct boardobjgrp *pboardobjgrp = NULL; 450 struct boardobjgrp *pboardobjgrp = NULL;
451 451
452 gk20a_dbg_info(""); 452 nvgpu_log_info(g, " ");
453 453
454 status = boardobjgrpconstruct_e32(g, 454 status = boardobjgrpconstruct_e32(g,
455 &g->perf_pmu.volt.volt_policy_metadata.volt_policies); 455 &g->perf_pmu.volt.volt_policy_metadata.volt_policies);
@@ -496,6 +496,6 @@ u32 volt_policy_sw_setup(struct gk20a *g)
496 } 496 }
497 497
498done: 498done:
499 gk20a_dbg_info(" done status %x", status); 499 nvgpu_log_info(g, " done status %x", status);
500 return status; 500 return status;
501} 501}
diff --git a/drivers/gpu/nvgpu/volt/volt_rail.c b/drivers/gpu/nvgpu/volt/volt_rail.c
index 3461653f..6a7dcdbe 100644
--- a/drivers/gpu/nvgpu/volt/volt_rail.c
+++ b/drivers/gpu/nvgpu/volt/volt_rail.c
@@ -135,7 +135,7 @@ static u32 volt_rail_init_pmudata_super(struct gk20a *g,
135 struct nv_pmu_volt_volt_rail_boardobj_set *rail_pmu_data; 135 struct nv_pmu_volt_volt_rail_boardobj_set *rail_pmu_data;
136 u32 i; 136 u32 i;
137 137
138 gk20a_dbg_info(""); 138 nvgpu_log_info(g, " ");
139 139
140 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 140 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
141 if (status) 141 if (status)
@@ -170,7 +170,7 @@ static u32 volt_rail_init_pmudata_super(struct gk20a *g,
170 nvgpu_err(g, 170 nvgpu_err(g,
171 "Failed to export BOARDOBJGRPMASK of VOLTAGE_DEVICEs"); 171 "Failed to export BOARDOBJGRPMASK of VOLTAGE_DEVICEs");
172 172
173 gk20a_dbg_info("Done"); 173 nvgpu_log_info(g, "Done");
174 174
175 return status; 175 return status;
176} 176}
@@ -182,7 +182,7 @@ static struct voltage_rail *construct_volt_rail(struct gk20a *g, void *pargs)
182 struct voltage_rail *board_obj_volt_rail_ptr = NULL; 182 struct voltage_rail *board_obj_volt_rail_ptr = NULL;
183 u32 status; 183 u32 status;
184 184
185 gk20a_dbg_info(""); 185 nvgpu_log_info(g, " ");
186 status = boardobj_construct_super(g, &board_obj_ptr, 186 status = boardobj_construct_super(g, &board_obj_ptr,
187 sizeof(struct voltage_rail), pargs); 187 sizeof(struct voltage_rail), pargs);
188 if (status) 188 if (status)
@@ -211,7 +211,7 @@ static struct voltage_rail *construct_volt_rail(struct gk20a *g, void *pargs)
211 board_obj_volt_rail_ptr->volt_scale_exp_pwr_equ_idx = 211 board_obj_volt_rail_ptr->volt_scale_exp_pwr_equ_idx =
212 ptemp_rail->volt_scale_exp_pwr_equ_idx; 212 ptemp_rail->volt_scale_exp_pwr_equ_idx;
213 213
214 gk20a_dbg_info("Done"); 214 nvgpu_log_info(g, "Done");
215 215
216 return (struct voltage_rail *)board_obj_ptr; 216 return (struct voltage_rail *)board_obj_ptr;
217} 217}
@@ -242,7 +242,7 @@ u32 volt_rail_pmu_setup(struct gk20a *g)
242 u32 status; 242 u32 status;
243 struct boardobjgrp *pboardobjgrp = NULL; 243 struct boardobjgrp *pboardobjgrp = NULL;
244 244
245 gk20a_dbg_info(""); 245 nvgpu_log_info(g, " ");
246 246
247 pboardobjgrp = &g->perf_pmu.volt.volt_rail_metadata.volt_rails.super; 247 pboardobjgrp = &g->perf_pmu.volt.volt_rail_metadata.volt_rails.super;
248 248
@@ -251,7 +251,7 @@ u32 volt_rail_pmu_setup(struct gk20a *g)
251 251
252 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 252 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp);
253 253
254 gk20a_dbg_info("Done"); 254 nvgpu_log_info(g, "Done");
255 return status; 255 return status;
256} 256}
257 257
@@ -366,7 +366,7 @@ static u32 _volt_rail_devgrp_pmudata_instget(struct gk20a *g,
366 (struct nv_pmu_volt_volt_rail_boardobj_grp_set *) 366 (struct nv_pmu_volt_volt_rail_boardobj_grp_set *)
367 pmuboardobjgrp; 367 pmuboardobjgrp;
368 368
369 gk20a_dbg_info(""); 369 nvgpu_log_info(g, " ");
370 370
371 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 371 /*check whether pmuboardobjgrp has a valid boardobj in index*/
372 if (((u32)BIT(idx) & 372 if (((u32)BIT(idx) &
@@ -375,7 +375,7 @@ static u32 _volt_rail_devgrp_pmudata_instget(struct gk20a *g,
375 375
376 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 376 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
377 &pgrp_set->objects[idx].data.board_obj; 377 &pgrp_set->objects[idx].data.board_obj;
378 gk20a_dbg_info(" Done"); 378 nvgpu_log_info(g, " Done");
379 return 0; 379 return 0;
380} 380}
381 381
@@ -404,7 +404,7 @@ u32 volt_rail_sw_setup(struct gk20a *g)
404 struct voltage_rail *pvolt_rail; 404 struct voltage_rail *pvolt_rail;
405 u8 i; 405 u8 i;
406 406
407 gk20a_dbg_info(""); 407 nvgpu_log_info(g, " ");
408 408
409 status = boardobjgrpconstruct_e32(g, 409 status = boardobjgrpconstruct_e32(g,
410 &g->perf_pmu.volt.volt_rail_metadata.volt_rails); 410 &g->perf_pmu.volt.volt_rail_metadata.volt_rails);
@@ -465,6 +465,6 @@ u32 volt_rail_sw_setup(struct gk20a *g)
465 } 465 }
466 466
467done: 467done:
468 gk20a_dbg_info(" done status %x", status); 468 nvgpu_log_info(g, " done status %x", status);
469 return status; 469 return status;
470} 470}
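
Note: for readers experimenting with the new calling convention outside the driver tree, here is a minimal stand-alone sketch. The nvgpu_log_fn()/nvgpu_log_info() definitions below are hypothetical printf-based stand-ins for illustration only; the real macros live in nvgpu's logging headers and gate output on per-device debug masks, which this sketch does not reproduce. The only point shown is that every call site now supplies a struct gk20a pointer.

	#include <stdio.h>

	/* hypothetical stand-in type and macros, not the nvgpu originals */
	struct gk20a {
		const char *name;
	};

	#define nvgpu_log_fn(g, fmt, ...) \
		printf("[%s] %s:" fmt "\n", (g)->name, __func__, ##__VA_ARGS__)

	#define nvgpu_log_info(g, fmt, ...) \
		printf("[%s] " fmt "\n", (g)->name, ##__VA_ARGS__)

	/* mimics a converted call site: function entry trace plus an info line */
	static void example_call_site(struct gk20a *g)
	{
		nvgpu_log_fn(g, " ");
		nvgpu_log_info(g, "cb_buffer_size : %d", 4096);
	}

	int main(void)
	{
		struct gk20a gpu = { .name = "vgpu" };

		example_call_site(&gpu);
		return 0;
	}
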