From dd739fcb039d51606e9a5454ec0aab17bcb01965 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Wed, 18 Apr 2018 19:39:46 -0700
Subject: gpu: nvgpu: Remove gk20a_dbg* functions

Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are
intentionally left in place because they are still used from other
repositories.

Because the new functions do not work without a pointer to struct
gk20a, and piping one through just for logging is excessive, some log
messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/boardobj/boardobj.c | 14 +-
 drivers/gpu/nvgpu/boardobj/boardobjgrp.c | 52 +--
 drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c | 6 +-
 drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c | 6 +-
 drivers/gpu/nvgpu/clk/clk.c | 4 +-
 drivers/gpu/nvgpu/clk/clk_domain.c | 56 ++--
 drivers/gpu/nvgpu/clk/clk_fll.c | 28 +-
 drivers/gpu/nvgpu/clk/clk_freq_controller.c | 14 +-
 drivers/gpu/nvgpu/clk/clk_prog.c | 46 +--
 drivers/gpu/nvgpu/clk/clk_vf_point.c | 26 +-
 drivers/gpu/nvgpu/clk/clk_vin.c | 36 +-
 drivers/gpu/nvgpu/common/as.c | 19 +-
 drivers/gpu/nvgpu/common/linux/cde.c | 54 +--
 drivers/gpu/nvgpu/common/linux/cde_gp10b.c | 8 +-
 drivers/gpu/nvgpu/common/linux/channel.c | 10 +-
 drivers/gpu/nvgpu/common/linux/ctxsw_trace.c | 54 +--
 drivers/gpu/nvgpu/common/linux/debug.c | 4 -
 drivers/gpu/nvgpu/common/linux/debug_fifo.c | 7 +-
 drivers/gpu/nvgpu/common/linux/driver_common.c | 2 +-
 drivers/gpu/nvgpu/common/linux/intr.c | 4 +-
 drivers/gpu/nvgpu/common/linux/io.c | 22 +-
 drivers/gpu/nvgpu/common/linux/io_usermode.c | 2 +-
 drivers/gpu/nvgpu/common/linux/ioctl.c | 5 +-
 drivers/gpu/nvgpu/common/linux/ioctl_as.c | 33 +-
 drivers/gpu/nvgpu/common/linux/ioctl_channel.c | 22 +-
 drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c | 7 +-
 drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c | 42 +--
 drivers/gpu/nvgpu/common/linux/ioctl_dbg.c | 100 +++---
 drivers/gpu/nvgpu/common/linux/ioctl_tsg.c | 21 +-
 drivers/gpu/nvgpu/common/linux/log.c | 2 -
 drivers/gpu/nvgpu/common/linux/module.c | 17 +-
 drivers/gpu/nvgpu/common/linux/nvgpu_mem.c | 10 +-
 .../gpu/nvgpu/common/linux/platform_gk20a_tegra.c | 10 +-
 .../gpu/nvgpu/common/linux/platform_gp10b_tegra.c | 11 +-
 drivers/gpu/nvgpu/common/linux/sched.c | 57 ++--
 drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c | 8 +-
 drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c | 11 +-
 .../gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c | 2 +-
 drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c | 17 +-
 drivers/gpu/nvgpu/common/linux/vm.c | 3 +-
 drivers/gpu/nvgpu/common/mm/vidmem.c | 2 +-
 drivers/gpu/nvgpu/common/vbios/bios.c | 54 +--
 drivers/gpu/nvgpu/gk20a/ce2_gk20a.c | 14 +-
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 46 +--
 drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c | 6 +-
 drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 17 +-
 drivers/gpu/nvgpu/gk20a/fb_gk20a.c | 6 +-
 drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c | 36 +-
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 150 ++++-----
 drivers/gpu/nvgpu/gk20a/gk20a.c | 12 +-
 drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c | 82 +++--
 drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c | 12 +-
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 364 ++++++++++-----------
 drivers/gpu/nvgpu/gk20a/hal.c | 2 +-
 drivers/gpu/nvgpu/gk20a/mc_gk20a.c | 8 +-
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 28 +-
 drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 52 +--
 drivers/gpu/nvgpu/gk20a/pramin_gk20a.c | 4 +-
 drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c | 8 +-
 drivers/gpu/nvgpu/gk20a/regops_gk20a.c | 16 +-
 drivers/gpu/nvgpu/gk20a/therm_gk20a.c | 8 +-
 drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 18 +-
 drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 132 ++++----
 drivers/gpu/nvgpu/gm20b/bus_gm20b.c | 4 +-
 drivers/gpu/nvgpu/gm20b/clk_gm20b.c | 41 +--
 drivers/gpu/nvgpu/gm20b/fb_gm20b.c | 2 +-
 drivers/gpu/nvgpu/gm20b/fifo_gm20b.c | 2 +-
 drivers/gpu/nvgpu/gm20b/gr_gm20b.c | 48 +--
 drivers/gpu/nvgpu/gm20b/ltc_gm20b.c | 16 +-
 drivers/gpu/nvgpu/gm20b/mm_gm20b.c | 8 +-
 drivers/gpu/nvgpu/gm20b/pmu_gm20b.c | 40 +--
 drivers/gpu/nvgpu/gm20b/therm_gm20b.c | 4 +-
 drivers/gpu/nvgpu/gp106/acr_gp106.c | 98 +++---
 drivers/gpu/nvgpu/gp106/bios_gp106.c | 20 +-
 drivers/gpu/nvgpu/gp106/clk_gp106.c | 7 +-
 drivers/gpu/nvgpu/gp106/fb_gp106.c | 4 +-
 drivers/gpu/nvgpu/gp106/gr_gp106.c | 10 +-
 drivers/gpu/nvgpu/gp106/hal_gp106.c | 4 +-
 drivers/gpu/nvgpu/gp106/mclk_gp106.c | 12 +-
 drivers/gpu/nvgpu/gp106/pmu_gp106.c | 18 +-
 drivers/gpu/nvgpu/gp106/pmu_gp106.h | 6 +-
 drivers/gpu/nvgpu/gp106/sec2_gp106.c | 12 +-
 drivers/gpu/nvgpu/gp106/therm_gp106.c | 6 +-
 drivers/gpu/nvgpu/gp106/xve_gp106.c | 60 ++--
 drivers/gpu/nvgpu/gp106/xve_gp106.h | 10 +-
 drivers/gpu/nvgpu/gp10b/ce_gp10b.c | 10 +-
 drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c | 4 +-
 drivers/gpu/nvgpu/gp10b/fifo_gp10b.c | 25 +-
 drivers/gpu/nvgpu/gp10b/gr_gp10b.c | 123 +++----
 drivers/gpu/nvgpu/gp10b/ltc_gp10b.c | 14 +-
 drivers/gpu/nvgpu/gp10b/mc_gp10b.c | 4 +-
 drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 16 +-
 drivers/gpu/nvgpu/gp10b/pmu_gp10b.c | 26 +-
 drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c | 14 +-
 drivers/gpu/nvgpu/gp10b/therm_gp10b.c | 8 +-
 drivers/gpu/nvgpu/gv11b/acr_gv11b.c | 38 +--
 drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c | 6 +-
 drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c | 4 +-
 drivers/gpu/nvgpu/gv11b/fb_gv11b.c | 2 +-
 drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 61 ++--
 drivers/gpu/nvgpu/gv11b/gr_gv11b.c | 132 ++++----
 drivers/gpu/nvgpu/gv11b/ltc_gv11b.c | 4 +-
 drivers/gpu/nvgpu/gv11b/mm_gv11b.c | 2 +-
 drivers/gpu/nvgpu/gv11b/pmu_gv11b.c | 22 +-
 drivers/gpu/nvgpu/gv11b/therm_gv11b.c | 4 +-
 drivers/gpu/nvgpu/lpwr/lpwr.c | 18 +-
 drivers/gpu/nvgpu/perf/perf.c | 6 +-
 drivers/gpu/nvgpu/perf/vfe_equ.c | 30 +-
 drivers/gpu/nvgpu/perf/vfe_var.c | 58 ++--
 drivers/gpu/nvgpu/pmgr/pwrdev.c | 14 +-
 drivers/gpu/nvgpu/pmgr/pwrmonitor.c | 18 +-
 drivers/gpu/nvgpu/pmgr/pwrpolicy.c | 12 +-
 drivers/gpu/nvgpu/pstate/pstate.c | 16 +-
 drivers/gpu/nvgpu/therm/thrmchannel.c | 14 +-
 drivers/gpu/nvgpu/therm/thrmdev.c | 14 +-
 drivers/gpu/nvgpu/vgpu/ce2_vgpu.c | 2 +-
 drivers/gpu/nvgpu/vgpu/dbg_vgpu.c | 6 +-
 drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | 70 ++--
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c | 14 +-
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c | 2 +-
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 82 ++---
 drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c | 2 +-
 drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c | 3 +-
 drivers/gpu/nvgpu/vgpu/ltc_vgpu.c | 6 +-
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c | 23 +-
 drivers/gpu/nvgpu/vgpu/tsg_vgpu.c | 15 +-
 drivers/gpu/nvgpu/vgpu/vgpu.c | 12 +-
 drivers/gpu/nvgpu/volt/volt_dev.c | 12 +-
 drivers/gpu/nvgpu/volt/volt_pmu.c | 2 +-
 drivers/gpu/nvgpu/volt/volt_policy.c | 12 +-
 drivers/gpu/nvgpu/volt/volt_rail.c | 20 +-
 131 files changed, 1726 insertions(+), 1637 deletions(-)

diff --git a/drivers/gpu/nvgpu/boardobj/boardobj.c b/drivers/gpu/nvgpu/boardobj/boardobj.c
index f9be6981..f38c7c4a 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobj.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobj.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION.
All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,7 +32,7 @@ u32 boardobj_construct_super(struct gk20a *g, struct boardobj **ppboardobj, struct boardobj *pboardobj = NULL; struct boardobj *devtmp = (struct boardobj *)args; - gk20a_dbg_info(" "); + nvgpu_log_info(g, " "); if (devtmp == NULL) return -EINVAL; @@ -61,7 +61,9 @@ u32 boardobj_construct_super(struct gk20a *g, struct boardobj **ppboardobj, u32 boardobj_destruct_super(struct boardobj *pboardobj) { - gk20a_dbg_info(""); + struct gk20a *g = pboardobj->g; + + nvgpu_log_info(g, " "); if (pboardobj == NULL) return -EINVAL; @@ -75,7 +77,7 @@ u32 boardobj_destruct_super(struct boardobj *pboardobj) bool boardobj_implements_super(struct gk20a *g, struct boardobj *pboardobj, u8 type) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return (0 != (pboardobj->type_mask & BIT(type))); } @@ -83,12 +85,12 @@ bool boardobj_implements_super(struct gk20a *g, struct boardobj *pboardobj, u32 boardobj_pmudatainit_super(struct gk20a *g, struct boardobj *pboardobj, struct nv_pmu_boardobj *pmudata) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobj == NULL) return -EINVAL; if (pmudata == NULL) return -EINVAL; pmudata->type = pboardobj->type; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c index 64c02a84..7343f66c 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c +++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c @@ -50,7 +50,7 @@ struct boardobjgrp_pmucmdhandler_params { u32 boardobjgrp_construct_super(struct gk20a *g, struct boardobjgrp *pboardobjgrp) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; @@ -101,7 +101,9 @@ u32 boardobjgrp_construct_super(struct gk20a *g, struct boardobjgrp *pboardobjgr u32 boardobjgrp_destruct_impl(struct boardobjgrp *pboardobjgrp) { - gk20a_dbg_info(""); + struct gk20a *g = pboardobjgrp->g; + + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; @@ -120,7 +122,7 @@ u32 boardobjgrp_destruct_super(struct boardobjgrp *pboardobjgrp) u32 stat; u8 index; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp->mask == NULL) return -EINVAL; @@ -165,7 +167,7 @@ u32 boardobjgrp_pmucmd_construct_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp, struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid, u8 hdrsize, u8 entrysize, u16 fbsize, u32 ss_offset, u8 rpc_func_id) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /* Copy the parameters into the CMD*/ cmd->id = id; @@ -234,7 +236,7 @@ u32 boardobjgrp_pmucmd_pmuinithandle_impl(struct gk20a *g, u32 status = 0; struct nvgpu_mem *sysmem_desc = &pcmd->surf.sysmem_desc; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid(g, pboardobjgrp, pcmd)) @@ -259,7 +261,7 @@ u32 boardobjgrp_pmuinithandle_impl(struct gk20a *g, { u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrp_pmucmd_pmuinithandle_impl(g, pboardobjgrp, &pboardobjgrp->pmu.set); @@ -295,7 +297,7 @@ u32 boardobjgrp_pmuhdrdatainit_super(struct gk20a *g, struct boardobjgrp *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu, struct boardobjgrpmask *mask) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; @@ -306,7 +308,7 @@ u32 boardobjgrp_pmuhdrdatainit_super(struct gk20a 
*g, struct boardobjgrp pboardobjgrppmu->obj_slots = BOARDOBJGRP_PMU_SLOTS_GET(pboardobjgrp); pboardobjgrppmu->flags = 0; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -314,7 +316,7 @@ static u32 boardobjgrp_pmudatainstget_stub(struct gk20a *g, struct nv_pmu_boardobjgrp *boardobjgrppmu, struct nv_pmu_boardobj **ppboardobjpmudata, u8 idx) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return -EINVAL; } @@ -323,7 +325,7 @@ static u32 boardobjgrp_pmustatusinstget_stub(struct gk20a *g, void *pboardobjgrppmu, struct nv_pmu_boardobj_query **ppBoardobjpmustatus, u8 idx) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return -EINVAL; } @@ -336,7 +338,7 @@ u32 boardobjgrp_pmudatainit_legacy(struct gk20a *g, struct nv_pmu_boardobj *ppmudata = NULL; u8 index; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; @@ -374,7 +376,7 @@ u32 boardobjgrp_pmudatainit_legacy(struct gk20a *g, BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END boardobjgrppmudatainit_legacy_done: - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } @@ -386,7 +388,7 @@ u32 boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp struct nv_pmu_boardobj *ppmudata = NULL; u8 index; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; @@ -420,7 +422,7 @@ u32 boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp } boardobjgrppmudatainit_super_done: - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } @@ -452,7 +454,7 @@ u32 boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp) struct boardobjgrp_pmu_cmd *pcmd = (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (check_boardobjgrp_param(g, pboardobjgrp)) return -EINVAL; @@ -511,7 +513,7 @@ u32 boardobjgrp_pmuset_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjgrp struct boardobjgrp_pmu_cmd *pcmd = (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (check_boardobjgrp_param(g, pboardobjgrp)) return -EINVAL; @@ -568,7 +570,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp, struct boardobjgrp_pmu_cmd *pset = (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (check_boardobjgrp_param(g, pboardobjgrp)) return -EINVAL; @@ -635,7 +637,7 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg struct boardobjgrp_pmu_cmd *pcmd = (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.getstatus); - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (check_boardobjgrp_param(g, pboardobjgrp)) return -EINVAL; @@ -690,8 +692,9 @@ static u32 boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp, struct boardobj *pboardobj, u8 index) { + struct gk20a *g = pboardobjgrp->g; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; @@ -719,7 +722,7 @@ boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp, pboardobjgrp->objmask |= BIT(index); - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return boardobjgrpmask_bitset(pboardobjgrp->mask, index); } @@ -789,8 +792,9 @@ static u32 boardobjgrp_objremoveanddestroy_final( { u32 status = 0; u32 stat; + struct gk20a *g = pboardobjgrp->g; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (!boardobjgrp_idxisvalid(pboardobjgrp, index)) return -EINVAL; @@ -824,8 +828,6 @@ void boardobjgrpe32hdrset(struct 
nv_pmu_boardobjgrp *hdr, u32 objmask) { u32 slots = objmask; - gk20a_dbg_info(""); - HIGHESTBITIDX_32(slots); slots++; @@ -844,7 +846,7 @@ static void boardobjgrp_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, struct boardobjgrp *pboardobjgrp = phandlerparams->pboardobjgrp; struct boardobjgrp_pmu_cmd *pgrpcmd = phandlerparams->pcmd; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pgrpmsg = &msg->msg.boardobj.grp; @@ -895,7 +897,7 @@ static u32 boardobjgrp_pmucmdsend(struct gk20a *g, u32 seqdesc; u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); memset(&payload, 0, sizeof(payload)); memset(&handlerparams, 0, sizeof(handlerparams)); diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c index 7aabb89e..1f2cd836 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c +++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.c @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. +* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -33,7 +33,7 @@ u32 boardobjgrpconstruct_e255(struct gk20a *g, u32 status = 0; u8 objslots; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); objslots = 255; status = boardobjgrpmask_e255_init(&pboardobjgrp_e255->mask, NULL); @@ -65,7 +65,7 @@ u32 boardobjgrp_pmuhdrdatainit_e255(struct gk20a *g, (struct nv_pmu_boardobjgrp_e255 *)pboardobjgrppmu; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c index e793e34c..6d4b4520 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c +++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.c @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. +* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -34,7 +34,7 @@ u32 boardobjgrpconstruct_e32(struct gk20a *g, u32 status; u8 objslots; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); objslots = 32; status = boardobjgrpmask_e32_init(&pboardobjgrp_e32->mask, NULL); @@ -65,7 +65,7 @@ u32 boardobjgrp_pmuhdrdatainit_e32(struct gk20a *g, (struct nv_pmu_boardobjgrp_e32 *)pboardobjgrppmu; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pboardobjgrp == NULL) return -EINVAL; diff --git a/drivers/gpu/nvgpu/clk/clk.c b/drivers/gpu/nvgpu/clk/clk.c index ecc352b1..a8d99bbb 100644 --- a/drivers/gpu/nvgpu/clk/clk.c +++ b/drivers/gpu/nvgpu/clk/clk.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,7 +43,7 @@ static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, struct clkrpc_pmucmdhandler_params *phandlerparams = (struct clkrpc_pmucmdhandler_params *)param; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) { nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x", diff --git a/drivers/gpu/nvgpu/clk/clk_domain.c b/drivers/gpu/nvgpu/clk/clk_domain.c index 1d47d2d5..f306cf56 100644 --- a/drivers/gpu/nvgpu/clk/clk_domain.c +++ b/drivers/gpu/nvgpu/clk/clk_domain.c @@ -153,7 +153,7 @@ static u32 _clk_domains_pmudata_instget(struct gk20a *g, (struct nv_pmu_clk_clk_domain_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -162,7 +162,7 @@ static u32 _clk_domains_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -176,7 +176,7 @@ u32 clk_domain_sw_setup(struct gk20a *g) struct clk_domain_3x_slave *pdomain_slave; u8 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e32(g, &g->clk_pmu.clk_domainobjs.super); if (status) { @@ -255,7 +255,7 @@ u32 clk_domain_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -264,7 +264,7 @@ u32 clk_domain_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->clk_pmu.clk_domainobjs.super.super; @@ -273,7 +273,7 @@ u32 clk_domain_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -298,7 +298,7 @@ static u32 devinit_get_clocks_table(struct gk20a *g, struct clk_domain_3x_slave v3x_slave; } clk_domain_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); clocks_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.clock_token, CLOCKS_TABLE); @@ -459,7 +459,7 @@ static u32 devinit_get_clocks_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -467,7 +467,7 @@ static u32 clkdomainclkproglink_not_supported(struct gk20a *g, struct clk_pmupstate *pclk, struct clk_domain *pdomain) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return -EINVAL; } @@ -480,7 +480,7 @@ static int clkdomainvfsearch_stub( u8 rail) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return -EINVAL; } @@ -492,7 +492,7 @@ static u32 clkdomaingetfpoints_stub( u16 *pfreqpointsinmhz, u8 rail) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return -EINVAL; } @@ -541,7 +541,7 @@ static u32 _clk_domain_pmudatainit_3x(struct gk20a *g, struct clk_domain_3x *pclk_domain_3x; struct nv_pmu_clk_clk_domain_3x_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = clk_domain_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -592,7 +592,7 @@ static u32 clkdomainclkproglink_3x_prog(struct gk20a *g, struct clk_prog *pprog = NULL; u8 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); for (i = p3xprog->clk_prog_idx_first; i <= p3xprog->clk_prog_idx_last; @@ -616,7 +616,7 @@ static int 
clkdomaingetslaveclk(struct gk20a *g, u8 slaveidx; struct clk_domain_3x_master *p3xmaster; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pclkmhz == NULL) return -EINVAL; @@ -657,7 +657,7 @@ static int clkdomainvfsearch(struct gk20a *g, u16 bestclkmhz; u32 bestvoltuv; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if ((pclkmhz == NULL) || (pvoltuv == NULL)) return -EINVAL; @@ -719,7 +719,7 @@ static int clkdomainvfsearch(struct gk20a *g, goto done; } done: - gk20a_dbg_info("done status %x", status); + nvgpu_log_info(g, "done status %x", status); return status; } @@ -744,7 +744,7 @@ static u32 clkdomaingetfpoints u16 *freqpointsdata; u8 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (pfpointscount == NULL) return -EINVAL; @@ -783,7 +783,7 @@ static u32 clkdomaingetfpoints *pfpointscount = totalcount; done: - gk20a_dbg_info("done status %x", status); + nvgpu_log_info(g, "done status %x", status); return status; } @@ -796,7 +796,7 @@ static u32 _clk_domain_pmudatainit_3x_prog(struct gk20a *g, struct nv_pmu_clk_clk_domain_3x_prog_boardobj_set *pset; struct clk_domains *pdomains = &(g->clk_pmu.clk_domainobjs); - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata); if (status != 0) @@ -876,7 +876,7 @@ static u32 _clk_domain_pmudatainit_3x_slave(struct gk20a *g, struct clk_domain_3x_slave *pclk_domain_3x_slave; struct nv_pmu_clk_clk_domain_3x_slave_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata); if (status != 0) @@ -935,7 +935,7 @@ static u32 clkdomainclkproglink_3x_master(struct gk20a *g, u16 freq_max_last_mhz = 0; u8 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = clkdomainclkproglink_3x_prog(g, pclk, pdomain); if (status) @@ -961,7 +961,7 @@ static u32 clkdomainclkproglink_3x_master(struct gk20a *g, goto done; } done: - gk20a_dbg_info("done status %x", status); + nvgpu_log_info(g, "done status %x", status); return status; } @@ -973,7 +973,7 @@ static u32 _clk_domain_pmudatainit_3x_master(struct gk20a *g, struct clk_domain_3x_master *pclk_domain_3x_master; struct nv_pmu_clk_clk_domain_3x_master_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata); if (status != 0) @@ -1021,7 +1021,7 @@ static u32 clkdomainclkproglink_fixed(struct gk20a *g, struct clk_pmupstate *pclk, struct clk_domain *pdomain) { - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return 0; } @@ -1033,7 +1033,7 @@ static u32 _clk_domain_pmudatainit_3x_fixed(struct gk20a *g, struct clk_domain_3x_fixed *pclk_domain_3x_fixed; struct nv_pmu_clk_clk_domain_3x_fixed_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata); if (status != 0) @@ -1085,7 +1085,7 @@ static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs) struct boardobj *board_obj_ptr = NULL; u32 status; - gk20a_dbg_info(" %d", BOARDOBJ_GET_TYPE(pargs)); + nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs)); switch (BOARDOBJ_GET_TYPE(pargs)) { case CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED: status = clk_domain_construct_3x_fixed(g, &board_obj_ptr, @@ -1109,7 +1109,7 @@ static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs) if (status) return NULL; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return (struct clk_domain *)board_obj_ptr; } @@ -1122,7 +1122,7 @@ static u32 clk_domain_pmudatainit_super(struct 
gk20a *g, struct clk_domain *pclk_domain; struct nv_pmu_clk_clk_domain_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) diff --git a/drivers/gpu/nvgpu/clk/clk_fll.c b/drivers/gpu/nvgpu/clk/clk_fll.c index 15d386d5..87222b90 100644 --- a/drivers/gpu/nvgpu/clk/clk_fll.c +++ b/drivers/gpu/nvgpu/clk/clk_fll.c @@ -50,7 +50,7 @@ static u32 _clk_fll_devgrp_pmudatainit_super(struct gk20a *g, pboardobjgrp; u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); if (status) { @@ -67,7 +67,7 @@ static u32 _clk_fll_devgrp_pmudatainit_super(struct gk20a *g, pfll_objs->lut_prog_master_mask.super.bitcount, &pset->lut_prog_master_mask.super); - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } @@ -80,7 +80,7 @@ static u32 _clk_fll_devgrp_pmudata_instget(struct gk20a *g, (struct nv_pmu_clk_clk_fll_device_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -89,7 +89,7 @@ static u32 _clk_fll_devgrp_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -123,7 +123,7 @@ u32 clk_fll_sw_setup(struct gk20a *g) u8 i; u8 j; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_fllobjs.super); if (status) { @@ -202,7 +202,7 @@ u32 clk_fll_sw_setup(struct gk20a *g) } } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -211,7 +211,7 @@ u32 clk_fll_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->clk_pmu.avfs_fllobjs.super.super; @@ -220,7 +220,7 @@ u32 clk_fll_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -241,7 +241,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g, u32 vbios_domain = NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP; struct avfsvinobjs *pvinobjs = &g->clk_pmu.avfs_vinobjs; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); fll_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.clock_token, FLL_TABLE); @@ -350,7 +350,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -399,7 +399,7 @@ static struct fll_device *construct_fll_device(struct gk20a *g, struct fll_device *board_obj_fll_ptr = NULL; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_construct_super(g, &board_obj_ptr, sizeof(struct fll_device), pargs); if (status) @@ -429,7 +429,7 @@ static struct fll_device *construct_fll_device(struct gk20a *g, boardobjgrpmask_e32_init( &board_obj_fll_ptr->lut_prog_broadcast_slave_mask, NULL); - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return (struct fll_device *)board_obj_ptr; } @@ -442,7 +442,7 @@ static u32 fll_device_init_pmudata_super(struct gk20a *g, struct fll_device *pfll_dev; struct nv_pmu_clk_clk_fll_device_boardobj_set *perf_pmu_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); if 
(status != 0) @@ -473,7 +473,7 @@ static u32 fll_device_init_pmudata_super(struct gk20a *g, pfll_dev->lut_prog_broadcast_slave_mask.super.bitcount, &perf_pmu_data->lut_prog_broadcast_slave_mask.super); - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } diff --git a/drivers/gpu/nvgpu/clk/clk_freq_controller.c b/drivers/gpu/nvgpu/clk/clk_freq_controller.c index fce177a7..9091f71b 100644 --- a/drivers/gpu/nvgpu/clk/clk_freq_controller.c +++ b/drivers/gpu/nvgpu/clk/clk_freq_controller.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -321,7 +321,7 @@ u32 clk_freq_controller_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->clk_pmu.clk_freq_controllers.super.super; @@ -330,7 +330,7 @@ u32 clk_freq_controller_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -343,7 +343,7 @@ static u32 _clk_freq_controller_devgrp_pmudata_instget(struct gk20a *g, (struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -352,7 +352,7 @@ static u32 _clk_freq_controller_devgrp_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -392,7 +392,7 @@ u32 clk_freq_controller_sw_setup(struct gk20a *g) u8 i; u8 j; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers; status = boardobjgrpconstruct_e32(g, &pclk_freq_controllers->super); @@ -447,6 +447,6 @@ u32 clk_freq_controller_sw_setup(struct gk20a *g) freq_ctrl_load_mask.super, i); } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/clk/clk_prog.c b/drivers/gpu/nvgpu/clk/clk_prog.c index 6b5315b4..8926b9f5 100644 --- a/drivers/gpu/nvgpu/clk/clk_prog.c +++ b/drivers/gpu/nvgpu/clk/clk_prog.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -72,7 +72,7 @@ static u32 _clk_progs_pmudata_instget(struct gk20a *g, struct nv_pmu_clk_clk_prog_boardobj_grp_set *pgrp_set = (struct nv_pmu_clk_clk_prog_boardobj_grp_set *)pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -81,7 +81,7 @@ static u32 _clk_progs_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -91,7 +91,7 @@ u32 clk_prog_sw_setup(struct gk20a *g) struct boardobjgrp *pboardobjgrp = NULL; struct clk_progs *pclkprogobjs; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_progobjs.super); if (status) { @@ -130,7 +130,7 @@ u32 clk_prog_sw_setup(struct gk20a *g) done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -139,7 +139,7 @@ u32 clk_prog_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->clk_pmu.clk_progobjs.super.super; @@ -148,7 +148,7 @@ u32 clk_prog_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -186,7 +186,7 @@ static u32 devinit_get_clk_prog_table(struct gk20a *g, struct clk_prog_1x_master_table v1x_master_table; } prog_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); clkprogs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.clock_token, CLOCK_PROGRAMMING_TABLE); @@ -372,7 +372,7 @@ static u32 devinit_get_clk_prog_table(struct gk20a *g, } } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -382,7 +382,7 @@ static u32 _clk_prog_pmudatainit_super(struct gk20a *g, { u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); return status; @@ -396,7 +396,7 @@ static u32 _clk_prog_pmudatainit_1x(struct gk20a *g, struct clk_prog_1x *pclk_prog_1x; struct nv_pmu_clk_clk_prog_1x_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_prog_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -424,7 +424,7 @@ static u32 _clk_prog_pmudatainit_1x_master(struct gk20a *g, u32 vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * g->clk_pmu.clk_progobjs.vf_entry_count; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_prog_pmudatainit_1x(g, board_obj_ptr, ppmudata); @@ -455,7 +455,7 @@ static u32 _clk_prog_pmudatainit_1x_master_ratio(struct gk20a *g, u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * g->clk_pmu.clk_progobjs.slave_entry_count; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata); if (status != 0) @@ -483,7 +483,7 @@ static u32 _clk_prog_pmudatainit_1x_master_table(struct gk20a *g, u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * g->clk_pmu.clk_progobjs.slave_entry_count; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata); if (status != 0) @@ -510,7 +510,7 @@ static 
u32 _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g, struct clk_vf_point *p_vf_point; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); p_vf_point = construct_clk_vf_point(g, (void *)p_vf_point_tmp); if (p_vf_point == NULL) { @@ -527,7 +527,7 @@ static u32 _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g, p_vf_rail->vf_point_idx_last = (*p_vf_point_idx)++; done: - gk20a_dbg_info("done status %x", status); + nvgpu_log_info(g, "done status %x", status); return status; } @@ -561,7 +561,7 @@ static u32 clk_prog_construct_1x(struct gk20a *g, (struct clk_prog_1x *)pargs; u32 status = 0; - gk20a_dbg_info(" "); + nvgpu_log_info(g, " "); ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X); status = clk_prog_construct_super(g, ppboardobj, size, pargs); if (status) @@ -592,7 +592,7 @@ static u32 clk_prog_construct_1x_master(struct gk20a *g, g->clk_pmu.clk_progobjs.vf_entry_count; u8 railidx; - gk20a_dbg_info(" type - %x", BOARDOBJ_GET_TYPE(pargs)); + nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs)); ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X_MASTER); status = clk_prog_construct_1x(g, ppboardobj, size, pargs); @@ -686,7 +686,7 @@ static u32 clk_prog_construct_1x_master_table(struct gk20a *g, u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * g->clk_pmu.clk_progobjs.slave_entry_count; - gk20a_dbg_info("type - %x", BOARDOBJ_GET_TYPE(pargs)); + nvgpu_log_info(g, "type - %x", BOARDOBJ_GET_TYPE(pargs)); if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE) return -EINVAL; @@ -727,7 +727,7 @@ static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs) struct boardobj *board_obj_ptr = NULL; u32 status; - gk20a_dbg_info(" type - %x", BOARDOBJ_GET_TYPE(pargs)); + nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs)); switch (BOARDOBJ_GET_TYPE(pargs)) { case CTRL_CLK_CLK_PROG_TYPE_1X: status = clk_prog_construct_1x(g, &board_obj_ptr, @@ -754,7 +754,7 @@ static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs) return NULL; } - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return (struct clk_prog *)board_obj_ptr; } @@ -777,7 +777,7 @@ static u32 vfflatten_prog_1x_master(struct gk20a *g, u8 vf_point_idx; u8 vf_rail_idx; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); memset(&vf_point_data, 0x0, sizeof(vf_point_data)); vf_point_idx = BOARDOBJGRP_NEXT_EMPTY_IDX( @@ -851,7 +851,7 @@ static u32 vfflatten_prog_1x_master(struct gk20a *g, *pfreqmaxlastmhz = p1xmaster->super.freq_max_mhz; done: - gk20a_dbg_info("done status %x", status); + nvgpu_log_info(g, "done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/clk/clk_vf_point.c b/drivers/gpu/nvgpu/clk/clk_vf_point.c index 8333b2b0..b459c012 100644 --- a/drivers/gpu/nvgpu/clk/clk_vf_point.c +++ b/drivers/gpu/nvgpu/clk/clk_vf_point.c @@ -59,7 +59,7 @@ static u32 _clk_vf_points_pmudata_instget(struct gk20a *g, (struct nv_pmu_clk_clk_vf_point_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS) @@ -67,7 +67,7 @@ static u32 _clk_vf_points_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -94,7 +94,7 @@ u32 clk_vf_point_sw_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " 
"); status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_vf_pointobjs.super); if (status) { @@ -132,7 +132,7 @@ u32 clk_vf_point_sw_setup(struct gk20a *g) pboardobjgrp->pmustatusinstget = _clk_vf_points_pmustatus_instget; done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -141,7 +141,7 @@ u32 clk_vf_point_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->clk_pmu.clk_vf_pointobjs.super.super; @@ -150,7 +150,7 @@ u32 clk_vf_point_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -187,7 +187,7 @@ static u32 _clk_vf_point_pmudatainit_volt(struct gk20a *g, struct clk_vf_point_volt *pclk_vf_point_volt; struct nv_pmu_clk_clk_vf_point_volt_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -214,7 +214,7 @@ static u32 _clk_vf_point_pmudatainit_freq(struct gk20a *g, struct clk_vf_point_freq *pclk_vf_point_freq; struct nv_pmu_clk_clk_vf_point_freq_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -297,7 +297,7 @@ struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs) struct boardobj *board_obj_ptr = NULL; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); switch (BOARDOBJ_GET_TYPE(pargs)) { case CTRL_CLK_CLK_VF_POINT_TYPE_FREQ: status = clk_vf_point_construct_freq(g, &board_obj_ptr, @@ -316,7 +316,7 @@ struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs) if (status) return NULL; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return (struct clk_vf_point *)board_obj_ptr; } @@ -329,7 +329,7 @@ static u32 _clk_vf_point_pmudatainit_super(struct gk20a *g, struct clk_vf_point *pclk_vf_point; struct nv_pmu_clk_clk_vf_point_boardobj_set *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -355,7 +355,7 @@ static u32 clk_vf_point_update(struct gk20a *g, struct clk_vf_point *pclk_vf_point; struct nv_pmu_clk_clk_vf_point_boardobj_get_status *pstatus; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pclk_vf_point = @@ -388,7 +388,7 @@ u32 clk_vf_point_cache(struct gk20a *g) u32 status; u8 index; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pclk_vf_points = &g->clk_pmu.clk_vf_pointobjs; pboardobjgrp = &pclk_vf_points->super.super; pboardobjgrpmask = &pclk_vf_points->super.mask.super; diff --git a/drivers/gpu/nvgpu/clk/clk_vin.c b/drivers/gpu/nvgpu/clk/clk_vin.c index 74bcd247..66efefef 100644 --- a/drivers/gpu/nvgpu/clk/clk_vin.c +++ b/drivers/gpu/nvgpu/clk/clk_vin.c @@ -323,13 +323,13 @@ static u32 _clk_vin_devgrp_pmudatainit_super(struct gk20a *g, struct avfsvinobjs *pvin_obbj = (struct avfsvinobjs *)pboardobjgrp; u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); pset->b_vin_is_disable_allowed = pvin_obbj->vin_is_disable_allowed; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } @@ -342,7 +342,7 @@ static u32 _clk_vin_devgrp_pmudata_instget(struct gk20a *g, (struct nv_pmu_clk_clk_vin_device_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check 
whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -351,7 +351,7 @@ static u32 _clk_vin_devgrp_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -381,7 +381,7 @@ u32 clk_vin_sw_setup(struct gk20a *g) struct vin_device_v20 *pvindev = NULL; struct avfsvinobjs *pvinobjs; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_vinobjs.super); if (status) { @@ -427,7 +427,7 @@ u32 clk_vin_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -436,7 +436,7 @@ u32 clk_vin_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->clk_pmu.avfs_vinobjs.super.super; @@ -445,7 +445,7 @@ u32 clk_vin_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -470,7 +470,7 @@ static u32 devinit_get_vin_device_table(struct gk20a *g, struct vin_device_v20 vin_device_v20; } vin_device_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); vin_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.clock_token, VIN_TABLE); @@ -557,7 +557,7 @@ static u32 devinit_get_vin_device_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -645,7 +645,7 @@ static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs) struct boardobj *board_obj_ptr = NULL; u32 status; - gk20a_dbg_info(" %d", BOARDOBJ_GET_TYPE(pargs)); + nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs)); switch (BOARDOBJ_GET_TYPE(pargs)) { case CTRL_CLK_VIN_TYPE_V10: status = vin_device_construct_v10(g, &board_obj_ptr, @@ -664,7 +664,7 @@ static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs) if (status) return NULL; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return (struct vin_device *)board_obj_ptr; } @@ -679,7 +679,7 @@ static u32 vin_device_init_pmudata_v10(struct gk20a *g, struct vin_device_v20 *pvin_dev_v20; struct nv_pmu_clk_clk_vin_device_v10_boardobj_set *perf_pmu_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -692,7 +692,7 @@ static u32 vin_device_init_pmudata_v10(struct gk20a *g, perf_pmu_data->data.vin_cal.intercept = pvin_dev_v20->data.vin_cal.cal_v10.intercept; perf_pmu_data->data.vin_cal.slope = pvin_dev_v20->data.vin_cal.cal_v10.slope; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } @@ -705,7 +705,7 @@ static u32 vin_device_init_pmudata_v20(struct gk20a *g, struct vin_device_v20 *pvin_dev_v20; struct nv_pmu_clk_clk_vin_device_v20_boardobj_set *perf_pmu_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -718,7 +718,7 @@ static u32 vin_device_init_pmudata_v20(struct gk20a *g, perf_pmu_data->data.vin_cal.cal_v20.offset = pvin_dev_v20->data.vin_cal.cal_v20.offset; perf_pmu_data->data.vin_cal.cal_v20.gain = pvin_dev_v20->data.vin_cal.cal_v20.gain; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } @@ -731,7 +731,7 @@ static u32 vin_device_init_pmudata_super(struct gk20a *g, struct 
vin_device *pvin_dev; struct nv_pmu_clk_clk_vin_device_boardobj_set *perf_pmu_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -745,7 +745,7 @@ static u32 vin_device_init_pmudata_super(struct gk20a *g, perf_pmu_data->volt_domain = pvin_dev->volt_domain; perf_pmu_data->flls_shared_mask = pvin_dev->flls_shared_mask; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return status; } diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c index 5b76cf0e..77f088b7 100644 --- a/drivers/gpu/nvgpu/common/as.c +++ b/drivers/gpu/nvgpu/common/as.c @@ -1,7 +1,7 @@ /* * GK20A Address Spaces * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -34,13 +34,17 @@ /* dumb allocator... */ static int generate_as_share_id(struct gk20a_as *as) { - gk20a_dbg_fn(""); + struct gk20a *g = gk20a_from_as(as); + + nvgpu_log_fn(g, " "); return ++as->last_share_id; } /* still dumb */ static void release_as_share_id(struct gk20a_as *as, int id) { - gk20a_dbg_fn(""); + struct gk20a *g = gk20a_from_as(as); + + nvgpu_log_fn(g, " "); return; } @@ -56,7 +60,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share, const bool userspace_managed = (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (big_page_size == 0) { big_page_size = g->ops.mm.get_default_big_page_size(); @@ -92,7 +96,7 @@ int gk20a_as_alloc_share(struct gk20a *g, struct gk20a_as_share *as_share; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g = gk20a_get(g); if (!g) return -ENODEV; @@ -126,8 +130,9 @@ failed: int gk20a_vm_release_share(struct gk20a_as_share *as_share) { struct vm_gk20a *vm = as_share->vm; + struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); vm->as_share = NULL; as_share->vm = NULL; @@ -146,7 +151,7 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share) struct gk20a *g = as_share->vm->mm->g; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_busy(g); diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c index 7c92246c..511d564f 100644 --- a/drivers/gpu/nvgpu/common/linux/cde.c +++ b/drivers/gpu/nvgpu/common/linux/cde.c @@ -464,7 +464,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx) new_data = cde_ctx->user_param_values[user_id]; } - gk20a_dbg(gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx", + nvgpu_log(g, gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx", i, param->id, param->target_buf, param->target_byte_offset, new_data, param->data_offset, param->type, param->shift, @@ -790,8 +790,9 @@ __acquires(&cde_app->mutex) __releases(&cde_app->mutex) { struct gk20a_cde_app *cde_app = &cde_ctx->l->cde_app; + struct gk20a *g = &cde_ctx->l->g; - gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx); + nvgpu_log(g, gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx); trace_gk20a_cde_release(cde_ctx); nvgpu_mutex_acquire(&cde_app->mutex); @@ -801,7 
+802,7 @@ __releases(&cde_app->mutex) nvgpu_list_move(&cde_ctx->list, &cde_app->free_contexts); cde_app->ctx_usecount--; } else { - gk20a_dbg_info("double release cde context %p", cde_ctx); + nvgpu_log_info(g, "double release cde context %p", cde_ctx); } nvgpu_mutex_release(&cde_app->mutex); @@ -823,7 +824,7 @@ __releases(&cde_app->mutex) if (cde_ctx->in_use || !cde_app->initialised) return; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: attempting to delete temporary %p", cde_ctx); err = gk20a_busy(g); @@ -837,7 +838,7 @@ __releases(&cde_app->mutex) nvgpu_mutex_acquire(&cde_app->mutex); if (cde_ctx->in_use || !cde_app->initialised) { - gk20a_dbg(gpu_dbg_cde_ctx, + nvgpu_log(g, gpu_dbg_cde_ctx, "cde: context use raced, not deleting %p", cde_ctx); goto out; @@ -847,7 +848,7 @@ __releases(&cde_app->mutex) "double pending %p", cde_ctx); gk20a_cde_remove_ctx(cde_ctx); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: destroyed %p count=%d use=%d max=%d", cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount, cde_app->ctx_count_top); @@ -874,7 +875,7 @@ __must_hold(&cde_app->mutex) if (!nvgpu_list_empty(&cde_app->free_contexts)) { cde_ctx = nvgpu_list_first_entry(&cde_app->free_contexts, gk20a_cde_ctx, list); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: got free %p count=%d use=%d max=%d", cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount, @@ -893,7 +894,7 @@ __must_hold(&cde_app->mutex) /* no free contexts, get a temporary one */ - gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: no free contexts, count=%d", cde_app->ctx_count); @@ -967,7 +968,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l INIT_DELAYED_WORK(&cde_ctx->ctx_deleter_work, gk20a_cde_ctx_deleter_fn); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx); trace_gk20a_cde_allocate_context(cde_ctx); return cde_ctx; } @@ -1005,7 +1006,7 @@ __releases(&l->cde_app->mutex) u32 submit_op; struct dma_buf_attachment *attachment; - gk20a_dbg(gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu", + nvgpu_log(g, gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu", compbits_byte_offset, scatterbuffer_byte_offset); /* scatter buffer must be after compbits buffer */ @@ -1055,11 +1056,11 @@ __releases(&l->cde_app->mutex) compbits_byte_offset; } - gk20a_dbg(gpu_dbg_cde, "map_offset=%llu map_size=%llu", + nvgpu_log(g, gpu_dbg_cde, "map_offset=%llu map_size=%llu", map_offset, map_size); - gk20a_dbg(gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu", + nvgpu_log(g, gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu", mapped_compbits_offset, compbits_size); - gk20a_dbg(gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu", + nvgpu_log(g, gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu", mapped_scatterbuffer_offset, scatterbuffer_size); @@ -1096,7 +1097,7 @@ __releases(&l->cde_app->mutex) scatter_buffer = surface + scatterbuffer_byte_offset; - gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p", + nvgpu_log(g, gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p", surface, scatter_buffer); sgt = gk20a_mm_pin(dev_from_gk20a(g), compbits_scatter_buf, &attachment); @@ -1163,11 +1164,11 @@ __releases(&l->cde_app->mutex) goto exit_unmap_surface; } - 
gk20a_dbg(gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n", + nvgpu_log(g, gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n", g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr); - gk20a_dbg(gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n", + nvgpu_log(g, gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n", cde_ctx->compbit_size, cde_ctx->compbit_vaddr); - gk20a_dbg(gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n", + nvgpu_log(g, gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n", cde_ctx->scatterbuffer_size, cde_ctx->scatterbuffer_vaddr); /* take always the postfence as it is needed for protecting the @@ -1234,9 +1235,9 @@ __releases(&cde_app->mutex) return; trace_gk20a_cde_finished_ctx_cb(cde_ctx); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx); if (!cde_ctx->in_use) - gk20a_dbg_info("double finish cde context %p on channel %p", + nvgpu_log_info(g, "double finish cde context %p on channel %p", cde_ctx, ch); if (ch->has_timedout) { @@ -1406,12 +1407,13 @@ __acquires(&cde_app->mutex) __releases(&cde_app->mutex) { struct gk20a_cde_app *cde_app = &l->cde_app; + struct gk20a *g = &l->g; int err; if (cde_app->initialised) return 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init"); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init"); err = nvgpu_mutex_init(&cde_app->mutex); if (err) @@ -1430,7 +1432,7 @@ __releases(&cde_app->mutex) cde_app->initialised = true; nvgpu_mutex_release(&cde_app->mutex); - gk20a_dbg(gpu_dbg_cde_ctx, "cde: init finished: %d", err); + nvgpu_log(g, gpu_dbg_cde_ctx, "cde: init finished: %d", err); if (err) nvgpu_mutex_destroy(&cde_app->mutex); @@ -1528,14 +1530,14 @@ static int gk20a_buffer_convert_gpu_to_cde_v1( nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)", xtiles, ytiles); - gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx", + nvgpu_log(g, gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx", width, height, block_height_log2, compbits_hoffset, compbits_voffset, scatterbuffer_offset); - gk20a_dbg(gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)", + nvgpu_log(g, gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)", width, height, xtiles, ytiles); - gk20a_dbg(gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)", + nvgpu_log(g, gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)", wgx, wgy, gridw_h, gridh_h, gridw_v, gridh_v); - gk20a_dbg(gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d", + nvgpu_log(g, gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d", hprog, l->cde_app.arrays[ARRAY_PROGRAM_OFFSET][hprog], l->cde_app.arrays[ARRAY_REGISTER_COUNT][hprog], @@ -1634,7 +1636,7 @@ static int gk20a_buffer_convert_gpu_to_cde( if (!l->cde_app.initialised) return -ENOSYS; - gk20a_dbg(gpu_dbg_cde, "firmware version = %d\n", + nvgpu_log(g, gpu_dbg_cde, "firmware version = %d\n", l->cde_app.firmware_version); if (l->cde_app.firmware_version == 1) { diff --git a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c b/drivers/gpu/nvgpu/common/linux/cde_gp10b.c index 483a3ee7..5c0e79a7 100644 --- a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c +++ b/drivers/gpu/nvgpu/common/linux/cde_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B CDE * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. 
All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -117,7 +117,7 @@ int gp10b_populate_scatter_buffer(struct gk20a *g, u64 surf_pa = sg_phys(sg); unsigned int n = (int)(sg->length >> page_size_log2); - gk20a_dbg(gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n); + nvgpu_log(g, gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n); for (j=0; j < n && pages_left > 0; j++, surf_pa += page_size) { u32 addr = (((u32)(surf_pa>>7)) & getSliceMaskGP10B) >> page_size_shift; @@ -143,9 +143,9 @@ int gp10b_populate_scatter_buffer(struct gk20a *g, scatter_buffer[page >> 3] = d; if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) { - gk20a_dbg(gpu_dbg_cde, "scatterBuffer content:"); + nvgpu_log(g, gpu_dbg_cde, "scatterBuffer content:"); for (i = 0; i < page >> 3; i++) { - gk20a_dbg(gpu_dbg_cde, " %x", scatter_buffer[i]); + nvgpu_log(g, gpu_dbg_cde, " %x", scatter_buffer[i]); } } diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c index 8f2adc3a..d767374b 100644 --- a/drivers/gpu/nvgpu/common/linux/channel.c +++ b/drivers/gpu/nvgpu/common/linux/channel.c @@ -834,7 +834,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, /* update debug settings */ nvgpu_ltc_sync_enabled(g); - gk20a_dbg_info("channel %d", c->chid); + nvgpu_log_info(g, "channel %d", c->chid); /* * Job tracking is necessary for any of the following conditions: @@ -943,7 +943,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, fence ? fence->id : 0, fence ? fence->value : 0); - gk20a_dbg_info("pre-submit put %d, get %d, size %d", + nvgpu_log_info(g, "pre-submit put %d, get %d, size %d", c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); /* @@ -1023,18 +1023,18 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, post_fence ? post_fence->syncpt_id : 0, post_fence ? post_fence->syncpt_value : 0); - gk20a_dbg_info("post-submit put %d, get %d, size %d", + nvgpu_log_info(g, "post-submit put %d, get %d, size %d", c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); if (profile) profile->timestamp[PROFILE_END] = sched_clock(); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; clean_up_job: channel_gk20a_free_job(c, job); clean_up: - gk20a_dbg_fn("fail"); + nvgpu_log_fn(g, "fail"); gk20a_fence_put(post_fence); if (c->deterministic) nvgpu_rwsem_up_read(&g->deterministic_busy); diff --git a/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c b/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c index 8268bf60..2f0c3e89 100644 --- a/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c +++ b/drivers/gpu/nvgpu/common/linux/ctxsw_trace.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -79,13 +79,14 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size, loff_t *off) { struct gk20a_ctxsw_dev *dev = filp->private_data; + struct gk20a *g = dev->g; struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; struct nvgpu_ctxsw_trace_entry __user *entry = (struct nvgpu_ctxsw_trace_entry *) buf; size_t copied = 0; int err; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "filp=%p buf=%p size=%zu", filp, buf, size); nvgpu_mutex_acquire(&dev->write_lock); @@ -119,7 +120,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size, size -= sizeof(*entry); } - gk20a_dbg(gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied, + nvgpu_log(g, gpu_dbg_ctxsw, "copied=%zu read_idx=%d", copied, hdr->read_idx); *off = hdr->read_idx; @@ -130,7 +131,9 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size, static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev) { - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled"); + struct gk20a *g = dev->g; + + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled"); nvgpu_mutex_acquire(&dev->write_lock); dev->write_enabled = true; nvgpu_mutex_release(&dev->write_lock); @@ -140,7 +143,9 @@ static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev) static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev) { - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled"); + struct gk20a *g = dev->g; + + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled"); dev->g->ops.fecs_trace.disable(dev->g); nvgpu_mutex_acquire(&dev->write_lock); dev->write_enabled = false; @@ -168,7 +173,7 @@ static int gk20a_ctxsw_dev_alloc_buffer(struct gk20a_ctxsw_dev *dev, dev->size = size; dev->num_ents = dev->hdr->num_ents; - gk20a_dbg(gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d", + nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu hdr=%p ents=%p num_ents=%d", dev->size, dev->hdr, dev->ents, dev->hdr->num_ents); return 0; } @@ -208,10 +213,11 @@ int gk20a_ctxsw_dev_ring_free(struct gk20a *g) static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev, struct nvgpu_ctxsw_ring_setup_args *args) { + struct gk20a *g = dev->g; size_t size = args->size; int ret; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "size=%zu", size); if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE) return -EINVAL; @@ -252,7 +258,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev) struct gk20a *g = dev->g; int err; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); err = gk20a_busy(g); if (err) @@ -286,7 +292,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp) if (!g) return -ENODEV; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p", g); if (!capable(CAP_SYS_ADMIN)) { err = -EPERM; @@ -322,13 +328,13 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp) size = sizeof(struct nvgpu_ctxsw_ring_header) + n * sizeof(struct nvgpu_ctxsw_trace_entry); - gk20a_dbg(gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu", + nvgpu_log(g, gpu_dbg_ctxsw, "size=%zu entries=%d ent_size=%zu", size, n, sizeof(struct nvgpu_ctxsw_trace_entry)); err = gk20a_ctxsw_dev_alloc_buffer(dev, size); if (!err) { filp->private_data = dev; - gk20a_dbg(gpu_dbg_ctxsw, "filp=%p 
dev=%p size=%zu", + nvgpu_log(g, gpu_dbg_ctxsw, "filp=%p dev=%p size=%zu", filp, dev, size); } @@ -348,7 +354,7 @@ int gk20a_ctxsw_dev_release(struct inode *inode, struct file *filp) struct gk20a_ctxsw_dev *dev = filp->private_data; struct gk20a *g = dev->g; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "dev: %p", dev); g->ops.fecs_trace.disable(g); @@ -372,7 +378,7 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd, u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; int err = 0; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd)); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "nr=%d", _IOC_NR(cmd)); if ((_IOC_TYPE(cmd) != NVGPU_CTXSW_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || @@ -423,10 +429,11 @@ long gk20a_ctxsw_dev_ioctl(struct file *filp, unsigned int cmd, unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait) { struct gk20a_ctxsw_dev *dev = filp->private_data; + struct gk20a *g = dev->g; struct nvgpu_ctxsw_ring_header *hdr = dev->hdr; unsigned int mask = 0; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); nvgpu_mutex_acquire(&dev->write_lock); poll_wait(filp, &dev->readout_wq.wq, wait); @@ -440,18 +447,20 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait) static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma) { struct gk20a_ctxsw_dev *dev = vma->vm_private_data; + struct gk20a *g = dev->g; nvgpu_atomic_inc(&dev->vma_ref); - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", nvgpu_atomic_read(&dev->vma_ref)); } static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma) { struct gk20a_ctxsw_dev *dev = vma->vm_private_data; + struct gk20a *g = dev->g; nvgpu_atomic_dec(&dev->vma_ref); - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d", nvgpu_atomic_read(&dev->vma_ref)); } @@ -469,9 +478,10 @@ int gk20a_ctxsw_dev_mmap_buffer(struct gk20a *g, int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma) { struct gk20a_ctxsw_dev *dev = filp->private_data; + struct gk20a *g = dev->g; int ret; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx", + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "vm_start=%lx vm_end=%lx", vma->vm_start, vma->vm_end); ret = dev->g->ops.fecs_trace.mmap_user_buffer(dev->g, vma); @@ -513,7 +523,7 @@ int gk20a_ctxsw_trace_init(struct gk20a *g) struct gk20a_ctxsw_trace *trace = g->ctxsw_trace; int err; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "g=%p trace=%p", g, trace); /* if tracing is not supported, skip this */ if (!g->ops.fecs_trace.init) @@ -590,7 +600,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g, dev = &g->ctxsw_trace->devs[entry->vmid]; hdr = dev->hdr; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "dev=%p hdr=%p", dev, hdr); nvgpu_mutex_acquire(&dev->write_lock); @@ -630,7 +640,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g, goto filter; } - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "seqno=%d context_id=%08x pid=%lld tag=%x timestamp=%llx", entry->seqno, entry->context_id, entry->pid, entry->tag, entry->timestamp); @@ -644,7 +654,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g, if (unlikely(write_idx >= hdr->num_ents)) write_idx = 0; hdr->write_idx = write_idx; - gk20a_dbg(gpu_dbg_ctxsw, "added: read=%d write=%d len=%d", + nvgpu_log(g, gpu_dbg_ctxsw, "added: read=%d 
write=%d len=%d", hdr->read_idx, hdr->write_idx, ring_len(hdr)); nvgpu_mutex_release(&dev->write_lock); @@ -657,7 +667,7 @@ drop: hdr->drop_count++; filter: - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "dropping seqno=%d context_id=%08x pid=%lld " "tag=%x time=%llx (%s)", entry->seqno, entry->context_id, entry->pid, diff --git a/drivers/gpu/nvgpu/common/linux/debug.c b/drivers/gpu/nvgpu/common/linux/debug.c index a458a3d4..e8c0417a 100644 --- a/drivers/gpu/nvgpu/common/linux/debug.c +++ b/drivers/gpu/nvgpu/common/linux/debug.c @@ -307,10 +307,6 @@ void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink) debugfs_create_u32("disable_syncpoints", S_IRUGO, l->debugfs, &g->disable_syncpoints); - /* Legacy debugging API. */ - debugfs_create_u64("dbg_mask", S_IRUGO|S_IWUSR, - l->debugfs, &nvgpu_dbg_mask); - /* New debug logging API. */ debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR, l->debugfs, &g->log_mask); diff --git a/drivers/gpu/nvgpu/common/linux/debug_fifo.c b/drivers/gpu/nvgpu/common/linux/debug_fifo.c index aeab0c92..b2a87e0d 100644 --- a/drivers/gpu/nvgpu/common/linux/debug_fifo.c +++ b/drivers/gpu/nvgpu/common/linux/debug_fifo.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 NVIDIA Corporation. All rights reserved. + * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -108,6 +108,7 @@ static const struct seq_operations gk20a_fifo_sched_debugfs_seq_ops = { static int gk20a_fifo_sched_debugfs_open(struct inode *inode, struct file *file) { + struct gk20a *g = inode->i_private; int err; if (!capable(CAP_SYS_ADMIN)) @@ -117,7 +118,7 @@ static int gk20a_fifo_sched_debugfs_open(struct inode *inode, if (err) return err; - gk20a_dbg(gpu_dbg_info, "i_private=%p", inode->i_private); + nvgpu_log(g, gpu_dbg_info, "i_private=%p", inode->i_private); ((struct seq_file *)file->private_data)->private = inode->i_private; return 0; @@ -301,7 +302,7 @@ void gk20a_fifo_debugfs_init(struct gk20a *g) if (IS_ERR_OR_NULL(fifo_root)) return; - gk20a_dbg(gpu_dbg_info, "g=%p", g); + nvgpu_log(g, gpu_dbg_info, "g=%p", g); debugfs_create_file("sched", 0600, fifo_root, g, &gk20a_fifo_sched_debugfs_fops); diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c index 53789423..769f7e03 100644 --- a/drivers/gpu/nvgpu/common/linux/driver_common.c +++ b/drivers/gpu/nvgpu/common/linux/driver_common.c @@ -87,7 +87,7 @@ static void nvgpu_init_gr_vars(struct gk20a *g) { gk20a_init_gr(g); - gk20a_dbg_info("total ram pages : %lu", totalram_pages); + nvgpu_log_info(g, "total ram pages : %lu", totalram_pages); g->gr.max_comptag_mem = totalram_pages >> (10 - (PAGE_SHIFT - 10)); } diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c index 05dd3f2a..7ffc7e87 100644 --- a/drivers/gpu/nvgpu/common/linux/intr.c +++ b/drivers/gpu/nvgpu/common/linux/intr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -50,7 +50,7 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g) struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); int hw_irq_count; - gk20a_dbg(gpu_dbg_intr, "interrupt thread launched"); + nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched"); trace_mc_gk20a_intr_thread_stall(g->name); diff --git a/drivers/gpu/nvgpu/common/linux/io.c b/drivers/gpu/nvgpu/common/linux/io.c index cde90ddd..c06512a5 100644 --- a/drivers/gpu/nvgpu/common/linux/io.c +++ b/drivers/gpu/nvgpu/common/linux/io.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -23,11 +23,11 @@ void nvgpu_writel(struct gk20a *g, u32 r, u32 v) if (unlikely(!l->regs)) { __gk20a_warn_on_no_regs(); - gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); + nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); } else { writel_relaxed(v, l->regs + r); nvgpu_wmb(); - gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); + nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v); } } @@ -48,10 +48,10 @@ u32 __nvgpu_readl(struct gk20a *g, u32 r) if (unlikely(!l->regs)) { __gk20a_warn_on_no_regs(); - gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); + nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); } else { v = readl(l->regs + r); - gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); + nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v); } return v; @@ -63,13 +63,13 @@ void nvgpu_writel_check(struct gk20a *g, u32 r, u32 v) if (unlikely(!l->regs)) { __gk20a_warn_on_no_regs(); - gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); + nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v); } else { nvgpu_wmb(); do { writel_relaxed(v, l->regs + r); } while (readl(l->regs + r) != v); - gk20a_dbg(gpu_dbg_reg, "r=0x%x v=0x%x", r, v); + nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x", r, v); } } @@ -79,11 +79,11 @@ void nvgpu_bar1_writel(struct gk20a *g, u32 b, u32 v) if (unlikely(!l->bar1)) { __gk20a_warn_on_no_regs(); - gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); + nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); } else { nvgpu_wmb(); writel_relaxed(v, l->bar1 + b); - gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); + nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v); } } @@ -94,10 +94,10 @@ u32 nvgpu_bar1_readl(struct gk20a *g, u32 b) if (unlikely(!l->bar1)) { __gk20a_warn_on_no_regs(); - gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); + nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x (failed)", b, v); } else { v = readl(l->bar1 + b); - gk20a_dbg(gpu_dbg_reg, "b=0x%x v=0x%x", b, v); + nvgpu_log(g, gpu_dbg_reg, "b=0x%x v=0x%x", b, v); } return v; diff --git a/drivers/gpu/nvgpu/common/linux/io_usermode.c b/drivers/gpu/nvgpu/common/linux/io_usermode.c index 888be318..a7b728dd 100644 --- a/drivers/gpu/nvgpu/common/linux/io_usermode.c +++ b/drivers/gpu/nvgpu/common/linux/io_usermode.c @@ -25,5 +25,5 @@ void nvgpu_usermode_writel(struct gk20a *g, u32 r, u32 v) void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r()); writel_relaxed(v, reg); - gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v); + nvgpu_log(g, gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v); } diff --git a/drivers/gpu/nvgpu/common/linux/ioctl.c 
b/drivers/gpu/nvgpu/common/linux/ioctl.c index 04974786..359e5103 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl.c @@ -1,7 +1,7 @@ /* * NVGPU IOCTLs * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -139,8 +139,9 @@ static int gk20a_create_device( { struct device *subdev; int err; + struct gk20a *g = gk20a_from_dev(dev); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); cdev_init(cdev, ops); cdev->owner = THIS_MODULE; diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c index e09e099b..41bbdfcb 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c @@ -50,8 +50,9 @@ static int gk20a_as_ioctl_bind_channel( { int err = 0; struct channel_gk20a *ch; + struct gk20a *g = gk20a_from_vm(as_share->vm); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ch = gk20a_get_channel_from_file(args->channel_fd); if (!ch) @@ -76,7 +77,7 @@ static int gk20a_as_ioctl_alloc_space( { struct gk20a *g = gk20a_from_vm(as_share->vm); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size, &args->o_a.offset, gk20a_as_translate_linux_flags(g, @@ -87,7 +88,9 @@ static int gk20a_as_ioctl_free_space( struct gk20a_as_share *as_share, struct nvgpu_as_free_space_args *args) { - gk20a_dbg_fn(""); + struct gk20a *g = gk20a_from_vm(as_share->vm); + + nvgpu_log_fn(g, " "); return nvgpu_vm_area_free(as_share->vm, args->offset); } @@ -95,7 +98,9 @@ static int gk20a_as_ioctl_map_buffer_ex( struct gk20a_as_share *as_share, struct nvgpu_as_map_buffer_ex_args *args) { - gk20a_dbg_fn(""); + struct gk20a *g = gk20a_from_vm(as_share->vm); + + nvgpu_log_fn(g, " "); /* unsupported, direct kind control must be used */ if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) { @@ -117,7 +122,9 @@ static int gk20a_as_ioctl_unmap_buffer( struct gk20a_as_share *as_share, struct nvgpu_as_unmap_buffer_args *args) { - gk20a_dbg_fn(""); + struct gk20a *g = gk20a_from_vm(as_share->vm); + + nvgpu_log_fn(g, " "); nvgpu_vm_unmap(as_share->vm, args->offset, NULL); @@ -128,6 +135,7 @@ static int gk20a_as_ioctl_map_buffer_batch( struct gk20a_as_share *as_share, struct nvgpu_as_map_buffer_batch_args *args) { + struct gk20a *g = gk20a_from_vm(as_share->vm); u32 i; int err = 0; @@ -140,7 +148,7 @@ static int gk20a_as_ioctl_map_buffer_batch( struct vm_gk20a_mapping_batch batch; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT || args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT) @@ -220,9 +228,10 @@ static int gk20a_as_ioctl_get_va_regions( unsigned int write_entries; struct nvgpu_as_va_region __user *user_region_ptr; struct vm_gk20a *vm = as_share->vm; + struct gk20a *g = gk20a_from_vm(vm); unsigned int page_sizes = gmmu_page_size_kernel; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!vm->big_pages) page_sizes--; @@ -293,14 +302,14 @@ int gk20a_as_dev_open(struct inode *inode, struct file *filp) struct gk20a *g; int err; - gk20a_dbg_fn(""); - l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev); g = &l->g; + nvgpu_log_fn(g, " "); + err = gk20a_as_alloc_share(g, 0, 0, &as_share); if (err) { - gk20a_dbg_fn("failed to alloc share"); + nvgpu_log_fn(g, 
"failed to alloc share"); return err; } @@ -312,8 +321,6 @@ int gk20a_as_dev_release(struct inode *inode, struct file *filp) { struct gk20a_as_share *as_share = filp->private_data; - gk20a_dbg_fn(""); - if (!as_share) return 0; @@ -328,7 +335,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE]; - gk20a_dbg_fn("start %d", _IOC_NR(cmd)); + nvgpu_log_fn(g, "start %d", _IOC_NR(cmd)); if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c index 06dfb180..606c5251 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c @@ -476,7 +476,7 @@ static int __gk20a_channel_open(struct gk20a *g, struct channel_gk20a *ch; struct channel_priv *priv; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g = gk20a_get(g); if (!g) @@ -529,10 +529,10 @@ int gk20a_channel_open(struct inode *inode, struct file *filp) struct gk20a *g = &l->g; int ret; - gk20a_dbg_fn("start"); + nvgpu_log_fn(g, "start"); ret = __gk20a_channel_open(g, filp, -1); - gk20a_dbg_fn("end"); + nvgpu_log_fn(g, "end"); return ret; } @@ -676,7 +676,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch, int remain, ret = 0; u64 end; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (ch->has_timedout) return -ETIMEDOUT; @@ -760,7 +760,7 @@ static int gk20a_channel_zcull_bind(struct channel_gk20a *ch, struct gk20a *g = ch->g; struct gr_gk20a *gr = &g->gr; - gk20a_dbg_fn(""); + nvgpu_log_fn(gr->g, " "); return g->ops.gr.bind_ctxsw_zcull(g, gr, ch, args->gpu_va, args->mode); @@ -775,9 +775,10 @@ static int gk20a_ioctl_channel_submit_gpfifo( struct fifo_profile_gk20a *profile = NULL; u32 submit_flags = 0; int fd = -1; + struct gk20a *g = ch->g; int ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); #ifdef CONFIG_DEBUG_FS profile = gk20a_fifo_profile_acquire(ch->g); @@ -1064,8 +1065,9 @@ long gk20a_channel_ioctl(struct file *filp, struct device *dev = dev_from_gk20a(ch->g); u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0}; int err = 0; + struct gk20a *g = ch->g; - gk20a_dbg_fn("start %d", _IOC_NR(cmd)); + nvgpu_log_fn(g, "start %d", _IOC_NR(cmd)); if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || @@ -1224,7 +1226,7 @@ long gk20a_channel_ioctl(struct file *filp, { u32 timeout = (u32)((struct nvgpu_set_timeout_args *)buf)->timeout; - gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", + nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", timeout, ch->chid); ch->timeout_ms_max = timeout; gk20a_channel_trace_sched_param( @@ -1238,7 +1240,7 @@ long gk20a_channel_ioctl(struct file *filp, bool timeout_debug_dump = !((u32) ((struct nvgpu_set_timeout_ex_args *)buf)->flags & (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP)); - gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", + nvgpu_log(g, gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d", timeout, ch->chid); ch->timeout_ms_max = timeout; ch->timeout_debug_dump = timeout_debug_dump; @@ -1367,7 +1369,7 @@ long gk20a_channel_ioctl(struct file *filp, gk20a_channel_put(ch); - gk20a_dbg_fn("end"); + nvgpu_log_fn(g, "end"); return err; } diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c b/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c index 039f65f8..3ab8cf9e 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_clk_arb.c @@ -209,9 +209,10 @@ static ssize_t 
nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf, static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev, struct nvgpu_gpu_set_event_filter_args *args) { + struct gk20a *g = dev->session->g; u32 mask; - gk20a_dbg(gpu_dbg_fn, ""); + nvgpu_log(g, gpu_dbg_fn, " "); if (args->flags) return -EINVAL; @@ -237,7 +238,7 @@ static long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd, u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE]; int err = 0; - gk20a_dbg(gpu_dbg_fn, "nr=%d", _IOC_NR(cmd)); + nvgpu_log(g, gpu_dbg_fn, "nr=%d", _IOC_NR(cmd)); if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST)) @@ -681,7 +682,7 @@ int nvgpu_clk_arb_debugfs_init(struct gk20a *g) struct dentry *gpu_root = l->debugfs; struct dentry *d; - gk20a_dbg(gpu_dbg_info, "g=%p", g); + nvgpu_log(g, gpu_dbg_info, "g=%p", g); d = debugfs_create_file( "arb_stats", diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c index 70707a5c..7bb97369 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c @@ -62,14 +62,14 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp) struct gk20a_ctrl_priv *priv; int err = 0; - gk20a_dbg_fn(""); - l = container_of(inode->i_cdev, struct nvgpu_os_linux, ctrl.cdev); g = gk20a_get(&l->g); if (!g) return -ENODEV; + nvgpu_log_fn(g, " "); + priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv)); if (!priv) { err = -ENOMEM; @@ -102,7 +102,7 @@ int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp) struct gk20a_ctrl_priv *priv = filp->private_data; struct gk20a *g = priv->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (priv->clk_session) nvgpu_clk_arb_release_session(g, priv->clk_session); @@ -684,7 +684,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g, /* Copy to user space - pointed by "args->pwarpstate" */ if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate, w_state, ioctl_size)) { - gk20a_dbg_fn("copy_to_user failed!"); + nvgpu_log_fn(g, "copy_to_user failed!"); err = -EFAULT; } @@ -901,7 +901,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g, u32 align = args->in.alignment ? 
args->in.alignment : SZ_4K; int fd; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* not yet supported */ if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK)) @@ -933,7 +933,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g, args->out.dmabuf_fd = fd; - gk20a_dbg_fn("done, fd=%d", fd); + nvgpu_log_fn(g, "done, fd=%d", fd); return 0; } @@ -943,7 +943,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g, { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (args->reserved[0] || args->reserved[1] || args->reserved[2] || args->reserved[3]) @@ -951,7 +951,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g, err = nvgpu_vidmem_get_space(g, &args->total_free_bytes); - gk20a_dbg_fn("done, err=%d, bytes=%lld", err, args->total_free_bytes); + nvgpu_log_fn(g, "done, err=%d, bytes=%lld", err, args->total_free_bytes); return err; } @@ -973,7 +973,7 @@ static int nvgpu_gpu_clk_get_vf_points(struct gk20a *g, u16 min_mhz; u16 max_mhz; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!session || args->flags) return -EINVAL; @@ -1059,7 +1059,7 @@ static int nvgpu_gpu_clk_get_range(struct gk20a *g, int err; u16 min_mhz, max_mhz; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!session) return -EINVAL; @@ -1138,7 +1138,7 @@ static int nvgpu_gpu_clk_set_info(struct gk20a *g, int i; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!session || args->flags) return -EINVAL; @@ -1201,7 +1201,7 @@ static int nvgpu_gpu_clk_get_info(struct gk20a *g, int err; int bit; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!session) return -EINVAL; @@ -1287,7 +1287,7 @@ static int nvgpu_gpu_get_event_fd(struct gk20a *g, { struct nvgpu_clk_session *session = priv->clk_session; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!session) return -EINVAL; @@ -1301,7 +1301,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g, { int err = -EINVAL; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (args->reserved) return -EINVAL; @@ -1337,7 +1337,7 @@ static int nvgpu_gpu_get_current(struct gk20a *g, { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (args->reserved[0] || args->reserved[1] || args->reserved[2]) return -EINVAL; @@ -1361,7 +1361,7 @@ static int nvgpu_gpu_get_power(struct gk20a *g, { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (args->reserved[0] || args->reserved[1] || args->reserved[2]) return -EINVAL; @@ -1386,7 +1386,7 @@ static int nvgpu_gpu_get_temperature(struct gk20a *g, int err; u32 temp_f24_8; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (args->reserved[0] || args->reserved[1] || args->reserved[2]) return -EINVAL; @@ -1415,7 +1415,7 @@ static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g, { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (args->reserved[0] || args->reserved[1] || args->reserved[2]) return -EINVAL; @@ -1491,7 +1491,7 @@ static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g, u32 i = 0; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); user_channels = (int __user *)(uintptr_t)args->channels; @@ -1556,7 +1556,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg struct zbc_query_params *zbc_tbl; int i, err = 0; - gk20a_dbg_fn("start %d", _IOC_NR(cmd)); + nvgpu_log_fn(g, "start %d", _IOC_NR(cmd)); if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || @@ -1855,7 +1855,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg break; default: - gk20a_dbg_info("unrecognized gpu ioctl cmd: 0x%x", cmd); + nvgpu_log_info(g, "unrecognized gpu 
ioctl cmd: 0x%x", cmd); err = -ENOTTY; break; } diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c index a53d1cfb..2aba2664 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c @@ -56,7 +56,7 @@ static int alloc_profiler(struct gk20a *g, struct dbg_profiler_object_data *prof; *_prof = NULL; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); prof = nvgpu_kzalloc(g, sizeof(*prof)); if (!prof) @@ -72,7 +72,7 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_ struct dbg_session_gk20a_linux *dbg_s_linux; *_dbg_s_linux = NULL; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux)); if (!dbg_s_linux) @@ -142,8 +142,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait) unsigned int mask = 0; struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data; struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s; + struct gk20a *g = dbg_s->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait); @@ -151,9 +152,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait) if (dbg_s->dbg_events.events_enabled && dbg_s->dbg_events.num_pending_events > 0) { - gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d", + nvgpu_log(g, gpu_dbg_gpu_dbg, "found pending event on session id %d", dbg_s->id); - gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", + nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending", dbg_s->dbg_events.num_pending_events); mask = (POLLPRI | POLLIN); } @@ -170,7 +171,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp) struct gk20a *g = dbg_s->g; struct dbg_profiler_object_data *prof_obj, *tmp_obj; - gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name); + nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name); /* unbind channels */ dbg_unbind_all_channels_gk20a(dbg_s); @@ -213,7 +214,11 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp) int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + struct nvgpu_os_linux *l = container_of(inode->i_cdev, + struct nvgpu_os_linux, prof.cdev); + struct gk20a *g = &l->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */); } @@ -223,7 +228,7 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s, int err; struct gk20a *g = dbg_s->g; - gk20a_dbg_fn("powergate mode = %d", args->enable); + nvgpu_log_fn(g, "powergate mode = %d", args->enable); nvgpu_mutex_acquire(&g->dbg_sessions_lock); err = nvgpu_dbg_timeout_enable(dbg_s, args->enable); @@ -356,7 +361,9 @@ static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type( struct dbg_session_gk20a *dbg_s, struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + struct gk20a *g = dbg_s->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); @@ -373,7 +380,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s, struct gk20a *g = dbg_s->g; int err = 0; - gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d", + nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts mode 
requested : %d", timeout_mode); switch (timeout_mode) { @@ -401,7 +408,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s, break; } - gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s", + nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts enabled : %s", g->timeouts_enabled ? "Yes" : "No"); return err; @@ -431,7 +438,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode, dev = dev_from_gk20a(g); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name); err = alloc_session(g, &dbg_session_linux); if (err) @@ -482,7 +489,7 @@ static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s, struct dbg_profiler_object_data *prof_obj, *tmp_obj; struct dbg_session_channel_data_linux *ch_data_linux; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); chid = ch_data->chid; @@ -527,7 +534,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s, struct dbg_session_data *session_data; int err = 0; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", g->name, args->channel_fd); /* @@ -541,12 +548,12 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s, ch = gk20a_get_channel_from_file(args->channel_fd); if (!ch) { - gk20a_dbg_fn("no channel found for fd"); + nvgpu_log_fn(g, "no channel found for fd"); err = -EINVAL; goto out_fput; } - gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid); + nvgpu_log_fn(g, "%s hwchid=%d", g->name, ch->chid); nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&ch->dbg_s_lock); @@ -818,7 +825,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, struct gk20a *g = dbg_s->g; struct channel_gk20a *ch; - gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); + nvgpu_log_fn(g, "%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) { nvgpu_err(g, "regops limit exceeded"); @@ -890,10 +897,10 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, (args->ops + ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op)); - gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu", + nvgpu_log_fn(g, "Regops fragment: start_op=%llu ops=%llu", ops_offset, num_ops); - gk20a_dbg_fn("Copying regops from userspace"); + nvgpu_log_fn(g, "Copying regops from userspace"); if (copy_from_user(linux_fragment, fragment, fragment_size)) { @@ -917,7 +924,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, if (err) break; - gk20a_dbg_fn("Copying result to userspace"); + nvgpu_log_fn(g, "Copying result to userspace"); if (copy_to_user(fragment, linux_fragment, fragment_size)) { @@ -955,7 +962,7 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s, { int err; struct gk20a *g = dbg_s->g; - gk20a_dbg_fn("%s powergate mode = %d", + nvgpu_log_fn(g, "%s powergate mode = %d", g->name, args->mode); nvgpu_mutex_acquire(&g->dbg_sessions_lock); @@ -978,7 +985,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, struct gk20a *g = dbg_s->g; struct channel_gk20a *ch_gk20a; - gk20a_dbg_fn("%s smpc ctxsw mode = %d", + nvgpu_log_fn(g, "%s smpc ctxsw mode = %d", g->name, args->mode); err = gk20a_busy(g); @@ -1075,7 +1082,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( struct channel_gk20a *ch; int err = 0, action = args->mode; - gk20a_dbg(gpu_dbg_fn | 
gpu_dbg_gpu_dbg, "action: %d", args->mode); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode); ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); if (!ch) @@ -1127,7 +1134,7 @@ static int nvgpu_ioctl_allocate_profiler_object( struct gk20a *g = get_gk20a(dbg_session_linux->dev); struct dbg_profiler_object_data *prof_obj; - gk20a_dbg_fn("%s", g->name); + nvgpu_log_fn(g, "%s", g->name); nvgpu_mutex_acquire(&g->dbg_sessions_lock); @@ -1171,7 +1178,7 @@ static int nvgpu_ioctl_free_profiler_object( struct dbg_profiler_object_data *prof_obj, *tmp_obj; bool obj_found = false; - gk20a_dbg_fn("%s session_id = %d profiler_handle = %x", + nvgpu_log_fn(g, "%s session_id = %d profiler_handle = %x", g->name, dbg_s->id, args->profiler_handle); nvgpu_mutex_acquire(&g->dbg_sessions_lock); @@ -1253,7 +1260,9 @@ static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_ static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + struct gk20a *g = dbg_s->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); @@ -1265,7 +1274,9 @@ static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s) static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + struct gk20a *g = dbg_s->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); @@ -1277,7 +1288,9 @@ static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s) static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + struct gk20a *g = dbg_s->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); @@ -1294,13 +1307,13 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, { int ret = 0; struct channel_gk20a *ch; + struct gk20a *g = dbg_s->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd); ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); if (!ch) { - nvgpu_err(dbg_s->g, - "no channel bound to dbg session"); + nvgpu_err(g, "no channel bound to dbg session"); return -EINVAL; } @@ -1318,8 +1331,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, break; default: - nvgpu_err(dbg_s->g, - "unrecognized dbg gpu events ctrl cmd: 0x%x", + nvgpu_err(g, "unrecognized dbg gpu events ctrl cmd: 0x%x", args->cmd); ret = -EINVAL; break; @@ -1422,7 +1434,7 @@ static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s, if (!ch) return -EINVAL; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return g->ops.gr.update_pc_sampling ? 
g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL; @@ -1646,7 +1658,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s, struct dbg_profiler_object_data *prof_obj; int err = 0; - gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); + nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle); nvgpu_mutex_acquire(&g->dbg_sessions_lock); @@ -1678,7 +1690,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, struct dbg_profiler_object_data *prof_obj, *my_prof_obj; int err = 0; - gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); + nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle); if (g->profiler_reservation_count < 0) { nvgpu_err(g, "Negative reservation count!"); @@ -1782,12 +1794,12 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s, struct channel_gk20a *ch; int err; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", g->name, args->channel_fd); ch = gk20a_get_channel_from_file(args->channel_fd); if (!ch) { - gk20a_dbg_fn("no channel found for fd"); + nvgpu_log_fn(g, "no channel found for fd"); return -EINVAL; } @@ -1802,7 +1814,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s, nvgpu_mutex_release(&dbg_s->ch_list_lock); if (!channel_found) { - gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd); + nvgpu_log_fn(g, "channel not bounded, fd=%d\n", args->channel_fd); err = -EINVAL; goto out; } @@ -1820,7 +1832,11 @@ out: int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + struct nvgpu_os_linux *l = container_of(inode->i_cdev, + struct nvgpu_os_linux, dbg.cdev); + struct gk20a *g = &l->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */); } @@ -1833,7 +1849,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE]; int err = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || @@ -1979,7 +1995,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, nvgpu_mutex_release(&dbg_s->ioctl_lock); - gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); + nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err); if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) err = copy_to_user((void __user *)arg, diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c index be2315bd..d0bfd55a 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c @@ -175,6 +175,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, struct gk20a_event_id_data *event_id_data; u32 event_id; int err = 0; + struct gk20a *g = tsg->g; event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id); if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) @@ -187,7 +188,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, nvgpu_mutex_acquire(&event_id_data->lock); - gk20a_dbg_info( + nvgpu_log_info(g, "posting event for event_id=%d on tsg=%d\n", event_id, tsg->tsgid); event_id_data->event_posted = true; @@ -205,14 +206,14 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait) u32 event_id = event_id_data->event_id; struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id; - 
gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, " "); poll_wait(filep, &event_id_data->event_id_wq.wq, wait); nvgpu_mutex_acquire(&event_id_data->lock); if (event_id_data->event_posted) { - gk20a_dbg_info( + nvgpu_log_info(g, "found pending event_id=%d on TSG=%d\n", event_id, tsg->tsgid); mask = (POLLPRI | POLLIN); @@ -363,7 +364,7 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp) dev = dev_from_gk20a(g); - gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev)); + nvgpu_log(g, gpu_dbg_fn, "tsg: %s", dev_name(dev)); priv = nvgpu_kmalloc(g, sizeof(*priv)); if (!priv) { @@ -397,12 +398,12 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp) struct gk20a *g; int ret; - gk20a_dbg_fn(""); - l = container_of(inode->i_cdev, struct nvgpu_os_linux, tsg.cdev); g = &l->g; + nvgpu_log_fn(g, " "); + ret = gk20a_busy(g); if (ret) { nvgpu_err(g, "failed to power on, %d", ret); @@ -412,7 +413,7 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp) ret = nvgpu_ioctl_tsg_open(&l->g, filp); gk20a_idle(g); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -445,7 +446,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g, u32 level = arg->level; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); nvgpu_mutex_acquire(&sched->control_lock); if (sched->control_locked) { @@ -474,7 +475,7 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g, struct gk20a_sched_ctrl *sched = &l->sched_ctrl; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); nvgpu_mutex_acquire(&sched->control_lock); if (sched->control_locked) { @@ -509,7 +510,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd, u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE]; int err = 0; - gk20a_dbg_fn("start %d", _IOC_NR(cmd)); + nvgpu_log_fn(g, "start %d", _IOC_NR(cmd)); if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || diff --git a/drivers/gpu/nvgpu/common/linux/log.c b/drivers/gpu/nvgpu/common/linux/log.c index 4dc8f667..ca29e0f3 100644 --- a/drivers/gpu/nvgpu/common/linux/log.c +++ b/drivers/gpu/nvgpu/common/linux/log.c @@ -38,8 +38,6 @@ */ #define LOG_FMT "nvgpu: %s %33s:%-4d [%s] %s\n" -u64 nvgpu_dbg_mask = NVGPU_DEFAULT_DBG_MASK; - static const char *log_types[] = { "ERR", "WRN", diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c index 34850013..f00b3cce 100644 --- a/drivers/gpu/nvgpu/common/linux/module.c +++ b/drivers/gpu/nvgpu/common/linux/module.c @@ -218,7 +218,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) struct gk20a_platform *platform = gk20a_get_platform(dev); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->power_on) return 0; @@ -331,7 +331,7 @@ static int gk20a_pm_prepare_poweroff(struct device *dev) struct gk20a_platform *platform = gk20a_get_platform(dev); bool irqs_enabled; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&g->poweroff_lock); @@ -1013,7 +1013,7 @@ static int gk20a_pm_init(struct device *dev) struct gk20a *g = get_gk20a(dev); int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* Initialise pm runtime */ if (g->railgate_delay) { @@ -1043,7 +1043,7 @@ void gk20a_driver_start_unload(struct gk20a *g) { struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); - gk20a_dbg(gpu_dbg_shutdown, "Driver is now 
going down!\n"); + nvgpu_log(g, gpu_dbg_shutdown, "Driver is now going down!\n"); down_write(&l->busy_lock); __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); @@ -1134,8 +1134,6 @@ static int gk20a_probe(struct platform_device *dev) return -ENODATA; } - gk20a_dbg_fn(""); - platform_set_drvdata(dev, platform); if (gk20a_gpu_is_virtual(&dev->dev)) @@ -1148,6 +1146,9 @@ static int gk20a_probe(struct platform_device *dev) } gk20a = &l->g; + + nvgpu_log_fn(gk20a, " "); + nvgpu_init_gk20a(gk20a); set_gk20a(dev, gk20a); l->dev = &dev->dev; @@ -1248,7 +1249,7 @@ int nvgpu_remove(struct device *dev, struct class *class) struct gk20a_platform *platform = gk20a_get_platform(dev); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_quiesce(g); WARN(err, "gpu failed to idle during driver removal"); @@ -1288,7 +1289,7 @@ int nvgpu_remove(struct device *dev, struct class *class) if (platform->remove) platform->remove(dev); - gk20a_dbg_fn("removed"); + nvgpu_log_fn(g, "removed"); return err; } diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c index 3cac13ba..015295ba 100644 --- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c +++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c @@ -140,7 +140,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w) WARN_ON(!ptr); data = ptr[w]; #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM - gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); + nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data); #endif } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { u32 value; @@ -177,7 +177,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem, memcpy(dest, src, size); #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM if (size) - gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", + nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", src, *dest, size); #endif } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { @@ -215,7 +215,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data) WARN_ON(!ptr); #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM - gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data); + nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data); #endif ptr[w] = data; } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) { @@ -249,7 +249,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, WARN_ON(!mem->cpu_va); #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM if (size) - gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]", + nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... 
[%d bytes]", dest, *src, size); #endif memcpy(dest, src, size); @@ -296,7 +296,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, WARN_ON(!mem->cpu_va); #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM if (size) - gk20a_dbg(gpu_dbg_mem, " %p = 0x%x [times %d]", + nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x [times %d]", dest, c, size); #endif memset(dest, c, size); diff --git a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c index 82648ca3..5301b13d 100644 --- a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c +++ b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c @@ -551,6 +551,7 @@ static void gk20a_tegra_scale_init(struct device *dev) struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_scale_profile *profile = platform->g->scale_profile; struct gk20a_emc_params *emc_params; + struct gk20a *g = platform->g; if (!profile) return; @@ -568,7 +569,7 @@ static void gk20a_tegra_scale_init(struct device *dev) #ifdef CONFIG_TEGRA_BWMGR emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU); if (!emc_params->bwmgr_cl) { - gk20a_dbg_info("%s Missing GPU BWMGR client\n", __func__); + nvgpu_log_info(g, "%s Missing GPU BWMGR client\n", __func__); return; } #endif @@ -767,6 +768,7 @@ static int gk20a_tegra_probe(struct device *dev) struct device_node *np = dev->of_node; bool joint_xpu_rail = false; int ret; + struct gk20a *g = platform->g; #ifdef CONFIG_COMMON_CLK /* DVFS is not guaranteed to be initialized at the time of probe on @@ -775,13 +777,13 @@ static int gk20a_tegra_probe(struct device *dev) if (!platform->gpu_rail) { platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME); if (!platform->gpu_rail) { - gk20a_dbg_info("deferring probe no gpu_rail\n"); + nvgpu_log_info(g, "deferring probe no gpu_rail"); return -EPROBE_DEFER; } } if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) { - gk20a_dbg_info("deferring probe gpu_rail not ready\n"); + nvgpu_log_info(g, "deferring probe gpu_rail not ready"); return -EPROBE_DEFER; } #endif @@ -798,7 +800,7 @@ static int gk20a_tegra_probe(struct device *dev) #endif if (joint_xpu_rail) { - gk20a_dbg_info("XPU rails are joint\n"); + nvgpu_log_info(g, "XPU rails are joint\n"); platform->g->can_railgate = false; } diff --git a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c index 6e54d00b..08c5df0f 100644 --- a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c +++ b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c @@ -273,11 +273,11 @@ void gp10b_tegra_prescale(struct device *dev) struct gk20a *g = get_gk20a(dev); u32 avg = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_pmu_load_norm(g, &avg); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } void gp10b_tegra_postscale(struct device *pdev, @@ -288,7 +288,7 @@ void gp10b_tegra_postscale(struct device *pdev, struct gk20a *g = get_gk20a(pdev); unsigned long emc_rate; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (profile && !platform->is_railgated(pdev)) { unsigned long emc_scale; @@ -306,7 +306,7 @@ void gp10b_tegra_postscale(struct device *pdev, (struct tegra_bwmgr_client *)profile->private_data, emc_rate, TEGRA_BWMGR_SET_EMC_FLOOR); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } long gp10b_round_clk_rate(struct device *dev, unsigned long rate) @@ -328,6 +328,7 @@ int gp10b_clk_get_freqs(struct device *dev, unsigned long **freqs, int *num_freqs) { struct gk20a_platform *platform = gk20a_get_platform(dev); + 
struct gk20a *g = platform->g; unsigned long max_rate; unsigned long new_rate = 0, prev_rate = 0; int i = 0, freq_counter = 0; @@ -358,7 +359,7 @@ int gp10b_clk_get_freqs(struct device *dev, *freqs = gp10b_freq_table; *num_freqs = freq_counter; - gk20a_dbg_info("min rate: %ld max rate: %ld num_of_freq %d\n", + nvgpu_log_info(g, "min rate: %ld max rate: %ld num_of_freq %d\n", gp10b_freq_table[0], max_rate, *num_freqs); return 0; diff --git a/drivers/gpu/nvgpu/common/linux/sched.c b/drivers/gpu/nvgpu/common/linux/sched.c index a7da020c..2ad5aabf 100644 --- a/drivers/gpu/nvgpu/common/linux/sched.c +++ b/drivers/gpu/nvgpu/common/linux/sched.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -37,10 +37,11 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf, size_t size, loff_t *off) { struct gk20a_sched_ctrl *sched = filp->private_data; + struct gk20a *g = sched->g; struct nvgpu_sched_event_arg event = { 0 }; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "filp=%p buf=%p size=%zu", filp, buf, size); if (size < sizeof(event)) @@ -77,9 +78,10 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf, unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait) { struct gk20a_sched_ctrl *sched = filp->private_data; + struct gk20a *g = sched->g; unsigned int mask = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " "); nvgpu_mutex_acquire(&sched->status_lock); poll_wait(filp, &sched->readout_wq.wq, wait); @@ -93,7 +95,9 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait) static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched, struct nvgpu_sched_get_tsgs_args *arg) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", + struct gk20a *g = sched->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", arg->size, arg->buffer); if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { @@ -115,7 +119,9 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched, static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched, struct nvgpu_sched_get_tsgs_args *arg) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", + struct gk20a *g = sched->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx", arg->size, arg->buffer); if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { @@ -139,7 +145,8 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched, static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, struct nvgpu_sched_get_tsgs_by_pid_args *arg) { - struct fifo_gk20a *f = &sched->g->fifo; + struct gk20a *g = sched->g; + struct fifo_gk20a *f = &g->fifo; struct tsg_gk20a *tsg; u64 *bitmap; unsigned int tsgid; @@ -147,7 +154,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, pid_t tgid = (pid_t)arg->pid; int err = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx", (pid_t)arg->pid, arg->size, arg->buffer); if ((arg->size < sched->bitmap_size) || (!arg->buffer)) { @@ -186,7 +193,7 @@ static int 
gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched, struct tsg_gk20a *tsg; u32 tsgid = arg->tsgid; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); if (tsgid >= f->num_channels) return -EINVAL; @@ -221,7 +228,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice( u32 tsgid = arg->tsgid; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); if (tsgid >= f->num_channels) return -EINVAL; @@ -256,7 +263,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave( u32 tsgid = arg->tsgid; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); if (tsgid >= f->num_channels) return -EINVAL; @@ -283,7 +290,9 @@ done: static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); + struct gk20a *g = sched->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " "); nvgpu_mutex_acquire(&sched->control_lock); sched->control_locked = true; @@ -293,7 +302,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched) static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); + struct gk20a *g = sched->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " "); nvgpu_mutex_acquire(&sched->control_lock); sched->control_locked = false; @@ -304,7 +315,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched) static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched, struct nvgpu_sched_api_version_args *args) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); + struct gk20a *g = sched->g; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " "); args->version = NVGPU_SCHED_API_VERSION; return 0; @@ -318,7 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, struct tsg_gk20a *tsg; u32 tsgid = arg->tsgid; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); if (tsgid >= f->num_channels) return -EINVAL; @@ -355,7 +368,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched, struct tsg_gk20a *tsg; u32 tsgid = arg->tsgid; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid); if (tsgid >= f->num_channels) return -EINVAL; @@ -390,7 +403,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp) return -ENODEV; sched = &l->sched_ctrl; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p", g); if (!sched->sw_ready) { err = gk20a_busy(g); @@ -410,7 +423,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp) memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size); filp->private_data = sched; - gk20a_dbg(gpu_dbg_sched, "filp=%p sched=%p", filp, sched); + nvgpu_log(g, gpu_dbg_sched, "filp=%p sched=%p", filp, sched); free_ref: if (err) @@ -426,7 +439,7 @@ long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd, u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE]; int err = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd)); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd)); if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) || @@ -509,7 +522,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file 
*filp) struct tsg_gk20a *tsg; unsigned int tsgid; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched); /* release any reference to TSGs */ for (tsgid = 0; tsgid < f->num_channels; tsgid++) { @@ -535,7 +548,7 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg) struct gk20a_sched_ctrl *sched = &l->sched_ctrl; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); if (!sched->sw_ready) { err = gk20a_busy(g); @@ -560,7 +573,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg) struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); struct gk20a_sched_ctrl *sched = &l->sched_ctrl; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); nvgpu_mutex_acquire(&sched->status_lock); NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap); @@ -592,7 +605,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g) sched->bitmap_size = roundup(f->num_channels, 64) / 8; sched->status = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu", g, sched, sched->bitmap_size); sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size); diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c index 0bd8e2bc..0858e6b1 100644 --- a/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c +++ b/drivers/gpu/nvgpu/common/linux/vgpu/clk_vgpu.c @@ -33,7 +33,7 @@ static unsigned long vgpu_clk_get_rate(struct gk20a *g, u32 api_domain) int err; unsigned long ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); switch (api_domain) { case CTRL_CLK_DOMAIN_GPCCLK: @@ -65,7 +65,7 @@ static int vgpu_clk_set_rate(struct gk20a *g, struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; int err = -EINVAL; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); switch (api_domain) { case CTRL_CLK_DOMAIN_GPCCLK: @@ -121,7 +121,7 @@ int vgpu_clk_get_freqs(struct device *dev, unsigned int i; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE; msg.handle = vgpu_get_handle(g); @@ -152,7 +152,7 @@ int vgpu_clk_cap_rate(struct device *dev, unsigned long rate) struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE; msg.handle = vgpu_get_handle(g); diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c index fe9dc670..ba2bf58b 100644 --- a/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c +++ b/drivers/gpu/nvgpu/common/linux/vgpu/css_vgpu.c @@ -86,7 +86,7 @@ static int vgpu_css_init_snapshot_buffer(struct gr_gk20a *gr) int err; u64 size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (data->hw_snapshot) return 0; @@ -125,6 +125,7 @@ fail: void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr) { struct gk20a_cs_snapshot *data = gr->cs_data; + struct gk20a *g = gr->g; if (!data->hw_snapshot) return; @@ -135,7 +136,7 @@ void vgpu_css_release_snapshot_buffer(struct gr_gk20a *gr) vgpu_ivm_mempool_unreserve(css_cookie); css_cookie = NULL; - gk20a_dbg_info("cyclestats(vgpu): buffer for snapshots released\n"); + nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n"); } int vgpu_css_flush_snapshots(struct channel_gk20a *ch, @@ 
-148,7 +149,7 @@ int vgpu_css_flush_snapshots(struct channel_gk20a *ch, struct gk20a_cs_snapshot *data = gr->cs_data; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; msg.handle = vgpu_get_handle(g); @@ -176,7 +177,7 @@ static int vgpu_css_attach(struct channel_gk20a *ch, &msg.params.cyclestats_snapshot; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; msg.handle = vgpu_get_handle(g); @@ -203,7 +204,7 @@ int vgpu_css_detach(struct channel_gk20a *ch, &msg.params.cyclestats_snapshot; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT; msg.handle = vgpu_get_handle(g); diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c index 31d89853..499a8eb4 100644 --- a/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c +++ b/drivers/gpu/nvgpu/common/linux/vgpu/fecs_trace_vgpu.c @@ -46,7 +46,7 @@ int vgpu_fecs_trace_init(struct gk20a *g) u32 mempool; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); vcst = nvgpu_kzalloc(g, sizeof(*vcst)); if (!vcst) diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c index c3d95b4a..5d3598b5 100644 --- a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c +++ b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu_linux.c @@ -142,7 +142,7 @@ int vgpu_pm_prepare_poweroff(struct device *dev) struct gk20a *g = get_gk20a(dev); int ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!g->power_on) return 0; @@ -162,7 +162,7 @@ int vgpu_pm_finalize_poweron(struct device *dev) struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->power_on) return 0; @@ -227,7 +227,7 @@ static int vgpu_qos_notify(struct notifier_block *nb, u32 max_freq; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS); err = vgpu_clk_cap_rate(profile->dev, max_freq); @@ -277,7 +277,7 @@ static int vgpu_pm_init(struct device *dev) int num_freqs; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_platform_is_simulation(g)) return 0; @@ -321,14 +321,15 @@ int vgpu_probe(struct platform_device *pdev) return -ENODATA; } - gk20a_dbg_fn(""); - l = kzalloc(sizeof(*l), GFP_KERNEL); if (!l) { dev_err(dev, "couldn't allocate gk20a support"); return -ENOMEM; } gk20a = &l->g; + + nvgpu_log_fn(gk20a, " "); + nvgpu_init_gk20a(gk20a); nvgpu_kmem_init(gk20a); @@ -428,7 +429,7 @@ int vgpu_probe(struct platform_device *pdev) vgpu_create_sysfs(dev); gk20a_init_gr(gk20a); - gk20a_dbg_info("total ram pages : %lu", totalram_pages); + nvgpu_log_info(gk20a, "total ram pages : %lu", totalram_pages); gk20a->gr.max_comptag_mem = totalram_pages >> (10 - (PAGE_SHIFT - 10)); @@ -442,7 +443,7 @@ int vgpu_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct gk20a *g = get_gk20a(dev); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); vgpu_pm_qos_remove(dev); if (g->remove_support) diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c index 75572b93..baa77515 100644 --- a/drivers/gpu/nvgpu/common/linux/vm.c +++ b/drivers/gpu/nvgpu/common/linux/vm.c @@ -88,8 +88,9 @@ int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va, u64 *offset) { struct nvgpu_mapped_buf *mapped_buffer; + struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_fn("gpu_va=0x%llx", gpu_va); + 
nvgpu_log_fn(g, "gpu_va=0x%llx", gpu_va); nvgpu_mutex_acquire(&vm->update_gmmu_lock); diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c index 0fb423b6..37435f97 100644 --- a/drivers/gpu/nvgpu/common/mm/vidmem.c +++ b/drivers/gpu/nvgpu/common/mm/vidmem.c @@ -394,7 +394,7 @@ int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space) { struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!nvgpu_alloc_initialized(allocator)) return -ENOSYS; diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c index 52c0a798..01f9262c 100644 --- a/drivers/gpu/nvgpu/common/vbios/bios.c +++ b/drivers/gpu/nvgpu/common/vbios/bios.c @@ -296,7 +296,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g) struct pci_ext_data_struct *pci_ext_data; pci_rom = (struct pci_exp_rom *)&g->bios.data[offset]; - gk20a_dbg_fn("pci rom sig %04x ptr %04x block %x", + nvgpu_log_fn(g, "pci rom sig %04x ptr %04x block %x", pci_rom->sig, pci_rom->pci_data_struct_ptr, pci_rom->size_of_block); @@ -309,7 +309,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g) pci_data = (struct pci_data_struct *) &g->bios.data[offset + pci_rom->pci_data_struct_ptr]; - gk20a_dbg_fn("pci data sig %08x len %d image len %x type %x last %d max %08x", + nvgpu_log_fn(g, "pci data sig %08x len %d image len %x type %x last %d max %08x", pci_data->sig, pci_data->pci_data_struct_len, pci_data->image_len, pci_data->code_type, pci_data->last_image, @@ -322,7 +322,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g) pci_data->pci_data_struct_len + 0xf) & ~0xf]; - gk20a_dbg_fn("pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x", + nvgpu_log_fn(g, "pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x", pci_ext_data->sig, pci_ext_data->nv_pci_data_ext_rev, pci_ext_data->nv_pci_data_ext_len, @@ -330,7 +330,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g) pci_ext_data->priv_last_image, pci_ext_data->flags); - gk20a_dbg_fn("expansion rom offset %x", + nvgpu_log_fn(g, "expansion rom offset %x", pci_data->image_len * 512); g->bios.expansion_rom_offset = pci_data->image_len * 512; @@ -342,7 +342,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g) } } - gk20a_dbg_info("read bios"); + nvgpu_log_info(g, "read bios"); for (i = 0; i < g->bios.size - 6; i++) { if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID && nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) { @@ -362,7 +362,7 @@ static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset) struct biosdata biosdata; memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata)); - gk20a_dbg_fn("bios version %x, oem version %x", + nvgpu_log_fn(g, "bios version %x, oem version %x", biosdata.version, biosdata.oem_version); @@ -375,9 +375,9 @@ static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, int offset) struct nvinit_ptrs nvinit_ptrs; memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs)); - gk20a_dbg_fn("devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr, + nvgpu_log_fn(g, "devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr, nvinit_ptrs.devinit_tables_size); - gk20a_dbg_fn("bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr, + nvgpu_log_fn(g, "bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr, nvinit_ptrs.bootscripts_size); g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr]; @@ -449,7 +449,7 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset) struct devinit_engine_interface interface; 
memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface)); - gk20a_dbg_fn("devinit version %x tables phys %x script phys %x size %d", + nvgpu_log_fn(g, "devinit version %x tables phys %x script phys %x size %d", interface.version, interface.tables_phys_base, interface.script_phys_base, @@ -468,7 +468,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset) memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); - gk20a_dbg_fn("appInfoHdr ver %d size %d entrySize %d entryCount %d", + nvgpu_log_fn(g, "appInfoHdr ver %d size %d entrySize %d entryCount %d", hdr.version, hdr.header_size, hdr.entry_size, hdr.entry_count); @@ -481,7 +481,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset) memcpy(&entry, &g->bios.data[offset], sizeof(entry)); - gk20a_dbg_fn("appInfo id %d dmem_offset %d", + nvgpu_log_fn(g, "appInfo id %d dmem_offset %d", entry.id, entry.dmem_offset); if (entry.id == APPINFO_ID_DEVINIT) @@ -530,26 +530,26 @@ static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g, memcpy(&desc, &udesc, sizeof(udesc.v2)); break; default: - gk20a_dbg_info("invalid version"); + nvgpu_log_info(g, "invalid version"); return -EINVAL; } - gk20a_dbg_info("falcon ucode desc version %x len %x", version, desc_size); + nvgpu_log_info(g, "falcon ucode desc version %x len %x", version, desc_size); - gk20a_dbg_info("falcon ucode desc stored size %x uncompressed size %x", + nvgpu_log_info(g, "falcon ucode desc stored size %x uncompressed size %x", desc.stored_size, desc.uncompressed_size); - gk20a_dbg_info("falcon ucode desc virtualEntry %x, interfaceOffset %x", + nvgpu_log_info(g, "falcon ucode desc virtualEntry %x, interfaceOffset %x", desc.virtual_entry, desc.interface_offset); - gk20a_dbg_info("falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x", + nvgpu_log_info(g, "falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x", desc.imem_phys_base, desc.imem_load_size, desc.imem_virt_base, desc.imem_sec_base, desc.imem_sec_size); - gk20a_dbg_info("falcon ucode DMEM offset %x phys base %x, load size %x", + nvgpu_log_info(g, "falcon ucode DMEM offset %x phys base %x, load size %x", desc.dmem_offset, desc.dmem_phys_base, desc.dmem_load_size); if (desc.stored_size != desc.uncompressed_size) { - gk20a_dbg_info("does not match"); + nvgpu_log_info(g, "does not match"); return -EINVAL; } @@ -575,7 +575,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset) int i; memcpy(&hdr, &g->bios.data[offset], sizeof(hdr)); - gk20a_dbg_fn("falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d", + nvgpu_log_fn(g, "falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d", hdr.version, hdr.header_size, hdr.entry_size, hdr.entry_count, hdr.desc_version, hdr.desc_size); @@ -590,7 +590,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset) memcpy(&entry, &g->bios.data[offset], sizeof(entry)); - gk20a_dbg_fn("falcon ucode table entry appid %x targetId %x descPtr %x", + nvgpu_log_fn(g, "falcon ucode table entry appid %x targetId %x descPtr %x", entry.application_id, entry.target_id, entry.desc_ptr); @@ -638,7 +638,7 @@ static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset) int err; memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data)); - gk20a_dbg_fn("falcon ucode table ptr %x", + nvgpu_log_fn(g, "falcon ucode table ptr %x", falcon_data.falcon_ucode_table_ptr); err = 
nvgpu_bios_parse_falcon_ucode_table(g, falcon_data.falcon_ucode_table_ptr); @@ -676,7 +676,7 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g, if (table_id < (ptoken->data_size/data_size)) { - gk20a_dbg_info("Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x", + nvgpu_log_info(g, "Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x", (ptoken->data_ptr + (table_id * data_size)), perf_table_id_offset); @@ -705,18 +705,18 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset) struct bit_token bit_token; int i; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); memcpy(&bit, &g->bios.data[offset], sizeof(bit)); - gk20a_dbg_info("BIT header: %04x %08x", bit.id, bit.signature); - gk20a_dbg_info("tokens: %d entries * %d bytes", + nvgpu_log_info(g, "BIT header: %04x %08x", bit.id, bit.signature); + nvgpu_log_info(g, "tokens: %d entries * %d bytes", bit.token_entries, bit.token_size); offset += bit.header_size; for (i = 0; i < bit.token_entries; i++) { memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token)); - gk20a_dbg_info("BIT token id %d ptr %d size %d ver %d", + nvgpu_log_info(g, "BIT token id %d ptr %d size %d ver %d", bit_token.token_id, bit_token.data_ptr, bit_token.data_size, bit_token.data_version); @@ -753,7 +753,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset) offset += bit.token_size; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset) diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c index 0280bbbb..086d4e7b 100644 --- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c @@ -50,21 +50,21 @@ static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr) { - gk20a_dbg(gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n"); + nvgpu_log(g, gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n"); return ce2_intr_status_nonblockpipe_pending_f(); } static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr) { - gk20a_dbg(gpu_dbg_intr, "ce2 blocking pipe interrupt\n"); + nvgpu_log(g, gpu_dbg_intr, "ce2 blocking pipe interrupt\n"); return ce2_intr_status_blockpipe_pending_f(); } static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr) { - gk20a_dbg(gpu_dbg_intr, "ce2 launch error interrupt\n"); + nvgpu_log(g, gpu_dbg_intr, "ce2 launch error interrupt\n"); return ce2_intr_status_launcherr_pending_f(); } @@ -74,7 +74,7 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base) u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); u32 clear_intr = 0; - gk20a_dbg(gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr); + nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr); /* clear blocking interrupts: they exibit broken behavior */ if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) @@ -92,7 +92,7 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base) int ops = 0; u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); - gk20a_dbg(gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr); + nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr); if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) { gk20a_writel(g, ce2_intr_status_r(), @@ -340,7 +340,7 @@ int gk20a_init_ce_support(struct gk20a *g) return 0; } - gk20a_dbg(gpu_dbg_fn, "ce: init"); + nvgpu_log(g, gpu_dbg_fn, "ce: init"); err = nvgpu_mutex_init(&ce_app->app_mutex); if (err) @@ -355,7 +355,7 @@ int gk20a_init_ce_support(struct gk20a *g) ce_app->app_state = NVGPU_CE_ACTIVE; nvgpu_mutex_release(&ce_app->app_mutex); - gk20a_dbg(gpu_dbg_cde_ctx, "ce: init finished"); + 
nvgpu_log(g, gpu_dbg_cde_ctx, "ce: init finished"); return 0; } diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index e65ed278..21abdf9a 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c @@ -116,7 +116,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c) { struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.mm.init_inst_block(&c->inst_block, c->vm, c->vm->gmmu_page_sizes[gmmu_page_size_big]); @@ -208,7 +208,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch) void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt) { - gk20a_dbg_fn(""); + nvgpu_log_fn(ch->g, " "); if (gk20a_is_channel_marked_as_tsg(ch)) return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt); @@ -291,7 +291,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force) struct dbg_session_channel_data *ch_data, *tmp; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); WARN_ON(ch->g == NULL); @@ -351,7 +351,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force) /* if engine reset was deferred, perform it now */ nvgpu_mutex_acquire(&f->deferred_reset_mutex); if (g->fifo.deferred_reset_pending) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was" + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was" " deferred, running now"); /* if lock is already taken, a reset is taking place so no need to repeat */ @@ -365,7 +365,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force) if (!gk20a_channel_as_bound(ch)) goto unbind; - gk20a_dbg_info("freeing bound channel context, timeout=%ld", + nvgpu_log_info(g, "freeing bound channel context, timeout=%ld", timeout); #ifdef CONFIG_GK20A_CTXSW_TRACE @@ -626,7 +626,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, runlist_id = gk20a_fifo_get_gr_runlist_id(g); } - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ch = allocate_channel(f); if (ch == NULL) { @@ -765,7 +765,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, u32 free_count; u32 size = orig_size; - gk20a_dbg_fn("size %d", orig_size); + nvgpu_log_fn(c->g, "size %d", orig_size); if (!e) { nvgpu_err(c->g, @@ -779,7 +779,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, if (q->put + size > q->size) size = orig_size + (q->size - q->put); - gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d", + nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d", c->chid, q->get, q->put); free_count = (q->size - (q->put - q->get) - 1) % q->size; @@ -812,7 +812,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, nvgpu_smp_wmb(); e->valid = true; - gk20a_dbg_fn("done"); + nvgpu_log_fn(c->g, "done"); return 0; } @@ -1132,7 +1132,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, c->gpfifo.entry_num = gpfifo_size; c->gpfifo.get = c->gpfifo.put = 0; - gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d", + nvgpu_log_info(g, "channel %d : gpfifo_base 0x%016llx, size %d", c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num); g->ops.fifo.setup_userd(c); @@ -1184,7 +1184,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, g->ops.fifo.bind_channel(c); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up_priv_cmd: @@ -1400,7 +1400,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch) u64 pb_get; u64 new_pb_get; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* Get status and 
clear the timer */ nvgpu_raw_spinlock_acquire(&ch->timeout.lock); @@ -1480,7 +1480,7 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g) */ static void gk20a_channel_worker_process_ch(struct channel_gk20a *ch) { - gk20a_dbg_fn(""); + nvgpu_log_fn(ch->g, " "); gk20a_channel_clean_up_jobs(ch, true); @@ -1499,7 +1499,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g) { int put; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* * Currently, the only work type is associated with a lock, which deals @@ -1596,7 +1596,7 @@ static int gk20a_channel_poll_worker(void *arg) struct nvgpu_timeout timeout; int get = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, watchdog_interval, NVGPU_TIMER_CPU_TIMER); @@ -1699,7 +1699,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch) { struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* * Warn if worker thread cannot run @@ -2142,12 +2142,12 @@ int gk20a_channel_suspend(struct gk20a *g) bool channels_in_use = false; u32 active_runlist_ids = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (chid = 0; chid < f->num_channels; chid++) { struct channel_gk20a *ch = &f->channel[chid]; if (gk20a_channel_get(ch)) { - gk20a_dbg_info("suspend channel %d", chid); + nvgpu_log_info(g, "suspend channel %d", chid); /* disable channel */ gk20a_disable_channel_tsg(g, ch); /* preempt the channel */ @@ -2175,7 +2175,7 @@ int gk20a_channel_suspend(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2186,11 +2186,11 @@ int gk20a_channel_resume(struct gk20a *g) bool channels_in_use = false; u32 active_runlist_ids = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (chid = 0; chid < f->num_channels; chid++) { if (gk20a_channel_get(&f->channel[chid])) { - gk20a_dbg_info("resume channel %d", chid); + nvgpu_log_info(g, "resume channel %d", chid); g->ops.fifo.bind_channel(&f->channel[chid]); channels_in_use = true; active_runlist_ids |= BIT(f->channel[chid].runlist_id); @@ -2201,7 +2201,7 @@ int gk20a_channel_resume(struct gk20a *g) if (channels_in_use) gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2210,7 +2210,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events) struct fifo_gk20a *f = &g->fifo; u32 chid; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* * Ensure that all pending writes are actually done before trying to diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c index 114386a2..0fc39bf4 100644 --- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A Cycle stats snapshots support (subsystem for gr_gk20a). * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -189,7 +189,7 @@ int css_hw_enable_snapshot(struct channel_gk20a *ch, perf_pmasys_mem_block_valid_true_f() | perf_pmasys_mem_block_target_lfb_f()); - gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n"); return 0; @@ -227,7 +227,7 @@ void css_hw_disable_snapshot(struct gr_gk20a *gr) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); data->hw_snapshot = NULL; - gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n"); } static void css_gr_free_shared_data(struct gr_gk20a *gr) diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index ce06e78b..97de7138 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c @@ -90,8 +90,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) { struct dbg_session_data *session_data; struct dbg_session_gk20a *dbg_s; + struct gk20a *g = ch->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); /* guard against the session list being modified */ nvgpu_mutex_acquire(&ch->dbg_s_lock); @@ -100,9 +101,9 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) dbg_session_data, dbg_s_entry) { dbg_s = session_data->dbg_s; if (dbg_s->dbg_events.events_enabled) { - gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d", + nvgpu_log(g, gpu_dbg_gpu_dbg, "posting event on session id %d", dbg_s->id); - gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending", + nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending", dbg_s->dbg_events.num_pending_events); dbg_s->dbg_events.num_pending_events++; @@ -119,8 +120,9 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) struct dbg_session_data *session_data; struct dbg_session_gk20a *dbg_s; bool broadcast = false; + struct gk20a *g = ch->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); /* guard against the session list being modified */ nvgpu_mutex_acquire(&ch->dbg_s_lock); @@ -129,7 +131,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) dbg_session_data, dbg_s_entry) { dbg_s = session_data->dbg_s; if (dbg_s->broadcast_stop_trigger) { - gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, "stop trigger broadcast enabled"); broadcast = true; break; @@ -145,8 +147,9 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) { struct dbg_session_data *session_data; struct dbg_session_gk20a *dbg_s; + struct gk20a *g = ch->g; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); /* guard against the session list being modified */ nvgpu_mutex_acquire(&ch->dbg_s_lock); @@ -155,7 +158,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) dbg_session_data, dbg_s_entry) { dbg_s = session_data->dbg_s; if (dbg_s->broadcast_stop_trigger) { - gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, "stop trigger broadcast disabled"); dbg_s->broadcast_stop_trigger = false; } diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c 
b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c index e3052701..c4be3313 100644 --- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A memory interface * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,7 +36,7 @@ void fb_gk20a_reset(struct gk20a *g) { u32 val; - gk20a_dbg_info("reset gk20a fb"); + nvgpu_log_info(g, "reset gk20a fb"); g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | mc_enable_l2_enabled_f() | @@ -63,7 +63,7 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb) u32 addr_lo; u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* pagetables are considered sw states which are preserved after prepare_poweroff. When gk20a deinit releases those pagetables, diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c index 4fda0d2e..c9d7ea06 100644 --- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -137,7 +137,7 @@ static int gk20a_fecs_trace_get_write_index(struct gk20a *g) static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index) { - gk20a_dbg(gpu_dbg_ctxsw, "set read=%d", index); + nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index); return gr_gk20a_elpg_protected_call(g, (gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0)); } @@ -148,12 +148,12 @@ void gk20a_fecs_trace_hash_dump(struct gk20a *g) struct gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table"); + nvgpu_log(g, gpu_dbg_ctxsw, "dumping hash table"); nvgpu_mutex_acquire(&trace->hash_lock); hash_for_each(trace->pid_hash_table, bkt, ent, node) { - gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", + nvgpu_log(g, gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", ent, bkt, ent->context_ptr, ent->pid); } @@ -165,7 +165,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid struct gk20a_fecs_trace_hash_ent *he; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid); he = nvgpu_kzalloc(g, sizeof(*he)); @@ -190,7 +190,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr) struct gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "freeing hash entry context_ptr=%x", context_ptr); nvgpu_mutex_acquire(&trace->hash_lock); @@ -198,7 +198,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr) context_ptr) { if (ent->context_ptr == context_ptr) { hash_del(&ent->node); - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "freed hash entry=%p context_ptr=%x", ent, ent->context_ptr); nvgpu_kfree(g, ent); @@ -215,7 +215,7 @@ static void gk20a_fecs_trace_free_hash_table(struct gk20a *g) struct 
gk20a_fecs_trace_hash_ent *ent; struct gk20a_fecs_trace *trace = g->fecs_trace; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); nvgpu_mutex_acquire(&trace->hash_lock); hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) { @@ -235,7 +235,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr) nvgpu_mutex_acquire(&trace->hash_lock); hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) { if (ent->context_ptr == context_ptr) { - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "found context_ptr=%x -> pid=%d", ent->context_ptr, ent->pid); pid = ent->pid; @@ -265,7 +265,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record( trace, index); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "consuming record trace=%p read=%d record=%p", trace, index, r); if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) { @@ -284,7 +284,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr); new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)", r->context_ptr, cur_pid, r->new_context_ptr, new_pid); @@ -298,7 +298,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]); entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT; - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x timestamp=%llx context_id=%08x new_context_id=%08x", entry.tag, entry.timestamp, r->context_id, r->new_context_id); @@ -327,7 +327,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) continue; } - gk20a_dbg(gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld", + nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld", entry.tag, entry.context_id, entry.pid); if (!entry.context_id) @@ -368,7 +368,7 @@ int gk20a_fecs_trace_poll(struct gk20a *g) if (!cnt) goto done; - gk20a_dbg(gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_ctxsw, "circular buffer: read=%d (mailbox=%d) write=%d cnt=%d", read, gk20a_fecs_trace_get_read_index(g), write, cnt); @@ -633,7 +633,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g, pid_t pid; u32 aperture; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "chid=%d context_ptr=%x inst_block=%llx", ch->chid, context_ptr, nvgpu_inst_block_addr(g, &ch->inst_block)); @@ -662,7 +662,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g, lo = u64_lo32(pa); hi = u64_hi32(pa); - gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi, + nvgpu_log(g, gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi, lo, GK20A_FECS_TRACE_NUM_RECORDS); nvgpu_mem_wr(g, mem, @@ -696,7 +696,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch) u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch); if (g->fecs_trace) { - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, "ch=%p context_ptr=%x", ch, context_ptr); if (g->ops.fecs_trace.is_enabled(g)) { @@ -711,7 +711,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch) int gk20a_fecs_trace_reset(struct gk20a *g) { - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); if 
(!g->ops.fecs_trace.is_enabled(g)) return 0; diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 75d66968..cc63c3b8 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c @@ -94,7 +94,7 @@ u32 gk20a_fifo_get_engine_ids(struct gk20a *g, engine_id[instance_cnt] = active_engine_id; ++instance_cnt; } else { - gk20a_dbg_info("warning engine_id table sz is small %d", + nvgpu_log_info(g, "warning engine_id table sz is small %d", engine_id_sz); } } @@ -320,7 +320,7 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, { int ret = ENGINE_INVAL_GK20A; - gk20a_dbg_info("engine type %d", engine_type); + nvgpu_log_info(g, "engine type %d", engine_type); if (engine_type == top_device_info_type_enum_graphics_v()) ret = ENGINE_GR_GK20A; else if ((engine_type >= top_device_info_type_enum_copy0_v()) && @@ -354,7 +354,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) u32 gr_runlist_id = ~0; bool found_pbdma_for_runlist = false; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); f->num_engines = 0; @@ -367,7 +367,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) if (top_device_info_engine_v(table_entry)) { engine_id = top_device_info_engine_enum_v(table_entry); - gk20a_dbg_info("info: engine_id %d", + nvgpu_log_info(g, "info: engine_id %d", top_device_info_engine_enum_v(table_entry)); } @@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) if (top_device_info_runlist_v(table_entry)) { runlist_id = top_device_info_runlist_enum_v(table_entry); - gk20a_dbg_info("gr info: runlist_id %d", runlist_id); + nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id); runlist_bit = BIT(runlist_id); @@ -384,7 +384,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) pbdma_id++) { if (f->pbdma_map[pbdma_id] & runlist_bit) { - gk20a_dbg_info( + nvgpu_log_info(g, "gr info: pbdma_map[%d]=%d", pbdma_id, f->pbdma_map[pbdma_id]); @@ -402,13 +402,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) if (top_device_info_intr_v(table_entry)) { intr_id = top_device_info_intr_enum_v(table_entry); - gk20a_dbg_info("gr info: intr_id %d", intr_id); + nvgpu_log_info(g, "gr info: intr_id %d", intr_id); } if (top_device_info_reset_v(table_entry)) { reset_id = top_device_info_reset_enum_v(table_entry); - gk20a_dbg_info("gr info: reset_id %d", + nvgpu_log_info(g, "gr info: reset_id %d", reset_id); } } else if (entry == top_device_info_entry_engine_type_v()) { @@ -538,7 +538,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f) struct gk20a *g = f->g; unsigned int i = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_channel_worker_deinit(g); /* @@ -616,7 +616,7 @@ static void fifo_pbdma_exception_status(struct gk20a *g, get_exception_pbdma_info(g, eng_info); e = &eng_info->pbdma_exception_info; - gk20a_dbg_fn("pbdma_id %d, " + nvgpu_log_fn(g, "pbdma_id %d, " "id_type %s, id %d, chan_status %d, " "next_id_type %s, next_id %d, " "chsw_in_progress %d", @@ -657,7 +657,7 @@ static void fifo_engine_exception_status(struct gk20a *g, get_exception_engine_info(g, eng_info); e = &eng_info->engine_exception_info; - gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, " + nvgpu_log_fn(g, "engine_id %d, id_type %s, id %d, ctx_status %d, " "faulted %d, idle %d, ctxsw_in_progress %d, ", eng_info->engine_id, e->id_is_chid ? 
"chid" : "tsgid", e->id, e->ctx_status_v, @@ -745,7 +745,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) clean_up_runlist: gk20a_fifo_delete_runlist(f); - gk20a_dbg_fn("fail"); + nvgpu_log_fn(g, "fail"); return -ENOMEM; } @@ -784,7 +784,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) unsigned int i; u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* enable pmc pfifo */ g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); @@ -805,7 +805,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) timeout = gk20a_readl(g, fifo_fb_timeout_r()); timeout = set_field(timeout, fifo_fb_timeout_period_m(), fifo_fb_timeout_period_max_f()); - gk20a_dbg_info("fifo_fb_timeout reg val = 0x%08x", timeout); + nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout); gk20a_writel(g, fifo_fb_timeout_r(), timeout); /* write pbdma timeout value */ @@ -813,7 +813,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) timeout = gk20a_readl(g, pbdma_timeout_r(i)); timeout = set_field(timeout, pbdma_timeout_period_m(), pbdma_timeout_period_max_f()); - gk20a_dbg_info("pbdma_timeout reg val = 0x%08x", timeout); + nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", timeout); gk20a_writel(g, pbdma_timeout_r(i), timeout); } if (g->ops.fifo.apply_pb_timeout) @@ -837,10 +837,10 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f(); gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall); - gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); + nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); - gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, + nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, ~pbdma_intr_en_0_lbreq_enabled_f()); gk20a_writel(g, pbdma_intr_en_1_r(i), ~pbdma_intr_en_0_lbreq_enabled_f()); @@ -852,12 +852,12 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) /* clear and enable pfifo interrupt */ gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); mask = gk20a_fifo_intr_0_en_mask(g); - gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); + nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask); gk20a_writel(g, fifo_intr_en_0_r(), mask); - gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); + nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000"); gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -868,7 +868,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) unsigned int chid, i; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); f->g = g; @@ -945,7 +945,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) goto clean_up; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: @@ -972,10 +972,10 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) u64 userd_base; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (f->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -997,7 +997,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) nvgpu_err(g, "userd memory allocation failed"); goto clean_up; } - gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); + nvgpu_log(g, gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); userd_base = nvgpu_mem_get_addr(g, &f->userd); for (chid = 0; chid < f->num_channels; chid++) { @@ -1013,11 +1013,11 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) f->sw_ready = true; - 
gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: - gk20a_dbg_fn("fail"); + nvgpu_log_fn(g, "fail"); if (nvgpu_mem_is_valid(&f->userd)) { if (g->ops.mm.is_bar1_supported(g)) nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd); @@ -1032,7 +1032,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g) { u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r()); - gk20a_dbg(gpu_dbg_intr, "runlist event %08x", + nvgpu_log(g, gpu_dbg_intr, "runlist event %08x", runlist_event); gk20a_writel(g, fifo_intr_runlist_r(), runlist_event); @@ -1042,7 +1042,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) { struct fifo_gk20a *f = &g->fifo; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* test write, read through bar1 @ userd region before * turning on the snooping */ @@ -1053,7 +1053,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) u32 bar1_vaddr = f->userd.gpu_va; volatile u32 *cpu_vaddr = f->userd.cpu_va; - gk20a_dbg_info("test bar1 @ vaddr 0x%x", + nvgpu_log_info(g, "test bar1 @ vaddr 0x%x", bar1_vaddr); v = gk20a_bar1_readl(g, bar1_vaddr); @@ -1093,7 +1093,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) | fifo_bar1_base_valid_true_f()); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -1261,7 +1261,7 @@ void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, u32 fault_info; u32 addr_lo, addr_hi; - gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); + nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id); memset(mmfault, 0, sizeof(*mmfault)); @@ -1291,7 +1291,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) u32 engine_enum = ENGINE_INVAL_GK20A; struct fifo_engine_info_gk20a *engine_info; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!g) return; @@ -1489,7 +1489,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt) struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid]; struct channel_gk20a *ch; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fifo.disable_tsg(tsg); @@ -1556,7 +1556,7 @@ static bool gk20a_fifo_handle_mmu_fault( bool verbose = true; u32 grfifo_ctl; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->fifo.deferred_reset_pending = false; @@ -1693,7 +1693,7 @@ static bool gk20a_fifo_handle_mmu_fault( /* handled during channel free */ g->fifo.deferred_reset_pending = true; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm debugger attached," " deferring channel recovery to channel free"); } else { @@ -2196,6 +2196,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, struct channel_gk20a *ch; bool recover = false; bool progress = false; + struct gk20a *g = tsg->g; *verbose = false; *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; @@ -2221,7 +2222,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, * this resets timeout for channels that already completed their work */ if (progress) { - gk20a_dbg_info("progress on tsg=%d ch=%d", + nvgpu_log_info(g, "progress on tsg=%d ch=%d", tsg->tsgid, ch->chid); gk20a_channel_put(ch); *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; @@ -2239,7 +2240,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, * caused the problem, so set timeout error notifier for all channels. 
*/ if (recover) { - gk20a_dbg_info("timeout on tsg=%d ch=%d", + nvgpu_log_info(g, "timeout on tsg=%d ch=%d", tsg->tsgid, ch->chid); *ms = ch->timeout_accumulated_ms; gk20a_channel_put(ch); @@ -2311,7 +2312,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g) is_tsg, true, verbose, RC_TYPE_CTXSW_TIMEOUT); } else { - gk20a_dbg_info( + nvgpu_log_info(g, "fifo is waiting for ctx switch for %d ms, " "%s=%d", ms, is_tsg ? "tsg" : "ch", id); } @@ -2330,7 +2331,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) bool print_channel_reset_log = false; u32 handled = 0; - gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); + nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr); if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { /* pio mode is unused. this shouldn't happen, ever. */ @@ -2381,7 +2382,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) engine_id++) { u32 active_engine_id = g->fifo.active_engines_list[engine_id]; u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; - gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum, + nvgpu_log_fn(g, "enum:%d -> engine_id:%d", engine_enum, active_engine_id); fifo_pbdma_exception_status(g, &g->fifo.engine_info[active_engine_id]); @@ -2632,7 +2633,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr) for (i = 0; i < host_num_pbdma; i++) { if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { - gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); + nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i); clear_intr |= gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES); } @@ -2653,7 +2654,7 @@ void gk20a_fifo_isr(struct gk20a *g) * in a threaded interrupt context... */ nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); - gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); + nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); /* handle runlist update */ if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { @@ -2681,7 +2682,7 @@ int gk20a_fifo_nonstall_isr(struct gk20a *g) u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r()); u32 clear_intr = 0; - gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); + nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) clear_intr = fifo_intr_0_channel_intr_pending_f(); @@ -2769,7 +2770,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) int ret; unsigned int id_type; - gk20a_dbg_fn("%d", id); + nvgpu_log_fn(g, "%d", id); /* issue preempt */ gk20a_fifo_issue_preempt(g, id, is_tsg); @@ -2794,7 +2795,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) u32 mutex_ret = 0; u32 i; - gk20a_dbg_fn("%d", chid); + nvgpu_log_fn(g, "%d", chid); /* we have no idea which runlist we are using. lock all */ for (i = 0; i < g->fifo.max_runlists; i++) @@ -2821,7 +2822,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) u32 mutex_ret = 0; u32 i; - gk20a_dbg_fn("%d", tsgid); + nvgpu_log_fn(g, "%d", tsgid); /* we have no idea which runlist we are using. 
lock all */ for (i = 0; i < g->fifo.max_runlists; i++) @@ -2938,7 +2939,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, u32 mutex_ret; u32 err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); @@ -2988,12 +2989,12 @@ clean_up: nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); if (err) { - gk20a_dbg_fn("failed"); + nvgpu_log_fn(g, "failed"); if (gk20a_fifo_enable_engine_activity(g, eng_info)) nvgpu_err(g, "failed to enable gr engine activity"); } else { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } return err; } @@ -3129,8 +3130,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, bool skip_next = false; u32 tsgid, count = 0; u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); + struct gk20a *g = f->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* for each TSG, T, on this level, insert all higher-level channels and TSGs before inserting T. */ @@ -3156,9 +3158,9 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, return NULL; /* add TSG entry */ - gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid); + nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid); f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry); - gk20a_dbg_info("tsg runlist count %d runlist [0] %x [1] %x\n", + nvgpu_log_info(g, "tsg runlist count %d runlist [0] %x [1] %x\n", count, runlist_entry[0], runlist_entry[1]); runlist_entry += runlist_entry_words; count++; @@ -3177,10 +3179,10 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, return NULL; } - gk20a_dbg_info("add channel %d to runlist", + nvgpu_log_info(g, "add channel %d to runlist", ch->chid); f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry); - gk20a_dbg_info( + nvgpu_log_info(g, "run list count %d runlist [0] %x [1] %x\n", count, runlist_entry[0], runlist_entry[1]); count++; @@ -3222,7 +3224,7 @@ int gk20a_fifo_set_runlist_interleave(struct gk20a *g, u32 runlist_id, u32 new_level) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->fifo.tsg[id].interleave_level = new_level; @@ -3313,7 +3315,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); - gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx", + nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx", runlist_id, (u64)runlist_iova); if (!runlist_iova) { @@ -3445,7 +3447,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid, u32 mutex_ret; u32 ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); runlist = &f->runlist_info[runlist_id]; @@ -3465,7 +3467,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid, int gk20a_fifo_suspend(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* stop bar1 snooping */ if (g->ops.mm.is_bar1_supported(g)) @@ -3476,7 +3478,7 @@ int gk20a_fifo_suspend(struct gk20a *g) gk20a_writel(g, fifo_intr_en_0_r(), 0); gk20a_writel(g, fifo_intr_en_1_r(), 0); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -3511,7 +3513,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) int ret = -ETIMEDOUT; u32 i, host_num_engines; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); @@ -3533,12 +3535,12 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) } while (!nvgpu_timeout_expired(&timeout)); if (ret) { - gk20a_dbg_info("cannot idle engine %u", i); + nvgpu_log_info(g, "cannot idle engine %u", i); break; } } - 
gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -3839,7 +3841,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a) { struct gk20a *g = ch_gk20a->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), @@ -3854,12 +3856,12 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c) u32 addr_hi; struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); addr_hi = u64_hi32(c->userd_iova); - gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", + nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx", c->chid, (u64)c->userd_iova); nvgpu_mem_wr32(g, &c->inst_block, @@ -3885,7 +3887,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c, struct gk20a *g = c->g; struct nvgpu_mem *mem = &c->inst_block; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); @@ -3946,7 +3948,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c) struct gk20a *g = c->g; struct nvgpu_mem *mem = &c->inst_block; - gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid); + nvgpu_log_info(g, "channel %d : set ramfc privileged_channel", c->chid); /* Enable HCE priv mode for phys mode transfer */ nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), @@ -3959,7 +3961,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c) struct nvgpu_mem *mem; u32 offset; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_mem_is_valid(&c->usermode_userd)) { mem = &c->usermode_userd; @@ -3987,16 +3989,16 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); if (err) return err; - gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx", + nvgpu_log_info(g, "channel %d inst block physical addr: 0x%16llx", ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -4086,7 +4088,7 @@ void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g, struct priv_cmd_entry *cmd, u32 off, u32 id, u32 thresh, u64 gpu_va) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); off = cmd->off + off; /* syncpoint_a */ @@ -4115,7 +4117,7 @@ void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g, { u32 off = cmd->off; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (wfi_cmd) { /* wfi */ nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index e8008937..e862f2e4 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c @@ -77,7 +77,7 @@ int gk20a_detect_chip(struct gk20a *g) gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev); - gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n", + nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n", g->params.gpu_arch, g->params.gpu_impl, g->params.gpu_rev); @@ -89,7 +89,7 @@ int gk20a_prepare_poweroff(struct gk20a *g) { int ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->ops.fifo.channel_suspend) { ret = g->ops.fifo.channel_suspend(g); @@ -126,7 +126,7 @@ int gk20a_finalize_poweron(struct gk20a *g) u32 nr_pages; #endif - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->power_on) return 0; @@ -434,7 +434,7 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount) struct gk20a *g = container_of(refcount, struct gk20a, refcount); - gk20a_dbg(gpu_dbg_shutdown, 
"Freeing GK20A struct!"); + nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!"); gk20a_ce_destroy(g); @@ -465,7 +465,7 @@ struct gk20a * __must_check gk20a_get(struct gk20a *g) */ success = nvgpu_ref_get_unless_zero(&g->refcount); - gk20a_dbg(gpu_dbg_shutdown, "GET: refs currently %d %s", + nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s", nvgpu_atomic_read(&g->refcount.refcount), success ? "" : "(FAILED)"); @@ -490,7 +490,7 @@ void gk20a_put(struct gk20a *g) * ... PUT: refs currently 2 * ... Freeing GK20A struct! */ - gk20a_dbg(gpu_dbg_shutdown, "PUT: refs currently %d", + nvgpu_log(g, gpu_dbg_shutdown, "PUT: refs currently %d", nvgpu_atomic_read(&g->refcount.refcount)); nvgpu_ref_put(&g->refcount, gk20a_free_cb); diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c index 7120059c..f829cb3a 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c @@ -1,9 +1,7 @@ /* - * drivers/video/tegra/host/gk20a/gr_ctx_gk20a.c - * * GK20A Graphics Context * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -79,7 +77,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) u32 i, major_v = ~0, major_v_hw, netlist_num; int net, max, err = -ENOENT; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->ops.gr_ctx.is_fw_defined()) { net = NETLIST_FINAL; @@ -114,63 +112,63 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) switch (netlist->regions[i].region_id) { case NETLIST_REGIONID_FECS_UCODE_DATA: - gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA"); + nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.fecs.data); if (err) goto clean_up; break; case NETLIST_REGIONID_FECS_UCODE_INST: - gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST"); + nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.fecs.inst); if (err) goto clean_up; break; case NETLIST_REGIONID_GPCCS_UCODE_DATA: - gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA"); + nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.gpccs.data); if (err) goto clean_up; break; case NETLIST_REGIONID_GPCCS_UCODE_INST: - gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST"); + nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.gpccs.inst); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_BUNDLE_INIT: - gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_bundle_init); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_METHOD_INIT: - gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_method_init); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_CTX_LOAD: - gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, 
&g->gr.ctx_vars.sw_ctx_load); if (err) goto clean_up; break; case NETLIST_REGIONID_SW_NON_CTX_LOAD: - gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD"); + nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_non_ctx_load); if (err) goto clean_up; break; case NETLIST_REGIONID_SWVEIDBUNDLEINIT: - gk20a_dbg_info( + nvgpu_log_info(g, "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, @@ -179,56 +177,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) goto clean_up; break; case NETLIST_REGIONID_CTXREG_SYS: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.sys); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_GPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_TPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_ZCULL_GPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PM_SYS: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PM_GPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PM_TPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); if (err) @@ -236,110 +234,110 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) break; case NETLIST_REGIONID_BUFFER_SIZE: g->gr.ctx_vars.buffer_size = *src; - gk20a_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d", + nvgpu_log_info(g, "NETLIST_REGIONID_BUFFER_SIZE : %d", g->gr.ctx_vars.buffer_size); break; case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX: g->gr.ctx_vars.regs_base_index = *src; - gk20a_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", + nvgpu_log_info(g, "NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", g->gr.ctx_vars.regs_base_index); break; case NETLIST_REGIONID_MAJORV: major_v = *src; - gk20a_dbg_info("NETLIST_REGIONID_MAJORV : %d", + nvgpu_log_info(g, "NETLIST_REGIONID_MAJORV : %d", major_v); break; case NETLIST_REGIONID_NETLIST_NUM: netlist_num = *src; - gk20a_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d", + nvgpu_log_info(g, 
"NETLIST_REGIONID_NETLIST_NUM : %d", netlist_num); break; case NETLIST_REGIONID_CTXREG_PMPPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_CTXREG_SYS: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_SYS"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_FBP_CTXREGS: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_CTXREG_GPC: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_GPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_FBP_ROUTER: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_ROUTER"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_GPC_ROUTER: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_GPC_ROUTER"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMLTC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMLTC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMFBPA: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMFBPA"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_SYS_ROUTER: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_SYS_ROUTER"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); if (err) goto clean_up; break; case NETLIST_REGIONID_NVPERF_PMA: - gk20a_dbg_info("NETLIST_REGIONID_NVPERF_PMA"); + nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMROP: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMROP"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_PMUCGPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMUCGPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); if (err) goto clean_up; break; case NETLIST_REGIONID_CTXREG_ETPC: - gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ETPC"); + nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); if (err) @@ -347,13 +345,13 @@ 
static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) break; default: - gk20a_dbg_info("unrecognized region %d skipped", i); + nvgpu_log_info(g, "unrecognized region %d skipped", i); break; } } if (net != NETLIST_FINAL && major_v != major_v_hw) { - gk20a_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x", + nvgpu_log_info(g, "skip %s: major_v 0x%08x doesn't match hw 0x%08x", name, major_v, major_v_hw); goto clean_up; } @@ -362,7 +360,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) g->gr.netlist = net; nvgpu_release_firmware(g, netlist_fw); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); goto done; clean_up: @@ -403,7 +401,7 @@ clean_up: done: if (g->gr.ctx_vars.valid) { - gk20a_dbg_info("netlist image %s loaded", name); + nvgpu_log_info(g, "netlist image %s loaded", name); return 0; } else { nvgpu_err(g, "failed to load netlist image!!"); diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c index 9674e2d6..01c7ed3c 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c @@ -1,9 +1,7 @@ /* - * drivers/video/tegra/host/gk20a/gr_ctx_sim_gk20a.c - * * GK20A Graphics Context for Simulation * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -35,7 +33,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) int err = 0; u32 i, temp; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, "querying grctx info from chiplib"); g->gr.ctx_vars.dynamic = true; @@ -250,7 +248,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) i, &l[i].value); } - gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); + nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) { struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l; g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR", @@ -259,7 +257,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) i, &l[i].index); g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE", i, &l[i].value); - gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, + nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "addr:0x%#08x index:0x%08x value:0x%08x", l[i].addr, l[i].index, l[i].value); } @@ -269,7 +267,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0, &g->gr.ctx_vars.regs_base_index); - gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); + nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); return 0; fail: nvgpu_err(g, "failed querying grctx info from chiplib"); diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 86111321..00f26650 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -122,7 +122,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g, *ctx_id = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_context_id_o()); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); nvgpu_mem_end(g, mem); @@ -220,7 +220,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a 
*g) const u32 *ucode_u32_data; u32 checksum; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) | gr_gpccs_dmemc_blk_f(0) | @@ -245,7 +245,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g) gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); checksum += ucode_u32_data[i]; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static void gr_gk20a_load_falcon_imem(struct gk20a *g) @@ -255,7 +255,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g) u32 tag, i, pad_start, pad_end; u32 checksum; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); cfg = gk20a_readl(g, gr_fecs_cfg_r()); fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); @@ -343,7 +343,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, bool ctx_status_invalid; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_engine_id = gk20a_fifo_get_gr_engine_id(g); @@ -372,7 +372,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, if (!gr_enabled || ctx_status_invalid || (!gr_busy && !ctxsw_active)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -398,7 +398,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) return 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); @@ -406,7 +406,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, val = gk20a_readl(g, gr_status_r()); if (!gr_status_fe_method_lower_v(val)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -430,7 +430,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, u32 check = WAIT_UCODE_LOOP; u32 reg; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (sleepduringwait) delay = GR_IDLE_CHECK_DEFAULT; @@ -532,7 +532,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, return -1; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -618,7 +618,7 @@ int gr_gk20a_disable_ctxsw(struct gk20a *g) { int err = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); nvgpu_mutex_acquire(&g->ctxsw_disable_lock); g->ctxsw_disable_count++; @@ -635,7 +635,7 @@ int gr_gk20a_enable_ctxsw(struct gk20a *g) { int err = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); nvgpu_mutex_acquire(&g->ctxsw_disable_lock); g->ctxsw_disable_count--; @@ -669,7 +669,7 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va) u32 addr_lo; u32 addr_hi; - gk20a_dbg_fn(""); + nvgpu_log_fn(c->g, " "); addr_lo = u64_lo32(gpu_va) >> 12; addr_hi = u64_hi32(gpu_va); @@ -775,7 +775,7 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, u32 data = fecs_current_ctx_data(g, &c->inst_block); u32 ret; - gk20a_dbg_info("bind channel %d inst ptr 0x%08x", + nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x", c->chid, inst_base_ptr); ret = gr_gk20a_submit_fecs_method_op(g, @@ -823,7 +823,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c) struct nvgpu_mem *ctxheader = &ctx->mem; int ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -905,7 +905,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, u64 addr; u32 size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -931,7 +931,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, if (size == 
g->ops.gr.pagepool_default_size(g)) size = gr_scc_pagepool_total_pages_hwmax_v(); - gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d", + nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d", addr, size); g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch); @@ -944,7 +944,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, size = gr->bundle_cb_default_size; - gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d", + nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %d", addr, size); g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch); @@ -955,7 +955,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, (u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) << (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); - gk20a_dbg_info("attrib cb addr : 0x%016llx", addr); + nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr); g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch); g->ops.gr.commit_global_cb_manager(g, c, patch); @@ -976,7 +976,7 @@ int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c) u32 pe_vaf; u32 pe_vsc_vpc; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r()); pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); @@ -1036,7 +1036,7 @@ int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr) if (!gr->map_tiles) return -1; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, gr_crstr_map_table_cfg_r(), gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) | @@ -1219,7 +1219,7 @@ int gr_gk20a_init_fs_state(struct gk20a *g) u32 reg_index; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->ops.gr.init_sm_id_table) { err = g->ops.gr.init_sm_id_table(g); @@ -1302,7 +1302,7 @@ int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type) struct gk20a *g = c->g; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ret = gr_gk20a_submit_fecs_method_op(g, (struct fecs_method_op_gk20a) { @@ -1411,7 +1411,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g, struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init; u32 last_method_data = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1647,7 +1647,7 @@ clean_up: if (err) nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); nvgpu_mem_end(g, gold_mem); nvgpu_mem_end(g, gr_mem); @@ -1666,7 +1666,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, u32 data; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1732,7 +1732,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, struct nvgpu_mem *ctxheader = &ctx->mem; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1884,7 +1884,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g, int ret = 0; struct nvgpu_mem *mem; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1991,7 +1991,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g, static void gr_gk20a_start_falcon_ucode(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), gr_fecs_ctxsw_mailbox_clear_value_f(~0)); @@ -2002,7 +2002,7 @@ static void gr_gk20a_start_falcon_ucode(struct gk20a *g) gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1)); gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, 
"done"); } static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) @@ -2392,7 +2392,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), @@ -2419,7 +2419,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) gr_gk20a_load_falcon_with_bootloader(g); g->gr.skip_ucode_init = true; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2427,7 +2427,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) { u32 ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL, GR_IS_UCODE_OP_EQUAL, @@ -2448,7 +2448,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) gk20a_writel(g, gr_fecs_method_push_r(), gr_fecs_method_push_adr_set_watchdog_timeout_f()); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2463,7 +2463,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) .cond.fail = GR_IS_UCODE_OP_SKIP, }; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* query ctxsw image sizes, if golden context is not created */ if (!g->gr.ctx_vars.golden_image_initialized) { op.method.addr = @@ -2496,7 +2496,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) g->gr.ctx_vars.priv_access_map_size = 512 * 1024; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2543,7 +2543,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) @@ -2557,11 +2557,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * gr_scc_pagepool_total_pages_byte_granularity_v(); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); - gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); + nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size); err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR], cb_buffer_size); @@ -2576,7 +2576,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) goto clean_up; } - gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); + nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size); err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL], pagepool_buffer_size); @@ -2591,7 +2591,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) goto clean_up; } - gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); + nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size); err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE], attr_buffer_size); @@ -2606,7 +2606,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) goto clean_up; } - gk20a_dbg_info("golden_image_size : %d", + nvgpu_log_info(g, "golden_image_size : %d", gr->ctx_vars.golden_image_size); err = gk20a_gr_alloc_ctx_buffer(g, @@ -2615,7 +2615,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) if (err) goto clean_up; - gk20a_dbg_info("priv_access_map_size : %d", + nvgpu_log_info(g, "priv_access_map_size : %d", gr->ctx_vars.priv_access_map_size); err = gk20a_gr_alloc_ctx_buffer(g, @@ -2625,7 +2625,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) if (err) goto clean_up; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: @@ -2643,7 +2643,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct gk20a *g, int *g_bfr_index = 
gr_ctx->global_ctx_buffer_index; u32 i; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { if (g_bfr_index[i]) { @@ -2679,7 +2679,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_mem *mem; u64 gpu_va; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -2780,7 +2780,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g, struct gr_gk20a *gr = &g->gr; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr->ctx_vars.buffer_size == 0) return 0; @@ -2835,7 +2835,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g, void gr_gk20a_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr_ctx->mem.gpu_va) { gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx); @@ -2881,7 +2881,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, u32 alloc_size; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -2899,7 +2899,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, if (err) return err; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2909,7 +2909,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g, { struct patch_desc *patch_ctx = &gr_ctx->patch_ctx; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (patch_ctx->mem.gpu_va) nvgpu_gmmu_unmap(vm, &patch_ctx->mem, @@ -2925,7 +2925,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g, { struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (pm_ctx->mem.gpu_va) { nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va); @@ -2942,7 +2942,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) struct tsg_gk20a *tsg = NULL; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* an address space needs to have been bound at this point.*/ if (!gk20a_channel_as_bound(c) && !c->vm) { @@ -3047,7 +3047,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; out: /* 1. 
gr_ctx, patch_ctx and global ctx buffer mapping @@ -3062,7 +3062,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr) { struct gk20a *g = gr->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_gk20a_free_cyclestats_snapshot_data(g); @@ -3322,35 +3322,35 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) sm_per_tpc * sizeof(struct sm_info)); gr->no_of_sm = 0; - gk20a_dbg_info("fbps: %d", gr->num_fbps); - gk20a_dbg_info("max_gpc_count: %d", gr->max_gpc_count); - gk20a_dbg_info("max_fbps_count: %d", gr->max_fbps_count); - gk20a_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); - gk20a_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); - gk20a_dbg_info("max_tpc_count: %d", gr->max_tpc_count); - gk20a_dbg_info("sys_count: %d", gr->sys_count); - gk20a_dbg_info("gpc_count: %d", gr->gpc_count); - gk20a_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc); - gk20a_dbg_info("tpc_count: %d", gr->tpc_count); - gk20a_dbg_info("ppc_count: %d", gr->ppc_count); + nvgpu_log_info(g, "fbps: %d", gr->num_fbps); + nvgpu_log_info(g, "max_gpc_count: %d", gr->max_gpc_count); + nvgpu_log_info(g, "max_fbps_count: %d", gr->max_fbps_count); + nvgpu_log_info(g, "max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); + nvgpu_log_info(g, "max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); + nvgpu_log_info(g, "max_tpc_count: %d", gr->max_tpc_count); + nvgpu_log_info(g, "sys_count: %d", gr->sys_count); + nvgpu_log_info(g, "gpc_count: %d", gr->gpc_count); + nvgpu_log_info(g, "pe_count_per_gpc: %d", gr->pe_count_per_gpc); + nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count); + nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_tpc_count[%d] : %d", + nvgpu_log_info(g, "gpc_tpc_count[%d] : %d", gpc_index, gr->gpc_tpc_count[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_zcb_count[%d] : %d", + nvgpu_log_info(g, "gpc_zcb_count[%d] : %d", gpc_index, gr->gpc_zcb_count[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_ppc_count[%d] : %d", + nvgpu_log_info(g, "gpc_ppc_count[%d] : %d", gpc_index, gr->gpc_ppc_count[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) - gk20a_dbg_info("gpc_skip_mask[%d] : %d", + nvgpu_log_info(g, "gpc_skip_mask[%d] : %d", gpc_index, gr->gpc_skip_mask[gpc_index]); for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) - gk20a_dbg_info("pes_tpc_count[%d][%d] : %d", + nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d", pes_index, gpc_index, gr->pes_tpc_count[pes_index][gpc_index]); @@ -3358,7 +3358,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) - gk20a_dbg_info("pes_tpc_mask[%d][%d] : %d", + nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d", pes_index, gpc_index, gr->pes_tpc_mask[pes_index][gpc_index]); @@ -3367,16 +3367,16 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) g->ops.gr.calc_global_ctx_buffer_size(g); gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(); - gk20a_dbg_info("bundle_cb_default_size: %d", + nvgpu_log_info(g, "bundle_cb_default_size: %d", gr->bundle_cb_default_size); - gk20a_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); - gk20a_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); - 
gk20a_dbg_info("attrib_cb_default_size: %d", + nvgpu_log_info(g, "min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); + nvgpu_log_info(g, "bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); + nvgpu_log_info(g, "attrib_cb_default_size: %d", gr->attrib_cb_default_size); - gk20a_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size); - gk20a_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size); - gk20a_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size); - gk20a_dbg_info("timeslice_mode: %d", gr->timeslice_mode); + nvgpu_log_info(g, "attrib_cb_size: %d", gr->attrib_cb_size); + nvgpu_log_info(g, "alpha_cb_default_size: %d", gr->alpha_cb_default_size); + nvgpu_log_info(g, "alpha_cb_size: %d", gr->alpha_cb_size); + nvgpu_log_info(g, "timeslice_mode: %d", gr->timeslice_mode); return 0; @@ -3582,7 +3582,7 @@ clean_up: if (ret) nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -4094,7 +4094,7 @@ clean_up: int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, struct zbc_entry *zbc_val) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return gr_gk20a_elpg_protected_call(g, gr_gk20a_add_zbc(g, gr, zbc_val)); @@ -4197,10 +4197,10 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (zcull_num_entries >= 8) { - gk20a_dbg_fn("map0"); + nvgpu_log_fn(g, "map0"); val = gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f( zcull_map_tiles[0]) | @@ -4223,7 +4223,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, } if (zcull_num_entries >= 16) { - gk20a_dbg_fn("map1"); + nvgpu_log_fn(g, "map1"); val = gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f( zcull_map_tiles[8]) | @@ -4246,7 +4246,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, } if (zcull_num_entries >= 24) { - gk20a_dbg_fn("map2"); + nvgpu_log_fn(g, "map2"); val = gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f( zcull_map_tiles[16]) | @@ -4269,7 +4269,7 @@ void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, } if (zcull_num_entries >= 32) { - gk20a_dbg_fn("map3"); + nvgpu_log_fn(g, "map3"); val = gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f( zcull_map_tiles[24]) | @@ -4452,7 +4452,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g) u32 last_method_data = 0; u32 i, err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* init mmu debug buffer */ addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem); @@ -4613,13 +4613,13 @@ restore_fe_go_idle: } out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } static void gr_gk20a_load_gating_prod(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* slcg prod values */ if (g->ops.clock_gating.slcg_bus_load_gating_prod) @@ -4657,7 +4657,7 @@ static void gr_gk20a_load_gating_prod(struct gk20a *g) if (g->ops.clock_gating.pg_gr_load_gating_prod) g->ops.clock_gating.pg_gr_load_gating_prod(g, true); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static int gk20a_init_gr_prepare(struct gk20a *g) @@ -4703,7 +4703,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) bool fecs_scrubbing; bool gpccs_scrubbing; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / @@ -4719,7 +4719,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) gr_gpccs_dmactl_imem_scrubbing_m()); if (!fecs_scrubbing && !gpccs_scrubbing) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -4746,7 +4746,7 @@ out: if (err) 
nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -4756,7 +4756,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g) struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; u32 i, err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* enable interrupts */ gk20a_writel(g, gr_intr_r(), ~0); @@ -4780,7 +4780,7 @@ out: if (err) nvgpu_err(g, "fail"); else - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -4810,7 +4810,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g) map_bit = whitelist[w] >> 2; map_byte = map_bit >> 3; map_shift = map_bit & 0x7; /* i.e. 0-7 */ - gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d", + nvgpu_log_info(g, "access map addr:0x%x byte:0x%x bit:%d", whitelist[w], map_byte, map_shift); x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32)); x |= 1 << ( @@ -4828,10 +4828,10 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) struct gr_gk20a *gr = &g->gr; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -4888,7 +4888,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) if (g->ops.gr.create_gr_sysfs) g->ops.gr.create_gr_sysfs(g); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: @@ -4906,7 +4906,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) u32 size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); size = 0; @@ -4947,7 +4947,7 @@ int gk20a_init_gr_support(struct gk20a *g) { u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* this is required before gr_gk20a_init_ctx_state */ nvgpu_mutex_init(&g->gr.fecs_mutex); @@ -4999,7 +4999,7 @@ void gk20a_gr_wait_initialized(struct gk20a *g) void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { gk20a_writel(g, @@ -5046,7 +5046,7 @@ int gk20a_enable_gr_hw(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_init_gr_prepare(g); if (err) @@ -5056,7 +5056,7 @@ int gk20a_enable_gr_hw(struct gk20a *g) if (err) return err; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -5163,7 +5163,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g, static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); nvgpu_err(g, @@ -5174,7 +5174,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); /* This is an unrecoverable error, reset is needed */ @@ -5202,7 +5202,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g, static int gk20a_gr_handle_illegal_class(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); nvgpu_err(g, @@ -5243,7 +5243,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g, { u32 gr_class_error; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr_class_error = gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); @@ -5274,7 +5274,7 @@ static int gk20a_gr_handle_class_error(struct gk20a 
*g, static int gk20a_gr_handle_firmware_method(struct gk20a *g, struct gr_gk20a_isr_data *isr_data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); @@ -5450,7 +5450,7 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g, } nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); #endif - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); return 0; } @@ -5543,7 +5543,7 @@ int gk20a_gr_lock_down_sm(struct gk20a *g, u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); u32 dbgr_control0; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); /* assert stop trigger */ @@ -5582,7 +5582,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, bool sm_debugger_attached; u32 global_esr, warp_esr, global_mask; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); sm_debugger_attached = g->ops.gr.sm_debugger_attached(g); @@ -5597,7 +5597,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, return -EFAULT; } - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr); gr_gk20a_elpg_protected_call(g, @@ -5617,7 +5617,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, } if (early_exit) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "returning early"); return ret; } @@ -5640,13 +5640,13 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, gk20a_writel(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset, tpc_exception_en); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); } /* if a debugger is present and an error has occurred, do a warp sync */ if (!ignore_debugger && ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) { - gk20a_dbg(gpu_dbg_intr, "warp sync needed"); + nvgpu_log(g, gpu_dbg_intr, "warp sync needed"); do_warp_sync = true; } @@ -5660,7 +5660,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, } if (ignore_debugger) - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "ignore_debugger set, skipping event posting"); else *post_event |= true; @@ -5677,11 +5677,11 @@ int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; u32 esr; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); esr = gk20a_readl(g, gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); gk20a_writel(g, gr_gpc0_tpc0_tex_m_hww_esr_r() + offset, @@ -5706,7 +5706,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, + offset); u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: pending exception 0x%x", gpc, tpc, tpc_exception); @@ -5715,7 +5715,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) { u32 
esr_sm_sel, sm; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: SM exception pending", gpc, tpc); if (g->ops.gr.handle_tpc_sm_ecc_exception) @@ -5729,7 +5729,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, if (!(esr_sm_sel & (1 << sm))) continue; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: SM%d exception pending", gpc, tpc, sm); @@ -5750,7 +5750,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, /* check if a tex exeption is pending */ if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) == gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: TEX exception pending", gpc, tpc); ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event); } @@ -5771,13 +5771,13 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, u32 exception1 = gk20a_readl(g, gr_exception1_r()); u32 gpc_exception; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, " "); for (gpc = 0; gpc < gr->gpc_count; gpc++) { if ((exception1 & (1 << gpc)) == 0) continue; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d exception pending", gpc); gpc_offset = gk20a_gr_gpc_offset(g, gpc); @@ -5791,7 +5791,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, (1 << tpc)) == 0) continue; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d: TPC%d exception pending", gpc, tpc); ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc, @@ -5860,8 +5860,8 @@ int gk20a_gr_isr(struct gk20a *g) u32 gr_engine_id; u32 global_esr = 0; - gk20a_dbg_fn(""); - gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr); + nvgpu_log_fn(g, " "); + nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr); if (!gr_intr) return 0; @@ -5896,7 +5896,7 @@ int gk20a_gr_isr(struct gk20a *g) nvgpu_err(g, "ch id is INVALID 0xffffffff"); } - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "channel %d: addr 0x%08x, " "data 0x%08x 0x%08x," "ctx 0x%08x, offset 0x%08x, " @@ -5968,7 +5968,7 @@ int gk20a_gr_isr(struct gk20a *g) * register using set_falcon[4] */ if (gr_intr & gr_intr_firmware_method_pending_f()) { need_reset |= gk20a_gr_handle_firmware_method(g, &isr_data); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); gk20a_writel(g, gr_intr_r(), gr_intr_firmware_method_reset_f()); gr_intr &= ~gr_intr_firmware_method_pending_f(); @@ -5977,7 +5977,7 @@ int gk20a_gr_isr(struct gk20a *g) if (gr_intr & gr_intr_exception_pending_f()) { u32 exception = gk20a_readl(g, gr_exception_r()); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); if (exception & gr_exception_fe_m()) { u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); @@ -6057,7 +6057,7 @@ int gk20a_gr_isr(struct gk20a *g) if (exception & gr_exception_gpc_m() && need_reset == 0) { bool post_event = false; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC exception pending"); fault_ch = gk20a_fifo_channel_from_chid(g, @@ -6133,7 +6133,7 @@ int 
gk20a_gr_nonstall_isr(struct gk20a *g) int ops = 0; u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r()); - gk20a_dbg(gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); + nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); if (gr_intr & gr_intr_nonstall_trap_pending_f()) { /* Clear the interrupt */ @@ -6201,7 +6201,7 @@ int gk20a_gr_suspend(struct gk20a *g) { u32 ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); @@ -6227,7 +6227,7 @@ int gk20a_gr_suspend(struct gk20a *g) g->gr.initialized = false; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -6250,7 +6250,7 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, { u32 gpc_addr; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* setup defaults */ *addr_type = CTXSW_ADDR_TYPE_SYS; @@ -6338,7 +6338,7 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr, { u32 ppc_num; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++) priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), @@ -6369,12 +6369,12 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, t = 0; *num_registers = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, &gpc_num, &tpc_num, &ppc_num, &be_num, &broadcast_flags); - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); if (err) return err; @@ -6428,7 +6428,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || (addr_type == CTXSW_ADDR_TYPE_ETPC)) && g->ops.gr.egpc_etpc_priv_addr_table) { - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, broadcast_flags, priv_addr_table, &t); } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { @@ -6477,11 +6477,11 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * sm_per_tpc; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* implementation is crossed-up if either of these happen */ if (max_offsets > potential_offsets) { - gk20a_dbg_fn("max_offsets > potential_offsets"); + nvgpu_log_fn(g, "max_offsets > potential_offsets"); return -EINVAL; } @@ -6490,7 +6490,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); if (!priv_registers) { - gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); + nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); err = PTR_ERR(priv_registers); goto cleanup; } @@ -6502,7 +6502,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, &num_registers); if ((max_offsets > 1) && (num_registers > max_offsets)) { - gk20a_dbg_fn("max_offsets = %d, num_registers = %d", + nvgpu_log_fn(g, "max_offsets = %d, num_registers = %d", max_offsets, num_registers); err = -EINVAL; goto cleanup; @@ -6512,7 +6512,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, 
num_registers = 1; if (!g->gr.ctx_vars.local_golden_image) { - gk20a_dbg_fn("no context switch header info to work with"); + nvgpu_log_fn(g, "no context switch header info to work with"); err = -EINVAL; goto cleanup; } @@ -6525,7 +6525,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, g->gr.ctx_vars.golden_image_size, &priv_offset); if (err) { - gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", + nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", addr); /*, grPriRegStr(addr)));*/ goto cleanup; } @@ -6558,7 +6558,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * sm_per_tpc; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* implementation is crossed-up if either of these happen */ if (max_offsets > potential_offsets) @@ -6569,7 +6569,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); if (!priv_registers) { - gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets); + nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); return -ENOMEM; } memset(offsets, 0, sizeof(u32) * max_offsets); @@ -6588,7 +6588,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, num_registers = 1; if (!g->gr.ctx_vars.local_golden_image) { - gk20a_dbg_fn("no context switch header info to work with"); + nvgpu_log_fn(g, "no context switch header info to work with"); err = -EINVAL; goto cleanup; } @@ -6598,7 +6598,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, priv_registers[i], &priv_offset); if (err) { - gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x", + nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", addr); /*, grPriRegStr(addr)));*/ goto cleanup; } @@ -6684,7 +6684,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, g->ops.gr.init_sm_dsm_reg_info(); g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); for (reg = 0; reg < num_ovr_perf_regs; reg++) { for (gpc = 0; gpc < num_gpc; gpc++) { @@ -6754,13 +6754,11 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, static inline bool check_main_image_header_magic(u8 *context) { u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o()); - gk20a_dbg(gpu_dbg_gpu_dbg, "main image magic=0x%x", magic); return magic == ctxsw_prog_main_image_magic_value_v_value_v(); } static inline bool check_local_header_magic(u8 *context) { u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o()); - gk20a_dbg(gpu_dbg_gpu_dbg, "local magic=0x%x", magic); return magic == ctxsw_prog_local_magic_value_v_value_v(); } @@ -6823,14 +6821,14 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, else return -EINVAL; - gk20a_dbg_info(" gpc = %d tpc = %d", + nvgpu_log_info(g, " gpc = %d tpc = %d", gpc_num, tpc_num); } else if ((g->ops.gr.is_etpc_addr) && g->ops.gr.is_etpc_addr(g, addr)) { g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); gpc_base = g->ops.gr.get_egpc_base(g); } else { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "does not exist in extended region"); return -EINVAL; } @@ -6857,7 +6855,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, data32 = *(u32 *)(context + 
ctxsw_prog_main_extended_buffer_ctl_o()); ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32); if (0 == ext_priv_size) { - gk20a_dbg_info(" No extended memory in context buffer"); + nvgpu_log_info(g, " No extended memory in context buffer"); return -EINVAL; } ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32); @@ -6891,7 +6889,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) { sm_dsm_perf_reg_id = i; - gk20a_dbg_info("register match: 0x%08x", + nvgpu_log_info(g, "register match: 0x%08x", sm_dsm_perf_regs[i]); chk_addr = (gpc_base + gpc_stride * gpc_num) + @@ -6921,7 +6919,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) { sm_dsm_perf_ctrl_reg_id = i; - gk20a_dbg_info("register match: 0x%08x", + nvgpu_log_info(g, "register match: 0x%08x", sm_dsm_perf_ctrl_regs[i]); chk_addr = (gpc_base + gpc_stride * gpc_num) + @@ -7032,7 +7030,7 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g, u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE); u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr); if (!g->gr.ctx_vars.valid) return -EINVAL; @@ -7215,12 +7213,12 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, u8 *context; u32 offset_to_segment; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, &gpc_num, &tpc_num, &ppc_num, &be_num, &broadcast_flags); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr_type = %d, broadcast_flags: %08x", addr_type, broadcast_flags); if (err) @@ -7243,7 +7241,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, } data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); /* If found in Ext buffer, ok. * If it failed and we expected to find it there (quad offset) @@ -7253,7 +7251,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, addr, is_quad, quad, context_buffer, context_buffer_size, priv_offset); if (!err || (err && is_quad)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "err = %d, is_quad = %s", err, is_quad ? 
"true" : false); return err; @@ -7357,7 +7355,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, num_tpcs) << 2); } } else { - gk20a_dbg_fn("Unknown address type."); + nvgpu_log_fn(g, "Unknown address type."); return -EINVAL; } err = gr_gk20a_process_context_buffer_priv_segment(g, @@ -7668,7 +7666,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps; if (hwpm_ctxsw_buffer_size == 0) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "no PM Ctxsw buffer memory in context buffer"); return -EINVAL; } @@ -7760,10 +7758,10 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map; g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count; - gk20a_dbg_info("Reg Addr => HWPM Ctxt switch buffer offset"); + nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset"); for (i = 0; i < count; i++) - gk20a_dbg_info("%08x => %08x", map[i].addr, map[i].offset); + nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset); return 0; cleanup: @@ -7785,7 +7783,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g, u32 count; struct ctxsw_buf_offset_map_entry *map, *result, map_key; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* Create map of pri address and pm offset if necessary */ if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { @@ -7831,7 +7829,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch) curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx, &curr_gr_tsgid); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" " ch->chid=%d", curr_ch ? 
curr_ch->chid : -1, @@ -7873,7 +7871,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops}; int err = 0, pass; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", num_ctx_wr_ops, num_ctx_rd_ops); tsg = tsg_gk20a_from_ch(ch); @@ -7906,7 +7904,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_lo; gk20a_writel(g, offset, v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct wr: offset=0x%x v=0x%x", offset, v); @@ -7916,7 +7914,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_hi; gk20a_writel(g, offset + 4, v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct wr: offset=0x%x v=0x%x", offset + 4, v); } @@ -7925,7 +7923,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ctx_ops[i].value_lo = gk20a_readl(g, offset); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct rd: offset=0x%x v=0x%x", offset, ctx_ops[i].value_lo); @@ -7933,7 +7931,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ctx_ops[i].value_hi = gk20a_readl(g, offset + 4); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "direct rd: offset=0x%x v=0x%x", offset, ctx_ops[i].value_lo); } else @@ -8001,7 +7999,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, offsets, offset_addrs, &num_offsets); if (err) { - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx op invalid offset: offset=0x%x", ctx_ops[i].offset); ctx_ops[i].status = @@ -8044,7 +8042,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_lo; nvgpu_mem_wr(g, current_mem, offsets[j], v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "context wr: offset=0x%x v=0x%x", offsets[j], v); @@ -8054,7 +8052,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, v |= ctx_ops[i].value_hi; nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "context wr: offset=0x%x v=0x%x", offsets[j] + 4, v); } @@ -8068,14 +8066,14 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ctx_ops[i].value_lo = nvgpu_mem_rd(g, current_mem, offsets[0]); - gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", + nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", offsets[0], ctx_ops[i].value_lo); if (ctx_ops[i].op == REGOP(READ_64)) { ctx_ops[i].value_hi = nvgpu_mem_rd(g, current_mem, offsets[0] + 4); - gk20a_dbg(gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x", offsets[0] + 4, ctx_ops[i].value_hi); } else @@ -8121,7 +8119,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", ch_is_curr_ctx); err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops, @@ -8176,7 +8174,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, struct nvgpu_timeout timeout; u32 warp_esr; - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm); nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), @@ -8201,7 +8199,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, ((global_esr & ~global_esr_mask) == 0); if (locked_down || 
no_error_pending) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: locked down SM", gpc, tpc, sm); return 0; diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c index 939567e7..1787f573 100644 --- a/drivers/gpu/nvgpu/gk20a/hal.c +++ b/drivers/gpu/nvgpu/gk20a/hal.c @@ -41,7 +41,7 @@ int gpu_init_hal(struct gk20a *g) switch (ver) { case GK20A_GPUID_GM20B: case GK20A_GPUID_GM20B_B: - gk20a_dbg_info("gm20b detected"); + nvgpu_log_info(g, "gm20b detected"); if (gm20b_init_hal(g)) return -ENODEV; break; diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c index 7fed410e..9473ad4f 100644 --- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A Master Control * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -40,7 +40,7 @@ void mc_gk20a_isr_stall(struct gk20a *g) mc_intr_0 = g->ops.mc.intr_stall(g); - gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); + nvgpu_log(g, gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { active_engine_id = g->fifo.active_engines_list[engine_id_idx]; @@ -200,7 +200,7 @@ void gk20a_mc_disable(struct gk20a *g, u32 units) { u32 pmc; - gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units); + nvgpu_log(g, gpu_dbg_info, "pmc disable: %08x\n", units); nvgpu_spinlock_acquire(&g->mc_enable_lock); pmc = gk20a_readl(g, mc_enable_r()); @@ -213,7 +213,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units) { u32 pmc; - gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units); + nvgpu_log(g, gpu_dbg_info, "pmc enable: %08x\n", units); nvgpu_spinlock_acquire(&g->mc_enable_lock); pmc = gk20a_readl(g, mc_enable_r()); diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 14876296..dfdcc3a4 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c @@ -91,7 +91,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g) struct mm_gk20a *mm = &g->mm; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fb.set_mmu_page_size(g); if (g->ops.fb.set_use_full_comp_tag_line) @@ -112,7 +112,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g) if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) return -EBUSY; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -336,7 +336,7 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch) { int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(ch->g, " "); nvgpu_vm_get(vm); ch->vm = vm; @@ -357,7 +357,7 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block, u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); u32 pdb_addr_hi = u64_hi32(pdb_addr); - gk20a_dbg_info("pde pa=0x%llx", pdb_addr); + nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr); nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), nvgpu_aperture_mask(g, vm->pdb.mem, @@ -376,7 +376,7 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm, { struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", + nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p", nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); g->ops.mm.init_pdb(g, 
inst_block, vm); @@ -395,7 +395,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); if (err) { @@ -403,7 +403,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) return err; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -415,7 +415,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) struct nvgpu_timeout timeout; u32 retries; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_busy_noresume(g); if (!g->power_on) { @@ -448,7 +448,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) flush_fb_flush_outstanding_true_v() || flush_fb_flush_pending_v(data) == flush_fb_flush_pending_busy_v()) { - gk20a_dbg_info("fb_flush 0x%x", data); + nvgpu_log_info(g, "fb_flush 0x%x", data); nvgpu_udelay(5); } else break; @@ -494,7 +494,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g) flush_l2_system_invalidate_outstanding_true_v() || flush_l2_system_invalidate_pending_v(data) == flush_l2_system_invalidate_pending_busy_v()) { - gk20a_dbg_info("l2_system_invalidate 0x%x", + nvgpu_log_info(g, "l2_system_invalidate 0x%x", data); nvgpu_udelay(5); } else @@ -526,7 +526,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) struct nvgpu_timeout timeout; u32 retries = 2000; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_busy_noresume(g); if (!g->power_on) @@ -553,7 +553,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) flush_l2_flush_dirty_outstanding_true_v() || flush_l2_flush_dirty_pending_v(data) == flush_l2_flush_dirty_pending_busy_v()) { - gk20a_dbg_info("l2_flush_dirty 0x%x", data); + nvgpu_log_info(g, "l2_flush_dirty 0x%x", data); nvgpu_udelay(5); } else break; @@ -578,7 +578,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) struct nvgpu_timeout timeout; u32 retries = 200; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_busy_noresume(g); if (!g->power_on) @@ -602,7 +602,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) flush_l2_clean_comptags_outstanding_true_v() || flush_l2_clean_comptags_pending_v(data) == flush_l2_clean_comptags_pending_busy_v()) { - gk20a_dbg_info("l2_clean_comptags 0x%x", data); + nvgpu_log_info(g, "l2_clean_comptags 0x%x", data); nvgpu_udelay(5); } else break; diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 0531b387..400a49a3 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c @@ -39,8 +39,8 @@ #include #include -#define gk20a_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gk20a_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) { @@ -139,7 +139,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) u32 intr_mask; u32 intr_dest; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true, mc_intr_mask_0_pmu_enabled_f()); @@ -166,7 +166,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) mc_intr_mask_0_pmu_enabled_f()); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } @@ -179,7 +179,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu) u64 addr_code, addr_data, addr_load; u32 i, blocks, addr_args; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | @@ -286,7 +286,7 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) { BUG_ON(mutex->ref_cnt == 0); - gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token); + gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token); mutex->ref_cnt++; return 0; } @@ -313,12 +313,12 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) if (owner == data) { mutex->ref_cnt = 1; - gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x", + gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x", mutex->index, *token); *token = owner; return 0; } else { - gk20a_dbg_info("fail to acquire mutex idx=0x%08x", + nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x", mutex->index); data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); @@ -370,7 +370,7 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) pwr_pmu_mutex_id_release_value_f(owner)); gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); - gk20a_dbg_pmu("mutex released: id=%d, token=0x%x", + gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x", mutex->index, *token); return 0; @@ -475,7 +475,7 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g) struct nvgpu_pmu *pmu = &g->pmu; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -554,7 +554,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { struct nvgpu_pmu *pmu = param; - gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); + gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE"); pmu->zbc_save_done = 1; } @@ -575,7 +575,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) pmu->zbc_save_done = 0; - gk20a_dbg_pmu("cmd post ZBC_TABLE_UPDATE"); + gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_zbc_msg, pmu, &seq, ~0); pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), @@ -587,18 +587,20 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu, struct nv_pmu_therm_msg *msg) { - gk20a_dbg_fn(""); + struct gk20a *g = gk20a_from_pmu(pmu); + + nvgpu_log_fn(g, " "); switch (msg->msg_type) { case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION: if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) nvgpu_clk_arb_send_thermal_alarm(pmu->g); else - gk20a_dbg_pmu("Unwanted/Unregistered thermal event received %d", + gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d", msg->hw_slct_msg.mask); break; default: - gk20a_dbg_pmu("unkown therm event received %d", msg->msg_type); + gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type); break; } @@ -609,22 +611,22 @@ 
void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); - gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x", gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3))); - gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x", gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3))); - gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x", gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3))); - gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x", gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0))); - gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x", gk20a_readl(g, pwr_pmu_pg_intren_r(0))); - gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x", gk20a_readl(g, pwr_pmu_idle_count_r(3))); - gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x", gk20a_readl(g, pwr_pmu_idle_count_r(4))); - gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x", + gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x", gk20a_readl(g, pwr_pmu_idle_count_r(7))); } @@ -693,7 +695,7 @@ void gk20a_pmu_isr(struct gk20a *g) u32 intr, mask; bool recheck = false; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); if (!pmu->isr_enabled) { @@ -706,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g) intr = gk20a_readl(g, pwr_falcon_irqstat_r()); - gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr); + gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr); intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; if (!intr || pmu->pmu_state == PMU_STATE_OFF) { diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c index a76e2580..8dde61a2 100644 --- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c @@ -45,7 +45,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem, bus_bar0_window_target_vid_mem_f()) | bus_bar0_window_base_f(hi); - gk20a_dbg(gpu_dbg_mem, + nvgpu_log(g, gpu_dbg_mem, "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)", hi, lo, mem, sgl, bufbase, bufbase + nvgpu_sgt_get_phys(g, sgt, sgl), @@ -67,7 +67,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem, void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem, struct nvgpu_sgl *sgl) { - gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl); + nvgpu_log(g, gpu_dbg_mem, "end for %p,%p", mem, sgl); nvgpu_spinlock_release(&g->mm.pramin_window_lock); } diff --git a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c index ed5327cb..dea42b55 100644 --- a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A priv ring * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
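A recurring shape in the PMU hunks above: when a function only receives a struct nvgpu_pmu *, the gk20a pointer is first recovered from the containing object (via the gk20a_from_pmu() helper already used in the patch) before any nvgpu_log*() call. A minimal sketch of that pattern; the handler name is hypothetical, and only gk20a_from_pmu(), nvgpu_log_fn() and nvgpu_log() are taken from the hunks:

	/* Illustrative sketch only -- example_pmu_handler() is not part of the patch. */
	static void example_pmu_handler(struct nvgpu_pmu *pmu)
	{
		struct gk20a *g = gk20a_from_pmu(pmu);

		nvgpu_log_fn(g, " ");
		nvgpu_log(g, gpu_dbg_pmu, "pmu event handled");
	}
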
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -66,11 +66,11 @@ void gk20a_priv_ring_isr(struct gk20a *g) status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r()); status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r()); - gk20a_dbg(gpu_dbg_intr, "ringmaster intr status0: 0x%08x," + nvgpu_log(g, gpu_dbg_intr, "ringmaster intr status0: 0x%08x," "status1: 0x%08x", status0, status1); if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) { - gk20a_dbg(gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", + nvgpu_log(g, gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()), gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()), gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()), @@ -79,7 +79,7 @@ void gk20a_priv_ring_isr(struct gk20a *g) for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { if (status1 & BIT(gpc)) { - gk20a_dbg(gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc, + nvgpu_log(g, gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc, gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride), gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride), gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride), diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c index 60162f9d..5b9f973b 100644 --- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c @@ -1,7 +1,7 @@ /* * Tegra GK20A GPU Debugger Driver Register Ops * - * Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -72,7 +72,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, bool skip_read_lo, skip_read_hi; bool ok; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); @@ -108,7 +108,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, case REGOP(READ_32): ops[i].value_hi = 0; ops[i].value_lo = gk20a_readl(g, ops[i].offset); - gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", + nvgpu_log(g, gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", ops[i].value_lo, ops[i].offset); break; @@ -118,7 +118,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, ops[i].value_hi = gk20a_readl(g, ops[i].offset + 4); - gk20a_dbg(gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x", + nvgpu_log(g, gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x", ops[i].value_hi, ops[i].value_lo, ops[i].offset); break; @@ -157,12 +157,12 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, /* now update first 32bits */ gk20a_writel(g, ops[i].offset, data32_lo); - gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", + nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", data32_lo, ops[i].offset); /* if desired, update second 32bits */ if (ops[i].op == REGOP(WRITE_64)) { gk20a_writel(g, ops[i].offset + 4, data32_hi); - gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", + nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", data32_hi, ops[i].offset + 4); } @@ -189,7 +189,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, } clean_up: - gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); + nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err); return err; } @@ -395,7 +395,7 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s, } } - gk20a_dbg(gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d", + nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d", *ctx_wr_count, *ctx_rd_count); return ok; diff --git a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c index de5d0f78..b08f3e0a 100644 --- a/drivers/gpu/nvgpu/gk20a/therm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/therm_gk20a.c @@ -1,7 +1,7 @@ /* * GK20A Therm * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
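The regops hunks just above are the plainest instances of the conversion: gk20a_dbg(flags, ...) becomes nvgpu_log(g, flags, ...), and the empty-string gk20a_dbg_fn("") trace becomes nvgpu_log_fn(g, " "). A hedged before/after sketch of one such call site; the variable names val and off are illustrative, while the format string is taken from the exec_regops_gk20a() hunk:

	/* Before: no gk20a pointer required. */
	gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", val, off);
	gk20a_dbg_fn("");

	/* After: struct gk20a *g must be in scope. */
	nvgpu_log(g, gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", val, off);
	nvgpu_log_fn(g, " ");
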
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,7 +43,7 @@ int gk20a_init_therm_support(struct gk20a *g) { u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_init_therm_reset_enable_hw(g); if (err) @@ -73,7 +73,7 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g) u32 active_engine_id = 0; struct fifo_gk20a *f = &g->fifo; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (engine_id = 0; engine_id < f->num_engines; engine_id++) { active_engine_id = f->active_engines_list[engine_id]; @@ -104,6 +104,6 @@ int gk20a_elcg_init_idle_filters(struct gk20a *g) idle_filter &= ~therm_hubmmu_idle_filter_value_m(); gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c index 05b8fc61..62763da3 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c @@ -107,7 +107,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch) int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch) { - gk20a_dbg_fn(""); + struct gk20a *g = ch->g; + + nvgpu_log_fn(g, " "); /* check if channel is already bound to some TSG */ if (gk20a_is_channel_marked_as_tsg(ch)) { @@ -137,10 +139,10 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, nvgpu_ref_get(&tsg->refcount); - gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n", + nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n", tsg->tsgid, ch->chid); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -167,7 +169,7 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch) nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); ch->tsgid = NVGPU_INVALID_TSG_ID; - gk20a_dbg(gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", + nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", tsg->tsgid, ch->chid); return 0; @@ -204,7 +206,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level) struct gk20a *g = tsg->g; int ret; - gk20a_dbg(gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); + nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); switch (level) { case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: @@ -227,7 +229,7 @@ int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) { struct gk20a *g = tsg->g; - gk20a_dbg(gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); + nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); return g->ops.fifo.tsg_set_timeslice(tsg, timeslice); } @@ -300,7 +302,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid) } } - gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); + nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); return tsg; @@ -343,7 +345,7 @@ void gk20a_tsg_release(struct nvgpu_ref *ref) tsg->runlist_id = ~0; - gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); + nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); } struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c index cffe7199..615b6b46 100644 --- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c @@ -42,8 +42,8 @@ #include /*Defines*/ -#define gm20b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gm20b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata); @@ -101,16 +101,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) struct nvgpu_pmu *pmu = &g->pmu; struct lsf_ucode_desc *lsf_desc; int err; - gm20b_dbg_pmu("requesting PMU ucode in GM20B\n"); + gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n"); pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0); if (!pmu_fw) { nvgpu_err(g, "failed to load pmu ucode!!"); return -ENOENT; } g->acr.pmu_fw = pmu_fw; - gm20b_dbg_pmu("Loaded PMU ucode in for blob preparation"); + gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation"); - gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n"); + gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n"); pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0); if (!pmu_desc) { nvgpu_err(g, "failed to load pmu ucode desc!!"); @@ -129,7 +129,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) err = nvgpu_init_pmu_fw_support(pmu); if (err) { - gm20b_dbg_pmu("failed to set function pointers\n"); + gm20b_dbg_pmu(g, "failed to set function pointers\n"); goto release_sig; } @@ -148,7 +148,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; - gm20b_dbg_pmu("requesting PMU ucode in GM20B exit\n"); + gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n"); nvgpu_release_firmware(g, pmu_sig); return 0; release_sig: @@ -221,7 +221,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; - gm20b_dbg_pmu("fecs fw loaded\n"); + gm20b_dbg_pmu(g, "fecs fw loaded\n"); nvgpu_release_firmware(g, fecs_sig); return 0; free_lsf_desc: @@ -292,7 +292,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc; - gm20b_dbg_pmu("gpccs fw loaded\n"); + gm20b_dbg_pmu(g, "gpccs fw loaded\n"); nvgpu_release_firmware(g, gpccs_sig); return 0; free_lsf_desc: @@ -361,24 +361,24 @@ int prepare_ucode_blob(struct gk20a *g) non WPR blob of ucodes*/ err = nvgpu_init_pmu_fw_support(pmu); if (err) { - gm20b_dbg_pmu("failed to set function pointers\n"); + gm20b_dbg_pmu(g, "failed to set function pointers\n"); return err; } return 0; } plsfm = &lsfm_l; memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr)); - gm20b_dbg_pmu("fetching GMMU regs\n"); + gm20b_dbg_pmu(g, "fetching GMMU regs\n"); g->ops.fb.vpr_info_fetch(g); gr_gk20a_init_ctxsw_ucode(g); g->ops.pmu.get_wpr(g, &wpr_inf); - gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base); - gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size); + gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base); + gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size); /* Discover all managed falcons*/ err = lsfm_discover_ucode_images(g, plsfm); - gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); + gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); if (err) goto free_sgt; @@ -394,13 +394,13 @@ int prepare_ucode_blob(struct gk20a *g) if (err) goto free_sgt; - gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", + gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n", plsfm->managed_flcn_cnt, plsfm->wpr_size); lsfm_init_wpr_contents(g, 
plsfm, &g->acr.ucode_blob); } else { - gm20b_dbg_pmu("LSFM is managing no falcons.\n"); + gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n"); } - gm20b_dbg_pmu("prepare ucode blob return 0\n"); + gm20b_dbg_pmu(g, "prepare ucode blob return 0\n"); free_acr_resources(g, plsfm); free_sgt: return err; @@ -444,13 +444,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g, plsfm->managed_flcn_cnt++; } else { - gm20b_dbg_pmu("id not managed %d\n", + gm20b_dbg_pmu(g, "id not managed %d\n", ucode_img.lsf_desc->falcon_id); } /*Free any ucode image resources if not managing this falcon*/ if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { - gm20b_dbg_pmu("pmu is not LSFM managed\n"); + gm20b_dbg_pmu(g, "pmu is not LSFM managed\n"); lsfm_free_ucode_img_res(g, &ucode_img); } @@ -481,7 +481,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g, == 0) plsfm->managed_flcn_cnt++; } else { - gm20b_dbg_pmu("not managed %d\n", + gm20b_dbg_pmu(g, "not managed %d\n", ucode_img.lsf_desc->falcon_id); lsfm_free_nonpmu_ucode_img_res(g, &ucode_img); @@ -489,7 +489,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g, } } else { /* Consumed all available falcon objects */ - gm20b_dbg_pmu("Done checking for ucodes %d\n", i); + gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i); break; } } @@ -526,26 +526,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g, addr_base = p_lsfm->lsb_header.ucode_off; g->ops.pmu.get_wpr(g, &wpr_inf); addr_base += wpr_inf.wpr_base; - gm20b_dbg_pmu("pmu loader cfg u32 addrbase %x\n", (u32)addr_base); + gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base); /*From linux*/ addr_code = u64_lo32((addr_base + desc->app_start_offset + desc->app_resident_code_offset) >> 8); - gm20b_dbg_pmu("app start %d app res code off %d\n", + gm20b_dbg_pmu(g, "app start %d app res code off %d\n", desc->app_start_offset, desc->app_resident_code_offset); addr_data = u64_lo32((addr_base + desc->app_start_offset + desc->app_resident_data_offset) >> 8); - gm20b_dbg_pmu("app res data offset%d\n", + gm20b_dbg_pmu(g, "app res data offset%d\n", desc->app_resident_data_offset); - gm20b_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); + gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset); addr_args = ((pwr_falcon_hwcfg_dmem_size_v( gk20a_readl(g, pwr_falcon_hwcfg_r()))) << GK20A_PMU_DMEM_BLKSIZE2); addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); - gm20b_dbg_pmu("addr_args %x\n", addr_args); + gm20b_dbg_pmu(g, "addr_args %x\n", addr_args); /* Populate the loader_config state*/ ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE; @@ -599,7 +599,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g, g->ops.pmu.get_wpr(g, &wpr_inf); addr_base += wpr_inf.wpr_base; - gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, + gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, p_lsfm->wpr_header.falcon_id); addr_code = u64_lo32((addr_base + desc->app_start_offset + @@ -608,7 +608,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g, desc->app_start_offset + desc->app_resident_data_offset) >> 8); - gm20b_dbg_pmu("gen cfg %x u32 addrcode %x & data %x load offset %xID\n", + gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n", (u32)addr_code, (u32)addr_data, desc->bootloader_start_offset, p_lsfm->wpr_header.falcon_id); @@ -631,7 +631,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, struct nvgpu_pmu *pmu = &g->pmu; if (pnode->wpr_header.falcon_id != pmu->falcon_id) { - gm20b_dbg_pmu("non pmu. 
write flcn bl gen desc\n"); + gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n"); g->ops.pmu.flcn_populate_bl_dmem_desc(g, pnode, &pnode->bl_gen_desc_size, pnode->wpr_header.falcon_id); @@ -639,7 +639,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, } if (pmu->pmu_mode & PMU_LSFM_MANAGED) { - gm20b_dbg_pmu("pmu write flcn bl gen desc\n"); + gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n"); if (pnode->wpr_header.falcon_id == pmu->falcon_id) return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, &pnode->bl_gen_desc_size); @@ -672,46 +672,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm, nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), &pnode->wpr_header, sizeof(pnode->wpr_header)); - gm20b_dbg_pmu("wpr header"); - gm20b_dbg_pmu("falconid :%d", + gm20b_dbg_pmu(g, "wpr header"); + gm20b_dbg_pmu(g, "falconid :%d", pnode->wpr_header.falcon_id); - gm20b_dbg_pmu("lsb_offset :%x", + gm20b_dbg_pmu(g, "lsb_offset :%x", pnode->wpr_header.lsb_offset); - gm20b_dbg_pmu("bootstrap_owner :%d", + gm20b_dbg_pmu(g, "bootstrap_owner :%d", pnode->wpr_header.bootstrap_owner); - gm20b_dbg_pmu("lazy_bootstrap :%d", + gm20b_dbg_pmu(g, "lazy_bootstrap :%d", pnode->wpr_header.lazy_bootstrap); - gm20b_dbg_pmu("status :%d", + gm20b_dbg_pmu(g, "status :%d", pnode->wpr_header.status); /*Flush LSB header to memory*/ nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, &pnode->lsb_header, sizeof(pnode->lsb_header)); - gm20b_dbg_pmu("lsb header"); - gm20b_dbg_pmu("ucode_off :%x", + gm20b_dbg_pmu(g, "lsb header"); + gm20b_dbg_pmu(g, "ucode_off :%x", pnode->lsb_header.ucode_off); - gm20b_dbg_pmu("ucode_size :%x", + gm20b_dbg_pmu(g, "ucode_size :%x", pnode->lsb_header.ucode_size); - gm20b_dbg_pmu("data_size :%x", + gm20b_dbg_pmu(g, "data_size :%x", pnode->lsb_header.data_size); - gm20b_dbg_pmu("bl_code_size :%x", + gm20b_dbg_pmu(g, "bl_code_size :%x", pnode->lsb_header.bl_code_size); - gm20b_dbg_pmu("bl_imem_off :%x", + gm20b_dbg_pmu(g, "bl_imem_off :%x", pnode->lsb_header.bl_imem_off); - gm20b_dbg_pmu("bl_data_off :%x", + gm20b_dbg_pmu(g, "bl_data_off :%x", pnode->lsb_header.bl_data_off); - gm20b_dbg_pmu("bl_data_size :%x", + gm20b_dbg_pmu(g, "bl_data_size :%x", pnode->lsb_header.bl_data_size); - gm20b_dbg_pmu("app_code_off :%x", + gm20b_dbg_pmu(g, "app_code_off :%x", pnode->lsb_header.app_code_off); - gm20b_dbg_pmu("app_code_size :%x", + gm20b_dbg_pmu(g, "app_code_size :%x", pnode->lsb_header.app_code_size); - gm20b_dbg_pmu("app_data_off :%x", + gm20b_dbg_pmu(g, "app_data_off :%x", pnode->lsb_header.app_data_off); - gm20b_dbg_pmu("app_data_size :%x", + gm20b_dbg_pmu(g, "app_data_size :%x", pnode->lsb_header.app_data_size); - gm20b_dbg_pmu("flags :%x", + gm20b_dbg_pmu(g, "flags :%x", pnode->lsb_header.flags); /*If this falcon has a boot loader and related args, @@ -1028,7 +1028,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g) start = nvgpu_mem_get_addr(g, &acr->ucode_blob); size = acr->ucode_blob.size; - gm20b_dbg_pmu(""); + gm20b_dbg_pmu(g, " "); if (!acr_fw) { /*First time init case*/ @@ -1141,14 +1141,14 @@ int acr_ucode_patch_sig(struct gk20a *g, unsigned int *p_patch_ind) { unsigned int i, *p_sig; - gm20b_dbg_pmu(""); + gm20b_dbg_pmu(g, " "); if (!pmu_is_debug_mode_en(g)) { p_sig = p_prod_sig; - gm20b_dbg_pmu("PRODUCTION MODE\n"); + gm20b_dbg_pmu(g, "PRODUCTION MODE\n"); } else { p_sig = p_dbg_sig; - gm20b_dbg_pmu("DEBUG MODE\n"); + gm20b_dbg_pmu(g, "DEBUG MODE\n"); } /* Patching logic:*/ @@ -1171,7 +1171,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, struct 
hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; u32 dst; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | pwr_falcon_itfen_ctxen_enable_f()); @@ -1193,7 +1193,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, pmu_bl_gm10x_desc->bl_start_tag); - gm20b_dbg_pmu("Before starting falcon with BL\n"); + gm20b_dbg_pmu(g, "Before starting falcon with BL\n"); virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; @@ -1207,7 +1207,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g) struct nvgpu_pmu *pmu = &g->pmu; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -1279,7 +1279,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g, struct nvgpu_pmu *pmu = &g->pmu; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -1324,7 +1324,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw; struct hsflcn_bl_desc *pmu_bl_gm10x_desc; u32 *pmu_bl_gm10x = NULL; - gm20b_dbg_pmu(""); + gm20b_dbg_pmu(g, " "); if (!hsbl_fw) { hsbl_fw = nvgpu_request_firmware(g, @@ -1343,7 +1343,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size, 256); acr->hsbl_ucode.size = bl_sz; - gm20b_dbg_pmu("Executing Generic Bootloader\n"); + gm20b_dbg_pmu(g, "Executing Generic Bootloader\n"); /*TODO in code verify that enable PMU is done, scrubbing etc is done*/ @@ -1366,7 +1366,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) } nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz); - gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n"); + gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n"); } /* * Disable interrupts to avoid kernel hitting breakpoint due @@ -1377,9 +1377,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) gk20a_get_gr_idle_timeout(g))) goto err_unmap_bl; - gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, + gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g, pwr_falcon_mmu_phys_sec_r())); - gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); + gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size); @@ -1396,10 +1396,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) else goto err_unmap_bl; } - gm20b_dbg_pmu("after waiting for halt, err %x\n", err); - gm20b_dbg_pmu("phys sec reg %x\n", gk20a_readl(g, + gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err); + gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g, pwr_falcon_mmu_phys_sec_r())); - gm20b_dbg_pmu("sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); + gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r())); start_gm20b_pmu(g); return 0; err_unmap_bl: @@ -1430,7 +1430,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms) } g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r()); - gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); + gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities); data = gk20a_readl(g, pwr_falcon_mailbox0_r()); if (data) { nvgpu_err(g, "ACR boot failed, err %x", data); diff --git a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c index cdd70d5b..ca2a40bf 100644 --- 
a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B MMU * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -40,7 +40,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst) u64 iova = nvgpu_inst_block_addr(g, bar1_inst); u32 ptr_v = (u32)(iova >> bus_bar1_block_ptr_shift_v()); - gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v); + nvgpu_log_info(g, "bar1 inst block ptr: 0x%08x", ptr_v); gk20a_writel(g, bus_bar1_block_r(), nvgpu_aperture_mask(g, bar1_inst, diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c index fa751ecc..fb89752a 100644 --- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B Clocks * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,8 +36,8 @@ #include #include -#define gk20a_dbg_clk(fmt, arg...) \ - gk20a_dbg(gpu_dbg_clk, fmt, ##arg) +#define gk20a_dbg_clk(g, fmt, arg...) \ + nvgpu_log(g, gpu_dbg_clk, fmt, ##arg) #define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */ #define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */ @@ -138,6 +138,7 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl) static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, struct pll_parms *pll_params, u32 *target_freq, bool best_fit) { + struct gk20a *g = clk->g; u32 min_vco_f, max_vco_f; u32 best_M, best_N; u32 low_PL, high_PL, best_PL; @@ -149,7 +150,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, BUG_ON(target_freq == NULL); - gk20a_dbg_fn("request target freq %d MHz", *target_freq); + nvgpu_log_fn(g, "request target freq %d MHz", *target_freq); ref_clk_f = pll->clk_in; target_clk_f = *target_freq; @@ -172,7 +173,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, low_PL = min(low_PL, pll_params->max_PL); low_PL = max(low_PL, pll_params->min_PL); - gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)", + nvgpu_log_info(g, "low_PL %d(div%d), high_PL %d(div%d)", low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL)); for (pl = low_PL; pl <= high_PL; pl++) { @@ -217,7 +218,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll, goto found_match; } - gk20a_dbg_info("delta %d @ M %d, N %d, PL %d", + nvgpu_log_info(g, "delta %d @ M %d, N %d, PL %d", delta, m, n, pl); } } @@ -229,7 +230,7 @@ found_match: BUG_ON(best_delta == ~0U); if (best_fit && best_delta != 0) - gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll", + gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll", target_clk_f); pll->M = best_M; @@ -241,10 +242,10 @@ found_match: *target_freq = pll->freq; - gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)", + gk20a_dbg_clk(g, "actual target freq %d kHz, M %d, N %d, PL %d(div%d)", *target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -810,7 +811,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll) if (gpll->mode == 
GPC_PLL_MODE_DVFS) { gk20a_readl(g, trim_sys_gpcpll_cfg_r()); nvgpu_udelay(gpc_pll_params.na_lock_delay); - gk20a_dbg_clk("NA config_pll under bypass: %u (%u) kHz %d mV", + gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV", gpll->freq, gpll->freq / 2, (trim_sys_gpcpll_cfg3_dfs_testout_v( gk20a_readl(g, trim_sys_gpcpll_cfg3_r())) @@ -843,7 +844,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll) return -EBUSY; pll_locked: - gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x", + gk20a_dbg_clk(g, "locked config_pll under bypass r=0x%x v=0x%x", trim_sys_gpcpll_cfg_r(), cfg); /* set SYNC_MODE for glitchless switch out of bypass */ @@ -878,7 +879,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new, bool can_slide, pldiv_only; struct pll gpll; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!nvgpu_platform_is_silicon(g)) return 0; @@ -1028,7 +1029,7 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll) gpll->N = nsafe; clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs); - gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", + gk20a_dbg_clk(g, "safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL), gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff); } @@ -1103,7 +1104,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new, clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal); clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff); - gk20a_dbg_clk("config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", + gk20a_dbg_clk(g, "config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d", gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL, nvgpu_pl_to_div(gpll_new->PL), max(gpll_new->dvfs.mv, gpll_old->dvfs.mv), @@ -1168,14 +1169,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g) unsigned long safe_rate; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_mutex_init(&clk->clk_mutex); if (err) return err; if (clk->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -1229,7 +1230,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g) clk->sw_ready = true; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); nvgpu_info(g, "GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)", clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? 
" NA mode," : "", @@ -1321,7 +1322,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g) { u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */ data = gk20a_readl(g, trim_sys_gpc2clk_out_r()); @@ -1394,7 +1395,7 @@ static int set_pll_freq(struct gk20a *g, int allow_slide) struct clk_gk20a *clk = &g->clk; int err = 0; - gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz", + nvgpu_log_fn(g, "last freq: %dMHz, target freq %dMHz", clk->gpc_pll_last.freq, clk->gpc_pll.freq); /* If programming with dynamic sliding failed, re-try under bypass */ @@ -1427,7 +1428,7 @@ int gm20b_init_clk_support(struct gk20a *g) struct clk_gk20a *clk = &g->clk; u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&clk->clk_mutex); clk->clk_hw_on = true; diff --git a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c index 5bc6d452..b2a815fb 100644 --- a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c @@ -38,7 +38,7 @@ void fb_gm20b_init_fs_state(struct gk20a *g) { - gk20a_dbg_info("initialize gm20b fb"); + nvgpu_log_info(g, "initialize gm20b fb"); gk20a_writel(g, fb_fbhub_num_active_ltcs_r(), g->ltc_count); diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c index 35a7a9e1..b73abeda 100644 --- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c @@ -47,7 +47,7 @@ void channel_gm20b_bind(struct channel_gk20a *c) u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block) >> ram_in_base_shift_v(); - gk20a_dbg_info("bind channel %d inst ptr 0x%08x", + nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x", c->chid, inst_ptr); diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c index 1c966c22..331c3af9 100644 --- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c @@ -47,7 +47,7 @@ void gr_gm20b_init_gpc_mmu(struct gk20a *g) { u32 temp; - gk20a_dbg_info("initialize gpc mmu"); + nvgpu_log_info(g, "initialize gpc mmu"); if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { /* Bypass MMU check for non-secure boot. 
For @@ -168,7 +168,7 @@ void gr_gm20b_commit_global_bundle_cb(struct gk20a *g, data = min_t(u32, data, g->gr.min_gpm_fifo_depth); - gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", + nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d", g->gr.bundle_cb_token_limit, data); gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(), @@ -193,7 +193,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g, u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -280,20 +280,20 @@ void gr_gm20b_set_rd_coalesce(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r()); val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(), gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data)); gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr, u32 class_num, u32 offset, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (class_num == MAXWELL_COMPUTE_B) { switch (offset << 2) { @@ -341,7 +341,7 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF) return; */ @@ -390,7 +390,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (cb_size > gr->attrib_cb_size) cb_size = gr->attrib_cb_size; @@ -665,7 +665,7 @@ int gr_gm20b_init_fs_state(struct gk20a *g) { int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_init_fs_state(g); if (err) @@ -762,7 +762,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g) gr_fecs_falcon_hwcfg_r(); u8 falcon_id_mask = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), @@ -829,7 +829,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g) gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff); gk20a_writel(g, gr_fecs_cpuctl_alias_r(), gr_fecs_cpuctl_startcpu_f(1)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -858,7 +858,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g, { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); if (err) @@ -867,7 +867,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g, if (class == MAXWELL_COMPUTE_B) gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -881,7 +881,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g, u32 cta_preempt_option = ctxsw_prog_main_image_preemption_options_control_cta_enabled_f(); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -889,13 +889,13 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g, gr_ctx = &tsg->gr_ctx; if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { - gk20a_dbg_info("CTA: %x", cta_preempt_option); + nvgpu_log_info(g, "CTA: %x", cta_preempt_option); nvgpu_mem_wr(g, mem, 
ctxsw_prog_main_image_preemption_options_o(), cta_preempt_option); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int gr_gm20b_dump_gr_status_regs(struct gk20a *g, @@ -1044,7 +1044,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c, struct nvgpu_mem *mem; u32 v; - gk20a_dbg_fn(""); + nvgpu_log_fn(c->g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1066,7 +1066,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c, nvgpu_mem_end(c->g, mem); - gk20a_dbg_fn("done"); + nvgpu_log_fn(c->g, "done"); return 0; } @@ -1220,19 +1220,19 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state) /* Only for debug purpose */ for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { - gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n", sm_id, w_state[sm_id].valid_warps[0]); - gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n", sm_id, w_state[sm_id].valid_warps[1]); - gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n", sm_id, w_state[sm_id].trapped_warps[0]); - gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n", sm_id, w_state[sm_id].trapped_warps[1]); - gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n", sm_id, w_state[sm_id].paused_warps[0]); - gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n", sm_id, w_state[sm_id].paused_warps[1]); } } diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c index dcb65372..66cd49e7 100644 --- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c @@ -61,7 +61,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (max_comptag_lines == 0U) return 0; @@ -87,9 +87,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) if (max_comptag_lines > hw_max_comptag_lines) max_comptag_lines = hw_max_comptag_lines; - gk20a_dbg_info("compbit backing store size : %d", + nvgpu_log_info(g, "compbit backing store size : %d", compbit_backing_size); - gk20a_dbg_info("max comptag lines : %d", + nvgpu_log_info(g, "max comptag lines : %d", max_comptag_lines); err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); @@ -121,7 +121,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); const u32 max_lines = 16384U; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); @@ -134,7 +134,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, nvgpu_mutex_acquire(&g->mm.l2_op_lock); - gk20a_dbg_info("clearing CBC lines %u..%u", min, iter_max); + nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max); if (op == gk20a_cbc_op_clear) { gk20a_writel( @@ -205,11 +205,11 @@ void gm20b_ltc_init_fs_state(struct gk20a *g) { u32 reg; - gk20a_dbg_info("initialize gm20b l2"); + nvgpu_log_info(g, "initialize gm20b l2"); g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); - gk20a_dbg_info("%d ltcs out of %d", g->ltc_count, g->max_ltc_count); + nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count); gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(), g->ltc_count); @@ -459,7 +459,7 @@ void 
gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr) gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), compbit_base_post_divide); - gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, + nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n", (u32)(compbit_store_iova >> 32), (u32)(compbit_store_iova & 0xffffffff), diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c index 46cd1fc6..deca6686 100644 --- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B MMU * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,9 +36,9 @@ void gm20b_mm_set_big_page_size(struct gk20a *g, { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gk20a_dbg_info("big page size %d\n", size); + nvgpu_log_info(g, "big page size %d\n", size); val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w()); val &= ~ram_in_big_page_size_m(); @@ -48,7 +48,7 @@ void gm20b_mm_set_big_page_size(struct gk20a *g, val |= ram_in_big_page_size_128kb_f(); nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } u32 gm20b_mm_get_big_page_sizes(void) diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c index 1c5fdce0..aa992c37 100644 --- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B PMU * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -37,8 +37,8 @@ #include #include -#define gm20b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gm20b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) /* PROD settings for ELPG sequencing registers*/ @@ -108,7 +108,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g) u32 reg_writes; u32 index; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->elpg_enabled) { reg_writes = ((sizeof(_pginitseq_gm20b) / @@ -120,20 +120,20 @@ int gm20b_pmu_setup_elpg(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION"); + gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION"); if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) g->pmu_lsf_pmu_wpr_init_done = 1; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } @@ -143,7 +143,7 @@ int gm20b_pmu_init_acr(struct gk20a *g) struct pmu_cmd cmd; u32 seq; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* init ACR */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -153,11 +153,11 @@ int gm20b_pmu_init_acr(struct gk20a *g) cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION; cmd.cmd.acr.init_wpr.regionid = 0x01; cmd.cmd.acr.init_wpr.wproffset = 0x00; - gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION"); + gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_acr_init_wpr_msg, pmu, &seq, ~0); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -165,14 +165,14 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON"); + gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON"); - gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid); + gm20b_dbg_pmu(g, "response code = %x\n", msg->msg.acr.acrmsg.falconid); g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms, @@ -182,7 +182,7 @@ static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms, u32 reg; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); @@ -203,9 +203,9 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags) struct pmu_cmd cmd; u32 seq; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gm20b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); + gm20b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); if (g->pmu_lsf_pmu_wpr_init_done) { /* send message to load FECS falcon */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -216,13 +216,13 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags) PMU_ACR_CMD_ID_BOOTSTRAP_FALCON; cmd.cmd.acr.bootstrap_falcon.flags = flags; cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id; - gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n", + gm20b_dbg_pmu(g, "cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n", falcon_id); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return; } diff --git a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c index ce4d4fab..dfe977ff 100644 --- 
a/drivers/gpu/nvgpu/gm20b/therm_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/therm_gm20b.c @@ -1,7 +1,7 @@ /* * GM20B THERMAL * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,7 +32,7 @@ int gm20b_init_therm_setup_hw(struct gk20a *g) { u32 v; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* program NV_THERM registers */ gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c index 5ab8cfcc..61b443e0 100644 --- a/drivers/gpu/nvgpu/gp106/acr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c @@ -43,8 +43,8 @@ #include /*Defines*/ -#define gp106_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gp106_dbg_pmu(g, fmt, arg...) \ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img_v1 *udata); @@ -113,7 +113,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) struct lsf_ucode_desc_v1 *lsf_desc; int err; - gp106_dbg_pmu("requesting PMU ucode in gp106\n"); + gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n"); pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, NVGPU_REQUEST_FIRMWARE_NO_SOC); if (!pmu_fw) { @@ -121,9 +121,9 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) return -ENOENT; } g->acr.pmu_fw = pmu_fw; - gp106_dbg_pmu("Loaded PMU ucode in for blob preparation"); + gp106_dbg_pmu(g, "Loaded PMU ucode in for blob preparation"); - gp106_dbg_pmu("requesting PMU ucode desc in GM20B\n"); + gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n"); pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, NVGPU_REQUEST_FIRMWARE_NO_SOC); if (!pmu_desc) { @@ -164,7 +164,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; - gp106_dbg_pmu("requesting PMU ucode in GM20B exit\n"); + gp106_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n"); nvgpu_release_firmware(g, pmu_sig); return 0; @@ -262,7 +262,7 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; - gp106_dbg_pmu("fecs fw loaded\n"); + gp106_dbg_pmu(g, "fecs fw loaded\n"); nvgpu_release_firmware(g, fecs_sig); return 0; free_lsf_desc: @@ -358,7 +358,7 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) p_img->fw_ver = NULL; p_img->header = NULL; p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc; - gp106_dbg_pmu("gpccs fw loaded\n"); + gp106_dbg_pmu(g, "gpccs fw loaded\n"); nvgpu_release_firmware(g, gpccs_sig); return 0; free_lsf_desc: @@ -381,7 +381,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g) non WPR blob of ucodes*/ err = nvgpu_init_pmu_fw_support(pmu); if (err) { - gp106_dbg_pmu("failed to set function pointers\n"); + gp106_dbg_pmu(g, "failed to set function pointers\n"); return err; } return 0; @@ -391,12 +391,12 @@ int gp106_prepare_ucode_blob(struct gk20a *g) gr_gk20a_init_ctxsw_ucode(g); g->ops.pmu.get_wpr(g, &wpr_inf); - gp106_dbg_pmu("wpr carveout base:%llx\n", (wpr_inf.wpr_base)); - gp106_dbg_pmu("wpr carveout size :%x\n", (u32)wpr_inf.size); + gp106_dbg_pmu(g, "wpr carveout base:%llx\n", 
(wpr_inf.wpr_base)); + gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size); /* Discover all managed falcons*/ err = lsfm_discover_ucode_images(g, plsfm); - gp106_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); + gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); if (err) goto exit_err; @@ -412,14 +412,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g) if (err) goto exit_err; - gp106_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n", + gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n", plsfm->managed_flcn_cnt, plsfm->wpr_size); lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob); } else { - gp106_dbg_pmu("LSFM is managing no falcons.\n"); + gp106_dbg_pmu(g, "LSFM is managing no falcons.\n"); } - gp106_dbg_pmu("prepare ucode blob return 0\n"); + gp106_dbg_pmu(g, "prepare ucode blob return 0\n"); free_acr_resources(g, plsfm); exit_err: @@ -465,14 +465,14 @@ int lsfm_discover_ucode_images(struct gk20a *g, plsfm->managed_flcn_cnt++; } else { - gp106_dbg_pmu("id not managed %d\n", + gp106_dbg_pmu(g, "id not managed %d\n", ucode_img.lsf_desc->falcon_id); } } /*Free any ucode image resources if not managing this falcon*/ if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) { - gp106_dbg_pmu("pmu is not LSFM managed\n"); + gp106_dbg_pmu(g, "pmu is not LSFM managed\n"); lsfm_free_ucode_img_res(g, &ucode_img); } @@ -503,7 +503,7 @@ int lsfm_discover_ucode_images(struct gk20a *g, == 0) plsfm->managed_flcn_cnt++; } else { - gp106_dbg_pmu("not managed %d\n", + gp106_dbg_pmu(g, "not managed %d\n", ucode_img.lsf_desc->falcon_id); lsfm_free_nonpmu_ucode_img_res(g, &ucode_img); @@ -511,7 +511,7 @@ int lsfm_discover_ucode_images(struct gk20a *g, } } else { /* Consumed all available falcon objects */ - gp106_dbg_pmu("Done checking for ucodes %d\n", i); + gp106_dbg_pmu(g, "Done checking for ucodes %d\n", i); break; } } @@ -549,19 +549,19 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g, g->ops.pmu.get_wpr(g, &wpr_inf); addr_base += (wpr_inf.wpr_base); - gp106_dbg_pmu("pmu loader cfg addrbase 0x%llx\n", addr_base); + gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base); /*From linux*/ addr_code = addr_base + desc->app_start_offset + desc->app_resident_code_offset; - gp106_dbg_pmu("app start %d app res code off %d\n", + gp106_dbg_pmu(g, "app start %d app res code off %d\n", desc->app_start_offset, desc->app_resident_code_offset); addr_data = addr_base + desc->app_start_offset + desc->app_resident_data_offset; - gp106_dbg_pmu("app res data offset%d\n", + gp106_dbg_pmu(g, "app res data offset%d\n", desc->app_resident_data_offset); - gp106_dbg_pmu("bl start off %d\n", desc->bootloader_start_offset); + gp106_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset); addr_args = ((pwr_falcon_hwcfg_dmem_size_v( gk20a_readl(g, pwr_falcon_hwcfg_r()))) @@ -569,7 +569,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g, addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); - gp106_dbg_pmu("addr_args %x\n", addr_args); + gp106_dbg_pmu(g, "addr_args %x\n", addr_args); /* Populate the LOADER_CONFIG state */ memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v1)); @@ -621,8 +621,8 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g, g->ops.pmu.get_wpr(g, &wpr_inf); addr_base += wpr_inf.wpr_base; - gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id); - gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base); + gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id); + gp106_dbg_pmu(g, "gen loader cfg addrbase %llx ", 
addr_base); addr_code = addr_base + desc->app_start_offset + desc->app_resident_code_offset; @@ -630,7 +630,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g, desc->app_start_offset + desc->app_resident_data_offset; - gp106_dbg_pmu("gen cfg addrcode %llx data %llx load offset %x", + gp106_dbg_pmu(g, "gen cfg addrcode %llx data %llx load offset %x", addr_code, addr_data, desc->bootloader_start_offset); /* Populate the LOADER_CONFIG state */ @@ -653,7 +653,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, struct nvgpu_pmu *pmu = &g->pmu; if (pnode->wpr_header.falcon_id != pmu->falcon_id) { - gp106_dbg_pmu("non pmu. write flcn bl gen desc\n"); + gp106_dbg_pmu(g, "non pmu. write flcn bl gen desc\n"); g->ops.pmu.flcn_populate_bl_dmem_desc(g, pnode, &pnode->bl_gen_desc_size, pnode->wpr_header.falcon_id); @@ -661,7 +661,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, } if (pmu->pmu_mode & PMU_LSFM_MANAGED) { - gp106_dbg_pmu("pmu write flcn bl gen desc\n"); + gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n"); if (pnode->wpr_header.falcon_id == pmu->falcon_id) return g->ops.pmu.pmu_populate_loader_cfg(g, pnode, &pnode->bl_gen_desc_size); @@ -694,46 +694,46 @@ void lsfm_init_wpr_contents(struct gk20a *g, nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), &pnode->wpr_header, sizeof(pnode->wpr_header)); - gp106_dbg_pmu("wpr header"); - gp106_dbg_pmu("falconid :%d", + gp106_dbg_pmu(g, "wpr header"); + gp106_dbg_pmu(g, "falconid :%d", pnode->wpr_header.falcon_id); - gp106_dbg_pmu("lsb_offset :%x", + gp106_dbg_pmu(g, "lsb_offset :%x", pnode->wpr_header.lsb_offset); - gp106_dbg_pmu("bootstrap_owner :%d", + gp106_dbg_pmu(g, "bootstrap_owner :%d", pnode->wpr_header.bootstrap_owner); - gp106_dbg_pmu("lazy_bootstrap :%d", + gp106_dbg_pmu(g, "lazy_bootstrap :%d", pnode->wpr_header.lazy_bootstrap); - gp106_dbg_pmu("status :%d", + gp106_dbg_pmu(g, "status :%d", pnode->wpr_header.status); /*Flush LSB header to memory*/ nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, &pnode->lsb_header, sizeof(pnode->lsb_header)); - gp106_dbg_pmu("lsb header"); - gp106_dbg_pmu("ucode_off :%x", + gp106_dbg_pmu(g, "lsb header"); + gp106_dbg_pmu(g, "ucode_off :%x", pnode->lsb_header.ucode_off); - gp106_dbg_pmu("ucode_size :%x", + gp106_dbg_pmu(g, "ucode_size :%x", pnode->lsb_header.ucode_size); - gp106_dbg_pmu("data_size :%x", + gp106_dbg_pmu(g, "data_size :%x", pnode->lsb_header.data_size); - gp106_dbg_pmu("bl_code_size :%x", + gp106_dbg_pmu(g, "bl_code_size :%x", pnode->lsb_header.bl_code_size); - gp106_dbg_pmu("bl_imem_off :%x", + gp106_dbg_pmu(g, "bl_imem_off :%x", pnode->lsb_header.bl_imem_off); - gp106_dbg_pmu("bl_data_off :%x", + gp106_dbg_pmu(g, "bl_data_off :%x", pnode->lsb_header.bl_data_off); - gp106_dbg_pmu("bl_data_size :%x", + gp106_dbg_pmu(g, "bl_data_size :%x", pnode->lsb_header.bl_data_size); - gp106_dbg_pmu("app_code_off :%x", + gp106_dbg_pmu(g, "app_code_off :%x", pnode->lsb_header.app_code_off); - gp106_dbg_pmu("app_code_size :%x", + gp106_dbg_pmu(g, "app_code_size :%x", pnode->lsb_header.app_code_size); - gp106_dbg_pmu("app_data_off :%x", + gp106_dbg_pmu(g, "app_data_off :%x", pnode->lsb_header.app_data_off); - gp106_dbg_pmu("app_data_size :%x", + gp106_dbg_pmu(g, "app_data_size :%x", pnode->lsb_header.app_data_size); - gp106_dbg_pmu("flags :%x", + gp106_dbg_pmu(g, "flags :%x", pnode->lsb_header.flags); /*If this falcon has a boot loader and related args, @@ -1049,7 +1049,7 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g) u32 *acr_ucode_data_t210_load; struct wpr_carveout_info wpr_inf; - 
gp106_dbg_pmu(""); + gp106_dbg_pmu(g, " "); if (!acr_fw) { /*First time init case*/ diff --git a/drivers/gpu/nvgpu/gp106/bios_gp106.c b/drivers/gpu/nvgpu/gp106/bios_gp106.c index 8511d3c2..3363aeba 100644 --- a/drivers/gpu/nvgpu/gp106/bios_gp106.c +++ b/drivers/gpu/nvgpu/gp106/bios_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -56,13 +56,13 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port) u32 *src_u32 = (u32 *)src; u32 blk; - gk20a_dbg_info("upload %d bytes to %x", size, dst); + nvgpu_log_info(g, "upload %d bytes to %x", size, dst); words = DIV_ROUND_UP(size, 4); blk = dst >> 8; - gk20a_dbg_info("upload %d words to %x blk %d", + nvgpu_log_info(g, "upload %d words to %x blk %d", words, dst, blk); gk20a_writel(g, pwr_falcon_dmemc_r(port), pwr_falcon_dmemc_offs_f(dst >> 2) | @@ -79,7 +79,7 @@ static int gp106_bios_devinit(struct gk20a *g) int devinit_completed; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_flcn_reset(g->pmu.flcn)) { err = -ETIMEDOUT; @@ -128,7 +128,7 @@ static int gp106_bios_devinit(struct gk20a *g) gk20a_get_gr_idle_timeout(g)); out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -146,7 +146,7 @@ static int gp106_bios_preos(struct gk20a *g) { int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_flcn_reset(g->pmu.flcn)) { err = -ETIMEDOUT; @@ -177,7 +177,7 @@ static int gp106_bios_preos(struct gk20a *g) gk20a_get_gr_idle_timeout(g)); out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -186,12 +186,12 @@ int gp106_bios_init(struct gk20a *g) unsigned int i; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->bios_is_init) return 0; - gk20a_dbg_info("reading bios from EEPROM"); + nvgpu_log_info(g, "reading bios from EEPROM"); g->bios.size = BIOS_SIZE; g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE); if (!g->bios.data) @@ -218,7 +218,7 @@ int gp106_bios_init(struct gk20a *g) goto free_firmware; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); err = gp106_bios_devinit(g); if (err) { diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c index 9a94a7b9..d19baac5 100644 --- a/drivers/gpu/nvgpu/gp106/clk_gp106.c +++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c @@ -36,9 +36,6 @@ #include -#define gk20a_dbg_clk(fmt, arg...) \ - gk20a_dbg(gpu_dbg_clk, fmt, ##arg) - #ifdef CONFIG_DEBUG_FS static int clk_gp106_debugfs_init(struct gk20a *g); #endif @@ -82,7 +79,7 @@ int gp106_init_clk_support(struct gk20a *g) struct clk_gk20a *clk = &g->clk; u32 err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_mutex_init(&clk->clk_mutex); if (err) @@ -374,7 +371,7 @@ static int clk_gp106_debugfs_init(struct gk20a *g) d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root, g, &gpc_cfc_fops); - gk20a_dbg(gpu_dbg_info, "g=%p", g); + nvgpu_log(g, gpu_dbg_info, "g=%p", g); for (i = 0; i < g->clk.namemap_num; i++) { if (g->clk.clk_namemap[i].is_enable) { diff --git a/drivers/gpu/nvgpu/gp106/fb_gp106.c b/drivers/gpu/nvgpu/gp106/fb_gp106.c index 34e9ee30..2bf97f61 100644 --- a/drivers/gpu/nvgpu/gp106/fb_gp106.c +++ b/drivers/gpu/nvgpu/gp106/fb_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -39,7 +39,7 @@ void gp106_fb_reset(struct gk20a *g) do { u32 w = gk20a_readl(g, fb_niso_scrub_status_r()); if (fb_niso_scrub_status_flag_v(w)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); break; } nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT); diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c index 1bd24b45..2e5f29ee 100644 --- a/drivers/gpu/nvgpu/gp106/gr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c @@ -58,7 +58,7 @@ bool gr_gp106_is_valid_class(struct gk20a *g, u32 class_num) default: break; } - gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); + nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid); return valid; } @@ -75,7 +75,7 @@ static void gr_gp106_set_go_idle_timeout(struct gk20a *g, u32 data) int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr, u32 class_num, u32 offset, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (class_num == PASCAL_COMPUTE_B) { switch (offset << 2) { @@ -177,9 +177,9 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g, g->gr.max_tpc_count; attrib_cb_size = ALIGN(attrib_cb_size, 128); - gk20a_dbg_info("gfxp context spill_size=%d", spill_size); - gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); - gk20a_dbg_info("gfxp context attrib_cb_size=%d", + nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size); + nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size); + nvgpu_log_info(g, "gfxp context attrib_cb_size=%d", attrib_cb_size); err = gr_gp10b_alloc_buffer(vm, diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c index 82cc36aa..6d3154e3 100644 --- a/drivers/gpu/nvgpu/gp106/hal_gp106.c +++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c @@ -765,7 +765,7 @@ int gp106_init_hal(struct gk20a *g) { struct gpu_ops *gops = &g->ops; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gops->bios = gp106_ops.bios; gops->ltc = gp106_ops.ltc; @@ -828,7 +828,7 @@ int gp106_init_hal(struct gk20a *g) g->name = "gp10x"; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } diff --git a/drivers/gpu/nvgpu/gp106/mclk_gp106.c b/drivers/gpu/nvgpu/gp106/mclk_gp106.c index 44f0b1d9..bfb66e6e 100644 --- a/drivers/gpu/nvgpu/gp106/mclk_gp106.c +++ b/drivers/gpu/nvgpu/gp106/mclk_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -2998,7 +2998,7 @@ static void mclk_seq_pmucmdhandler(struct gk20a *g, struct pmu_msg *_msg, struct nv_pmu_seq_msg_run_script *seq_msg; u32 msg_status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (status != 0) { nvgpu_err(g, "mclk seq_script cmd aborted"); @@ -3041,7 +3041,7 @@ static int mclk_get_memclk_table(struct gk20a *g) u8 *mem_entry_ptr = NULL; int index; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); mem_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, @@ -3213,7 +3213,7 @@ int gp106_mclk_init(struct gk20a *g) u32 index; struct memory_config *m; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); mclk = &g->clk_pmu.clk_mclk; @@ -3316,7 +3316,7 @@ int gp106_mclk_change(struct gk20a *g, u16 val) #endif u32 speed; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); memset(&payload, 0, sizeof(struct pmu_payload)); @@ -3508,7 +3508,7 @@ static int mclk_debugfs_init(struct gk20a *g) struct dentry *gpu_root = l->debugfs; struct dentry *d; - gk20a_dbg(gpu_dbg_info, "g=%p", g); + nvgpu_log(g, gpu_dbg_info, "g=%p", g); d = debugfs_create_file( "mclk_speed_set", diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c index d4041905..2a52dd4e 100644 --- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c +++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -98,14 +98,14 @@ u32 gp106_pmu_pg_engines_list(struct gk20a *g) static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "PG PARAM cmd aborted"); return; } - gp106_dbg_pmu("PG PARAM is acknowledged from PMU %x", + gp106_dbg_pmu(g, "PG PARAM is acknowledged from PMU %x", msg->msg.pg.msg_type); } @@ -135,7 +135,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) cmd.cmd.pg.gr_init_param.featuremask = NVGPU_PMU_GR_FEATURE_MASK_RPPG; - gp106_dbg_pmu("cmd post GR PMU_PG_CMD_ID_PG_PARAM"); + gp106_dbg_pmu(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_param_msg, pmu, &seq, ~0); } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { @@ -152,7 +152,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) NVGPU_PMU_MS_FEATURE_MASK_RPPG | NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING; - gp106_dbg_pmu("cmd post MS PMU_PG_CMD_ID_PG_PARAM"); + gp106_dbg_pmu(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_param_msg, pmu, &seq, ~0); } @@ -240,9 +240,9 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, struct pmu_cmd cmd; u32 seq; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); + gp106_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); if (g->pmu_lsf_pmu_wpr_init_done) { /* send message to load FECS falcon */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -258,13 +258,13 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, 
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0; cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0; - gp106_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", + gp106_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", falconidmask); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask) diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h index bd640869..361f6e8b 100644 --- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h +++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,8 +23,8 @@ #ifndef __PMU_GP106_H_ #define __PMU_GP106_H_ -#define gp106_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gp106_dbg_pmu(g, fmt, arg...) \ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) struct gk20a; diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c index 08c7f84a..1c959022 100644 --- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c +++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -31,8 +31,8 @@ #include /*Defines*/ -#define gm20b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gm20b_dbg_pmu(g, fmt, arg...) \ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout) { @@ -56,7 +56,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout) } g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r()); - gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); + gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities); data = gk20a_readl(g, psec_falcon_mailbox0_r()); if (data) { nvgpu_err(g, "ACR boot failed, err %x", data); @@ -87,7 +87,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu, u32 data = 0; u32 dst; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* SEC2 Config */ gk20a_writel(g, psec_falcon_itfen_r(), @@ -123,7 +123,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, pmu_bl_gm10x_desc->bl_start_tag); - gm20b_dbg_pmu("Before starting falcon with BL\n"); + gm20b_dbg_pmu(g, "Before starting falcon with BL\n"); gk20a_writel(g, psec_falcon_mailbox0_r(), 0xDEADA5A5); diff --git a/drivers/gpu/nvgpu/gp106/therm_gp106.c b/drivers/gpu/nvgpu/gp106/therm_gp106.c index 64d602cf..b3862abe 100644 --- a/drivers/gpu/nvgpu/gp106/therm_gp106.c +++ b/drivers/gpu/nvgpu/gp106/therm_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -97,7 +97,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g) u32 active_engine_id = 0; struct fifo_gk20a *f = &g->fifo; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (engine_id = 0; engine_id < f->num_engines; engine_id++) { active_engine_id = f->active_engines_list[engine_id]; @@ -124,7 +124,7 @@ int gp106_elcg_init_idle_filters(struct gk20a *g) idle_filter &= ~therm_hubmmu_idle_filter_value_m(); gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.c b/drivers/gpu/nvgpu/gp106/xve_gp106.c index 9becd0f2..e77ea5c1 100644 --- a/drivers/gpu/nvgpu/gp106/xve_gp106.c +++ b/drivers/gpu/nvgpu/gp106/xve_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -204,19 +204,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) int attempts = 10, err_status = 0; g->ops.xve.get_speed(g, ¤t_link_speed); - xv_sc_dbg(PRE_CHANGE, "Executing PCIe link change."); - xv_sc_dbg(PRE_CHANGE, " Current speed: %s", + xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change."); + xv_sc_dbg(g, PRE_CHANGE, " Current speed: %s", xve_speed_to_str(current_link_speed)); - xv_sc_dbg(PRE_CHANGE, " Next speed: %s", + xv_sc_dbg(g, PRE_CHANGE, " Next speed: %s", xve_speed_to_str(next_link_speed)); - xv_sc_dbg(PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x", + xv_sc_dbg(g, PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x", gk20a_readl(g, xp_pl_link_config_r(0))); - xv_sc_dbg(DISABLE_ASPM, "Disabling ASPM..."); + xv_sc_dbg(g, DISABLE_ASPM, "Disabling ASPM..."); disable_aspm_gp106(g); - xv_sc_dbg(DISABLE_ASPM, " Done!"); + xv_sc_dbg(g, DISABLE_ASPM, " Done!"); - xv_sc_dbg(DL_SAFE_MODE, "Putting DL in safe mode..."); + xv_sc_dbg(g, DL_SAFE_MODE, "Putting DL in safe mode..."); saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0)); /* @@ -225,12 +225,12 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) dl_mgr = saved_dl_mgr; dl_mgr |= xp_dl_mgr_safe_timing_f(1); gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr); - xv_sc_dbg(DL_SAFE_MODE, " Done!"); + xv_sc_dbg(g, DL_SAFE_MODE, " Done!"); nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER); - xv_sc_dbg(CHECK_LINK, "Checking for link idle..."); + xv_sc_dbg(g, CHECK_LINK, "Checking for link idle..."); do { pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0)); if ((xp_pl_link_config_ltssm_status_f(pl_link_config) == @@ -245,9 +245,9 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) goto done; } - xv_sc_dbg(CHECK_LINK, " Done"); + xv_sc_dbg(g, CHECK_LINK, " Done"); - xv_sc_dbg(LINK_SETTINGS, "Preparing next link settings"); + xv_sc_dbg(g, LINK_SETTINGS, "Preparing next link settings"); pl_link_config &= ~xp_pl_link_config_max_link_rate_m(); switch (next_link_speed) { case GPU_XVE_SPEED_2P5: @@ -297,10 +297,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) else BUG(); - xv_sc_dbg(LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config); - xv_sc_dbg(LINK_SETTINGS, " Done"); + xv_sc_dbg(g, LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config); + xv_sc_dbg(g, 
LINK_SETTINGS, " Done"); - xv_sc_dbg(EXEC_CHANGE, "Running link speed change..."); + xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change..."); nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER); @@ -316,7 +316,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) goto done; } - xv_sc_dbg(EXEC_CHANGE, " Wrote PL_LINK_CONFIG."); + xv_sc_dbg(g, EXEC_CHANGE, " Wrote PL_LINK_CONFIG."); pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0)); @@ -326,7 +326,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) xp_pl_link_config_ltssm_directive_f( xp_pl_link_config_ltssm_directive_change_speed_v())); - xv_sc_dbg(EXEC_CHANGE, " Executing change (0x%08x)!", + xv_sc_dbg(g, EXEC_CHANGE, " Executing change (0x%08x)!", pl_link_config); gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config); @@ -348,11 +348,11 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) if (nvgpu_timeout_peek_expired(&timeout)) { err_status = -ETIMEDOUT; - xv_sc_dbg(EXEC_CHANGE, " timeout; pl_link_config = 0x%x", + xv_sc_dbg(g, EXEC_CHANGE, " timeout; pl_link_config = 0x%x", pl_link_config); } - xv_sc_dbg(EXEC_CHANGE, " Change done... Checking status"); + xv_sc_dbg(g, EXEC_CHANGE, " Change done... Checking status"); if (pl_link_config == 0xffffffff) { WARN(1, "GPU fell of PCI bus!?"); @@ -366,19 +366,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) link_control_status = g->ops.xve.xve_readl(g, xve_link_control_status_r()); - xv_sc_dbg(EXEC_CHANGE, " target %d vs current %d", + xv_sc_dbg(g, EXEC_CHANGE, " target %d vs current %d", link_speed_setting, xve_link_control_status_link_speed_v(link_control_status)); if (err_status == -ETIMEDOUT) { - xv_sc_dbg(EXEC_CHANGE, " Oops timed out?"); + xv_sc_dbg(g, EXEC_CHANGE, " Oops timed out?"); break; } } while (attempts-- > 0 && link_speed_setting != xve_link_control_status_link_speed_v(link_control_status)); - xv_sc_dbg(EXEC_VERIF, "Verifying speed change..."); + xv_sc_dbg(g, EXEC_VERIF, "Verifying speed change..."); /* * Check that the new link speed is actually active. If we failed to @@ -390,10 +390,10 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) if (link_speed_setting != new_link_speed) { u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0)); - xv_sc_dbg(EXEC_VERIF, " Current and target speeds mismatch!"); - xv_sc_dbg(EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x", + xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds mismatch!"); + xv_sc_dbg(g, EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x", g->ops.xve.xve_readl(g, xve_link_control_status_r())); - xv_sc_dbg(EXEC_VERIF, " Link speed is %s - should be %s", + xv_sc_dbg(g, EXEC_VERIF, " Link speed is %s - should be %s", xve_speed_to_str(new_link_speed), xve_speed_to_str(link_speed_setting)); @@ -417,19 +417,19 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed) gk20a_writel(g, xp_pl_link_config_r(0), link_config); err_status = -ENODEV; } else { - xv_sc_dbg(EXEC_VERIF, " Current and target speeds match!"); + xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds match!"); err_status = 0; } done: /* Restore safe timings. 
*/ - xv_sc_dbg(CLEANUP, "Restoring saved DL settings..."); + xv_sc_dbg(g, CLEANUP, "Restoring saved DL settings..."); gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr); - xv_sc_dbg(CLEANUP, " Done"); + xv_sc_dbg(g, CLEANUP, " Done"); - xv_sc_dbg(CLEANUP, "Re-enabling ASPM settings..."); + xv_sc_dbg(g, CLEANUP, "Re-enabling ASPM settings..."); enable_aspm_gp106(g); - xv_sc_dbg(CLEANUP, " Done"); + xv_sc_dbg(g, CLEANUP, " Done"); return err_status; } diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.h b/drivers/gpu/nvgpu/gp106/xve_gp106.h index d48b0991..e0be35ac 100644 --- a/drivers/gpu/nvgpu/gp106/xve_gp106.h +++ b/drivers/gpu/nvgpu/gp106/xve_gp106.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -49,11 +49,11 @@ enum xv_speed_change_steps { CLEANUP }; -#define xv_dbg(fmt, args...) \ - gk20a_dbg(gpu_dbg_xv, fmt, ##args) +#define xv_dbg(g, fmt, args...) \ + nvgpu_log(g, gpu_dbg_xv, fmt, ##args) -#define xv_sc_dbg(step, fmt, args...) \ - xv_dbg("[%d] %15s | " fmt, step, __stringify(step), ##args) +#define xv_sc_dbg(g, step, fmt, args...) \ + xv_dbg(g, "[%d] %15s | " fmt, step, __stringify(step), ##args) void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val); u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg); diff --git a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c index 86a2b751..e2ad1bd3 100644 --- a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c @@ -1,7 +1,7 @@ /* * Pascal GPU series Copy Engine. * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -30,14 +30,14 @@ static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr) { - gk20a_dbg(gpu_dbg_intr, "ce blocking pipe interrupt\n"); + nvgpu_log(g, gpu_dbg_intr, "ce blocking pipe interrupt\n"); return ce_intr_status_blockpipe_pending_f(); } static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr) { - gk20a_dbg(gpu_dbg_intr, "ce launch error interrupt\n"); + nvgpu_log(g, gpu_dbg_intr, "ce launch error interrupt\n"); return ce_intr_status_launcherr_pending_f(); } @@ -47,7 +47,7 @@ void gp10b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base) u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); u32 clear_intr = 0; - gk20a_dbg(gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); + nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); /* clear blocking interrupts: they exibit broken behavior */ if (ce_intr & ce_intr_status_blockpipe_pending_f()) @@ -65,7 +65,7 @@ int gp10b_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base) int ops = 0; u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); - gk20a_dbg(gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); + nvgpu_log(g, gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) { gk20a_writel(g, ce_intr_status_r(inst_id), diff --git a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c index 511d565a..c477c77d 100644 --- a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B GPU FECS traces * - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,7 +43,7 @@ int gp10b_fecs_trace_flush(struct gk20a *g) }; int err; - gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); + nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); err = gr_gk20a_elpg_protected_call(g, gr_gk20a_submit_fecs_method_op(g, op, false)); diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c index 66f3012f..fd4ec34e 100644 --- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c @@ -43,7 +43,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g, { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = nvgpu_mem_rd32(g, mem, ram_in_page_dir_base_fault_replay_tex_w()); @@ -59,7 +59,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g, nvgpu_mem_wr32(g, mem, ram_in_page_dir_base_fault_replay_gcc_w(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int channel_gp10b_commit_userd(struct channel_gk20a *c) @@ -68,12 +68,12 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c) u32 addr_hi; struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); addr_hi = u64_hi32(c->userd_iova); - gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", + nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx", c->chid, (u64)c->userd_iova); nvgpu_mem_wr32(g, &c->inst_block, @@ -98,7 +98,7 @@ int channel_gp10b_setup_ramfc(struct channel_gk20a *c, struct gk20a *g = c->g; struct nvgpu_mem *mem = &c->inst_block; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); @@ -167,8 +167,9 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) { u32 new_syncpt = 0, old_syncpt; u32 v; + struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); v = nvgpu_mem_rd32(c->g, &c->inst_block, ram_fc_allowed_syncpoints_w()); @@ -185,7 +186,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) v = pbdma_allowed_syncpoints_0_valid_f(1); - gk20a_dbg_info("Channel %d, syncpt id %d\n", + nvgpu_log_info(g, "Channel %d, syncpt id %d\n", c->chid, new_syncpt); v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt); @@ -197,7 +198,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) /* enable channel */ gk20a_enable_channel_tsg(c->g, c); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -207,7 +208,7 @@ int gp10b_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, { int ret = ENGINE_INVAL_GK20A; - gk20a_dbg_info("engine type %d", engine_type); + nvgpu_log_info(g, "engine type %d", engine_type); if (engine_type == top_device_info_type_enum_graphics_v()) ret = ENGINE_GR_GK20A; else if (engine_type == top_device_info_type_enum_lce_v()) { @@ -229,13 +230,13 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry, *pri_base = (top_device_info_data_pri_base_v(table_entry) << top_device_info_data_pri_base_align_v()); - gk20a_dbg_info("device info: pri_base: %d", *pri_base); + nvgpu_log_info(g, "device info: pri_base: %d", *pri_base); } if (fault_id && (top_device_info_data_fault_id_v(table_entry) == top_device_info_data_fault_id_valid_v())) { *fault_id = g->ops.fifo.device_info_fault_id(table_entry); - gk20a_dbg_info("device info: fault_id: %d", *fault_id); + nvgpu_log_info(g, "device info: fault_id: %d", *fault_id); } } else nvgpu_err(g, "unknown device_info_data %d", @@ -293,7 +294,7 @@ void 
gp10b_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, u32 fault_info; u32 addr_lo, addr_hi; - gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); + nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id); memset(mmfault, 0, sizeof(*mmfault)); diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c index 0178abbf..bc982d30 100644 --- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c @@ -69,7 +69,7 @@ bool gr_gp10b_is_valid_class(struct gk20a *g, u32 class_num) default: break; } - gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); + nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid); return valid; } @@ -169,7 +169,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, 0); if (lrf_ecc_sed_status) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Single bit error detected in SM LRF!"); gr_gp10b_sm_lrf_ecc_overcount_war(1, @@ -181,7 +181,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, lrf_single_count_delta; } if (lrf_ecc_ded_status) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Double bit error detected in SM LRF!"); gr_gp10b_sm_lrf_ecc_overcount_war(0, @@ -208,7 +208,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) { u32 ecc_stats_reg_val; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Single bit error detected in SM SHM!"); ecc_stats_reg_val = @@ -230,7 +230,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) { u32 ecc_stats_reg_val; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Double bit error detected in SM SHM!"); ecc_stats_reg_val = @@ -260,14 +260,14 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 esr; u32 ecc_stats_reg_val; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); esr = gk20a_readl(g, gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Single bit error detected in TEX!"); /* Pipe 0 counters */ @@ -323,7 +323,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f()); } if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Double bit error detected in TEX!"); /* Pipe 0 counters */ @@ -403,7 +403,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g, u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -660,21 +660,21 @@ static void gr_gp10b_set_coalesce_buffer_size(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); - gk20a_dbg_fn("done"); + 
nvgpu_log_fn(g, "done"); } void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_bes_crop_debug3_r()); if ((data & 1)) { @@ -722,7 +722,7 @@ void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data) int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr, u32 class_num, u32 offset, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (class_num == PASCAL_COMPUTE_A) { switch (offset << 2) { @@ -800,7 +800,7 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (alpha_cb_size > gr->alpha_cb_size) alpha_cb_size = gr->alpha_cb_size; @@ -853,7 +853,7 @@ void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (cb_size_steady > gr->attrib_cb_size) cb_size_steady = gr->attrib_cb_size; @@ -923,7 +923,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) }; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_init_ctx_state(g); if (err) @@ -940,10 +940,10 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) } } - gk20a_dbg_info("preempt image size: %u", + nvgpu_log_info(g, "preempt image size: %u", g->gr.ctx_vars.preempt_image_size); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -952,8 +952,9 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size, struct nvgpu_mem *mem) { int err; + struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); if (err) @@ -1029,9 +1030,9 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, g->gr.max_tpc_count; attrib_cb_size = ALIGN(attrib_cb_size, 128); - gk20a_dbg_info("gfxp context spill_size=%d", spill_size); - gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); - gk20a_dbg_info("gfxp context attrib_cb_size=%d", + nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size); + nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size); + nvgpu_log_info(g, "gfxp context attrib_cb_size=%d", attrib_cb_size); err = gr_gp10b_alloc_buffer(vm, @@ -1112,7 +1113,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, u32 graphics_preempt_mode = 0; u32 compute_preempt_mode = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); if (err) @@ -1137,7 +1138,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, goto fail_free_gk20a_ctx; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; @@ -1215,7 +1216,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1224,21 +1225,21 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, gr_ctx = &tsg->gr_ctx; if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { - gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); + nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_graphics_preemption_options_o(), gfxp_preempt_option); } if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { - gk20a_dbg_info("CILP: %x", 
cilp_preempt_option); + nvgpu_log_info(g, "CILP: %x", cilp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cilp_preempt_option); } if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { - gk20a_dbg_info("CTA: %x", cta_preempt_option); + nvgpu_log_info(g, "CTA: %x", cta_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cta_preempt_option); @@ -1269,7 +1270,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); - gk20a_dbg_info("attrib cb addr : 0x%016x", addr); + nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr); g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> @@ -1315,7 +1316,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, } out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } int gr_gp10b_dump_gr_status_regs(struct gk20a *g, @@ -1475,7 +1476,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms, u32 activity0, activity1, activity2, activity4; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); @@ -1500,7 +1501,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms, gr_activity_empty_or_preempted(activity4)); if (!gr_enabled || (!gr_busy && !ctxsw_active)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -1569,7 +1570,7 @@ void gr_gp10b_commit_global_bundle_cb(struct gk20a *g, data = min_t(u32, data, g->gr.min_gpm_fifo_depth); - gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", + nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d", g->gr.bundle_cb_token_limit, data); gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(), @@ -1626,7 +1627,7 @@ int gr_gp10b_init_fs_state(struct gk20a *g) { u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), @@ -1705,7 +1706,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a { int ret = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); ret = gk20a_disable_channel_tsg(g, fault_ch); if (ret) { @@ -1721,18 +1722,18 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a return ret; } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: tsgid: 0x%x", fault_ch->tsgid); if (gk20a_is_channel_marked_as_tsg(fault_ch)) { gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: preempted tsg"); } else { gk20a_fifo_issue_preempt(g, fault_ch->chid, false); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: preempted channel"); } @@ -1746,7 +1747,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct tsg_gk20a *tsg; struct nvgpu_gr_ctx *gr_ctx; - gk20a_dbg(gpu_dbg_fn | 
gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); tsg = tsg_gk20a_from_ch(fault_ch); if (!tsg) @@ -1755,7 +1756,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, gr_ctx = &tsg->gr_ctx; if (gr_ctx->cilp_preempt_pending) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP is already pending for chid %d", fault_ch->chid); return 0; @@ -1763,7 +1764,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, /* get ctx_id from the ucode image */ if (!gr_ctx->ctx_id_valid) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: looking up ctx id"); ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id); if (ret) { @@ -1773,7 +1774,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, gr_ctx->ctx_id_valid = true; } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: ctx id is 0x%x", gr_ctx->ctx_id); /* send ucode method to set ctxsw interrupt */ @@ -1795,10 +1796,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, return ret; } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: enabled ctxsw completion interrupt"); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: disabling channel %d", fault_ch->chid); @@ -1826,7 +1827,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g, struct tsg_gk20a *tsg; struct nvgpu_gr_ctx *gr_ctx; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); tsg = tsg_gk20a_from_ch(fault_ch); if (!tsg) @@ -1837,7 +1838,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g, /* The ucode is self-clearing, so all we need to do here is to clear cilp_preempt_pending. 
*/ if (!gr_ctx->cilp_preempt_pending) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP is already cleared for chid %d\n", fault_ch->chid); return 0; @@ -1878,7 +1879,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, NVGPU_PREEMPTION_MODE_COMPUTE_CILP); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", gpc, tpc, global_esr); if (cilp_enabled && sm_debugger_attached) { @@ -1900,19 +1901,19 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, if (warp_esr != 0 || (global_esr & global_mask) != 0) { *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n", gpc, tpc); if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n", gpc, tpc); g->ops.gr.suspend_all_sms(g, global_mask, false); gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); } else { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: STOP_TRIGGER from gpc %d tpc %d\n", gpc, tpc); g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true); @@ -1923,11 +1924,11 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, gpc, tpc, sm); g->ops.gr.clear_sm_hww(g, gpc, tpc, sm, global_esr_copy); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: HWWs cleared for gpc %d tpc %d\n", gpc, tpc); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); if (ret) { nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); @@ -1936,7 +1937,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", gpc, tpc); dbgr_control0 = set_field(dbgr_control0, @@ -1945,13 +1946,13 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: resume for gpc %d tpc %d\n", gpc, tpc); g->ops.gr.resume_single_sm(g, gpc, tpc, sm); *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); } *early_exit = true; @@ -1999,14 +2000,14 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g, int ret = 0; struct tsg_gk20a *tsg; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); /* * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR) * indicates that a CILP ctxsw save has finished */ if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | 
gpu_dbg_intr, "CILP: ctxsw save completed!\n"); /* now clear the interrupt */ @@ -2162,7 +2163,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_timeout timeout; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP preempt pending, waiting %lu msecs for preemption", gk20a_get_gr_idle_timeout(g)); @@ -2285,7 +2286,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, if (g->ops.gr.set_ctxsw_preemption_mode) { - gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " + nvgpu_log(g, gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " "graphics_preempt=%d compute_preempt=%d", ch->chid, ch->tsgid, diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c index 71764a7c..f74ca8f3 100644 --- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c @@ -41,7 +41,7 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g) u32 tmp; int ret; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r()); @@ -49,9 +49,9 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g) ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 * ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp); - gk20a_dbg(gpu_dbg_info, "L2 size: %d\n", ret); + nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -83,7 +83,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (max_comptag_lines == 0U) return 0; @@ -109,11 +109,11 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) /* must be a multiple of 64KB */ compbit_backing_size = roundup(compbit_backing_size, 64*1024); - gk20a_dbg_info("compbit backing store size : %d", + nvgpu_log_info(g, "compbit backing store size : %d", compbit_backing_size); - gk20a_dbg_info("max comptag lines : %d", + nvgpu_log_info(g, "max comptag lines : %d", max_comptag_lines); - gk20a_dbg_info("gobs_per_comptagline_per_slice: %d", + nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d", gobs_per_comptagline_per_slice); err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c index dde12854..5969e45d 100644 --- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c @@ -87,7 +87,7 @@ void mc_gp10b_isr_stall(struct gk20a *g) mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); - gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); + nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { active_engine_id = g->fifo.active_engines_list[engine_id_idx]; @@ -126,7 +126,7 @@ void mc_gp10b_isr_stall(struct gk20a *g) g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) g->ops.nvlink.isr(g); - gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); + nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); } diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c index 978b6f50..811697c3 100644 --- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B MMU * - * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -53,7 +53,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g) struct nvgpu_mem *inst_block = &mm->bar1.inst_block; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fb.set_mmu_page_size(g); @@ -73,7 +73,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g) err = gp10b_replayable_pagefault_buffer_init(g); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; } @@ -87,7 +87,7 @@ int gp10b_init_bar2_vm(struct gk20a *g) /* BAR2 aperture size is 32MB */ mm->bar2.aperture_size = 32 << 20; - gk20a_dbg_info("bar2 vm size = 0x%x", mm->bar2.aperture_size); + nvgpu_log_info(g, "bar2 vm size = 0x%x", mm->bar2.aperture_size); mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K, mm->bar2.aperture_size - SZ_4K, @@ -115,12 +115,12 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g) struct nvgpu_mem *inst_block = &mm->bar2.inst_block; u64 inst_pa = nvgpu_inst_block_addr(g, inst_block); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ops.fb.set_mmu_page_size(g); inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v()); - gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa); + nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", (u32)inst_pa); gk20a_writel(g, bus_bar2_block_r(), nvgpu_aperture_mask(g, inst_block, @@ -130,7 +130,7 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g) bus_bar2_block_mode_virtual_f() | bus_bar2_block_ptr_f(inst_pa)); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -433,7 +433,7 @@ void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block, u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); u32 pdb_addr_hi = u64_hi32(pdb_addr); - gk20a_dbg_info("pde pa=0x%llx", pdb_addr); + nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr); nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), nvgpu_aperture_mask(g, vm->pdb.mem, diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c index c94d580a..ca111725 100644 --- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c @@ -37,8 +37,8 @@ #include #include -#define gp10b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gp10b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) /* PROD settings for ELPG sequencing registers*/ static struct pg_init_sequence_list _pginitseq_gp10b[] = { @@ -147,9 +147,9 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, struct pmu_cmd cmd; u32 seq; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); - gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); + gp10b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); if (g->pmu_lsf_pmu_wpr_init_done) { /* send message to load FECS falcon */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -164,13 +164,13 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, cmd.cmd.acr.boot_falcons.usevamask = 0; cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0; cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0; - gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", + gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", falconidmask); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return; } @@ -209,7 +209,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask) static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "GR PARAM cmd aborted"); @@ -217,7 +217,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, return; } - gp10b_dbg_pmu("GR PARAM is acknowledged from PMU %x \n", + gp10b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x \n", msg->msg.pg.msg_type); return; @@ -243,7 +243,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = g->ldiv_slowdown_factor; - gp10b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM "); + gp10b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM "); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_gr_param_msg, pmu, &seq, ~0); @@ -276,7 +276,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g) u32 reg_writes; u32 index; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->elpg_enabled) { reg_writes = ((sizeof(_pginitseq_gp10b) / @@ -288,7 +288,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -305,7 +305,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g) struct nvgpu_pmu *pmu = &g->pmu; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); @@ -333,7 +333,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g) if (err) return err; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c index 385bebbd..3f089545 100644 --- a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B RPFB * - * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -42,7 +42,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g) size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE * fifo_replay_fault_buffer_size_hw_entries_v(); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!g->mm.bar2_desc.gpu_va) { err = nvgpu_dma_alloc_map_sys(vm, rbfb_size, @@ -60,7 +60,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g) gk20a_writel(g, fifo_replay_fault_buffer_lo_r(), fifo_replay_fault_buffer_lo_base_f(addr_lo) | fifo_replay_fault_buffer_lo_enable_true_v()); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -75,14 +75,14 @@ u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g) { u32 get_idx = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r()); if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) nvgpu_err(g, "Error in replayable fault buffer"); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return get_idx; } @@ -90,13 +90,13 @@ u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g) { u32 put_idx = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r()); if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) nvgpu_err(g, "Error in UVM"); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return put_idx; } diff --git a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c index c69bd0bb..4f1de559 100644 --- a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c @@ -1,7 +1,7 @@ /* * GP10B Therm * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -33,7 +33,7 @@ int gp10b_init_therm_setup_hw(struct gk20a *g) { u32 v; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* program NV_THERM registers */ gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | @@ -96,7 +96,7 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g) u32 active_engine_id = 0; struct fifo_gk20a *f = &g->fifo; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (engine_id = 0; engine_id < f->num_engines; engine_id++) { active_engine_id = f->active_engines_list[engine_id]; @@ -130,6 +130,6 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g) idle_filter &= ~therm_hubmmu_idle_filter_value_m(); gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c index 7ca8c703..673cb7f2 100644 --- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,8 +43,8 @@ #include /*Defines*/ -#define gv11b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gv11b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value) { @@ -60,7 +60,7 @@ int gv11b_alloc_blob_space(struct gk20a *g, { int err; - gv11b_dbg_pmu("alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS"); + gv11b_dbg_pmu(g, "alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS"); err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS, size, mem); @@ -87,10 +87,10 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g) start = nvgpu_mem_get_addr(g, &acr->ucode_blob); size = acr->ucode_blob.size; - gv11b_dbg_pmu("acr ucode blob start %llx\n", start); - gv11b_dbg_pmu("acr ucode blob size %x\n", size); + gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start); + gv11b_dbg_pmu(g, "acr ucode blob size %x\n", size); - gv11b_dbg_pmu(""); + gv11b_dbg_pmu(g, " "); if (!acr_fw) { /*First time init case*/ @@ -110,17 +110,17 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g) acr->fw_hdr->hdr_offset); img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256); - gv11b_dbg_pmu("sig dbg offset %u\n", + gv11b_dbg_pmu(g, "sig dbg offset %u\n", acr->fw_hdr->sig_dbg_offset); - gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size); - gv11b_dbg_pmu("sig prod offset %u\n", + gv11b_dbg_pmu(g, "sig dbg size %u\n", acr->fw_hdr->sig_dbg_size); + gv11b_dbg_pmu(g, "sig prod offset %u\n", acr->fw_hdr->sig_prod_offset); - gv11b_dbg_pmu("sig prod size %u\n", + gv11b_dbg_pmu(g, "sig prod size %u\n", acr->fw_hdr->sig_prod_size); - gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc); - gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig); - gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset); - gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size); + gv11b_dbg_pmu(g, "patch loc %u\n", acr->fw_hdr->patch_loc); + gv11b_dbg_pmu(g, "patch sig %u\n", acr->fw_hdr->patch_sig); + gv11b_dbg_pmu(g, "header offset %u\n", acr->fw_hdr->hdr_offset); + gv11b_dbg_pmu(g, "header size %u\n", acr->fw_hdr->hdr_size); /* Lets patch the signatures first.. */ if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load, @@ -144,7 +144,7 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g) } for (index = 0; index < 9; index++) - gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n", + gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n", acr_ucode_header_t210_load[index]); acr_dmem = (u64 *) @@ -212,7 +212,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; u32 dst; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | @@ -237,7 +237,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu, (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, pmu_bl_gm10x_desc->bl_start_tag); - gv11b_dbg_pmu("Before starting falcon with BL\n"); + gv11b_dbg_pmu(g, "Before starting falcon with BL\n"); virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; @@ -281,7 +281,7 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g, struct nvgpu_pmu *pmu = &g->pmu; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&pmu->isr_mutex); nvgpu_flcn_reset(pmu->flcn); diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c index bb7c37bd..b4e2cb79 100644 --- a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c @@ -1,7 +1,7 @@ /* * GV11B Cycle stats snapshots support * - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -148,7 +148,7 @@ int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch, perf_pmasys_mem_block_target_lfb_f())); - gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n"); return 0; @@ -186,7 +186,7 @@ void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr) memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); data->hw_snapshot = NULL; - gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); + nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n"); } int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c index db09016c..5dea7654 100644 --- a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c @@ -57,7 +57,7 @@ int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) u32 inst_pa_page; int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); err = gk20a_busy(g); if (err) { nvgpu_err(g, "failed to poweron"); @@ -100,7 +100,7 @@ int gv11b_perfbuf_disable_locked(struct gk20a *g) { int err; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); err = gk20a_busy(g); if (err) { nvgpu_err(g, "failed to poweron"); diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c index 30a2bca2..8bbde5c3 100644 --- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c @@ -1427,7 +1427,7 @@ static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g, u32 reg_val; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_mutex_acquire(&g->mm.tlb_lock); diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c index 11b393e5..932e7626 100644 --- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c @@ -60,7 +60,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) { - + struct gk20a *g = tsg->g; u32 runlist_entry_0 = ram_rl_entry_type_tsg_v(); if (tsg->timeslice_timeout) @@ -79,7 +79,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid); runlist[3] = 0; - gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n", + nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n", runlist[0], runlist[1], runlist[2], runlist[3]); } @@ -119,7 +119,7 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist) ram_rl_entry_chid_f(c->chid); runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi); - gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", + nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", runlist[0], runlist[1], runlist[2], runlist[3]); } @@ -139,7 +139,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c, struct nvgpu_mem *mem = &c->inst_block; u32 data; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); @@ -211,10 +211,11 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c, void gv11b_ring_channel_doorbell(struct channel_gk20a *c) { - struct fifo_gk20a *f = &c->g->fifo; + struct gk20a *g = c->g; + struct fifo_gk20a *f = &g->fifo; u32 
hw_chid = f->channel_base + c->chid; - gk20a_dbg_info("channel ring door bell %d\n", c->chid); + nvgpu_log_info(g, "channel ring door bell %d\n", c->chid); nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(), usermode_notify_channel_pending_id_f(hw_chid)); @@ -256,7 +257,7 @@ void channel_gv11b_unbind(struct channel_gk20a *ch) { struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) { gk20a_writel(g, ccsr_channel_inst_r(ch->chid), @@ -729,7 +730,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id, timeout_rc_type); if (func_ret != 0) { - gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id); + nvgpu_log_info(g, "preempt timeout pbdma %d", pbdma_id); ret |= func_ret; } } @@ -743,7 +744,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, timeout_rc_type); if (func_ret != 0) { - gk20a_dbg_info("preempt timeout engine %d", act_eng_id); + nvgpu_log_info(g, "preempt timeout engine %d", act_eng_id); ret |= func_ret; } } @@ -812,10 +813,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) u32 mutex_ret = 0; u32 runlist_id; - gk20a_dbg_fn("%d", tsgid); + nvgpu_log_fn(g, "%d", tsgid); runlist_id = f->tsg[tsgid].runlist_id; - gk20a_dbg_fn("runlist_id %d", runlist_id); + nvgpu_log_fn(g, "runlist_id %d", runlist_id); nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); @@ -839,7 +840,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask) u32 mutex_ret = 0; u32 runlist_id; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) { if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id)) @@ -910,11 +911,11 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, return -EINVAL; if (runlist_id >= g->fifo.max_runlists) { - gk20a_dbg_info("runlist_id = %d", runlist_id); + nvgpu_log_info(g, "runlist_id = %d", runlist_id); return -EINVAL; } - gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id); + nvgpu_log_fn(g, "preempt id = %d, runlist_id = %d", id, runlist_id); nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock); @@ -1155,7 +1156,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g) unsigned int i; u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* enable pmc pfifo */ g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); @@ -1208,11 +1209,11 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g) gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF); intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); - gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); + nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i)); - gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall); + nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall); gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall); } @@ -1246,12 +1247,12 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g) /* clear and enable pfifo interrupt */ gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); mask = gv11b_fifo_intr_0_en_mask(g); - gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); + nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask); gk20a_writel(g, fifo_intr_en_0_r(), mask); - gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); + nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000"); 
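Where a converted function only has a channel or TSG pointer in scope, the fifo_gv11b.c hunks above add a local struct gk20a *g (tsg->g, c->g) so the new log calls have a device to hand; other files in this change derive it with gk20a_from_vm(vm). A minimal sketch of that idiom, using invented stand-in types rather than the real nvgpu structures:

	#include <stdio.h>

	/* Invented, heavily trimmed stand-ins for the nvgpu structures. */
	struct gk20a { const char *name; };
	struct tsg_gk20a { struct gk20a *g; int tsgid; };

	/* Stand-in for nvgpu_log_info(g, ...). */
	#define log_info(g, fmt, ...) \
		printf("[%s] " fmt "\n", (g)->name, ##__VA_ARGS__)

	static void get_tsg_runlist_entry(struct tsg_gk20a *tsg, unsigned int *runlist)
	{
		/* Local device pointer added purely so logging has somewhere to go. */
		struct gk20a *g = tsg->g;

		runlist[0] = 0;   /* the real function builds the runlist words here */
		log_info(g, "tsg %d runlist [0] %x", tsg->tsgid, runlist[0]);
	}

	int main(void)
	{
		struct gk20a gpu = { .name = "gv11b" };
		struct tsg_gk20a tsg = { .g = &gpu, .tsgid = 3 };
		unsigned int runlist[4] = { 0 };

		get_tsg_runlist_entry(&tsg, runlist);
		return 0;
	}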
gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -1350,7 +1351,7 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id, tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info); } - gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid); + nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid); /* * STATUS indicates whether the context request ack was eventually @@ -1391,14 +1392,14 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id, if (*info_status == fifo_intr_ctxsw_timeout_info_status_ack_received_v()) { - gk20a_dbg_info("ctxsw timeout info : ack received"); + nvgpu_log_info(g, "ctxsw timeout info : ack received"); /* no need to recover */ tsgid = FIFO_INVAL_TSG_ID; } else if (*info_status == fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) { - gk20a_dbg_info("ctxsw timeout info : dropped timeout"); + nvgpu_log_info(g, "ctxsw timeout info : dropped timeout"); /* no need to recover */ tsgid = FIFO_INVAL_TSG_ID; @@ -1429,7 +1430,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr) timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r()); timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val); - gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val); + nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val); for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { active_eng_id = g->fifo.active_engines_list[engine_id]; @@ -1469,7 +1470,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr) true, true, verbose, RC_TYPE_CTXSW_TIMEOUT); } else { - gk20a_dbg_info( + nvgpu_log_info(g, "fifo is waiting for ctx switch: " "for %d ms, %s=%d", ms, "tsg", tsgid); } @@ -1490,7 +1491,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, pbdma_intr_0, handled, error_notifier); if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { - gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d", + nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d", pbdma_id); gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); *handled |= pbdma_intr_0_clear_faulted_error_pending_f(); @@ -1498,7 +1499,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, } if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { - gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d", + nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d", pbdma_id); *handled |= pbdma_intr_0_eng_reset_pending_f(); rc_type = RC_TYPE_PBDMA_FAULT; @@ -1545,7 +1546,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g, return RC_TYPE_NO_RC; if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) { - gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", + nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", pbdma_id); nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ", pbdma_id, pbdma_intr_1); @@ -1753,7 +1754,7 @@ void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g, u64 gpu_va = gpu_va_base + nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); off = cmd->off + off; @@ -1792,7 +1793,7 @@ void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g, { u32 off = cmd->off; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* semaphore_a */ nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c index 52e442f3..536d9dcb 100644 --- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c +++ 
b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c @@ -96,7 +96,7 @@ bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num) default: break; } - gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); + nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid); return valid; } @@ -190,7 +190,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status); if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow); @@ -205,7 +205,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow); @@ -282,7 +282,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status); if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow); @@ -297,7 +297,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow); @@ -441,7 +441,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status); if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow); @@ -456,7 +456,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow); @@ -521,7 +521,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status); if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM L1 data! 
err_mask [%08x] is_overf [%d]", l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow); @@ -536,7 +536,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]", l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow); @@ -605,7 +605,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status); if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow); @@ -620,7 +620,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc, 0); } if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow); @@ -1129,14 +1129,14 @@ static void gr_gv11b_set_coalesce_buffer_size(struct gk20a *g, u32 data) { u32 val; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data) @@ -1144,7 +1144,7 @@ static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data) u32 val; bool flag; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r()); flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 
1 : 0; @@ -1190,7 +1190,7 @@ static void gr_gv11b_set_skedcheck(struct gk20a *g, u32 data) static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(), @@ -1224,7 +1224,7 @@ static void gr_gv11b_set_shader_cut_collector(struct gk20a *g, u32 data) int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr, u32 class_num, u32 offset, u32 data) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (class_num == VOLTA_COMPUTE_A) { switch (offset << 2) { @@ -1315,7 +1315,7 @@ void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) u32 pd_ab_max_output; u32 alpha_cb_size = data * 4; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (alpha_cb_size > gr->alpha_cb_size) alpha_cb_size = gr->alpha_cb_size; @@ -1360,7 +1360,7 @@ void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data) u32 gpc_index, ppc_index, stride, val; u32 cb_size_steady = data * 4, cb_size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (cb_size_steady > gr->attrib_cb_size) cb_size_steady = gr->attrib_cb_size; @@ -1423,8 +1423,9 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size, struct nvgpu_mem *mem) { int err; + struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); if (err) @@ -1500,9 +1501,9 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g, g->gr.max_tpc_count; attrib_cb_size = ALIGN(attrib_cb_size, 128); - gk20a_dbg_info("gfxp context spill_size=%d", spill_size); - gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); - gk20a_dbg_info("gfxp context attrib_cb_size=%d", + nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size); + nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size); + nvgpu_log_info(g, "gfxp context attrib_cb_size=%d", attrib_cb_size); err = gr_gp10b_alloc_buffer(vm, @@ -1590,7 +1591,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -1600,7 +1601,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { - gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); + nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_graphics_preemption_options_o(), gfxp_preempt_option); @@ -1608,7 +1609,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { - gk20a_dbg_info("CILP: %x", cilp_preempt_option); + nvgpu_log_info(g, "CILP: %x", cilp_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cilp_preempt_option); @@ -1616,7 +1617,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { - gk20a_dbg_info("CTA: %x", cta_preempt_option); + nvgpu_log_info(g, "CTA: %x", cta_preempt_option); nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(), cta_preempt_option); @@ -1647,7 +1648,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); - gk20a_dbg_info("attrib cb addr : 0x%016x", addr); + 
nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr); g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> @@ -1698,7 +1699,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, } out: - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); } static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g, struct gk20a_debug_output *o, @@ -1949,7 +1950,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms, u32 activity0, activity1, activity2, activity4; struct nvgpu_timeout timeout; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); @@ -1974,7 +1975,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms, gr_activity_empty_or_preempted(activity4)); if (!gr_enabled || (!gr_busy && !ctxsw_active)) { - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -2191,7 +2192,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, NVGPU_PREEMPTION_MODE_COMPUTE_CILP); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d sm %d = 0x%08x", gpc, tpc, sm, global_esr); @@ -2210,13 +2211,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, if (warp_esr != 0 || (global_esr & global_mask) != 0) { *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: starting wait for LOCKED_DOWN on " "gpc %d tpc %d sm %d", gpc, tpc, sm); if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Broadcasting STOP_TRIGGER from " "gpc %d tpc %d sm %d", gpc, tpc, sm); @@ -2225,7 +2226,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); } else { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: STOP_TRIGGER from " "gpc %d tpc %d sm %d", gpc, tpc, sm); @@ -2238,12 +2239,12 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, gpc, tpc, sm); g->ops.gr.clear_sm_hww(g, gpc, tpc, sm, global_esr_copy); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: HWWs cleared for " "gpc %d tpc %d sm %d", gpc, tpc, sm); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); if (ret) { nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); @@ -2252,7 +2253,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset); if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) { - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: clearing SINGLE_STEP_MODE " "before resume for gpc %d tpc %d sm %d", gpc, tpc, sm); @@ -2262,13 +2263,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0); } - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: resume for gpc %d tpc %d sm %d", gpc, tpc, sm); g->ops.gr.resume_single_sm(g, gpc, tpc, sm); *ignore_debugger = true; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, 
tpc %d sm %d", gpc, tpc, sm); } @@ -2388,7 +2389,7 @@ int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr) GPU_LIT_NUM_TPC_PER_GPC); u32 num_tpcs = num_gpcs * num_tpc_per_gpc; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!gr->map_tiles) return -1; @@ -2535,7 +2536,7 @@ void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, { u32 val, i, j; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) { val = @@ -2666,8 +2667,9 @@ int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va) u32 addr_hi; struct ctx_header_desc *ctx; int err; + struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gv11b_alloc_subctx_header(c); if (err) @@ -2704,7 +2706,7 @@ int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c) u32 pe_vaf; u32 pe_vsc_vpc; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); ds_debug = gk20a_readl(g, gr_ds_debug_r()); @@ -2814,7 +2816,7 @@ void gr_gv11b_load_tpc_mask(struct gk20a *g) } } - gk20a_dbg_info("pes_tpc_mask %u\n", pes_tpc_mask); + nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask); fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc); if (g->tpc_fs_mask_user && g->tpc_fs_mask_user != fuse_tpc_mask && @@ -2860,7 +2862,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g) u32 ver = g->params.gpu_arch + g->params.gpu_impl; u32 rev = g->params.gpu_rev; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), @@ -2928,14 +2930,14 @@ void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc, u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sm tpc esr sm sel reg val: 0x%x", reg_val); *esr_sm_sel = 0; if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val)) *esr_sm_sel = 1; if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val)) *esr_sm_sel |= 1 << 1; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "esr_sm_sel bitmask: 0x%x", *esr_sm_sel); } @@ -2954,7 +2956,7 @@ int gv11b_gr_sm_trigger_suspend(struct gk20a *g) gk20a_writel(g, gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "stop trigger enable: broadcast dbgr_control0: 0x%x ", dbgr_control0); @@ -3012,19 +3014,19 @@ void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state) /* Only for debug purpose */ for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { - gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n", sm_id, w_state[sm_id].valid_warps[0]); - gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n", sm_id, w_state[sm_id].valid_warps[1]); - gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n", sm_id, w_state[sm_id].trapped_warps[0]); - gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n", sm_id, w_state[sm_id].trapped_warps[1]); - gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n", sm_id, w_state[sm_id].paused_warps[0]); - 
gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", + nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n", sm_id, w_state[sm_id].paused_warps[1]); } } @@ -3257,7 +3259,7 @@ bool gv11b_gr_sm_debugger_attached(struct gk20a *g) */ debugger_mode = gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Debugger Mode: %d", debugger_mode); if (debugger_mode == gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v()) @@ -3576,7 +3578,7 @@ static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g, dbgr_status0, dbgr_control0, warps_valid, warps_paused, warps_trapped); else - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx " "PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n", dbgr_status0, dbgr_control0, warps_valid, @@ -3598,7 +3600,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g, gk20a_gr_tpc_offset(g, tpc) + gv11b_gr_sm_offset(g, sm); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: locking down SM%d", gpc, tpc, sm); nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), @@ -3642,7 +3644,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g, } if (locked_down || no_error_pending) { - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d: locked down SM%d", gpc, tpc, sm); return 0; } @@ -3677,7 +3679,7 @@ int gv11b_gr_lock_down_sm(struct gk20a *g, u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) + gv11b_gr_sm_offset(g, sm); - gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); /* assert stop trigger */ @@ -3699,13 +3701,13 @@ void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, global_esr); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "Cleared HWW global esr, current reg val: 0x%x", gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset)); gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0); - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "Cleared HWW warp esr, current reg val: 0x%x", gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset)); @@ -4440,7 +4442,7 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr, { u32 gpc_addr; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); /* setup defaults */ *addr_type = CTXSW_ADDR_TYPE_SYS; @@ -4591,12 +4593,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, t = 0; *num_registers = 0; - gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, &gpc_num, &tpc_num, &ppc_num, &be_num, &broadcast_flags); - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); if (err) return err; @@ -4690,7 +4692,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || (addr_type == CTXSW_ADDR_TYPE_ETPC)) && g->ops.gr.egpc_etpc_priv_addr_table) { - gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); + nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); 
g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, broadcast_flags, priv_addr_table, &t); } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c index 9f6d176e..9f9ff337 100644 --- a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c @@ -56,7 +56,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g) u32 ltc_intr; u32 reg; - gk20a_dbg_info("initialize gv11b l2"); + nvgpu_log_info(g, "initialize gv11b l2"); g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | mc_enable_l2_enabled_f()); @@ -67,7 +67,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g) g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); - gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count); + nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count); /* Disable LTC interrupts */ reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c index b46ecb0a..f4084ad6 100644 --- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c @@ -54,7 +54,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block, { struct gk20a *g = gk20a_from_vm(vm); - gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", + nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p", nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); g->ops.mm.init_pdb(g, inst_block, vm); diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c index c1b519d0..3f0e2f22 100644 --- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c @@ -37,8 +37,8 @@ #include -#define gv11b_dbg_pmu(fmt, arg...) \ - gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) +#define gv11b_dbg_pmu(g, fmt, arg...) 
\ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) #define ALIGN_4KB 12 @@ -121,7 +121,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g) u32 reg_writes; u32 index; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->elpg_enabled) { reg_writes = ((sizeof(_pginitseq_gv11b) / @@ -133,7 +133,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g) } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return ret; } @@ -187,7 +187,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu) u64 addr_code_hi, addr_data_hi; u32 i, blocks, addr_args; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gk20a_writel(g, pwr_falcon_itfen_r(), gk20a_readl(g, pwr_falcon_itfen_r()) | @@ -407,28 +407,28 @@ u32 gv11b_pmu_get_irqdest(struct gk20a *g) static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "Sub-feature mask update cmd aborted\n"); return; } - gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n", + gv11b_dbg_pmu(g, "sub-feature mask update is acknowledged from PMU %x\n", msg->msg.pg.msg_type); } static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "GR PARAM cmd aborted\n"); return; } - gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n", + gv11b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x\n", msg->msg.pg.msg_type); } @@ -450,7 +450,7 @@ int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) cmd.cmd.pg.gr_init_param_v1.featuremask = NVGPU_PMU_GR_FEATURE_MASK_ALL; - gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); + gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_param_msg, pmu, &seq, ~0); @@ -488,7 +488,7 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id) NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC | NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG; - gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); + gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0); } else diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c index 067c464b..961ab5c0 100644 --- a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c @@ -34,7 +34,7 @@ int gv11b_init_therm_setup_hw(struct gk20a *g) { u32 v; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* program NV_THERM registers */ gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | @@ -108,7 +108,7 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g) if (nvgpu_platform_is_simulation(g)) return 0; - gk20a_dbg_info("init clock/power gate reg"); + nvgpu_log_info(g, "init clock/power gate reg"); for (engine_id = 0; engine_id < f->num_engines; engine_id++) { active_engine_id = f->active_engines_list[engine_id]; diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.c b/drivers/gpu/nvgpu/lpwr/lpwr.c index c470f330..751965e6 100644 --- a/drivers/gpu/nvgpu/lpwr/lpwr.c +++ b/drivers/gpu/nvgpu/lpwr/lpwr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -185,7 +185,7 @@ u32 nvgpu_lpwr_pg_setup(struct gk20a *g) { u32 err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = get_lpwr_gr_table(g); if (err) @@ -206,7 +206,7 @@ static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g, { u32 *ack_status = param; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (status != 0) { nvgpu_err(g, "LWPR PARAM cmd aborted"); @@ -227,7 +227,7 @@ int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate) struct clk_set_info *pstate_info; u32 ack_status = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); pstate_info = pstate_get_clk_set_info(g, pstate, clkwhich_mclk); @@ -308,7 +308,7 @@ u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num) struct pstate *pstate = pstate_find(g, pstate_num); u32 ms_idx; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!pstate) return 0; @@ -329,7 +329,7 @@ u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num) struct pstate *pstate = pstate_find(g, pstate_num); u32 idx; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!pstate) return 0; @@ -350,7 +350,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock) u32 is_rppg_supported = 0; u32 present_pstate = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (pstate_lock) nvgpu_clk_arb_pstate_change_lock(g, true); @@ -387,7 +387,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock) u32 is_rppg_supported = 0; u32 present_pstate = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (pstate_lock) nvgpu_clk_arb_pstate_change_lock(g, true); @@ -417,6 +417,6 @@ exit_unlock: if (pstate_lock) nvgpu_clk_arb_pstate_change_lock(g, false); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return status; } diff --git a/drivers/gpu/nvgpu/perf/perf.c b/drivers/gpu/nvgpu/perf/perf.c index 55e67b15..bf63e1ea 100644 --- a/drivers/gpu/nvgpu/perf/perf.c +++ b/drivers/gpu/nvgpu/perf/perf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -37,7 +37,7 @@ static void perfrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, struct perfrpc_pmucmdhandler_params *phandlerparams = (struct perfrpc_pmucmdhandler_params *)param; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (msg->msg.perf.msg_type != NV_PMU_PERF_MSG_ID_RPC) { nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x", @@ -53,7 +53,7 @@ static int pmu_handle_perf_event(struct gk20a *g, void *pmu_msg) { struct nv_pmu_perf_msg *msg = (struct nv_pmu_perf_msg *)pmu_msg; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); switch (msg->msg_type) { case NV_PMU_PERF_MSG_ID_VFE_CALLBACK: nvgpu_clk_arb_schedule_vf_table_update(g); diff --git a/drivers/gpu/nvgpu/perf/vfe_equ.c b/drivers/gpu/nvgpu/perf/vfe_equ.c index 2493061e..8321d98d 100644 --- a/drivers/gpu/nvgpu/perf/vfe_equ.c +++ b/drivers/gpu/nvgpu/perf/vfe_equ.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -59,7 +59,7 @@ static u32 _vfe_equs_pmudata_instget(struct gk20a *g, struct nv_pmu_perf_vfe_equ_boardobj_grp_set *pgrp_set = (struct nv_pmu_perf_vfe_equ_boardobj_grp_set *)pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /* check whether pmuboardobjgrp has a valid boardobj in index */ if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS) @@ -67,7 +67,7 @@ static u32 _vfe_equs_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -77,7 +77,7 @@ u32 vfe_equ_sw_setup(struct gk20a *g) struct boardobjgrp *pboardobjgrp = NULL; struct vfe_equs *pvfeequobjs; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e255(g, &g->perf_pmu.vfe_equobjs.super); if (status) { @@ -109,7 +109,7 @@ u32 vfe_equ_sw_setup(struct gk20a *g) goto done; done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -118,7 +118,7 @@ u32 vfe_equ_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->perf_pmu.vfe_equobjs.super.super; @@ -127,7 +127,7 @@ u32 vfe_equ_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -152,7 +152,7 @@ static u32 devinit_get_vfe_equ_table(struct gk20a *g, struct vfe_equ_quadratic quadratic; } equ_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); vfeequs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, @@ -325,7 +325,7 @@ static u32 devinit_get_vfe_equ_table(struct gk20a *g, } } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -337,7 +337,7 @@ static u32 _vfe_equ_pmudatainit_super(struct gk20a *g, struct vfe_equ *pvfe_equ; struct nv_pmu_vfe_equ *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -392,7 +392,7 @@ static u32 _vfe_equ_pmudatainit_compare(struct gk20a *g, struct vfe_equ_compare *pvfe_equ_compare; struct nv_pmu_vfe_equ_compare *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -451,7 +451,7 @@ static u32 _vfe_equ_pmudatainit_minmax(struct gk20a *g, struct vfe_equ_minmax *pvfe_equ_minmax; struct nv_pmu_vfe_equ_minmax *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -507,7 +507,7 @@ static u32 _vfe_equ_pmudatainit_quadratic(struct gk20a *g, struct nv_pmu_vfe_equ_quadratic *pset; u32 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_equ_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -558,7 +558,7 @@ static struct vfe_equ *construct_vfe_equ(struct gk20a *g, void *pargs) struct boardobj *board_obj_ptr = NULL; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); switch (BOARDOBJ_GET_TYPE(pargs)) { case CTRL_PERF_VFE_EQU_TYPE_COMPARE: @@ -584,7 +584,7 @@ static struct vfe_equ *construct_vfe_equ(struct gk20a *g, void *pargs) if (status) return NULL; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return (struct 
vfe_equ *)board_obj_ptr; } diff --git a/drivers/gpu/nvgpu/perf/vfe_var.c b/drivers/gpu/nvgpu/perf/vfe_var.c index a44c39ad..8b95230e 100644 --- a/drivers/gpu/nvgpu/perf/vfe_var.c +++ b/drivers/gpu/nvgpu/perf/vfe_var.c @@ -69,7 +69,7 @@ static u32 _vfe_vars_pmudata_instget(struct gk20a *g, (struct nv_pmu_perf_vfe_var_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (idx >= CTRL_BOARDOBJGRP_E32_MAX_OBJECTS) @@ -78,7 +78,7 @@ static u32 _vfe_vars_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -105,7 +105,7 @@ u32 vfe_var_sw_setup(struct gk20a *g) struct boardobjgrp *pboardobjgrp = NULL; struct vfe_vars *pvfevarobjs; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e32(g, &g->perf_pmu.vfe_varobjs.super); if (status) { @@ -148,7 +148,7 @@ u32 vfe_var_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -157,7 +157,7 @@ u32 vfe_var_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->perf_pmu.vfe_varobjs.super.super; @@ -166,7 +166,7 @@ u32 vfe_var_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -301,7 +301,7 @@ static u32 _vfe_var_pmudatainit_super(struct gk20a *g, struct vfe_var *pvfe_var; struct nv_pmu_vfe_var *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -329,7 +329,7 @@ static u32 vfe_var_construct_super(struct gk20a *g, struct vfe_var *ptmpvar = (struct vfe_var *)pargs; u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_construct_super(g, ppboardobj, size, pargs); if (status) @@ -345,7 +345,7 @@ static u32 vfe_var_construct_super(struct gk20a *g, pvfevar->b_is_dynamic_valid = false; status = boardobjgrpmask_e32_init(&pvfevar->mask_dependent_vars, NULL); status = boardobjgrpmask_e255_init(&pvfevar->mask_dependent_equs, NULL); - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); return status; } @@ -356,7 +356,7 @@ static u32 _vfe_var_pmudatainit_derived(struct gk20a *g, { u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_super(g, board_obj_ptr, ppmudata); @@ -392,7 +392,7 @@ static u32 _vfe_var_pmudatainit_derived_product(struct gk20a *g, struct vfe_var_derived_product *pvfe_var_derived_product; struct nv_pmu_vfe_var_derived_product *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_derived(g, board_obj_ptr, ppmudata); if (status != 0) @@ -446,7 +446,7 @@ static u32 _vfe_var_pmudatainit_derived_sum(struct gk20a *g, struct vfe_var_derived_sum *pvfe_var_derived_sum; struct nv_pmu_vfe_var_derived_sum *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_derived(g, board_obj_ptr, ppmudata); if (status != 0) @@ -498,7 +498,7 @@ static u32 _vfe_var_pmudatainit_single(struct gk20a *g, struct vfe_var_single *pvfe_var_single; struct nv_pmu_vfe_var_single *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status != 0) @@ -520,7 +520,7 @@ 
static u32 _vfe_var_pmudatainit_single_frequency(struct gk20a *g, { u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata); @@ -535,7 +535,7 @@ static u32 vfe_var_construct_single_frequency(struct gk20a *g, struct vfe_var_single_frequency *pvfevar; u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (BOARDOBJ_GET_TYPE(pargs) != CTRL_PERF_VFE_VAR_TYPE_SINGLE_FREQUENCY) return -EINVAL; @@ -553,7 +553,7 @@ static u32 vfe_var_construct_single_frequency(struct gk20a *g, pvfevar->super.super.b_is_dynamic = false; pvfevar->super.super.b_is_dynamic_valid = true; - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -563,7 +563,7 @@ static u32 _vfe_var_pmudatainit_single_sensed(struct gk20a *g, { u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata); @@ -578,7 +578,7 @@ static u32 _vfe_var_pmudatainit_single_sensed_fuse(struct gk20a *g, struct vfe_var_single_sensed_fuse *pvfe_var_single_sensed_fuse; struct nv_pmu_vfe_var_single_sensed_fuse *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_single_sensed(g, board_obj_ptr, ppmudata); if (status != 0) @@ -614,7 +614,7 @@ static u32 vfe_var_construct_single_sensed(struct gk20a *g, u32 status = 0; - gk20a_dbg_info(" "); + nvgpu_log_info(g, " "); ptmpobj->type_mask |= BIT(CTRL_PERF_VFE_VAR_TYPE_SINGLE_SENSED); status = vfe_var_construct_single(g, ppboardobj, size, pargs); @@ -626,7 +626,7 @@ static u32 vfe_var_construct_single_sensed(struct gk20a *g, pvfevar->super.super.super.pmudatainit = _vfe_var_pmudatainit_single_sensed; - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -641,7 +641,7 @@ static u32 vfe_var_construct_single_sensed_fuse(struct gk20a *g, (struct vfe_var_single_sensed_fuse *)pargs; u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (BOARDOBJ_GET_TYPE(pargs) != CTRL_PERF_VFE_VAR_TYPE_SINGLE_SENSED_FUSE) return -EINVAL; @@ -704,7 +704,7 @@ static u32 _vfe_var_pmudatainit_single_sensed_temp(struct gk20a *g, struct vfe_var_single_sensed_temp *pvfe_var_single_sensed_temp; struct nv_pmu_vfe_var_single_sensed_temp *pset; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_single_sensed(g, board_obj_ptr, ppmudata); if (status != 0) @@ -769,7 +769,7 @@ static u32 _vfe_var_pmudatainit_single_voltage(struct gk20a *g, { u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = _vfe_var_pmudatainit_single(g, board_obj_ptr, ppmudata); @@ -808,7 +808,7 @@ static struct vfe_var *construct_vfe_var(struct gk20a *g, void *pargs) struct boardobj *board_obj_ptr = NULL; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); switch (BOARDOBJ_GET_TYPE(pargs)) { case CTRL_PERF_VFE_VAR_TYPE_DERIVED_PRODUCT: status = vfe_var_construct_derived_product(g, &board_obj_ptr, @@ -850,7 +850,7 @@ static struct vfe_var *construct_vfe_var(struct gk20a *g, void *pargs) if (status) return NULL; - gk20a_dbg_info("done"); + nvgpu_log_info(g, "done"); return (struct vfe_var *)board_obj_ptr; } @@ -877,7 +877,7 @@ static u32 devinit_get_vfe_var_table(struct gk20a *g, struct vfe_var_single_sensed_temp single_sensed_temp; } var_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); vfevars_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, @@ -1031,7 +1031,7 @@ static u32 devinit_get_vfe_var_table(struct gk20a *g, } pvfevarobjs->polling_periodms = 
vfevars_tbl_header.polling_periodms; done: - gk20a_dbg_info("done status %x", status); + nvgpu_log_info(g, "done status %x", status); return status; } @@ -1043,7 +1043,7 @@ static u32 vfe_var_construct_single(struct gk20a *g, struct vfe_var_single *pvfevar; u32 status = 0; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); ptmpobj->type_mask |= BIT(CTRL_PERF_VFE_VAR_TYPE_SINGLE); status = vfe_var_construct_super(g, ppboardobj, size, pargs); @@ -1058,6 +1058,6 @@ static u32 vfe_var_construct_single(struct gk20a *g, pvfevar->override_type = CTRL_PERF_VFE_VAR_SINGLE_OVERRIDE_TYPE_NONE; pvfevar->override_value = 0; - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.c b/drivers/gpu/nvgpu/pmgr/pwrdev.c index 7f4ab716..da51ac4b 100644 --- a/drivers/gpu/nvgpu/pmgr/pwrdev.c +++ b/drivers/gpu/nvgpu/pmgr/pwrdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,7 +36,7 @@ static u32 _pwr_device_pmudata_instget(struct gk20a *g, struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice = (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -46,7 +46,7 @@ static u32 _pwr_device_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &ppmgrdevice->devices[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -122,7 +122,7 @@ static struct boardobj *construct_pwr_device(struct gk20a *g, pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx]; } - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return board_obj_ptr; } @@ -145,7 +145,7 @@ static u32 devinit_get_pwr_device_table(struct gk20a *g, struct pwr_device_ina3221 ina3221; } pwr_device_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pwr_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, POWER_SENSORS_TABLE); @@ -280,7 +280,7 @@ static u32 devinit_get_pwr_device_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -310,6 +310,6 @@ u32 pmgr_device_sw_setup(struct gk20a *g) goto done; done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c index 00c930a6..5e0cc966 100644 --- a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c +++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
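One detail visible throughout the perf/, pmgr/ and boardobj conversions in this change: empty format strings are not carried over verbatim. gk20a_dbg_info("") and gk20a_dbg_fn("") become nvgpu_log_info(g, " ") and nvgpu_log_fn(g, " ") with a single-space format. The patch itself does not say why; one plausible reason, shown below purely as an assumption rather than the confirmed motivation, is that the new helpers are printf-format-checked and an empty literal would trip GCC's zero-length-format warning when that warning is enabled.

	/* Hypothetical demo; compile with: gcc -Wformat -Wformat-zero-length demo.c */
	#include <stdio.h>

	__attribute__((format(printf, 1, 2)))
	static void trace(const char *fmt, ...)
	{
		(void)fmt;   /* body irrelevant; only the format check matters here */
	}

	int main(void)
	{
		/* trace(""); */   /* would warn: zero-length gnu_printf format string */
		trace(" ");        /* the single-space form used across this patch */
		return 0;
	}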
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,7 +36,7 @@ static u32 _pwr_channel_pmudata_instget(struct gk20a *g, struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel = (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -49,7 +49,7 @@ static u32 _pwr_channel_pmudata_instget(struct gk20a *g, /* handle Global/common data here as we need index */ ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -62,7 +62,7 @@ static u32 _pwr_channel_rels_pmudata_instget(struct gk20a *g, struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels = (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -72,7 +72,7 @@ static u32 _pwr_channel_rels_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &ppmgrchrels->ch_rels[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -169,7 +169,7 @@ static struct boardobj *construct_pwr_topology(struct gk20a *g, pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx; pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return board_obj_ptr; } @@ -192,7 +192,7 @@ static u32 devinit_get_pwr_topology_table(struct gk20a *g, struct pwr_channel_sensor sensor; } pwr_topology_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pwr_topology_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, POWER_TOPOLOGY_TABLE); @@ -292,7 +292,7 @@ static u32 devinit_get_pwr_topology_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -365,6 +365,6 @@ u32 pmgr_monitor_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c index 2942268f..0d617f6a 100644 --- a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c +++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -270,7 +270,7 @@ static struct boardobj *construct_pwr_policy(struct gk20a *g, pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr; pwrpolicy = (struct pwr_policy *)board_obj_ptr; - gk20a_dbg_fn("min=%u rated=%u max=%u", + nvgpu_log_fn(g, "min=%u rated=%u max=%u", pwrpolicyparams->limit_min, pwrpolicyparams->limit_rated, pwrpolicyparams->limit_max); @@ -358,7 +358,7 @@ static struct boardobj *construct_pwr_policy(struct gk20a *g, pwrpolicyswthreshold->event_id = swthreshold->event_id; } - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return board_obj_ptr; } @@ -527,7 +527,7 @@ static u32 devinit_get_pwr_policy_table(struct gk20a *g, u32 hw_threshold_policy_index = 0; union pwr_policy_data_union pwr_policy_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, POWER_CAPPING_TABLE); @@ -702,7 +702,7 @@ static u32 devinit_get_pwr_policy_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -773,6 +773,6 @@ u32 pmgr_policy_sw_setup(struct gk20a *g) g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false; done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/pstate/pstate.c b/drivers/gpu/nvgpu/pstate/pstate.c index c3f34027..e61ec0f8 100644 --- a/drivers/gpu/nvgpu/pstate/pstate.c +++ b/drivers/gpu/nvgpu/pstate/pstate.c @@ -46,7 +46,7 @@ int gk20a_init_pstate_support(struct gk20a *g) { u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = volt_rail_sw_setup(g); if (err) @@ -114,7 +114,7 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g) { u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->ops.clk.mclk_init) { err = g->ops.clk.mclk_init(g); @@ -269,7 +269,7 @@ static int parse_pstate_entry_5x(struct gk20a *g, pstate->clklist.num_info = hdr->clock_entry_count; pstate->lpwr_entry_idx = entry->lpwr_entry_idx; - gk20a_dbg_info("pstate P%u", pstate->num); + nvgpu_log_info(g, "pstate P%u", pstate->num); for (clkidx = 0; clkidx < hdr->clock_entry_count; clkidx++) { struct clk_set_info *pclksetinfo; @@ -293,7 +293,7 @@ static int parse_pstate_entry_5x(struct gk20a *g, BIOS_GET_FIELD(clk_entry->param1, VBIOS_PSTATE_5X_CLOCK_PROG_PARAM1_MAX_FREQ_MHZ); - gk20a_dbg_info( + nvgpu_log_info(g, "clk_domain=%u nominal_mhz=%u min_mhz=%u max_mhz=%u", pclksetinfo->clkwhich, pclksetinfo->nominal_mhz, pclksetinfo->min_mhz, pclksetinfo->max_mhz); @@ -355,7 +355,7 @@ static int pstate_sw_setup(struct gk20a *g) struct vbios_pstate_header_5x *hdr = NULL; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq); @@ -401,11 +401,11 @@ struct pstate *pstate_find(struct gk20a *g, u32 num) struct pstate *pstate; u8 i; - gk20a_dbg_info("pstates = %p", pstates); + nvgpu_log_info(g, "pstates = %p", pstates); BOARDOBJGRP_FOR_EACH(&pstates->super.super, struct pstate *, pstate, i) { - gk20a_dbg_info("pstate=%p num=%u (looking for num=%u)", + nvgpu_log_info(g, "pstate=%p num=%u (looking for num=%u)", pstate, pstate->num, num); if (pstate->num == num) return pstate; @@ -420,7 +420,7 @@ struct clk_set_info *pstate_get_clk_set_info(struct gk20a *g, struct clk_set_info *info; u32 clkidx; - gk20a_dbg_info("pstate = %p", pstate); + 
nvgpu_log_info(g, "pstate = %p", pstate); if (!pstate) return NULL; diff --git a/drivers/gpu/nvgpu/therm/thrmchannel.c b/drivers/gpu/nvgpu/therm/thrmchannel.c index f8be8430..de3de62a 100644 --- a/drivers/gpu/nvgpu/therm/thrmchannel.c +++ b/drivers/gpu/nvgpu/therm/thrmchannel.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -90,7 +90,7 @@ static struct boardobj *construct_channel_device(struct gk20a *g, pchannel_device->therm_dev_idx = therm_device->therm_dev_idx; pchannel_device->therm_dev_prov_idx = therm_device->therm_dev_prov_idx; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return board_obj_ptr; } @@ -104,7 +104,7 @@ static u32 _therm_channel_pmudata_instget(struct gk20a *g, (struct nv_pmu_therm_therm_channel_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -114,7 +114,7 @@ static u32 _therm_channel_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -137,7 +137,7 @@ static u32 devinit_get_therm_channel_table(struct gk20a *g, struct therm_channel_device device; } therm_channel_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); therm_channel_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, THERMAL_CHANNEL_TABLE); @@ -206,7 +206,7 @@ static u32 devinit_get_therm_channel_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -248,6 +248,6 @@ u32 therm_channel_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/therm/thrmdev.c b/drivers/gpu/nvgpu/therm/thrmdev.c index 1aff119c..7371ba25 100644 --- a/drivers/gpu/nvgpu/therm/thrmdev.c +++ b/drivers/gpu/nvgpu/therm/thrmdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -41,7 +41,7 @@ static struct boardobj *construct_therm_device(struct gk20a *g, if (status) return NULL; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return board_obj_ptr; } @@ -55,7 +55,7 @@ static u32 _therm_device_pmudata_instget(struct gk20a *g, (struct nv_pmu_therm_therm_device_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -65,7 +65,7 @@ static u32 _therm_device_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -87,7 +87,7 @@ static u32 devinit_get_therm_device_table(struct gk20a *g, struct therm_device therm_device; } therm_device_data; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); therm_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, g->bios.perf_token, THERMAL_DEVICE_TABLE); @@ -153,7 +153,7 @@ static u32 devinit_get_therm_device_table(struct gk20a *g, } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } @@ -195,6 +195,6 @@ u32 therm_device_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c index a552ad44..563c3a2b 100644 --- a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c @@ -30,7 +30,7 @@ int vgpu_ce2_nonstall_isr(struct gk20a *g, struct tegra_vgpu_ce2_nonstall_intr_info *info) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); switch (info->type) { case TEGRA_VGPU_CE2_NONSTALL_INTR_NONBLOCKPIPE: diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c index 092954ed..2bb3b205 100644 --- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c @@ -42,8 +42,9 @@ int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s, size_t oob_size, ops_size; void *handle = NULL; int err = 0; + struct gk20a *g = dbg_s->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op)); handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(), @@ -82,8 +83,9 @@ int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerga struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate; int err = 0; u32 mode; + struct gk20a *g = dbg_s->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* Just return if requested mode is the same as the session's mode */ if (disable_powergate) { diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c index 3ea326b8..eb25cf3a 100644 --- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c @@ -45,8 +45,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch) struct tegra_vgpu_channel_config_params *p = &msg.params.channel_config; int err; + struct gk20a *g = ch->g; - gk20a_dbg_info("bind channel %d", ch->chid); + nvgpu_log_info(g, "bind channel %d", ch->chid); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND; msg.handle = vgpu_get_handle(ch->g); @@ -60,8 +61,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch) void vgpu_channel_unbind(struct channel_gk20a *ch) { + struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if 
(nvgpu_atomic_cmpxchg(&ch->bound, true, false)) { struct tegra_vgpu_cmd_msg msg; @@ -84,7 +86,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX; msg.handle = vgpu_get_handle(g); @@ -97,7 +99,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) } ch->virt_ctx = p->handle; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -107,7 +109,7 @@ void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX; msg.handle = vgpu_get_handle(g); @@ -122,8 +124,9 @@ void vgpu_channel_enable(struct channel_gk20a *ch) struct tegra_vgpu_channel_config_params *p = &msg.params.channel_config; int err; + struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE; msg.handle = vgpu_get_handle(ch->g); @@ -138,8 +141,9 @@ void vgpu_channel_disable(struct channel_gk20a *ch) struct tegra_vgpu_channel_config_params *p = &msg.params.channel_config; int err; + struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE; msg.handle = vgpu_get_handle(ch->g); @@ -155,8 +159,9 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base, struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc; int err; + struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC; msg.handle = vgpu_get_handle(ch->g); @@ -175,8 +180,9 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f) struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g); struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info; u32 i; + struct gk20a *g = f->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) { nvgpu_err(f->g, "num_engines %d larger than max %d", @@ -207,7 +213,7 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f) f->active_engines_list[i] = engines->info[i].engine_id; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -219,7 +225,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) u32 i; u64 runlist_size; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); f->max_runlists = g->ops.fifo.eng_runlist_base_size(); f->runlist_info = nvgpu_kzalloc(g, @@ -256,12 +262,12 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) runlist->cur_buffer = MAX_RUNLIST_BUFFERS; } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up_runlist: gk20a_fifo_delete_runlist(f); - gk20a_dbg_fn("fail"); + nvgpu_log_fn(g, "fail"); return -ENOMEM; } @@ -272,10 +278,10 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g) unsigned int chid; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (f->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -306,7 +312,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g) f->userd.gpu_va = 0; } - gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va); + nvgpu_log(g, gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va); f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel)); f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg)); @@ -350,11 +356,11 
@@ static int vgpu_init_fifo_setup_sw(struct gk20a *g) f->sw_ready = true; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: - gk20a_dbg_fn("fail"); + nvgpu_log_fn(g, "fail"); /* FIXME: unmap from bar1 */ nvgpu_dma_free(g, &f->userd); @@ -374,7 +380,7 @@ clean_up: int vgpu_init_fifo_setup_hw(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* test write, read through bar1 @ userd region before * turning on the snooping */ @@ -385,7 +391,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g) u32 bar1_vaddr = f->userd.gpu_va; volatile u32 *cpu_vaddr = f->userd.cpu_va; - gk20a_dbg_info("test bar1 @ vaddr 0x%x", + nvgpu_log_info(g, "test bar1 @ vaddr 0x%x", bar1_vaddr); v = gk20a_bar1_readl(g, bar1_vaddr); @@ -415,7 +421,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g) gk20a_bar1_writel(g, bar1_vaddr, v); } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; } @@ -424,7 +430,7 @@ int vgpu_init_fifo_support(struct gk20a *g) { u32 err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = vgpu_init_fifo_setup_sw(g); if (err) @@ -444,7 +450,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid) &msg.params.channel_config; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!nvgpu_atomic_read(&ch->bound)) return 0; @@ -470,7 +476,7 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) &msg.params.tsg_preempt; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT; msg.handle = vgpu_get_handle(g); @@ -533,7 +539,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, u16 *runlist_entry = NULL; u32 count = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); runlist = &f->runlist_info[runlist_id]; @@ -558,7 +564,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, runlist_entry = runlist->mem[0].cpu_va; for_each_set_bit(cid, runlist->active_channels, f->num_channels) { - gk20a_dbg_info("add channel %d to runlist", cid); + nvgpu_log_info(g, "add channel %d to runlist", cid); runlist_entry[0] = cid; runlist_entry++; count++; @@ -581,7 +587,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id, struct fifo_gk20a *f = &g->fifo; u32 ret = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); runlist = &f->runlist_info[runlist_id]; @@ -596,7 +602,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id, int vgpu_fifo_wait_engine_idle(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return 0; } @@ -611,7 +617,7 @@ int vgpu_fifo_set_runlist_interleave(struct gk20a *g, &msg.params.tsg_interleave; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE; msg.handle = vgpu_get_handle(g); @@ -633,7 +639,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, &msg.params.channel_config; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gk20a_is_channel_marked_as_tsg(ch)) { tsg = &g->fifo.tsg[ch->tsgid]; @@ -716,7 +722,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info) struct fifo_gk20a *f = &g->fifo; struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!ch) return 0; @@ -750,7 +756,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info) int vgpu_fifo_nonstall_isr(struct gk20a *g, struct tegra_vgpu_fifo_nonstall_intr_info *info) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); switch (info->type) { case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL: diff --git 
a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c index ab35dc67..86184336 100644 --- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c +++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c @@ -43,7 +43,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g, struct vgpu_priv_data *priv = vgpu_get_priv_data(g); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags); if (err) @@ -78,7 +78,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g, } } - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return err; fail: @@ -132,11 +132,11 @@ int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, attrib_cb_size = ALIGN(attrib_cb_size, 128); - gk20a_dbg_info("gfxp context preempt size=%d", + nvgpu_log_info(g, "gfxp context preempt size=%d", g->gr.ctx_vars.preempt_image_size); - gk20a_dbg_info("gfxp context spill size=%d", spill_size); - gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size); - gk20a_dbg_info("gfxp context attrib cb size=%d", + nvgpu_log_info(g, "gfxp context spill size=%d", spill_size); + nvgpu_log_info(g, "gfxp context pagepool size=%d", pagepool_size); + nvgpu_log_info(g, "gfxp context attrib cb size=%d", attrib_cb_size); err = gr_gp10b_alloc_buffer(vm, @@ -293,7 +293,7 @@ int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g) struct vgpu_priv_data *priv = vgpu_get_priv_data(g); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = vgpu_gr_init_ctx_state(g); if (err) diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c index e615c486..b8c4d2de 100644 --- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c +++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c @@ -78,7 +78,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm, u8 prot; struct nvgpu_sgl *sgl; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* FIXME: add support for sparse mappings */ diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c index 2f1280ac..1e633d5f 100644 --- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c @@ -43,7 +43,7 @@ void vgpu_gr_detect_sm_arch(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->params.sm_arch_sm_version = priv->constants.sm_arch_sm_version; @@ -58,8 +58,9 @@ int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va) struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; int err; + struct gk20a *g = c->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX; msg.handle = vgpu_get_handle(c->g); @@ -76,7 +77,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g, struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX; msg.handle = vgpu_get_handle(g); @@ -94,7 +95,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g, struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX; msg.handle = vgpu_get_handle(g); @@ -109,7 +110,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g) struct gr_gk20a *gr = &g->gr; struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size; g->gr.ctx_vars.zcull_ctxsw_image_size = 
priv->constants.zcull_ctx_size; @@ -135,20 +136,20 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g) u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * gr_scc_pagepool_total_pages_byte_granularity_v(); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); - gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); + nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size); gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size; - gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); + nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size); gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size; - gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); + nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size); gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size; - gk20a_dbg_info("priv access map size : %d", + nvgpu_log_info(g, "priv access map size : %d", gr->ctx_vars.priv_access_map_size); gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size = gr->ctx_vars.priv_access_map_size; @@ -170,7 +171,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, u32 i; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -249,8 +250,9 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg) u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va; u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size; u32 i; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (tsg->gr_ctx.global_ctx_buffer_mapped) { /* server will unmap on channel close */ @@ -279,7 +281,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g, struct gr_gk20a *gr = &g->gr; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr->ctx_vars.buffer_size == 0) return 0; @@ -328,7 +330,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g, struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(c); if (!tsg) @@ -359,8 +361,9 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g, static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg) { struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (patch_ctx->mem.gpu_va) { /* server will free on channel close */ @@ -375,8 +378,9 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg) { struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx; struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* check if hwpm was ever initialized. If not, nothing to do */ if (pm_ctx->mem.gpu_va == 0) @@ -394,7 +398,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, { struct tsg_gk20a *tsg; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr_ctx->mem.gpu_va) { struct tegra_vgpu_cmd_msg msg; @@ -477,7 +481,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) struct tsg_gk20a *tsg = NULL; int err = 0; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); /* an address space needs to have been bound at this point.*/ if (!gk20a_channel_as_bound(c)) { @@ -577,7 +581,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) /* PM ctxt switch is off by default */ gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f(); - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; out: /* 1. 
gr_ctx, patch_ctx and global ctx buffer mapping @@ -595,7 +599,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) u32 sm_per_tpc; int err = -ENOMEM; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr->max_gpc_count = priv->constants.max_gpc_count; gr->gpc_count = priv->constants.gpc_count; @@ -658,7 +662,7 @@ int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr, struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL; msg.handle = vgpu_get_handle(g); @@ -677,7 +681,7 @@ int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr, struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO; msg.handle = vgpu_get_handle(g); @@ -712,7 +716,7 @@ u32 vgpu_gr_get_max_fbps_count(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return priv->constants.num_fbps; } @@ -721,7 +725,7 @@ u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return priv->constants.fbp_en_mask; } @@ -730,7 +734,7 @@ u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return priv->constants.ltc_per_fbp; } @@ -739,7 +743,7 @@ u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return priv->constants.max_lts_per_ltc; } @@ -749,7 +753,7 @@ u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g) struct vgpu_priv_data *priv = vgpu_get_priv_data(g); u32 i, max_fbps_count = priv->constants.num_fbps; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (g->gr.fbp_rop_l2_en_mask == NULL) { g->gr.fbp_rop_l2_en_mask = @@ -772,7 +776,7 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr, struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE; msg.handle = vgpu_get_handle(g); @@ -804,7 +808,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr, &msg.params.zbc_query_table; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE; msg.handle = vgpu_get_handle(g); @@ -840,7 +844,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr, static void vgpu_remove_gr_support(struct gr_gk20a *gr) { - gk20a_dbg_fn(""); + nvgpu_log_fn(gr->g, " "); gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags); @@ -865,10 +869,10 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g) struct gr_gk20a *gr = &g->gr; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (gr->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -907,7 +911,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g) gr->remove_support = vgpu_remove_gr_support; gr->sw_ready = true; - gk20a_dbg_fn("done"); + nvgpu_log_fn(g, "done"); return 0; clean_up: @@ -918,7 +922,7 @@ clean_up: int vgpu_init_gr_support(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return vgpu_gr_init_gr_setup_sw(g); } @@ -928,7 +932,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info) struct fifo_gk20a *f = &g->fifo; struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]); - 
gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (!ch) return 0; @@ -985,7 +989,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info) int vgpu_gr_nonstall_isr(struct gk20a *g, struct tegra_vgpu_gr_nonstall_intr_info *info) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); switch (info->type) { case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE: @@ -1006,7 +1010,7 @@ int vgpu_gr_set_sm_debug_mode(struct gk20a *g, struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE; msg.handle = vgpu_get_handle(g); @@ -1026,7 +1030,7 @@ int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g, struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE; msg.handle = vgpu_get_handle(g); @@ -1053,7 +1057,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g, struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); tsg = tsg_gk20a_from_ch(ch); if (!tsg) diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c index 933e8357..1bcd151a 100644 --- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c @@ -30,7 +30,7 @@ int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = vgpu_init_gpu_characteristics(g); if (err) { diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c index b249b5af..367c1299 100644 --- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c @@ -33,8 +33,9 @@ int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg, struct tegra_vgpu_tsg_bind_channel_ex_params *p = &msg.params.tsg_bind_channel_ex; int err; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_tsg_bind_channel(tsg, ch); if (err) diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c index d451a1f2..f68c8454 100644 --- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c @@ -31,7 +31,7 @@ int vgpu_determine_L2_size_bytes(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); return priv->constants.l2_size; } @@ -42,7 +42,7 @@ int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) u32 max_comptag_lines = 0; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); gr->cacheline_size = priv->constants.cacheline_size; gr->comptags_per_cacheline = priv->constants.comptags_per_cacheline; @@ -65,7 +65,7 @@ void vgpu_ltc_init_fs_state(struct gk20a *g) { struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); g->ltc_count = priv->constants.ltc_count; } diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c index 3e75cee3..b8eaa1db 100644 --- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c @@ -40,10 +40,10 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g) { struct mm_gk20a *mm = &g->mm; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (mm->sw_ready) { - gk20a_dbg_fn("skip init"); + nvgpu_log_fn(g, "skip init"); return 0; } @@ -56,7 +56,7 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g) mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; mm->channel.kernel_size = 
NV_MM_DEFAULT_KERNEL_SIZE; - gk20a_dbg_info("channel vm size: user %dMB kernel %dMB", + nvgpu_log_info(g, "channel vm size: user %dMB kernel %dMB", (int)(mm->channel.user_size >> 20), (int)(mm->channel.kernel_size >> 20)); @@ -69,7 +69,7 @@ int vgpu_init_mm_support(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = vgpu_init_mm_setup_sw(g); if (err) @@ -95,7 +95,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm, struct tegra_vgpu_as_map_params *p = &msg.params.as_map; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP; msg.handle = vgpu_get_handle(g); @@ -183,8 +183,9 @@ int vgpu_vm_bind_channel(struct vm_gk20a *vm, struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share; int err; + struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); ch->vm = vm; msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE; @@ -220,7 +221,7 @@ static void vgpu_cache_maint(u64 handle, u8 op) int vgpu_mm_fb_flush(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH); return 0; @@ -229,7 +230,7 @@ int vgpu_mm_fb_flush(struct gk20a *g) void vgpu_mm_l2_invalidate(struct gk20a *g) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV); } @@ -238,7 +239,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate) { u8 op; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (invalidate) op = TEGRA_VGPU_L2_MAINT_FLUSH_INV; @@ -250,7 +251,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate) void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb) { - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); nvgpu_err(g, "call to RM server not supported"); } @@ -261,7 +262,7 @@ void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable) struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE; msg.handle = vgpu_get_handle(g); diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c index a6e493d0..7bb8f671 100644 --- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c @@ -35,8 +35,9 @@ int vgpu_tsg_open(struct tsg_gk20a *tsg) struct tegra_vgpu_tsg_open_rel_params *p = &msg.params.tsg_open; int err; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_TSG_OPEN; msg.handle = vgpu_get_handle(tsg->g); @@ -57,8 +58,9 @@ void vgpu_tsg_release(struct tsg_gk20a *tsg) struct tegra_vgpu_tsg_open_rel_params *p = &msg.params.tsg_release; int err; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_TSG_RELEASE; msg.handle = vgpu_get_handle(tsg->g); @@ -91,8 +93,9 @@ int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg, struct tegra_vgpu_tsg_bind_unbind_channel_params *p = &msg.params.tsg_bind_unbind_channel; int err; + struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_tsg_bind_channel(tsg, ch); if (err) @@ -120,8 +123,9 @@ int vgpu_tsg_unbind_channel(struct channel_gk20a *ch) struct tegra_vgpu_tsg_bind_unbind_channel_params *p = &msg.params.tsg_bind_unbind_channel; int err; + struct gk20a *g = ch->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_fifo_tsg_unbind_channel(ch); if (err) @@ -143,8 +147,9 @@ int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) struct tegra_vgpu_tsg_timeslice_params *p = 
&msg.params.tsg_timeslice; int err; + struct gk20a *g = tsg->g; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE; msg.handle = vgpu_get_handle(tsg->g); diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c index 1e77cda9..17e80cd7 100644 --- a/drivers/gpu/nvgpu/vgpu/vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/vgpu.c @@ -249,7 +249,7 @@ void vgpu_detect_chip(struct gk20a *g) p->gpu_impl = priv->constants.impl; p->gpu_rev = priv->constants.rev; - gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n", + nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n", p->gpu_arch, p->gpu_impl, p->gpu_rev); @@ -259,7 +259,7 @@ int vgpu_init_gpu_characteristics(struct gk20a *g) { int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); err = gk20a_init_gpu_characteristics(g); if (err) @@ -279,7 +279,7 @@ int vgpu_read_ptimer(struct gk20a *g, u64 *value) struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer; int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER; msg.handle = vgpu_get_handle(g); @@ -304,7 +304,7 @@ int vgpu_get_timestamps_zipper(struct gk20a *g, int err; u32 i; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) { nvgpu_err(g, "count %u overflow", count); @@ -338,7 +338,7 @@ int vgpu_init_hal(struct gk20a *g) switch (ver) { case NVGPU_GPUID_GP10B: - gk20a_dbg_info("gp10b detected"); + nvgpu_log_info(g, "gp10b detected"); err = vgpu_gp10b_init_hal(g); break; case NVGPU_GPUID_GV11B: @@ -360,7 +360,7 @@ int vgpu_get_constants(struct gk20a *g) struct vgpu_priv_data *priv = vgpu_get_priv_data(g); int err; - gk20a_dbg_fn(""); + nvgpu_log_fn(g, " "); msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS; msg.handle = vgpu_get_handle(g); diff --git a/drivers/gpu/nvgpu/volt/volt_dev.c b/drivers/gpu/nvgpu/volt/volt_dev.c index 38df2105..d900b37b 100644 --- a/drivers/gpu/nvgpu/volt/volt_dev.c +++ b/drivers/gpu/nvgpu/volt/volt_dev.c @@ -408,7 +408,7 @@ static u32 _volt_device_devgrp_pmudata_instget(struct gk20a *g, (struct nv_pmu_volt_volt_device_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -417,7 +417,7 @@ static u32 _volt_device_devgrp_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return 0; } @@ -506,7 +506,7 @@ u32 volt_dev_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->perf_pmu.volt.volt_dev_metadata.volt_devices.super; @@ -515,7 +515,7 @@ u32 volt_dev_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -526,7 +526,7 @@ u32 volt_dev_sw_setup(struct gk20a *g) struct voltage_device *pvolt_device; u8 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e32(g, &g->perf_pmu.volt.volt_dev_metadata.volt_devices); @@ -585,6 +585,6 @@ u32 volt_dev_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/volt/volt_pmu.c b/drivers/gpu/nvgpu/volt/volt_pmu.c index 07bff84a..bd9177ff 100644 --- a/drivers/gpu/nvgpu/volt/volt_pmu.c +++ b/drivers/gpu/nvgpu/volt/volt_pmu.c @@ -46,7 +46,7 @@ static void 
volt_rpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, struct volt_rpc_pmucmdhandler_params *phandlerparams = (struct volt_rpc_pmucmdhandler_params *)param; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); if (msg->msg.volt.msg_type != NV_PMU_VOLT_MSG_ID_RPC) { nvgpu_err(g, "unsupported msg for VOLT RPC %x", diff --git a/drivers/gpu/nvgpu/volt/volt_policy.c b/drivers/gpu/nvgpu/volt/volt_policy.c index a69c38bb..3783dc32 100644 --- a/drivers/gpu/nvgpu/volt/volt_policy.c +++ b/drivers/gpu/nvgpu/volt/volt_policy.c @@ -370,7 +370,7 @@ static u32 _volt_policy_devgrp_pmudata_instget(struct gk20a *g, (struct nv_pmu_volt_volt_policy_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -379,7 +379,7 @@ static u32 _volt_policy_devgrp_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -430,7 +430,7 @@ u32 volt_policy_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->perf_pmu.volt.volt_policy_metadata.volt_policies.super; @@ -440,7 +440,7 @@ u32 volt_policy_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -449,7 +449,7 @@ u32 volt_policy_sw_setup(struct gk20a *g) u32 status = 0; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e32(g, &g->perf_pmu.volt.volt_policy_metadata.volt_policies); @@ -496,6 +496,6 @@ u32 volt_policy_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } diff --git a/drivers/gpu/nvgpu/volt/volt_rail.c b/drivers/gpu/nvgpu/volt/volt_rail.c index 3461653f..6a7dcdbe 100644 --- a/drivers/gpu/nvgpu/volt/volt_rail.c +++ b/drivers/gpu/nvgpu/volt/volt_rail.c @@ -135,7 +135,7 @@ static u32 volt_rail_init_pmudata_super(struct gk20a *g, struct nv_pmu_volt_volt_rail_boardobj_set *rail_pmu_data; u32 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); if (status) @@ -170,7 +170,7 @@ static u32 volt_rail_init_pmudata_super(struct gk20a *g, nvgpu_err(g, "Failed to export BOARDOBJGRPMASK of VOLTAGE_DEVICEs"); - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -182,7 +182,7 @@ static struct voltage_rail *construct_volt_rail(struct gk20a *g, void *pargs) struct voltage_rail *board_obj_volt_rail_ptr = NULL; u32 status; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobj_construct_super(g, &board_obj_ptr, sizeof(struct voltage_rail), pargs); if (status) @@ -211,7 +211,7 @@ static struct voltage_rail *construct_volt_rail(struct gk20a *g, void *pargs) board_obj_volt_rail_ptr->volt_scale_exp_pwr_equ_idx = ptemp_rail->volt_scale_exp_pwr_equ_idx; - gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return (struct voltage_rail *)board_obj_ptr; } @@ -242,7 +242,7 @@ u32 volt_rail_pmu_setup(struct gk20a *g) u32 status; struct boardobjgrp *pboardobjgrp = NULL; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); pboardobjgrp = &g->perf_pmu.volt.volt_rail_metadata.volt_rails.super; @@ -251,7 +251,7 @@ u32 volt_rail_pmu_setup(struct gk20a *g) status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 
- gk20a_dbg_info("Done"); + nvgpu_log_info(g, "Done"); return status; } @@ -366,7 +366,7 @@ static u32 _volt_rail_devgrp_pmudata_instget(struct gk20a *g, (struct nv_pmu_volt_volt_rail_boardobj_grp_set *) pmuboardobjgrp; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); /*check whether pmuboardobjgrp has a valid boardobj in index*/ if (((u32)BIT(idx) & @@ -375,7 +375,7 @@ static u32 _volt_rail_devgrp_pmudata_instget(struct gk20a *g, *ppboardobjpmudata = (struct nv_pmu_boardobj *) &pgrp_set->objects[idx].data.board_obj; - gk20a_dbg_info(" Done"); + nvgpu_log_info(g, " Done"); return 0; } @@ -404,7 +404,7 @@ u32 volt_rail_sw_setup(struct gk20a *g) struct voltage_rail *pvolt_rail; u8 i; - gk20a_dbg_info(""); + nvgpu_log_info(g, " "); status = boardobjgrpconstruct_e32(g, &g->perf_pmu.volt.volt_rail_metadata.volt_rails); @@ -465,6 +465,6 @@ u32 volt_rail_sw_setup(struct gk20a *g) } done: - gk20a_dbg_info(" done status %x", status); + nvgpu_log_info(g, " done status %x", status); return status; } -- cgit v1.2.2