summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
diff options
context:
space:
mode:
author: Terje Bergstrom <tbergstrom@nvidia.com> 2018-04-18 22:39:46 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com> 2018-05-09 21:26:04 -0400
commitdd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/vgpu/mm_vgpu.c
parent7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). gk20a_dbg* macros are intentionally left there because of use from other repositories. Because the new functions do not work without a pointer to struct gk20a, and piping it just for logging is excessive, some log messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--drivers/gpu/nvgpu/vgpu/mm_vgpu.c23
1 file changed, 12 insertions, 11 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 3e75cee3..b8eaa1db 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -40,10 +40,10 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g)
40{ 40{
41 struct mm_gk20a *mm = &g->mm; 41 struct mm_gk20a *mm = &g->mm;
42 42
43 gk20a_dbg_fn(""); 43 nvgpu_log_fn(g, " ");
44 44
45 if (mm->sw_ready) { 45 if (mm->sw_ready) {
46 gk20a_dbg_fn("skip init"); 46 nvgpu_log_fn(g, "skip init");
47 return 0; 47 return 0;
48 } 48 }
49 49
@@ -56,7 +56,7 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g)
56 mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; 56 mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
57 mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE; 57 mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;
58 58
59 gk20a_dbg_info("channel vm size: user %dMB kernel %dMB", 59 nvgpu_log_info(g, "channel vm size: user %dMB kernel %dMB",
60 (int)(mm->channel.user_size >> 20), 60 (int)(mm->channel.user_size >> 20),
61 (int)(mm->channel.kernel_size >> 20)); 61 (int)(mm->channel.kernel_size >> 20));
62 62
@@ -69,7 +69,7 @@ int vgpu_init_mm_support(struct gk20a *g)
69{ 69{
70 int err; 70 int err;
71 71
72 gk20a_dbg_fn(""); 72 nvgpu_log_fn(g, " ");
73 73
74 err = vgpu_init_mm_setup_sw(g); 74 err = vgpu_init_mm_setup_sw(g);
75 if (err) 75 if (err)
@@ -95,7 +95,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
95 struct tegra_vgpu_as_map_params *p = &msg.params.as_map; 95 struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
96 int err; 96 int err;
97 97
98 gk20a_dbg_fn(""); 98 nvgpu_log_fn(g, " ");
99 99
100 msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP; 100 msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
101 msg.handle = vgpu_get_handle(g); 101 msg.handle = vgpu_get_handle(g);
@@ -183,8 +183,9 @@ int vgpu_vm_bind_channel(struct vm_gk20a *vm,
183 struct tegra_vgpu_cmd_msg msg; 183 struct tegra_vgpu_cmd_msg msg;
184 struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share; 184 struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
185 int err; 185 int err;
186 struct gk20a *g = ch->g;
186 187
187 gk20a_dbg_fn(""); 188 nvgpu_log_fn(g, " ");
188 189
189 ch->vm = vm; 190 ch->vm = vm;
190 msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE; 191 msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
@@ -220,7 +221,7 @@ static void vgpu_cache_maint(u64 handle, u8 op)
220int vgpu_mm_fb_flush(struct gk20a *g) 221int vgpu_mm_fb_flush(struct gk20a *g)
221{ 222{
222 223
223 gk20a_dbg_fn(""); 224 nvgpu_log_fn(g, " ");
224 225
225 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH); 226 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
226 return 0; 227 return 0;
@@ -229,7 +230,7 @@ int vgpu_mm_fb_flush(struct gk20a *g)
229void vgpu_mm_l2_invalidate(struct gk20a *g) 230void vgpu_mm_l2_invalidate(struct gk20a *g)
230{ 231{
231 232
232 gk20a_dbg_fn(""); 233 nvgpu_log_fn(g, " ");
233 234
234 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV); 235 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
235} 236}
@@ -238,7 +239,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
238{ 239{
239 u8 op; 240 u8 op;
240 241
241 gk20a_dbg_fn(""); 242 nvgpu_log_fn(g, " ");
242 243
243 if (invalidate) 244 if (invalidate)
244 op = TEGRA_VGPU_L2_MAINT_FLUSH_INV; 245 op = TEGRA_VGPU_L2_MAINT_FLUSH_INV;
@@ -250,7 +251,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
250 251
251void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb) 252void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
252{ 253{
253 gk20a_dbg_fn(""); 254 nvgpu_log_fn(g, " ");
254 255
255 nvgpu_err(g, "call to RM server not supported"); 256 nvgpu_err(g, "call to RM server not supported");
256} 257}
@@ -261,7 +262,7 @@ void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
261 struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode; 262 struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
262 int err; 263 int err;
263 264
264 gk20a_dbg_fn(""); 265 nvgpu_log_fn(g, " ");
265 266
266 msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE; 267 msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
267 msg.handle = vgpu_get_handle(g); 268 msg.handle = vgpu_get_handle(g);