Diffstat (limited to 'drivers/gpu/nvgpu/vgpu')
-rw-r--r--   drivers/gpu/nvgpu/vgpu/ce2_vgpu.c                 2
-rw-r--r--   drivers/gpu/nvgpu/vgpu/dbg_vgpu.c                 6
-rw-r--r--   drivers/gpu/nvgpu/vgpu/fifo_vgpu.c               70
-rw-r--r--   drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c     14
-rw-r--r--   drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c      2
-rw-r--r--   drivers/gpu/nvgpu/vgpu/gr_vgpu.c                 82
-rw-r--r--   drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c         2
-rw-r--r--   drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c     3
-rw-r--r--   drivers/gpu/nvgpu/vgpu/ltc_vgpu.c                 6
-rw-r--r--   drivers/gpu/nvgpu/vgpu/mm_vgpu.c                 23
-rw-r--r--   drivers/gpu/nvgpu/vgpu/tsg_vgpu.c                15
-rw-r--r--   drivers/gpu/nvgpu/vgpu/vgpu.c                    12
12 files changed, 128 insertions, 109 deletions
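
Every hunk below applies the same mechanical conversion: the old device-agnostic debug macros gk20a_dbg_fn(), gk20a_dbg_info() and gk20a_dbg() are replaced with the per-device nvgpu_log_fn(), nvgpu_log_info() and nvgpu_log() calls, which take an explicit struct gk20a pointer as their first argument. Functions that did not already have such a pointer in scope gain a local (struct gk20a *g = ch->g, tsg->g, f->g, dbg_s->g and so on). The standalone sketch below illustrates the call-site pattern; the struct layouts and macro bodies are simplified stand-ins invented so the example builds on its own, not the real nvgpu definitions, which route through the driver's log-mask machinery.

/* Standalone sketch of the gk20a_dbg_* -> nvgpu_log_* call-site change.
 * The structs and macros here are simplified stand-ins so the example
 * compiles by itself; the real definitions live in the nvgpu headers. */
#include <stdio.h>

struct gk20a { const char *name; };
struct channel_gk20a { struct gk20a *g; int chid; };

/* stand-in per-device logging macros (assumed shapes, GNU ##__VA_ARGS__) */
#define nvgpu_log_fn(g, fmt, ...) \
        printf("[%s] %s: " fmt "\n", (g)->name, __func__, ##__VA_ARGS__)
#define nvgpu_log_info(g, fmt, ...) \
        printf("[%s] " fmt "\n", (g)->name, ##__VA_ARGS__)

static void vgpu_channel_bind_example(struct channel_gk20a *ch)
{
        struct gk20a *g = ch->g;        /* local added, as in the patch */

        nvgpu_log_fn(g, " ");           /* was: gk20a_dbg_fn("") */
        nvgpu_log_info(g, "bind channel %d", ch->chid); /* was: gk20a_dbg_info(...) */
}

int main(void)
{
        struct gk20a gpu = { .name = "vgpu" };
        struct channel_gk20a ch = { .g = &gpu, .chid = 3 };

        vgpu_channel_bind_example(&ch);
        return 0;
}

The same shape covers the third variant seen in fifo_vgpu.c, where gk20a_dbg(gpu_dbg_map_v, ...) becomes nvgpu_log(g, gpu_dbg_map_v, ...) with an explicit log class.
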
diff --git a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
index a552ad44..563c3a2b 100644
--- a/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/ce2_vgpu.c
@@ -30,7 +30,7 @@
 int vgpu_ce2_nonstall_isr(struct gk20a *g,
                 struct tegra_vgpu_ce2_nonstall_intr_info *info)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         switch (info->type) {
         case TEGRA_VGPU_CE2_NONSTALL_INTR_NONBLOCKPIPE:
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
index 092954ed..2bb3b205 100644
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
@@ -42,8 +42,9 @@ int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
         size_t oob_size, ops_size;
         void *handle = NULL;
         int err = 0;
+        struct gk20a *g = dbg_s->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
         BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op));
 
         handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
@@ -82,8 +83,9 @@ int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerga
         struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate;
         int err = 0;
         u32 mode;
+        struct gk20a *g = dbg_s->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         /* Just return if requested mode is the same as the session's mode */
         if (disable_powergate) {
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 3ea326b8..eb25cf3a 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -45,8 +45,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
         struct tegra_vgpu_channel_config_params *p =
                         &msg.params.channel_config;
         int err;
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_info("bind channel %d", ch->chid);
+        nvgpu_log_info(g, "bind channel %d", ch->chid);
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
         msg.handle = vgpu_get_handle(ch->g);
@@ -60,8 +61,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
 
 void vgpu_channel_unbind(struct channel_gk20a *ch)
 {
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
                 struct tegra_vgpu_cmd_msg msg;
@@ -84,7 +86,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
         struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
         msg.handle = vgpu_get_handle(g);
@@ -97,7 +99,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
         }
 
         ch->virt_ctx = p->handle;
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
         return 0;
 }
 
@@ -107,7 +109,7 @@ void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
         struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
         msg.handle = vgpu_get_handle(g);
@@ -122,8 +124,9 @@ void vgpu_channel_enable(struct channel_gk20a *ch)
         struct tegra_vgpu_channel_config_params *p =
                         &msg.params.channel_config;
         int err;
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
         msg.handle = vgpu_get_handle(ch->g);
@@ -138,8 +141,9 @@ void vgpu_channel_disable(struct channel_gk20a *ch)
         struct tegra_vgpu_channel_config_params *p =
                         &msg.params.channel_config;
         int err;
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
         msg.handle = vgpu_get_handle(ch->g);
@@ -155,8 +159,9 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
         int err;
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
         msg.handle = vgpu_get_handle(ch->g);
@@ -175,8 +180,9 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
         struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
         struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
         u32 i;
+        struct gk20a *g = f->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
                 nvgpu_err(f->g, "num_engines %d larger than max %d",
@@ -207,7 +213,7 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
                 f->active_engines_list[i] = engines->info[i].engine_id;
         }
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
 
         return 0;
 }
@@ -219,7 +225,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
         u32 i;
         u64 runlist_size;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         f->max_runlists = g->ops.fifo.eng_runlist_base_size();
         f->runlist_info = nvgpu_kzalloc(g,
@@ -256,12 +262,12 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                 runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
         }
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
         return 0;
 
 clean_up_runlist:
         gk20a_fifo_delete_runlist(f);
-        gk20a_dbg_fn("fail");
+        nvgpu_log_fn(g, "fail");
         return -ENOMEM;
 }
 
@@ -272,10 +278,10 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
         unsigned int chid;
         int err = 0;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (f->sw_ready) {
-                gk20a_dbg_fn("skip init");
+                nvgpu_log_fn(g, "skip init");
                 return 0;
         }
 
@@ -306,7 +312,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
                 f->userd.gpu_va = 0;
         }
 
-        gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
+        nvgpu_log(g, gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
 
         f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
         f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
@@ -350,11 +356,11 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
         f->sw_ready = true;
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
         return 0;
 
 clean_up:
-        gk20a_dbg_fn("fail");
+        nvgpu_log_fn(g, "fail");
         /* FIXME: unmap from bar1 */
         nvgpu_dma_free(g, &f->userd);
 
@@ -374,7 +380,7 @@ clean_up:
 
 int vgpu_init_fifo_setup_hw(struct gk20a *g)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         /* test write, read through bar1 @ userd region before
          * turning on the snooping */
@@ -385,7 +391,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
                 u32 bar1_vaddr = f->userd.gpu_va;
                 volatile u32 *cpu_vaddr = f->userd.cpu_va;
 
-                gk20a_dbg_info("test bar1 @ vaddr 0x%x",
+                nvgpu_log_info(g, "test bar1 @ vaddr 0x%x",
                                 bar1_vaddr);
 
                 v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -415,7 +421,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
                 gk20a_bar1_writel(g, bar1_vaddr, v);
         }
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
 
         return 0;
 }
@@ -424,7 +430,7 @@ int vgpu_init_fifo_support(struct gk20a *g)
 {
         u32 err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = vgpu_init_fifo_setup_sw(g);
         if (err)
@@ -444,7 +450,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
                         &msg.params.channel_config;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (!nvgpu_atomic_read(&ch->bound))
                 return 0;
@@ -470,7 +476,7 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
                         &msg.params.tsg_preempt;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
         msg.handle = vgpu_get_handle(g);
@@ -533,7 +539,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
         u16 *runlist_entry = NULL;
         u32 count = 0;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         runlist = &f->runlist_info[runlist_id];
 
@@ -558,7 +564,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                 runlist_entry = runlist->mem[0].cpu_va;
                 for_each_set_bit(cid,
                                 runlist->active_channels, f->num_channels) {
-                        gk20a_dbg_info("add channel %d to runlist", cid);
+                        nvgpu_log_info(g, "add channel %d to runlist", cid);
                         runlist_entry[0] = cid;
                         runlist_entry++;
                         count++;
@@ -581,7 +587,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
         struct fifo_gk20a *f = &g->fifo;
         u32 ret = 0;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         runlist = &f->runlist_info[runlist_id];
 
@@ -596,7 +602,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 
 int vgpu_fifo_wait_engine_idle(struct gk20a *g)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         return 0;
 }
@@ -611,7 +617,7 @@ int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
                         &msg.params.tsg_interleave;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
         msg.handle = vgpu_get_handle(g);
@@ -633,7 +639,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
                         &msg.params.channel_config;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (gk20a_is_channel_marked_as_tsg(ch)) {
                 tsg = &g->fifo.tsg[ch->tsgid];
@@ -716,7 +722,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
         struct fifo_gk20a *f = &g->fifo;
         struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
         if (!ch)
                 return 0;
 
@@ -750,7 +756,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 int vgpu_fifo_nonstall_isr(struct gk20a *g,
                 struct tegra_vgpu_fifo_nonstall_intr_info *info)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         switch (info->type) {
         case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL:
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index ab35dc67..86184336 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -43,7 +43,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
         if (err)
@@ -78,7 +78,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
                 }
         }
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
         return err;
 
 fail:
@@ -132,11 +132,11 @@ int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 
         attrib_cb_size = ALIGN(attrib_cb_size, 128);
 
-        gk20a_dbg_info("gfxp context preempt size=%d",
+        nvgpu_log_info(g, "gfxp context preempt size=%d",
                         g->gr.ctx_vars.preempt_image_size);
-        gk20a_dbg_info("gfxp context spill size=%d", spill_size);
-        gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
-        gk20a_dbg_info("gfxp context attrib cb size=%d",
+        nvgpu_log_info(g, "gfxp context spill size=%d", spill_size);
+        nvgpu_log_info(g, "gfxp context pagepool size=%d", pagepool_size);
+        nvgpu_log_info(g, "gfxp context attrib cb size=%d",
                         attrib_cb_size);
 
         err = gr_gp10b_alloc_buffer(vm,
@@ -293,7 +293,7 @@ int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = vgpu_gr_init_ctx_state(g);
         if (err)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index e615c486..b8c4d2de 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -78,7 +78,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
         u8 prot;
         struct nvgpu_sgl *sgl;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         /* FIXME: add support for sparse mappings */
 
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 2f1280ac..1e633d5f 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -43,7 +43,7 @@ void vgpu_gr_detect_sm_arch(struct gk20a *g)
 {
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         g->params.sm_arch_sm_version =
                 priv->constants.sm_arch_sm_version;
@@ -58,8 +58,9 @@ int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         int err;
+        struct gk20a *g = c->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
         msg.handle = vgpu_get_handle(c->g);
@@ -76,7 +77,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
         msg.handle = vgpu_get_handle(g);
@@ -94,7 +95,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
         msg.handle = vgpu_get_handle(g);
@@ -109,7 +110,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
         struct gr_gk20a *gr = &g->gr;
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
         g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
@@ -135,20 +136,20 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
         u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
                 gr_scc_pagepool_total_pages_byte_granularity_v();
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
 
-        gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
+        nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size);
         gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
 
-        gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
+        nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
         gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
 
-        gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
+        nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
         gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
 
-        gk20a_dbg_info("priv access map size : %d",
+        nvgpu_log_info(g, "priv access map size : %d",
                 gr->ctx_vars.priv_access_map_size);
         gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
                 gr->ctx_vars.priv_access_map_size;
@@ -170,7 +171,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
         u32 i;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         tsg = tsg_gk20a_from_ch(c);
         if (!tsg)
@@ -249,8 +250,9 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
         u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
         u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
         u32 i;
+        struct gk20a *g = tsg->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (tsg->gr_ctx.global_ctx_buffer_mapped) {
                 /* server will unmap on channel close */
@@ -279,7 +281,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
         struct gr_gk20a *gr = &g->gr;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (gr->ctx_vars.buffer_size == 0)
                 return 0;
@@ -328,7 +330,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
         struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         tsg = tsg_gk20a_from_ch(c);
         if (!tsg)
@@ -359,8 +361,9 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 {
         struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
+        struct gk20a *g = tsg->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (patch_ctx->mem.gpu_va) {
                 /* server will free on channel close */
@@ -375,8 +378,9 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 {
         struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx;
         struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
+        struct gk20a *g = tsg->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         /* check if hwpm was ever initialized. If not, nothing to do */
         if (pm_ctx->mem.gpu_va == 0)
@@ -394,7 +398,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 {
         struct tsg_gk20a *tsg;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (gr_ctx->mem.gpu_va) {
                 struct tegra_vgpu_cmd_msg msg;
@@ -477,7 +481,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
         struct tsg_gk20a *tsg = NULL;
         int err = 0;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         /* an address space needs to have been bound at this point.*/
         if (!gk20a_channel_as_bound(c)) {
@@ -577,7 +581,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
         /* PM ctxt switch is off by default */
         gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
         return 0;
 out:
         /* 1. gr_ctx, patch_ctx and global ctx buffer mapping
@@ -595,7 +599,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
         u32 sm_per_tpc;
         int err = -ENOMEM;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         gr->max_gpc_count = priv->constants.max_gpc_count;
         gr->gpc_count = priv->constants.gpc_count;
@@ -658,7 +662,7 @@ int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
         struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
         msg.handle = vgpu_get_handle(g);
@@ -677,7 +681,7 @@ int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
         struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
         msg.handle = vgpu_get_handle(g);
@@ -712,7 +716,7 @@ u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 {
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         return priv->constants.num_fbps;
 }
@@ -721,7 +725,7 @@ u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
 {
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         return priv->constants.fbp_en_mask;
 }
@@ -730,7 +734,7 @@ u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
 {
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         return priv->constants.ltc_per_fbp;
 }
@@ -739,7 +743,7 @@ u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
 {
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         return priv->constants.max_lts_per_ltc;
 }
@@ -749,7 +753,7 @@ u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
         u32 i, max_fbps_count = priv->constants.num_fbps;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (g->gr.fbp_rop_l2_en_mask == NULL) {
                 g->gr.fbp_rop_l2_en_mask =
@@ -772,7 +776,7 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
         struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
         msg.handle = vgpu_get_handle(g);
@@ -804,7 +808,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
                         &msg.params.zbc_query_table;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
         msg.handle = vgpu_get_handle(g);
@@ -840,7 +844,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
 static void vgpu_remove_gr_support(struct gr_gk20a *gr)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(gr->g, " ");
 
         gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags);
 
@@ -865,10 +869,10 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
         struct gr_gk20a *gr = &g->gr;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (gr->sw_ready) {
-                gk20a_dbg_fn("skip init");
+                nvgpu_log_fn(g, "skip init");
                 return 0;
         }
 
@@ -907,7 +911,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
         gr->remove_support = vgpu_remove_gr_support;
         gr->sw_ready = true;
 
-        gk20a_dbg_fn("done");
+        nvgpu_log_fn(g, "done");
         return 0;
 
 clean_up:
@@ -918,7 +922,7 @@ clean_up:
 
 int vgpu_init_gr_support(struct gk20a *g)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         return vgpu_gr_init_gr_setup_sw(g);
 }
@@ -928,7 +932,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
         struct fifo_gk20a *f = &g->fifo;
         struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
         if (!ch)
                 return 0;
 
@@ -985,7 +989,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 int vgpu_gr_nonstall_isr(struct gk20a *g,
                 struct tegra_vgpu_gr_nonstall_intr_info *info)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         switch (info->type) {
         case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE:
@@ -1006,7 +1010,7 @@ int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
         struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE;
         msg.handle = vgpu_get_handle(g);
@@ -1026,7 +1030,7 @@ int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
         struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
         msg.handle = vgpu_get_handle(g);
@@ -1053,7 +1057,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
         struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         tsg = tsg_gk20a_from_ch(ch);
         if (!tsg)
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
index 933e8357..1bcd151a 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
@@ -30,7 +30,7 @@ int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g)
 {
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = vgpu_init_gpu_characteristics(g);
         if (err) {
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
index b249b5af..367c1299 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
@@ -33,8 +33,9 @@ int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
         struct tegra_vgpu_tsg_bind_channel_ex_params *p =
                         &msg.params.tsg_bind_channel_ex;
         int err;
+        struct gk20a *g = tsg->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = gk20a_tsg_bind_channel(tsg, ch);
         if (err)
diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
index d451a1f2..f68c8454 100644
--- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
@@ -31,7 +31,7 @@ int vgpu_determine_L2_size_bytes(struct gk20a *g)
 {
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         return priv->constants.l2_size;
 }
@@ -42,7 +42,7 @@ int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
         u32 max_comptag_lines = 0;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         gr->cacheline_size = priv->constants.cacheline_size;
         gr->comptags_per_cacheline = priv->constants.comptags_per_cacheline;
@@ -65,7 +65,7 @@ void vgpu_ltc_init_fs_state(struct gk20a *g)
 {
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         g->ltc_count = priv->constants.ltc_count;
 }
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 3e75cee3..b8eaa1db 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -40,10 +40,10 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g)
 {
         struct mm_gk20a *mm = &g->mm;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (mm->sw_ready) {
-                gk20a_dbg_fn("skip init");
+                nvgpu_log_fn(g, "skip init");
                 return 0;
         }
 
@@ -56,7 +56,7 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g)
         mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
         mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;
 
-        gk20a_dbg_info("channel vm size: user %dMB kernel %dMB",
+        nvgpu_log_info(g, "channel vm size: user %dMB kernel %dMB",
                         (int)(mm->channel.user_size >> 20),
                         (int)(mm->channel.kernel_size >> 20));
 
@@ -69,7 +69,7 @@ int vgpu_init_mm_support(struct gk20a *g)
 {
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = vgpu_init_mm_setup_sw(g);
         if (err)
@@ -95,7 +95,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
         struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
         msg.handle = vgpu_get_handle(g);
@@ -183,8 +183,9 @@ int vgpu_vm_bind_channel(struct vm_gk20a *vm,
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
         int err;
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         ch->vm = vm;
         msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
@@ -220,7 +221,7 @@ static void vgpu_cache_maint(u64 handle, u8 op)
 int vgpu_mm_fb_flush(struct gk20a *g)
 {
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
         return 0;
@@ -229,7 +230,7 @@ int vgpu_mm_fb_flush(struct gk20a *g)
 void vgpu_mm_l2_invalidate(struct gk20a *g)
 {
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
 }
@@ -238,7 +239,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
         u8 op;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (invalidate)
                 op = TEGRA_VGPU_L2_MAINT_FLUSH_INV;
@@ -250,7 +251,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 
 void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 {
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         nvgpu_err(g, "call to RM server not supported");
 }
@@ -261,7 +262,7 @@ void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
         struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
         msg.handle = vgpu_get_handle(g);
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index a6e493d0..7bb8f671 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -35,8 +35,9 @@ int vgpu_tsg_open(struct tsg_gk20a *tsg)
         struct tegra_vgpu_tsg_open_rel_params *p =
                         &msg.params.tsg_open;
         int err;
+        struct gk20a *g = tsg->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_TSG_OPEN;
         msg.handle = vgpu_get_handle(tsg->g);
@@ -57,8 +58,9 @@ void vgpu_tsg_release(struct tsg_gk20a *tsg)
         struct tegra_vgpu_tsg_open_rel_params *p =
                         &msg.params.tsg_release;
         int err;
+        struct gk20a *g = tsg->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_TSG_RELEASE;
         msg.handle = vgpu_get_handle(tsg->g);
@@ -91,8 +93,9 @@ int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
         struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
                         &msg.params.tsg_bind_unbind_channel;
         int err;
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = gk20a_tsg_bind_channel(tsg, ch);
         if (err)
@@ -120,8 +123,9 @@ int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
         struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
                         &msg.params.tsg_bind_unbind_channel;
         int err;
+        struct gk20a *g = ch->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = gk20a_fifo_tsg_unbind_channel(ch);
         if (err)
@@ -143,8 +147,9 @@ int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
         struct tegra_vgpu_tsg_timeslice_params *p =
                         &msg.params.tsg_timeslice;
         int err;
+        struct gk20a *g = tsg->g;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE;
         msg.handle = vgpu_get_handle(tsg->g);
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index 1e77cda9..17e80cd7 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -249,7 +249,7 @@ void vgpu_detect_chip(struct gk20a *g)
         p->gpu_impl = priv->constants.impl;
         p->gpu_rev = priv->constants.rev;
 
-        gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
+        nvgpu_log_info(g, "arch: %x, impl: %x, rev: %x\n",
                         p->gpu_arch,
                         p->gpu_impl,
                         p->gpu_rev);
@@ -259,7 +259,7 @@ int vgpu_init_gpu_characteristics(struct gk20a *g)
 {
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         err = gk20a_init_gpu_characteristics(g);
         if (err)
@@ -279,7 +279,7 @@ int vgpu_read_ptimer(struct gk20a *g, u64 *value)
         struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
         msg.handle = vgpu_get_handle(g);
@@ -304,7 +304,7 @@ int vgpu_get_timestamps_zipper(struct gk20a *g,
         int err;
         u32 i;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
                 nvgpu_err(g, "count %u overflow", count);
@@ -338,7 +338,7 @@ int vgpu_init_hal(struct gk20a *g)
 
         switch (ver) {
         case NVGPU_GPUID_GP10B:
-                gk20a_dbg_info("gp10b detected");
+                nvgpu_log_info(g, "gp10b detected");
                 err = vgpu_gp10b_init_hal(g);
                 break;
         case NVGPU_GPUID_GV11B:
@@ -360,7 +360,7 @@ int vgpu_get_constants(struct gk20a *g)
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
         int err;
 
-        gk20a_dbg_fn("");
+        nvgpu_log_fn(g, " ");
 
         msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS;
         msg.handle = vgpu_get_handle(g);