path: root/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
author    Terje Bergstrom <tbergstrom@nvidia.com>  2018-04-18 22:39:46 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-05-09 21:26:04 -0400
commit    dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree      806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
parent    7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions

Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are
intentionally left in place because of use from other repositories.
Because the new functions do not work without a pointer to struct
gk20a, and piping one through just for logging is excessive, some log
messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
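The conversion is mechanical. A minimal before/after sketch of the
pattern, using only names that appear in this diff (function bodies
elided; not an exact excerpt of the file):

    /* Before: gk20a_dbg* macros carried no device context. */
    void vgpu_channel_enable(struct channel_gk20a *ch)
    {
    	gk20a_dbg_fn("");
    	/* ... */
    }

    /* After: nvgpu_log* requires a struct gk20a pointer, so functions
     * that only receive a channel derive it once from ch->g. */
    void vgpu_channel_enable(struct channel_gk20a *ch)
    {
    	struct gk20a *g = ch->g;

    	nvgpu_log_fn(g, " ");
    	/* ... */
    }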
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/fifo_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c  70
1 file changed, 38 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 3ea326b8..eb25cf3a 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -45,8 +45,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_config_params *p =
 		&msg.params.channel_config;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_info("bind channel %d", ch->chid);
+	nvgpu_log_info(g, "bind channel %d", ch->chid);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -60,8 +61,9 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
 
 void vgpu_channel_unbind(struct channel_gk20a *ch)
 {
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
 		struct tegra_vgpu_cmd_msg msg;
@@ -84,7 +86,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
 	msg.handle = vgpu_get_handle(g);
@@ -97,7 +99,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	}
 
 	ch->virt_ctx = p->handle;
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 }
 
@@ -107,7 +109,7 @@ void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
 	msg.handle = vgpu_get_handle(g);
@@ -122,8 +124,9 @@ void vgpu_channel_enable(struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_config_params *p =
 		&msg.params.channel_config;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -138,8 +141,9 @@ void vgpu_channel_disable(struct channel_gk20a *ch)
 	struct tegra_vgpu_channel_config_params *p =
 		&msg.params.channel_config;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -155,8 +159,9 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ramfc_params *p = &msg.params.ramfc;
 	int err;
+	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -175,8 +180,9 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g);
 	struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info;
 	u32 i;
+	struct gk20a *g = f->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (engines->num_engines > TEGRA_VGPU_MAX_ENGINES) {
 		nvgpu_err(f->g, "num_engines %d larger than max %d",
@@ -207,7 +213,7 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
 		f->active_engines_list[i] = engines->info[i].engine_id;
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -219,7 +225,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 	u32 i;
 	u64 runlist_size;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	f->max_runlists = g->ops.fifo.eng_runlist_base_size();
 	f->runlist_info = nvgpu_kzalloc(g,
@@ -256,12 +262,12 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up_runlist:
 	gk20a_fifo_delete_runlist(f);
-	gk20a_dbg_fn("fail");
+	nvgpu_log_fn(g, "fail");
 	return -ENOMEM;
 }
 
@@ -272,10 +278,10 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	unsigned int chid;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (f->sw_ready) {
-		gk20a_dbg_fn("skip init");
+		nvgpu_log_fn(g, "skip init");
 		return 0;
 	}
 
@@ -306,7 +312,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 		f->userd.gpu_va = 0;
 	}
 
-	gk20a_dbg(gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
+	nvgpu_log(g, gpu_dbg_map_v, "userd bar1 va = 0x%llx", f->userd.gpu_va);
 
 	f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
 	f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
@@ -350,11 +356,11 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	f->sw_ready = true;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up:
-	gk20a_dbg_fn("fail");
+	nvgpu_log_fn(g, "fail");
 	/* FIXME: unmap from bar1 */
 	nvgpu_dma_free(g, &f->userd);
 
@@ -374,7 +380,7 @@ clean_up:
 
 int vgpu_init_fifo_setup_hw(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* test write, read through bar1 @ userd region before
 	 * turning on the snooping */
@@ -385,7 +391,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
 		u32 bar1_vaddr = f->userd.gpu_va;
 		volatile u32 *cpu_vaddr = f->userd.cpu_va;
 
-		gk20a_dbg_info("test bar1 @ vaddr 0x%x",
+		nvgpu_log_info(g, "test bar1 @ vaddr 0x%x",
 			   bar1_vaddr);
 
 		v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -415,7 +421,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
 		gk20a_bar1_writel(g, bar1_vaddr, v);
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -424,7 +430,7 @@ int vgpu_init_fifo_support(struct gk20a *g)
 {
 	u32 err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = vgpu_init_fifo_setup_sw(g);
 	if (err)
@@ -444,7 +450,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
 		&msg.params.channel_config;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!nvgpu_atomic_read(&ch->bound))
 		return 0;
@@ -470,7 +476,7 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 		&msg.params.tsg_preempt;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
 	msg.handle = vgpu_get_handle(g);
@@ -533,7 +539,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	u16 *runlist_entry = NULL;
 	u32 count = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	runlist = &f->runlist_info[runlist_id];
 
@@ -558,7 +564,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 		runlist_entry = runlist->mem[0].cpu_va;
 		for_each_set_bit(cid,
 			runlist->active_channels, f->num_channels) {
-			gk20a_dbg_info("add channel %d to runlist", cid);
+			nvgpu_log_info(g, "add channel %d to runlist", cid);
 			runlist_entry[0] = cid;
 			runlist_entry++;
 			count++;
@@ -581,7 +587,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 	struct fifo_gk20a *f = &g->fifo;
 	u32 ret = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	runlist = &f->runlist_info[runlist_id];
 
@@ -596,7 +602,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 
 int vgpu_fifo_wait_engine_idle(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return 0;
 }
@@ -611,7 +617,7 @@ int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 		&msg.params.tsg_interleave;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
 	msg.handle = vgpu_get_handle(g);
@@ -633,7 +639,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 		&msg.params.channel_config;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
@@ -716,7 +722,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	if (!ch)
 		return 0;
 
@@ -750,7 +756,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 int vgpu_fifo_nonstall_isr(struct gk20a *g,
 		struct tegra_vgpu_fifo_nonstall_intr_info *info)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	switch (info->type) {
 	case TEGRA_VGPU_FIFO_NONSTALL_INTR_CHANNEL: