author		Srirangan <smadhavan@nvidia.com>	2018-08-30 01:07:55 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-05 07:35:54 -0400
commit		0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (patch)
tree		469b4746ebedb5843c631c547f102f72f5850ffa /drivers
parent		97aa9f705a84186ef0f7f31487988cfd5a8a94e8 (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces, including single-statement blocks. Fix the violations reported for single-statement if blocks by introducing the braces.

JIRA NVGPU-671

Change-Id: Icdeede22dd26fd70fae92aa791d35b115ef49e32
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797691
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
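For readers without the standard at hand: MISRA C:2012 Rule 15.6 requires the body of every if, else and loop statement to be a compound statement (enclosed in braces), even when it contains a single statement; the rationale is that a later edit adding a second statement under an unbraced if would silently fall outside the conditional. Below is a minimal, self-contained sketch of the non-compliant and compliant forms this patch converts between. probe_example(), init_example() and the -19 error value are illustrative only, not taken from the nvgpu sources.

#include <stdio.h>

static int probe_example(int present)
{
	/* Stand-in for a per-chip init hook such as the *_init_hal() calls. */
	return present ? 0 : -1;
}

static int init_example(int present)
{
	/*
	 * Non-compliant form fixed by this patch:
	 *     if (probe_example(present))
	 *             return -19;
	 * Compliant form: the single-statement body gets braces.
	 */
	if (probe_example(present) != 0) {
		return -19;
	}
	return 0;
}

int main(void)
{
	printf("%d %d\n", init_example(1), init_example(0));
	return 0;
}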
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/hal.c		 18
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	 74
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	103
-rw-r--r--	drivers/gpu/nvgpu/gk20a/regops_gk20a.c	 24
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.c	 25
5 files changed, 157 insertions(+), 87 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c
index 1787f573..f2f55d43 100644
--- a/drivers/gpu/nvgpu/gk20a/hal.c
+++ b/drivers/gpu/nvgpu/gk20a/hal.c
@@ -42,30 +42,36 @@ int gpu_init_hal(struct gk20a *g)
 	case GK20A_GPUID_GM20B:
 	case GK20A_GPUID_GM20B_B:
 		nvgpu_log_info(g, "gm20b detected");
-		if (gm20b_init_hal(g))
+		if (gm20b_init_hal(g)) {
 			return -ENODEV;
+		}
 		break;
 	case NVGPU_GPUID_GP10B:
-		if (gp10b_init_hal(g))
+		if (gp10b_init_hal(g)) {
 			return -ENODEV;
+		}
 		break;
 	case NVGPU_GPUID_GP104:
 	case NVGPU_GPUID_GP106:
-		if (gp106_init_hal(g))
+		if (gp106_init_hal(g)) {
 			return -ENODEV;
+		}
 		break;
 	case NVGPU_GPUID_GV11B:
-		if (gv11b_init_hal(g))
+		if (gv11b_init_hal(g)) {
 			return -ENODEV;
+		}
 		break;
 	case NVGPU_GPUID_GV100:
-		if (gv100_init_hal(g))
+		if (gv100_init_hal(g)) {
 			return -ENODEV;
+		}
 		break;
 #if defined(CONFIG_TEGRA_GPU_NEXT)
 	case NVGPU_GPUID_NEXT:
-		if (NVGPU_NEXT_INIT_HAL(g))
+		if (NVGPU_NEXT_INIT_HAL(g)) {
 			return -ENODEV;
+		}
 		break;
 #endif
 
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index b5626035..9fcaebff 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -93,23 +93,27 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	g->ops.fb.set_mmu_page_size(g);
-	if (g->ops.fb.set_use_full_comp_tag_line)
+	if (g->ops.fb.set_use_full_comp_tag_line) {
 		mm->use_full_comp_tag_line =
 			g->ops.fb.set_use_full_comp_tag_line(g);
+	}
 
 	g->ops.fb.init_hw(g);
 
-	if (g->ops.bus.bar1_bind)
+	if (g->ops.bus.bar1_bind) {
 		g->ops.bus.bar1_bind(g, &mm->bar1.inst_block);
+	}
 
 	if (g->ops.bus.bar2_bind) {
 		err = g->ops.bus.bar2_bind(g, &mm->bar2.inst_block);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
-	if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
+	if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) {
 		return -EBUSY;
+	}
 
 	nvgpu_log_fn(g, "done");
 	return 0;
@@ -211,8 +215,9 @@ static void __update_pte(struct vm_gk20a *vm,
 
 	pte_w[0] = pte_valid | addr;
 
-	if (attrs->priv)
+	if (attrs->priv) {
 		pte_w[0] |= gmmu_pte_privilege_true_f();
+	}
 
 	pte_w[1] = __nvgpu_aperture_mask(g, attrs->aperture,
 		gmmu_pte_aperture_sys_mem_ncoh_f(),
@@ -222,9 +227,10 @@ static void __update_pte(struct vm_gk20a *vm,
 		gmmu_pte_comptagline_f((u32)(attrs->ctag >> ctag_shift));
 
 	if (attrs->ctag && vm->mm->use_full_comp_tag_line &&
-	    phys_addr & 0x10000)
+	    phys_addr & 0x10000) {
 		pte_w[1] |= gmmu_pte_comptagline_f(
 			1 << (gmmu_pte_comptagline_s() - 1));
+	}
 
 	if (attrs->rw_flag == gk20a_mem_flag_read_only) {
 		pte_w[0] |= gmmu_pte_read_only_true_f();
@@ -233,11 +239,13 @@ static void __update_pte(struct vm_gk20a *vm,
 		pte_w[1] |= gmmu_pte_read_disable_true_f();
 	}
 
-	if (!attrs->cacheable)
+	if (!attrs->cacheable) {
 		pte_w[1] |= gmmu_pte_vol_true_f();
+	}
 
-	if (attrs->ctag)
+	if (attrs->ctag) {
 		attrs->ctag += page_size;
+	}
 }
 
 static void update_gmmu_pte_locked(struct vm_gk20a *vm,
@@ -254,10 +262,11 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 	u32 pte_w[2] = {0, 0};
 	int ctag_shift = ilog2(g->ops.fb.compression_page_size(g));
 
-	if (phys_addr)
+	if (phys_addr) {
 		__update_pte(vm, pte_w, phys_addr, attrs);
-	else if (attrs->sparse)
+	} else if (attrs->sparse) {
 		__update_pte_sparse(pte_w);
+	}
 
 	pte_dbg(g, attrs,
 		"PTE: i=%-4u size=%-2u offs=%-4u | "
@@ -338,8 +347,9 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
 	nvgpu_vm_get(vm);
 	ch->vm = vm;
 	err = channel_gk20a_commit_va(ch);
-	if (err)
+	if (err) {
 		ch->vm = NULL;
+	}
 
 	nvgpu_log(gk20a_from_vm(vm), gpu_dbg_map, "Binding ch=%d -> VM:%s",
 		ch->chid, vm->name);
@@ -384,8 +394,9 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
 	nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
 		ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1)));
 
-	if (big_page_size && g->ops.mm.set_big_page_size)
+	if (big_page_size && g->ops.mm.set_big_page_size) {
 		g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
+	}
 }
 
 int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
@@ -422,8 +433,9 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 
 	retries = 100;
 
-	if (g->ops.mm.get_flush_retries)
+	if (g->ops.mm.get_flush_retries) {
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB);
+	}
 
 	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
@@ -447,13 +459,15 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 			flush_fb_flush_pending_busy_v()) {
 			nvgpu_log_info(g, "fb_flush 0x%x", data);
 			nvgpu_udelay(5);
-		} else
+		} else {
 			break;
+		}
 	} while (!nvgpu_timeout_expired(&timeout));
 
 	if (nvgpu_timeout_peek_expired(&timeout)) {
-		if (g->ops.fb.dump_vpr_wpr_info)
+		if (g->ops.fb.dump_vpr_wpr_info) {
 			g->ops.fb.dump_vpr_wpr_info(g);
+		}
 		ret = -EBUSY;
 	}
 
@@ -474,8 +488,9 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 
 	trace_gk20a_mm_l2_invalidate(g->name);
 
-	if (g->ops.mm.get_flush_retries)
+	if (g->ops.mm.get_flush_retries) {
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV);
+	}
 
 	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
@@ -494,12 +509,14 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 			nvgpu_log_info(g, "l2_system_invalidate 0x%x",
 				data);
 			nvgpu_udelay(5);
-		} else
+		} else {
 			break;
+		}
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		nvgpu_warn(g, "l2_system_invalidate too many retries");
+	}
 
 	trace_gk20a_mm_l2_invalidate_done(g->name);
 }
@@ -526,11 +543,13 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 	nvgpu_log_fn(g, " ");
 
 	gk20a_busy_noresume(g);
-	if (!g->power_on)
+	if (!g->power_on) {
 		goto hw_was_off;
+	}
 
-	if (g->ops.mm.get_flush_retries)
+	if (g->ops.mm.get_flush_retries) {
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH);
+	}
 
 	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
@@ -552,15 +571,17 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 			flush_l2_flush_dirty_pending_busy_v()) {
 			nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
 			nvgpu_udelay(5);
-		} else
+		} else {
 			break;
+		}
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"l2_flush_dirty too many retries"));
 
 	trace_gk20a_mm_l2_flush_done(g->name);
 
-	if (invalidate)
+	if (invalidate) {
 		gk20a_mm_l2_invalidate_locked(g);
+	}
 
 	nvgpu_mutex_release(&mm->l2_op_lock);
 
@@ -578,11 +599,13 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	gk20a_busy_noresume(g);
-	if (!g->power_on)
+	if (!g->power_on) {
 		goto hw_was_off;
+	}
 
-	if (g->ops.mm.get_flush_retries)
+	if (g->ops.mm.get_flush_retries) {
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
+	}
 
 	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
@@ -601,8 +624,9 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 			flush_l2_clean_comptags_pending_busy_v()) {
 			nvgpu_log_info(g, "l2_clean_comptags 0x%x", data);
 			nvgpu_udelay(5);
-		} else
+		} else {
 			break;
+		}
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"l2_clean_comptags too many retries"));
 
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 64e4a567..86cb04d9 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -51,11 +51,12 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 	u32 i = 0, j = strlen(strings);
 
 	for (; i < j; i++) {
-		if (strings[i] == '%')
+		if (strings[i] == '%') {
 			if (strings[i + 1] == 'x' || strings[i + 1] == 'X') {
 				*hex_pos = i;
 				return true;
 			}
+		}
 	}
 	*hex_pos = -1;
 	return false;
@@ -72,8 +73,9 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
 
 	/* allocate system memory to copy pmu trace buffer */
 	tracebuffer = nvgpu_kzalloc(g, GK20A_PMU_TRACE_BUFSIZE);
-	if (tracebuffer == NULL)
+	if (tracebuffer == NULL) {
 		return;
+	}
 
 	/* read pmu traces into system memory buffer */
 	nvgpu_mem_rd_n(g, &pmu->trace_buf, 0, tracebuffer,
@@ -85,17 +87,20 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
 	nvgpu_err(g, "dump PMU trace buffer");
 	for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
 		for (j = 0; j < 0x40; j++) {
-			if (trace1[(i / 4) + j])
+			if (trace1[(i / 4) + j]) {
 				break;
+			}
 		}
-		if (j == 0x40)
+		if (j == 0x40) {
 			break;
+		}
 		count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]);
 		l = 0;
 		m = 0;
 		while (nvgpu_find_hex_in_string((trace+i+20+m), g, &k)) {
-			if (k >= 40)
+			if (k >= 40) {
 				break;
+			}
 			strncpy(part_str, (trace+i+20+m), k);
 			part_str[k] = '\0';
 			count += scnprintf((buf + count), 0x40, "%s0x%x",
@@ -277,8 +282,9 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	struct pmu_mutex *mutex;
 	u32 data, owner, max_retry;
 
-	if (!pmu->initialized)
+	if (!pmu->initialized) {
 		return -EINVAL;
+	}
 
 	BUG_ON(!token);
 	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -346,8 +352,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	struct pmu_mutex *mutex;
 	u32 owner, data;
 
-	if (!pmu->initialized)
+	if (!pmu->initialized) {
 		return -EINVAL;
+	}
 
 	BUG_ON(!token);
 	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -364,8 +371,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 		return -EINVAL;
 	}
 
-	if (--mutex->ref_cnt > 0)
+	if (--mutex->ref_cnt > 0) {
 		return -EBUSY;
+	}
 
 	gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
 		pwr_pmu_mutex_value_initial_lock_f());
@@ -386,32 +394,36 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 {
 	u32 queue_head_size = 0;
 
-	if (g->ops.pmu.pmu_get_queue_head_size)
+	if (g->ops.pmu.pmu_get_queue_head_size) {
 		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
+	}
 
 	BUG_ON(!head || !queue_head_size);
 
 	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
 
-		if (queue->index >= queue_head_size)
+		if (queue->index >= queue_head_size) {
 			return -EINVAL;
+		}
 
-		if (!set)
+		if (!set) {
 			*head = pwr_pmu_queue_head_address_v(
 				gk20a_readl(g,
 				g->ops.pmu.pmu_get_queue_head(queue->index)));
-		else
+		} else {
 			gk20a_writel(g,
 				g->ops.pmu.pmu_get_queue_head(queue->index),
 				pwr_pmu_queue_head_address_f(*head));
+		}
 	} else {
-		if (!set)
+		if (!set) {
 			*head = pwr_pmu_msgq_head_val_v(
 				gk20a_readl(g, pwr_pmu_msgq_head_r()));
-		else
+		} else {
 			gk20a_writel(g,
 				pwr_pmu_msgq_head_r(),
 				pwr_pmu_msgq_head_val_f(*head));
+		}
 	}
 
 	return 0;
@@ -422,33 +434,36 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 {
 	u32 queue_tail_size = 0;
 
-	if (g->ops.pmu.pmu_get_queue_tail_size)
+	if (g->ops.pmu.pmu_get_queue_tail_size) {
 		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+	}
 
 	BUG_ON(!tail || !queue_tail_size);
 
 	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
 
-		if (queue->index >= queue_tail_size)
+		if (queue->index >= queue_tail_size) {
 			return -EINVAL;
+		}
 
-		if (!set)
-			*tail = pwr_pmu_queue_tail_address_v(
-				gk20a_readl(g,
-				g->ops.pmu.pmu_get_queue_tail(queue->index)));
-		else
+		if (!set) {
+			*tail = pwr_pmu_queue_tail_address_v(gk20a_readl(g,
+				g->ops.pmu.pmu_get_queue_tail(queue->index)));
+		} else {
 			gk20a_writel(g,
 				g->ops.pmu.pmu_get_queue_tail(queue->index),
 				pwr_pmu_queue_tail_address_f(*tail));
+		}
 
 	} else {
-		if (!set)
+		if (!set) {
 			*tail = pwr_pmu_msgq_tail_val_v(
 				gk20a_readl(g, pwr_pmu_msgq_tail_r()));
-		else
+		} else {
 			gk20a_writel(g,
 				pwr_pmu_msgq_tail_r(),
 				pwr_pmu_msgq_tail_val_f(*tail));
+		}
 	}
 
 	return 0;
@@ -459,18 +474,20 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	u32 queue_tail_size = 0;
 
-	if (g->ops.pmu.pmu_get_queue_tail_size)
+	if (g->ops.pmu.pmu_get_queue_tail_size) {
 		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+	}
 
 	BUG_ON(!tail || !queue_tail_size);
 
-	if (!set)
+	if (!set) {
 		*tail = pwr_pmu_msgq_tail_val_v(
 			gk20a_readl(g, pwr_pmu_msgq_tail_r()));
-	else
+	} else {
 		gk20a_writel(g,
 			pwr_pmu_msgq_tail_r(),
 			pwr_pmu_msgq_tail_val_f(*tail));
+	}
 }
 
 int gk20a_init_pmu_setup_hw1(struct gk20a *g)
@@ -519,18 +536,20 @@ bool gk20a_pmu_is_engine_in_reset(struct gk20a *g)
 
 	pmc_enable = gk20a_readl(g, mc_enable_r());
 	if (mc_enable_pwr_v(pmc_enable) ==
-			mc_enable_pwr_disabled_v())
+			mc_enable_pwr_disabled_v()) {
 		status = true;
+	}
 
 	return status;
 }
 
 int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
 {
-	if (do_reset)
+	if (do_reset) {
 		g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
-	else
+	} else {
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
+	}
 
 	return 0;
 }
@@ -547,8 +566,9 @@ u32 gk20a_pmu_pg_engines_list(struct gk20a *g)
 
 u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		return NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING;
+	}
 
 	return 0;
 }
@@ -567,8 +587,9 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 	struct pmu_cmd cmd;
 	u32 seq;
 
-	if (!pmu->pmu_ready || !entries || !pmu->zbc_ready)
+	if (!pmu->pmu_ready || !entries || !pmu->zbc_ready) {
 		return;
+	}
 
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -583,8 +604,9 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 			pmu_handle_zbc_msg, pmu, &seq, ~0);
 	pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
 			&pmu->zbc_save_done, 1);
-	if (!pmu->zbc_save_done)
+	if (!pmu->zbc_save_done) {
 		nvgpu_err(g, "ZBC save timeout");
+	}
 }
 
 int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
@@ -596,11 +618,12 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 
 	switch (msg->msg_type) {
 	case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION:
-		if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1))
+		if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) {
 			nvgpu_clk_arb_send_thermal_alarm(pmu->g);
-		else
+		} else {
 			gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d",
 				msg->hw_slct_msg.mask);
+		}
 		break;
 	default:
 		gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type);
@@ -687,8 +710,9 @@ bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
 		pwr_falcon_irqstat_exterr_true_f() |
 		pwr_falcon_irqstat_swgen0_true_f();
 
-	if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint)
+	if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint) {
 		return true;
+	}
 
 	return false;
 }
@@ -727,9 +751,11 @@ void gk20a_pmu_isr(struct gk20a *g)
 		nvgpu_pmu_dump_falcon_stats(pmu);
 		if (gk20a_readl(g, pwr_pmu_mailbox_r
 			(PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
-			PMU_MODE_MISMATCH_STATUS_VAL)
-			if (g->ops.pmu.dump_secure_fuses)
+			PMU_MODE_MISMATCH_STATUS_VAL) {
+			if (g->ops.pmu.dump_secure_fuses) {
 				g->ops.pmu.dump_secure_fuses(g);
+			}
+		}
 	}
 	if (intr & pwr_falcon_irqstat_exterr_true_f()) {
 		nvgpu_err(g,
@@ -741,8 +767,9 @@ void gk20a_pmu_isr(struct gk20a *g)
 			~pwr_falcon_exterrstat_valid_m());
 	}
 
-	if (g->ops.pmu.handle_ext_irq)
+	if (g->ops.pmu.handle_ext_irq) {
 		g->ops.pmu.handle_ext_irq(g, intr);
+	}
 
 	if (intr & pwr_falcon_irqstat_swgen0_true_f()) {
 		nvgpu_pmu_process_message(pmu);
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 26ba944a..80d27c25 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -36,11 +36,12 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
 {
 	u32 key = *(u32 *)pkey;
 	struct regop_offset_range *prange = (struct regop_offset_range *)pelem;
-	if (key < prange->base)
+	if (key < prange->base) {
 		return -1;
-	else if (prange->base <= key && key < (prange->base +
-			(prange->count * 4U)))
+	} else if (prange->base <= key && key < (prange->base +
+			(prange->count * 4U))) {
 		return 0;
+	}
 	return 1;
 }
 
@@ -48,8 +49,9 @@ static inline bool linear_search(u32 offset, const u32 *list, int size)
 {
 	int i;
 	for (i = 0; i < size; i++) {
-		if (list[i] == offset)
+		if (list[i] == offset) {
 			return true;
+		}
 	}
 	return false;
 }
@@ -111,8 +113,9 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 	 * regops implementation, so we return -ENOSYS. This will allow
	 * compute apps to run with vgpu. Tools will not work in this
	 * configuration and are not required to work at this time. */
-	if (g->is_virtual)
+	if (g->is_virtual) {
 		return -ENOSYS;
+	}
 
 	ok = validate_reg_ops(dbg_s,
 			&ctx_rd_count, &ctx_wr_count,
@@ -134,8 +137,9 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 
 	for (i = 0; i < num_ops; i++) {
 		/* if it isn't global then it is done in the ctx ops... */
-		if (ops[i].type != REGOP(TYPE_GLOBAL))
+		if (ops[i].type != REGOP(TYPE_GLOBAL)) {
 			continue;
+		}
 
 		switch (ops[i].op) {
 
@@ -358,8 +362,9 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
 	}
 
 	valid = check_whitelists(dbg_s, op, offset);
-	if ((op->op == REGOP(READ_64) || op->op == REGOP(WRITE_64)) && valid)
+	if ((op->op == REGOP(READ_64) || op->op == REGOP(WRITE_64)) && valid) {
 		valid = check_whitelists(dbg_s, op, offset + 4);
+	}
 
 	if (valid && (op->type != REGOP(TYPE_GLOBAL))) {
 		err = gr_gk20a_get_ctx_buffer_offsets(dbg_s->g,
@@ -416,10 +421,11 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
 		}
 
 		if (reg_op_is_gr_ctx(ops[i].type)) {
-			if (reg_op_is_read(ops[i].op))
+			if (reg_op_is_read(ops[i].op)) {
 				(*ctx_rd_count)++;
-			else
+			} else {
 				(*ctx_wr_count)++;
+			}
 		}
 
 		/* if "allow_all" flag enabled, dont validate offset */
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 506d4330..6dc2e282 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -50,16 +50,18 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
 		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
 
-		if (is_next || is_ctx_reload)
+		if (is_next || is_ctx_reload) {
 			g->ops.fifo.enable_channel(ch);
+		}
 	}
 
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
 		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
 
-		if (is_next || is_ctx_reload)
+		if (is_next || is_ctx_reload) {
 			continue;
+		}
 
 		g->ops.fifo.enable_channel(ch);
 	}
@@ -92,8 +94,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 
 	for (i = 0; i < f->max_runlists; ++i) {
 		runlist = &f->runlist_info[i];
-		if (test_bit(ch->chid, runlist->active_channels))
+		if (test_bit(ch->chid, runlist->active_channels)) {
 			return true;
+		}
 	}
 
 	return false;
@@ -124,9 +127,9 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 	ch->tsgid = tsg->tsgid;
 
 	/* all the channel part of TSG should need to be same runlist_id */
-	if (tsg->runlist_id == FIFO_INVAL_TSG_ID)
+	if (tsg->runlist_id == FIFO_INVAL_TSG_ID) {
 		tsg->runlist_id = ch->runlist_id;
-	else if (tsg->runlist_id != ch->runlist_id) {
+	} else if (tsg->runlist_id != ch->runlist_id) {
 		nvgpu_err(tsg->g,
 			"Error: TSG channel should be share same runlist ch[%d] tsg[%d]",
 			ch->runlist_id, tsg->runlist_id);
@@ -180,8 +183,9 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 	struct tsg_gk20a *tsg = NULL;
 	int err;
 
-	if (tsgid >= g->fifo.num_channels)
+	if (tsgid >= g->fifo.num_channels) {
 		return -EINVAL;
+	}
 
 	tsg = &g->fifo.tsg[tsgid];
 
@@ -214,8 +218,9 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
 	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
 		ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
 				0, level);
-		if (!ret)
+		if (!ret) {
 			tsg->interleave_level = level;
+		}
 		break;
 	default:
 		ret = -EINVAL;
@@ -238,8 +243,9 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg)
 {
 	struct gk20a *g = tsg->g;
 
-	if (!tsg->timeslice_us)
+	if (!tsg->timeslice_us) {
 		return g->ops.fifo.default_timeslice_us(g);
+	}
 
 	return tsg->timeslice_us;
 }
@@ -306,8 +312,9 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 	tsg->tgid = pid;
 	tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
 
-	if (g->ops.fifo.init_eng_method_buffers)
+	if (g->ops.fifo.init_eng_method_buffers) {
 		g->ops.fifo.init_eng_method_buffers(g, tsg);
+	}
 
 	if (g->ops.fifo.tsg_open) {
 		err = g->ops.fifo.tsg_open(tsg);