author     Srirangan <smadhavan@nvidia.com>                      2018-08-20 05:13:41 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-08-23 00:55:49 -0400
commit     3fbaee7099039eee84343027dd1ce20679c0c113 (patch)
tree       0de4934723f58cad9cdcdb642927ffce0cfac6d8
parent     52305f0514d29e7fb2cb5e2154188e09faa3fe94 (diff)
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single statement blocks. Fix errors due to single statement
if blocks without braces, introducing the braces.

JIRA NVGPU-671

Change-Id: I4d9933c51a297a725f48cbb15520a70494d74aeb
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1800833
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
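For illustration only, a minimal standalone sketch of the brace pattern that MISRA Rule 15.6 requires; the function below is hypothetical and is not taken from the nvgpu sources:

/* Hypothetical example of the Rule 15.6 fix pattern: every if/else body is
 * wrapped in braces, even when it contains a single statement. */
#include <stdio.h>

static int check_handle(const void *handle)
{
        /* Before the fix this read: if (handle == NULL) return -1; */
        if (handle == NULL) {
                return -1;
        }

        return 0;
}

int main(void)
{
        /* Prints -1, then 0 */
        printf("%d\n%d\n", check_handle(NULL), check_handle("ok"));
        return 0;
}

The hunks below apply exactly this transformation throughout the common/ sources.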
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/submit.c        106
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc.c              6
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c       38
-rw-r--r--  drivers/gpu/nvgpu/common/pramin.c               8
-rw-r--r--  drivers/gpu/nvgpu/common/ptimer/ptimer.c        5
-rw-r--r--  drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c  6
-rw-r--r--  drivers/gpu/nvgpu/common/rbtree.c              70
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c           45
-rw-r--r--  drivers/gpu/nvgpu/common/therm/therm_gv11b.c    3
-rw-r--r--  drivers/gpu/nvgpu/common/vbios/bios.c          55
10 files changed, 223 insertions(+), 119 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index 7f2f677d..47b086f7 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -69,8 +69,9 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 
         if (g->ops.fifo.resetup_ramfc && new_sync_created) {
                 err = g->ops.fifo.resetup_ramfc(c);
-                if (err)
+                if (err) {
                         goto fail;
+                }
         }
 
         /*
@@ -80,9 +81,10 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
         if (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) {
                 int max_wait_cmds = c->deterministic ? 1 : 0;
 
-                if (!pre_alloc_enabled)
+                if (!pre_alloc_enabled) {
                         job->wait_cmd = nvgpu_kzalloc(g,
                                 sizeof(struct priv_cmd_entry));
+                }
 
                 if (!job->wait_cmd) {
                         err = -ENOMEM;
@@ -99,16 +101,19 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
                                         job->wait_cmd);
                 }
 
-                if (err)
+                if (err) {
                         goto clean_up_wait_cmd;
+                }
 
-                if (job->wait_cmd->valid)
+                if (job->wait_cmd->valid) {
                         *wait_cmd = job->wait_cmd;
+                }
         }
 
         if ((flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) &&
-            (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE))
+            (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE)) {
                 need_sync_fence = true;
+        }
 
         /*
          * Always generate an increment at the end of a GPFIFO submission. This
@@ -120,42 +125,48 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
                 err = -ENOMEM;
                 goto clean_up_wait_cmd;
         }
-        if (!pre_alloc_enabled)
+        if (!pre_alloc_enabled) {
                 job->incr_cmd = nvgpu_kzalloc(g, sizeof(struct priv_cmd_entry));
+        }
 
         if (!job->incr_cmd) {
                 err = -ENOMEM;
                 goto clean_up_post_fence;
         }
 
-        if (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET)
+        if (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) {
                 err = c->sync->incr_user(c->sync, wait_fence_fd, job->incr_cmd,
                                 job->post_fence, need_wfi, need_sync_fence,
                                 register_irq);
-        else
+        } else {
                 err = c->sync->incr(c->sync, job->incr_cmd,
                                 job->post_fence, need_sync_fence,
                                 register_irq);
+        }
         if (!err) {
                 *incr_cmd = job->incr_cmd;
                 *post_fence = job->post_fence;
-        } else
+        } else {
                 goto clean_up_incr_cmd;
+        }
 
         return 0;
 
 clean_up_incr_cmd:
         free_priv_cmdbuf(c, job->incr_cmd);
-        if (!pre_alloc_enabled)
+        if (!pre_alloc_enabled) {
                 job->incr_cmd = NULL;
+        }
 clean_up_post_fence:
         gk20a_fence_put(job->post_fence);
         job->post_fence = NULL;
 clean_up_wait_cmd:
-        if (job->wait_cmd)
+        if (job->wait_cmd) {
                 free_priv_cmdbuf(c, job->wait_cmd);
-        if (!pre_alloc_enabled)
+        }
+        if (!pre_alloc_enabled) {
                 job->wait_cmd = NULL;
+        }
 fail:
         *wait_cmd = NULL;
         return err;
@@ -175,9 +186,10 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
         nvgpu_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
                         &x, sizeof(x));
 
-        if (cmd->mem->aperture == APERTURE_SYSMEM)
+        if (cmd->mem->aperture == APERTURE_SYSMEM) {
                 trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0,
                                 (u32 *)cmd->mem->cpu_va + cmd->off);
+        }
 
         c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U);
 }
@@ -202,20 +214,23 @@ static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,
                 err = g->os_channel.copy_user_gpfifo(
                                 gpfifo_cpu + start, userdata,
                                 0, length0);
-                if (err)
+                if (err) {
                         return err;
+                }
 
                 err = g->os_channel.copy_user_gpfifo(
                                 gpfifo_cpu, userdata,
                                 length0, length1);
-                if (err)
+                if (err) {
                         return err;
+                }
         } else {
                 err = g->os_channel.copy_user_gpfifo(
                                 gpfifo_cpu + start, userdata,
                                 0, len);
-                if (err)
+                if (err) {
                         return err;
+                }
         }
 
         return 0;
@@ -266,14 +281,16 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
                  */
                 err = nvgpu_submit_append_gpfifo_user_direct(c, userdata,
                                 num_entries);
-                if (err)
+                if (err) {
                         return err;
+                }
         } else if (!kern_gpfifo) {
                 /* from userspace to vidmem, use the common path */
                 err = g->os_channel.copy_user_gpfifo(c->gpfifo.pipe, userdata,
                                 0, num_entries);
-                if (err)
+                if (err) {
                         return err;
+                }
 
                 nvgpu_submit_append_gpfifo_common(c, c->gpfifo.pipe,
                                 num_entries);
@@ -314,17 +331,21 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
         bool need_job_tracking;
         bool need_deferred_cleanup = false;
 
-        if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
+        if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
                 return -ENODEV;
+        }
 
-        if (c->has_timedout)
+        if (c->has_timedout) {
                 return -ETIMEDOUT;
+        }
 
-        if (!nvgpu_mem_is_valid(&c->gpfifo.mem))
+        if (!nvgpu_mem_is_valid(&c->gpfifo.mem)) {
                 return -ENOMEM;
+        }
 
-        if (c->usermode_submit_enabled)
+        if (c->usermode_submit_enabled) {
                 return -EINVAL;
+        }
 
         /* fifo not large enough for request. Return error immediately.
          * Kernel can insert gpfifo entries before and after user gpfifos.
@@ -337,8 +358,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
         if ((flags & (NVGPU_SUBMIT_FLAGS_FENCE_WAIT |
                         NVGPU_SUBMIT_FLAGS_FENCE_GET)) &&
-                        !fence)
+                        !fence) {
                 return -EINVAL;
+        }
 
         /* an address space needs to have been bound at this point. */
         if (!gk20a_channel_as_bound(c)) {
@@ -381,8 +403,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
          * job tracking is required, the channel must have
          * pre-allocated resources. Otherwise, we fail the submit here
          */
-        if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c))
+        if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c)) {
                 return -EINVAL;
+        }
 
         need_sync_framework =
                 gk20a_channel_sync_needs_sync_framework(g) ||
@@ -415,8 +438,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
          * For deterministic channels, we don't allow deferred clean_up
          * processing to occur. In cases we hit this, we fail the submit
          */
-        if (c->deterministic && need_deferred_cleanup)
+        if (c->deterministic && need_deferred_cleanup) {
                 return -EINVAL;
+        }
 
         if (!c->deterministic) {
                 /*
@@ -442,8 +466,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
 
         /* Grab access to HW to deal with do_idle */
-        if (c->deterministic)
+        if (c->deterministic) {
                 nvgpu_rwsem_down_read(&g->deterministic_busy);
+        }
 
         if (c->deterministic && c->deterministic_railgate_allowed) {
                 /*
@@ -485,48 +510,56 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
         if (need_job_tracking) {
                 err = channel_gk20a_alloc_job(c, &job);
-                if (err)
+                if (err) {
                         goto clean_up;
+                }
 
                 err = nvgpu_submit_prepare_syncs(c, fence, job,
                                 &wait_cmd, &incr_cmd,
                                 &post_fence,
                                 need_deferred_cleanup,
                                 flags);
-                if (err)
+                if (err) {
                         goto clean_up_job;
+                }
         }
 
         gk20a_fifo_profile_snapshot(profile, PROFILE_JOB_TRACKING);
 
-        if (wait_cmd)
+        if (wait_cmd) {
                 nvgpu_submit_append_priv_cmdbuf(c, wait_cmd);
+        }
 
         err = nvgpu_submit_append_gpfifo(c, gpfifo, userdata,
                         num_entries);
-        if (err)
+        if (err) {
                 goto clean_up_job;
+        }
 
         /*
          * And here's where we add the incr_cmd we generated earlier. It should
          * always run!
          */
-        if (incr_cmd)
+        if (incr_cmd) {
                 nvgpu_submit_append_priv_cmdbuf(c, incr_cmd);
+        }
 
-        if (fence_out)
+        if (fence_out) {
                 *fence_out = gk20a_fence_get(post_fence);
+        }
 
-        if (need_job_tracking)
+        if (need_job_tracking) {
                 /* TODO! Check for errors... */
                 gk20a_channel_add_job(c, job, skip_buffer_refcounting);
+        }
         gk20a_fifo_profile_snapshot(profile, PROFILE_APPEND);
 
         g->ops.fifo.userd_gp_put(g, c);
 
         /* No hw access beyond this point */
-        if (c->deterministic)
+        if (c->deterministic) {
                 nvgpu_rwsem_up_read(&g->deterministic_busy);
+        }
 
         trace_gk20a_channel_submitted_gpfifo(g->name,
                         c->chid,
@@ -548,10 +581,11 @@ clean_up_job:
 clean_up:
         nvgpu_log_fn(g, "fail");
         gk20a_fence_put(post_fence);
-        if (c->deterministic)
+        if (c->deterministic) {
                 nvgpu_rwsem_up_read(&g->deterministic_busy);
-        else if (need_deferred_cleanup)
+        } else if (need_deferred_cleanup) {
                 gk20a_idle(g);
+        }
 
         return err;
 }
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc.c b/drivers/gpu/nvgpu/common/ltc/ltc.c
index 1beb1974..3d85db3f 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc.c
@@ -34,16 +34,18 @@ int nvgpu_init_ltc_support(struct gk20a *g)
         g->mm.ltc_enabled_current = true;
         g->mm.ltc_enabled_target = true;
 
-        if (g->ops.ltc.init_fs_state)
+        if (g->ops.ltc.init_fs_state) {
                 g->ops.ltc.init_fs_state(g);
+        }
 
         return 0;
 }
 
 void nvgpu_ltc_sync_enabled(struct gk20a *g)
 {
-        if (!g->ops.ltc.set_enabled)
+        if (!g->ops.ltc.set_enabled) {
                 return;
+        }
 
         nvgpu_spinlock_acquire(&g->ltc_enabled_lock);
         if (g->mm.ltc_enabled_current != g->mm.ltc_enabled_target) {
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
index 98306079..c5bf40c1 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
@@ -75,9 +75,10 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
         reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
         nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), reg);
 
-        if (g->ops.ltc.intr_en_illegal_compstat)
+        if (g->ops.ltc.intr_en_illegal_compstat) {
                 g->ops.ltc.intr_en_illegal_compstat(g,
                                 g->ltc_intr_en_illegal_compstat);
+        }
 
         /* Enable ECC interrupts */
         ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
@@ -93,14 +94,15 @@ void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable)
 
         /* disble/enble illegal_compstat interrupt */
         val = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-        if (enable)
+        if (enable) {
                 val = set_field(val,
                         ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
                         ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f());
-        else
+        } else {
                 val = set_field(val,
                         ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
                         ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f());
+        }
         gk20a_writel(g, ltc_ltcs_ltss_intr_r(), val);
 }
 
@@ -117,8 +119,9 @@ void gv11b_ltc_isr(struct gk20a *g)
 
         mc_intr = gk20a_readl(g, mc_intr_ltc_r());
         for (ltc = 0; ltc < g->ltc_count; ltc++) {
-                if ((mc_intr & 1U << ltc) == 0)
+                if ((mc_intr & 1U << ltc) == 0) {
                         continue;
+                }
 
                 for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
                         u32 offset = ltc_stride * ltc + lts_stride * slice;
@@ -167,31 +170,40 @@ void gv11b_ltc_isr(struct gk20a *g)
                                 ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
 
                         /* update counters per slice */
-                        if (corrected_overflow)
+                        if (corrected_overflow) {
                                 corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
-                        if (uncorrected_overflow)
+                        }
+                        if (uncorrected_overflow) {
                                 uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
+                        }
 
                         g->ecc.ltc.ecc_sec_count[ltc][slice].counter += corrected_delta;
                         g->ecc.ltc.ecc_ded_count[ltc][slice].counter += uncorrected_delta;
                         nvgpu_log(g, gpu_dbg_intr,
                                 "ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);
 
-                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
+                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) {
                                 nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
-                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
+                        }
+                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) {
                                 nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
-                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
+                        }
+                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) {
                                 nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
-                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
+                        }
+                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) {
                                 nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
-                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
+                        }
+                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) {
                                 nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
-                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
+                        }
+                        if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) {
                                 nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
+                        }
 
-                        if (corrected_overflow || uncorrected_overflow)
+                        if (corrected_overflow || uncorrected_overflow) {
                                 nvgpu_info(g, "ecc counter overflow!");
+                        }
 
                         nvgpu_log(g, gpu_dbg_intr,
                                 "ecc error address: 0x%x", ecc_addr);
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index ba6a92ba..1448fed1 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -53,17 +53,19 @@ static void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
          * driver should be refactored to prevent this from happening, but for
          * now it is ok just to ignore the writes
          */
-        if (!gk20a_io_exists(g) && nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
+        if (!gk20a_io_exists(g) && nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
                 return;
+        }
 
         alloc = mem->vidmem_alloc;
         sgt = &alloc->sgt;
 
         nvgpu_sgt_for_each_sgl(sgl, sgt) {
-                if (offset >= nvgpu_sgt_get_length(sgt, sgl))
+                if (offset >= nvgpu_sgt_get_length(sgt, sgl)) {
                         offset -= nvgpu_sgt_get_length(sgt, sgl);
-                else
+                } else {
                         break;
+                }
         }
 
         while (size) {
diff --git a/drivers/gpu/nvgpu/common/ptimer/ptimer.c b/drivers/gpu/nvgpu/common/ptimer/ptimer.c
index d5f9470d..3f3a5f9b 100644
--- a/drivers/gpu/nvgpu/common/ptimer/ptimer.c
+++ b/drivers/gpu/nvgpu/common/ptimer/ptimer.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,8 +39,9 @@ int nvgpu_get_timestamps_zipper(struct gk20a *g,
 
         for (i = 0; i < count; i++) {
                 err = g->ops.ptimer.read_ptimer(g, &samples[i].gpu_timestamp);
-                if (err)
+                if (err) {
                         return err;
+                }
 
                 samples[i].cpu_timestamp = nvgpu_hr_timestamp();
         }
diff --git a/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c b/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
index 0b3188ee..a9c971df 100644
--- a/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
+++ b/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
@@ -54,9 +54,10 @@ void gk20a_ptimer_isr(struct gk20a *g)
 
         if (fecs_errcode) {
                 nvgpu_err(g, "FECS_ERRCODE 0x%08x", fecs_errcode);
-                if (g->ops.priv_ring.decode_error_code)
+                if (g->ops.priv_ring.decode_error_code) {
                         g->ops.priv_ring.decode_error_code(g,
                                         fecs_errcode);
+                }
         }
 }
 
@@ -66,8 +67,9 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value)
         unsigned int i = 0;
         u32 gpu_timestamp_hi_prev = 0;
 
-        if (!value)
+        if (!value) {
                 return -EINVAL;
+        }
 
         /* Note. The GPU nanosecond timer consists of two 32-bit
          * registers (high & low). To detect a possible low register
diff --git a/drivers/gpu/nvgpu/common/rbtree.c b/drivers/gpu/nvgpu/common/rbtree.c
index 86bab688..a0e97ee9 100644
--- a/drivers/gpu/nvgpu/common/rbtree.c
+++ b/drivers/gpu/nvgpu/common/rbtree.c
@@ -32,16 +32,18 @@ static void rotate_left(struct nvgpu_rbtree_node **root,
 
         /* establish x->right link */
         x->right = y->left;
-        if (y->left)
+        if (y->left) {
                 y->left->parent = x;
+        }
 
         /* establish y->parent link */
         y->parent = x->parent;
         if (x->parent) {
-                if (x == x->parent->left)
+                if (x == x->parent->left) {
                         x->parent->left = y;
-                else
+                } else {
                         x->parent->right = y;
+                }
         } else {
                 *root = y;
         }
@@ -61,16 +63,18 @@ static void rotate_right(struct nvgpu_rbtree_node **root,
 
         /* establish x->left link */
         x->left = y->right;
-        if (y->right)
+        if (y->right) {
                 y->right->parent = x;
+        }
 
         /* establish y->parent link */
         y->parent = x->parent;
         if (x->parent) {
-                if (x == x->parent->right)
+                if (x == x->parent->right) {
                         x->parent->right = y;
-                else
+                } else {
                         x->parent->left = y;
+                }
         } else {
                 *root = y;
         }
@@ -149,12 +153,13 @@ void nvgpu_rbtree_insert(struct nvgpu_rbtree_node *new_node,
 
         while (curr) {
                 parent = curr;
-                if (new_node->key_start < curr->key_start)
+                if (new_node->key_start < curr->key_start) {
                         curr = curr->left;
-                else if (new_node->key_start > curr->key_start)
+                } else if (new_node->key_start > curr->key_start) {
                         curr = curr->right;
-                else
+                } else {
                         return; /* duplicate entry */
+                }
         }
 
         /* the caller allocated the node already, just fix the links */
@@ -165,10 +170,11 @@ void nvgpu_rbtree_insert(struct nvgpu_rbtree_node *new_node,
 
         /* insert node in tree */
         if (parent) {
-                if (new_node->key_start < parent->key_start)
+                if (new_node->key_start < parent->key_start) {
                         parent->left = new_node;
-                else
+                } else {
                         parent->right = new_node;
+                }
         } else {
                 *root = new_node;
         }
@@ -203,8 +209,9 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 
                         if (!w || ((!w->left || !w->left->is_red)
                                 && (!w->right || !w->right->is_red))) {
-                                if (w)
+                                if (w) {
                                         w->is_red = true;
+                                }
                                 x = parent_of_x;
                         } else {
                                 if (!w->right || !w->right->is_red) {
@@ -231,8 +238,9 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 
                         if (!w || ((!w->right || !w->right->is_red)
                                 && (!w->left || !w->left->is_red))) {
-                                if (w)
+                                if (w) {
                                         w->is_red = true;
+                                }
                                 x = parent_of_x;
                         } else {
                                 if (!w->left || !w->left->is_red) {
@@ -251,8 +259,9 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
                 parent_of_x = x->parent;
         }
 
-        if (x)
+        if (x) {
                 x->is_red = false;
+        }
 }
 
 void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
@@ -279,21 +288,24 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
         }
 
         /* x is y's only child */
-        if (y->left)
+        if (y->left) {
                 x = y->left;
-        else
+        } else {
                 x = y->right;
+        }
 
         /* remove y from the parent chain */
         parent_of_x = y->parent;
-        if (x)
+        if (x) {
                 x->parent = parent_of_x;
+        }
 
         if (y->parent) {
-                if (y == y->parent->left)
+                if (y == y->parent->left) {
                         y->parent->left = x;
-                else
+                } else {
                         y->parent->right = x;
+                }
         } else {
                 *root = x;
         }
@@ -305,10 +317,11 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
                  */
                 y->parent = z->parent;
                 if (z->parent) {
-                        if (z == z->parent->left)
+                        if (z == z->parent->left) {
                                 z->parent->left = y;
-                        else
+                        } else {
                                 z->parent->right = y;
+                        }
                 } else {
                         *root = y;
                 }
@@ -316,19 +329,23 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
                 y->is_red = z->is_red;
 
                 y->left = z->left;
-                if (z->left)
+                if (z->left) {
                         z->left->parent = y;
+                }
 
                 y->right = z->right;
-                if (z->right)
+                if (z->right) {
                         z->right->parent = y;
+                }
 
-                if (parent_of_x == z)
+                if (parent_of_x == z) {
                         parent_of_x = y;
+                }
         }
 
-        if (y_was_black)
+        if (y_was_black) {
                 _delete_fixup(root, parent_of_x, x);
+        }
 }
 
 void nvgpu_rbtree_search(u64 key_start, struct nvgpu_rbtree_node **node,
@@ -427,8 +444,9 @@ void nvgpu_rbtree_enum_next(struct nvgpu_rbtree_node **node,
                 } else {
                         /* go up until we find the right inorder node */
                         for (curr = curr->parent; curr; curr = curr->parent) {
-                                if (curr->key_start > (*node)->key_start)
+                                if (curr->key_start > (*node)->key_start) {
                                         break;
+                                }
                         }
                 }
         }
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 65aeb9eb..25bd3be3 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -66,8 +66,9 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
         ret = nvgpu_dma_alloc_sys(gk20a,
                         PAGE_SIZE * SEMAPHORE_POOL_COUNT,
                         &sea->sea_mem);
-        if (ret)
+        if (ret) {
                 goto out;
+        }
 
         sea->size = SEMAPHORE_POOL_COUNT;
         sea->map_size = SEMAPHORE_POOL_COUNT * PAGE_SIZE;
@@ -88,8 +89,9 @@ out:
 
 void nvgpu_semaphore_sea_destroy(struct gk20a *g)
 {
-        if (!g->sema_sea)
+        if (!g->sema_sea) {
                 return;
+        }
 
         nvgpu_dma_free(g, &g->sema_sea->sea_mem);
         nvgpu_mutex_destroy(&g->sema_sea->sea_lock);
@@ -103,22 +105,26 @@ void nvgpu_semaphore_sea_destroy(struct gk20a *g)
  */
 struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
 {
-        if (g->sema_sea)
+        if (g->sema_sea) {
                 return g->sema_sea;
+        }
 
         g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea));
-        if (!g->sema_sea)
+        if (!g->sema_sea) {
                 return NULL;
+        }
 
         g->sema_sea->size = 0;
         g->sema_sea->page_count = 0;
         g->sema_sea->gk20a = g;
         nvgpu_init_list_node(&g->sema_sea->pool_list);
-        if (nvgpu_mutex_init(&g->sema_sea->sea_lock))
+        if (nvgpu_mutex_init(&g->sema_sea->sea_lock)) {
                 goto cleanup_free;
+        }
 
-        if (__nvgpu_semaphore_sea_grow(g->sema_sea))
+        if (__nvgpu_semaphore_sea_grow(g->sema_sea)) {
                 goto cleanup_destroy;
+        }
 
         gpu_sema_dbg(g, "Created semaphore sea!");
         return g->sema_sea;
@@ -136,8 +142,9 @@ static int __semaphore_bitmap_alloc(unsigned long *bitmap, unsigned long len)
 {
         unsigned long idx = find_first_zero_bit(bitmap, len);
 
-        if (idx == len)
+        if (idx == len) {
                 return -ENOSPC;
+        }
 
         set_bit(idx, bitmap);
 
@@ -155,19 +162,22 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
         int ret;
 
         p = nvgpu_kzalloc(sea->gk20a, sizeof(*p));
-        if (!p)
+        if (!p) {
                 return -ENOMEM;
+        }
 
         __lock_sema_sea(sea);
 
         ret = nvgpu_mutex_init(&p->pool_lock);
-        if (ret)
+        if (ret) {
                 goto fail;
+        }
 
         ret = __semaphore_bitmap_alloc(sea->pools_alloced,
                         SEMAPHORE_POOL_COUNT);
-        if (ret < 0)
+        if (ret < 0) {
                 goto fail_alloc;
+        }
 
         page_idx = (unsigned long)ret;
 
@@ -205,8 +215,9 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
         int err = 0;
         u64 addr;
 
-        if (p->mapped)
+        if (p->mapped) {
                 return -EBUSY;
+        }
 
         gpu_sema_dbg(pool_to_gk20a(p),
                         "Mapping semaphore pool! (idx=%d)", p->page_idx);
@@ -242,8 +253,9 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
         err = nvgpu_mem_create_from_mem(vm->mm->g,
                         &p->rw_mem, &p->sema_sea->sea_mem,
                         p->page_idx, 1);
-        if (err)
+        if (err) {
                 goto fail_unmap;
+        }
 
         addr = nvgpu_gmmu_map(vm, &p->rw_mem, SZ_4K, 0,
                         gk20a_mem_flag_none, 0,
@@ -342,8 +354,9 @@ void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p)
  */
 u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global)
 {
-        if (!global)
+        if (!global) {
                 return p->gpu_va;
+        }
 
         return p->gpu_va_ro + (PAGE_SIZE * p->page_idx);
 }
@@ -427,13 +440,15 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 
         if (!ch->hw_sema) {
                 ret = __nvgpu_init_hw_sema(ch);
-                if (ret)
+                if (ret) {
                         return NULL;
+                }
         }
 
         s = nvgpu_kzalloc(ch->g, sizeof(*s));
-        if (!s)
+        if (!s) {
                 return NULL;
+        }
 
         nvgpu_ref_init(&s->ref);
         s->g = ch->g;
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gv11b.c b/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
index 77edd7e1..419dd75e 100644
--- a/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
+++ b/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
@@ -143,8 +143,9 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g)
         u32 active_engine_id = 0;
         struct fifo_gk20a *f = &g->fifo;
 
-        if (nvgpu_platform_is_simulation(g))
+        if (nvgpu_platform_is_simulation(g)) {
                 return 0;
+        }
 
         nvgpu_log_info(g, "init clock/power gate reg");
 
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
index 12c0eded..0760a6cd 100644
--- a/drivers/gpu/nvgpu/common/vbios/bios.c
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -352,10 +352,11 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
                 }
         }
 
-        if (!found)
+        if (!found) {
                 return -EINVAL;
-        else
+        } else {
                 return 0;
+        }
 }
 
 static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
@@ -393,8 +394,9 @@ u32 nvgpu_bios_get_nvlink_config_data(struct gk20a *g)
 {
         struct nvlink_config_data_hdr_v1 config;
 
-        if (g->bios.nvlink_config_data_offset == 0)
+        if (g->bios.nvlink_config_data_offset == 0) {
                 return -EINVAL;
+        }
 
         memcpy(&config, &g->bios.data[g->bios.nvlink_config_data_offset],
                 sizeof(config));
@@ -458,8 +460,9 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
                 interface.script_phys_base,
                 interface.script_size);
 
-        if (interface.version != 1)
+        if (interface.version != 1) {
                 return;
+        }
         g->bios.devinit_tables_phys_base = interface.tables_phys_base;
         g->bios.devinit_script_phys_base = interface.script_phys_base;
 }
@@ -475,8 +478,9 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
                 hdr.version, hdr.header_size,
                 hdr.entry_size, hdr.entry_count);
 
-        if (hdr.version != 1)
+        if (hdr.version != 1) {
                 return 0;
+        }
 
         offset += sizeof(hdr);
         for (i = 0; i < hdr.entry_count; i++) {
@@ -487,8 +491,9 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
                 nvgpu_log_fn(g, "appInfo id %d dmem_offset %d",
                         entry.id, entry.dmem_offset);
 
-                if (entry.id == APPINFO_ID_DEVINIT)
+                if (entry.id == APPINFO_ID_DEVINIT) {
                         nvgpu_bios_parse_devinit_appinfo(g, entry.dmem_offset);
+                }
 
                 offset += hdr.entry_size;
         }
@@ -583,8 +588,9 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
                 hdr.entry_size, hdr.entry_count,
                 hdr.desc_version, hdr.desc_size);
 
-        if (hdr.version != 1)
+        if (hdr.version != 1) {
                 return -EINVAL;
+        }
 
         offset += hdr.header_size;
 
@@ -603,30 +609,34 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
 
                         err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                         &g->bios.devinit, entry.desc_ptr);
-                        if (err)
+                        if (err) {
                                 err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                                 &g->bios.devinit,
                                                 entry.desc_ptr +
                                                 g->bios.expansion_rom_offset);
+                        }
 
-                        if (err)
+                        if (err) {
                                 nvgpu_err(g,
                                         "could not parse devinit ucode desc");
+                        }
                 } else if (entry.target_id == TARGET_ID_PMU &&
                                 entry.application_id == APPLICATION_ID_PRE_OS) {
                         int err;
 
                         err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                         &g->bios.preos, entry.desc_ptr);
-                        if (err)
+                        if (err) {
                                 err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                                 &g->bios.preos,
                                                 entry.desc_ptr +
                                                 g->bios.expansion_rom_offset);
+                        }
 
-                        if (err)
+                        if (err) {
                                 nvgpu_err(g,
                                         "could not parse preos ucode desc");
+                        }
                 }
 
                 offset += hdr.entry_size;
@@ -645,13 +655,15 @@ static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
                 falcon_data.falcon_ucode_table_ptr);
         err = nvgpu_bios_parse_falcon_ucode_table(g,
                         falcon_data.falcon_ucode_table_ptr);
-        if (err)
+        if (err) {
                 err = nvgpu_bios_parse_falcon_ucode_table(g,
                                 falcon_data.falcon_ucode_table_ptr +
                                 g->bios.expansion_rom_offset);
+        }
 
-        if (err)
+        if (err) {
                 nvgpu_err(g, "could not parse falcon ucode table");
+        }
 }
 
 void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
@@ -674,8 +686,9 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
                                 (table_id * PERF_PTRS_WIDTH)]);
                         data_size = PERF_PTRS_WIDTH;
                 }
-        } else
+        } else {
                 return (void *)perf_table_ptr;
+        }
 
         if (table_id < (ptoken->data_size/data_size)) {
 
@@ -686,18 +699,21 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
 
                 if (perf_table_id_offset != 0) {
                         /* check is perf_table_id_offset is > 64k */
-                        if (perf_table_id_offset & ~0xFFFF)
+                        if (perf_table_id_offset & ~0xFFFF) {
                                 perf_table_ptr =
                                         &g->bios.data[g->bios.expansion_rom_offset +
                                                 perf_table_id_offset];
-                        else
+                        } else {
                                 perf_table_ptr =
                                         &g->bios.data[perf_table_id_offset];
-                } else
+                        }
+                } else {
                         nvgpu_warn(g, "PERF TABLE ID %d is NULL",
                                 table_id);
-        } else
+                }
+        } else {
                 nvgpu_warn(g, "INVALID PERF TABLE ID - %d ", table_id);
+        }
 
         return (void *)perf_table_ptr;
 }
@@ -731,9 +747,10 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
                 nvgpu_bios_parse_nvinit_ptrs(g, bit_token.data_ptr);
                 break;
         case TOKEN_ID_FALCON_DATA:
-                if (bit_token.data_version == 2)
+                if (bit_token.data_version == 2) {
                         nvgpu_bios_parse_falcon_data_v2(g,
                                         bit_token.data_ptr);
+                }
                 break;
         case TOKEN_ID_PERF_PTRS:
                 g->bios.perf_token =