author     Srirangan <smadhavan@nvidia.com>                      2018-08-20 05:13:41 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-08-23 00:55:49 -0400
commit     3fbaee7099039eee84343027dd1ce20679c0c113 (patch)
tree       0de4934723f58cad9cdcdb642927ffce0cfac6d8 /drivers/gpu/nvgpu/common/fifo
parent     52305f0514d29e7fb2cb5e2154188e09faa3fe94 (diff)
gpu: nvgpu: common: Fix MISRA 15.6 violations

MISRA Rule 15.6 requires that the body of every if-else block be
enclosed in braces, including single-statement blocks. Fix the
violations caused by single-statement if blocks without braces by
introducing the braces.

JIRA NVGPU-671

Change-Id: I4d9933c51a297a725f48cbb15520a70494d74aeb
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1800833
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
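For reference, the brace pattern applied throughout this patch looks like the
following minimal sketch. It is illustrative only and not taken from submit.c;
the do_work() helper and the fail label are hypothetical names:

#include <stdio.h>

/* Hypothetical helper, for illustration of the Rule 15.6 fix pattern. */
static int do_work(int err)
{
	/*
	 * Non-compliant with MISRA C:2012 Rule 15.6 (pre-patch style):
	 *
	 *     if (err)
	 *             goto fail;
	 *
	 * Compliant (post-patch style): the body is a compound statement
	 * even though it contains a single statement.
	 */
	if (err) {
		goto fail;
	}

	return 0;

fail:
	return err;
}

int main(void)
{
	printf("%d\n", do_work(0));	/* prints 0 */
	printf("%d\n", do_work(-1));	/* prints -1 */
	return 0;
}

The braces add no generated code; the rule exists to prevent a later statement
from being silently left outside the intended body when the block is edited.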
Diffstat (limited to 'drivers/gpu/nvgpu/common/fifo')
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/submit.c | 106
1 file changed, 70 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index 7f2f677d..47b086f7 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -69,8 +69,9 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 
 	if (g->ops.fifo.resetup_ramfc && new_sync_created) {
 		err = g->ops.fifo.resetup_ramfc(c);
-		if (err)
+		if (err) {
 			goto fail;
+		}
 	}
 
 	/*
@@ -80,9 +81,10 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) {
 		int max_wait_cmds = c->deterministic ? 1 : 0;
 
-		if (!pre_alloc_enabled)
+		if (!pre_alloc_enabled) {
 			job->wait_cmd = nvgpu_kzalloc(g,
 				sizeof(struct priv_cmd_entry));
+		}
 
 		if (!job->wait_cmd) {
 			err = -ENOMEM;
@@ -99,16 +101,19 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 					job->wait_cmd);
 		}
 
-		if (err)
+		if (err) {
 			goto clean_up_wait_cmd;
+		}
 
-		if (job->wait_cmd->valid)
+		if (job->wait_cmd->valid) {
 			*wait_cmd = job->wait_cmd;
+		}
 	}
 
 	if ((flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) &&
-	    (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE))
+	    (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE)) {
 		need_sync_fence = true;
+	}
 
 	/*
 	 * Always generate an increment at the end of a GPFIFO submission. This
@@ -120,42 +125,48 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 		err = -ENOMEM;
 		goto clean_up_wait_cmd;
 	}
-	if (!pre_alloc_enabled)
+	if (!pre_alloc_enabled) {
 		job->incr_cmd = nvgpu_kzalloc(g, sizeof(struct priv_cmd_entry));
+	}
 
 	if (!job->incr_cmd) {
 		err = -ENOMEM;
 		goto clean_up_post_fence;
 	}
 
-	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET)
+	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) {
 		err = c->sync->incr_user(c->sync, wait_fence_fd, job->incr_cmd,
 				job->post_fence, need_wfi, need_sync_fence,
 				register_irq);
-	else
+	} else {
 		err = c->sync->incr(c->sync, job->incr_cmd,
 				job->post_fence, need_sync_fence,
 				register_irq);
+	}
 	if (!err) {
 		*incr_cmd = job->incr_cmd;
 		*post_fence = job->post_fence;
-	} else
+	} else {
 		goto clean_up_incr_cmd;
+	}
 
 	return 0;
 
 clean_up_incr_cmd:
 	free_priv_cmdbuf(c, job->incr_cmd);
-	if (!pre_alloc_enabled)
+	if (!pre_alloc_enabled) {
 		job->incr_cmd = NULL;
+	}
 clean_up_post_fence:
 	gk20a_fence_put(job->post_fence);
 	job->post_fence = NULL;
 clean_up_wait_cmd:
-	if (job->wait_cmd)
+	if (job->wait_cmd) {
 		free_priv_cmdbuf(c, job->wait_cmd);
-	if (!pre_alloc_enabled)
+	}
+	if (!pre_alloc_enabled) {
 		job->wait_cmd = NULL;
+	}
 fail:
 	*wait_cmd = NULL;
 	return err;
@@ -175,9 +186,10 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
 	nvgpu_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
 			&x, sizeof(x));
 
-	if (cmd->mem->aperture == APERTURE_SYSMEM)
+	if (cmd->mem->aperture == APERTURE_SYSMEM) {
 		trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0,
 				(u32 *)cmd->mem->cpu_va + cmd->off);
+	}
 
 	c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U);
 }
@@ -202,20 +214,23 @@ static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,
 		err = g->os_channel.copy_user_gpfifo(
 				gpfifo_cpu + start, userdata,
 				0, length0);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 		err = g->os_channel.copy_user_gpfifo(
 				gpfifo_cpu, userdata,
 				length0, length1);
-		if (err)
+		if (err) {
 			return err;
+		}
 	} else {
 		err = g->os_channel.copy_user_gpfifo(
 				gpfifo_cpu + start, userdata,
 				0, len);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
 	return 0;
@@ -266,14 +281,16 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
 		 */
 		err = nvgpu_submit_append_gpfifo_user_direct(c, userdata,
 				num_entries);
-		if (err)
+		if (err) {
 			return err;
+		}
 	} else if (!kern_gpfifo) {
 		/* from userspace to vidmem, use the common path */
 		err = g->os_channel.copy_user_gpfifo(c->gpfifo.pipe, userdata,
 				0, num_entries);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 		nvgpu_submit_append_gpfifo_common(c, c->gpfifo.pipe,
 				num_entries);
@@ -314,17 +331,21 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	bool need_job_tracking;
 	bool need_deferred_cleanup = false;
 
-	if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
+	if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
 		return -ENODEV;
+	}
 
-	if (c->has_timedout)
+	if (c->has_timedout) {
 		return -ETIMEDOUT;
+	}
 
-	if (!nvgpu_mem_is_valid(&c->gpfifo.mem))
+	if (!nvgpu_mem_is_valid(&c->gpfifo.mem)) {
 		return -ENOMEM;
+	}
 
-	if (c->usermode_submit_enabled)
+	if (c->usermode_submit_enabled) {
 		return -EINVAL;
+	}
 
 	/* fifo not large enough for request. Return error immediately.
 	 * Kernel can insert gpfifo entries before and after user gpfifos.
@@ -337,8 +358,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	if ((flags & (NVGPU_SUBMIT_FLAGS_FENCE_WAIT |
 			NVGPU_SUBMIT_FLAGS_FENCE_GET)) &&
-	    !fence)
+	    !fence) {
 		return -EINVAL;
+	}
 
 	/* an address space needs to have been bound at this point. */
 	if (!gk20a_channel_as_bound(c)) {
@@ -381,8 +403,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * job tracking is required, the channel must have
 	 * pre-allocated resources. Otherwise, we fail the submit here
 	 */
-	if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c))
+	if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c)) {
 		return -EINVAL;
+	}
 
 	need_sync_framework =
 		gk20a_channel_sync_needs_sync_framework(g) ||
@@ -415,8 +438,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * For deterministic channels, we don't allow deferred clean_up
 	 * processing to occur. In cases we hit this, we fail the submit
 	 */
-	if (c->deterministic && need_deferred_cleanup)
+	if (c->deterministic && need_deferred_cleanup) {
 		return -EINVAL;
+	}
 
 	if (!c->deterministic) {
 		/*
@@ -442,8 +466,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
 
 	/* Grab access to HW to deal with do_idle */
-	if (c->deterministic)
+	if (c->deterministic) {
 		nvgpu_rwsem_down_read(&g->deterministic_busy);
+	}
 
 	if (c->deterministic && c->deterministic_railgate_allowed) {
 		/*
@@ -485,48 +510,56 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	if (need_job_tracking) {
 		err = channel_gk20a_alloc_job(c, &job);
-		if (err)
+		if (err) {
 			goto clean_up;
+		}
 
 		err = nvgpu_submit_prepare_syncs(c, fence, job,
 						 &wait_cmd, &incr_cmd,
 						 &post_fence,
 						 need_deferred_cleanup,
 						 flags);
-		if (err)
+		if (err) {
 			goto clean_up_job;
+		}
 	}
 
 	gk20a_fifo_profile_snapshot(profile, PROFILE_JOB_TRACKING);
 
-	if (wait_cmd)
+	if (wait_cmd) {
 		nvgpu_submit_append_priv_cmdbuf(c, wait_cmd);
+	}
 
 	err = nvgpu_submit_append_gpfifo(c, gpfifo, userdata,
 			num_entries);
-	if (err)
+	if (err) {
 		goto clean_up_job;
+	}
 
 	/*
 	 * And here's where we add the incr_cmd we generated earlier. It should
 	 * always run!
 	 */
-	if (incr_cmd)
+	if (incr_cmd) {
 		nvgpu_submit_append_priv_cmdbuf(c, incr_cmd);
+	}
 
-	if (fence_out)
+	if (fence_out) {
 		*fence_out = gk20a_fence_get(post_fence);
+	}
 
-	if (need_job_tracking)
+	if (need_job_tracking) {
 		/* TODO! Check for errors... */
 		gk20a_channel_add_job(c, job, skip_buffer_refcounting);
+	}
 	gk20a_fifo_profile_snapshot(profile, PROFILE_APPEND);
 
 	g->ops.fifo.userd_gp_put(g, c);
 
 	/* No hw access beyond this point */
-	if (c->deterministic)
+	if (c->deterministic) {
 		nvgpu_rwsem_up_read(&g->deterministic_busy);
+	}
 
 	trace_gk20a_channel_submitted_gpfifo(g->name,
 			c->chid,
@@ -548,10 +581,11 @@ clean_up_job:
 clean_up:
 	nvgpu_log_fn(g, "fail");
 	gk20a_fence_put(post_fence);
-	if (c->deterministic)
+	if (c->deterministic) {
 		nvgpu_rwsem_up_read(&g->deterministic_busy);
-	else if (need_deferred_cleanup)
+	} else if (need_deferred_cleanup) {
 		gk20a_idle(g);
+	}
 
 	return err;
 }