author	Srirangan <smadhavan@nvidia.com>	2018-08-31 03:50:52 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-05 07:51:32 -0400
commit	43851d41b187c92f5ea9c2f503a882277f661d7e (patch)
tree	964a76c136c8c0dc14ec95358d27f930532b7dcb /drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
parent	0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single statement blocks. Fix errors due to single statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Iedac7d50aa2ebd409434eea5fda902b16d9c6fea
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797695
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
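For reference, MISRA C:2012 Rule 15.6 requires the body of every if/else (and loop) statement to be a compound statement, i.e. wrapped in braces, even when it holds a single statement. The sketch below is illustrative only and is not taken from channel_sync_gk20a.c; it simply shows the before/after shape of the change this patch applies throughout the file.

/* Illustrative sketch only -- not code from this driver. */
#include <stdio.h>

static int propagate_error(int err)
{
	/*
	 * Non-compliant with MISRA C:2012 Rule 15.6:
	 *
	 *	if (err)
	 *		return err;
	 *
	 * The single-statement body has no braces, so a later edit can
	 * silently land outside the conditional.
	 */

	/* Compliant form used by this patch: braces even around one statement. */
	if (err) {
		return err;
	}

	return 0;
}

int main(void)
{
	printf("propagate_error(0)   = %d\n", propagate_error(0));
	printf("propagate_error(-22) = %d\n", propagate_error(-22));
	return 0;
}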
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c	38
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 09668d49..f6134460 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -358,24 +358,26 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 	 * If the op is not an acquire (so therefor a release) we should
 	 * incr the underlying sema next_value.
 	 */
-	if (!acquire)
+	if (!acquire) {
 		nvgpu_semaphore_prepare(s, c->hw_sema);
+	}
 
 	g->ops.fifo.add_sema_cmd(g, s, va, cmd, off, acquire, wfi);
 
-	if (acquire)
+	if (acquire) {
 		gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3d"
 			"va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
 			ch, nvgpu_semaphore_get_value(s),
 			s->location.pool->page_idx, va, cmd->gva,
 			cmd->mem->gpu_va, ob);
-	else
+	} else {
 		gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3d"
 			"va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
 			ch, nvgpu_semaphore_get_value(s),
 			nvgpu_semaphore_read(s),
 			s->location.pool->page_idx,
 			va, cmd->gva, cmd->mem->gpu_va, ob);
+	}
 }
 
 void gk20a_channel_gen_sema_wait_cmd(struct channel_gk20a *c,
@@ -418,8 +420,9 @@ static int gk20a_channel_semaphore_wait_fd(
 	int err;
 
 	err = nvgpu_os_fence_fdget(&os_fence, c, fd);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = os_fence.ops->program_waits(&os_fence,
 		entry, c, max_wait_cmds);
@@ -465,8 +468,9 @@ static int __gk20a_channel_semaphore_incr(
 		err = nvgpu_os_fence_sema_create(&os_fence, c,
 			semaphore);
 
-		if (err)
+		if (err) {
 			goto clean_up_sema;
+		}
 	}
 
 	err = gk20a_fence_from_semaphore(fence,
@@ -475,8 +479,9 @@ static int __gk20a_channel_semaphore_incr(
 			os_fence);
 
 	if (err) {
-		if (nvgpu_os_fence_is_initialized(&os_fence))
+		if (nvgpu_os_fence_is_initialized(&os_fence)) {
 			os_fence.ops->drop_ref(&os_fence);
+		}
 		goto clean_up_sema;
 	}
 
@@ -535,13 +540,15 @@ static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
 	struct channel_gk20a *c = sp->c;
 	bool updated;
 
-	if (!c->hw_sema)
+	if (!c->hw_sema) {
 		return;
+	}
 
 	updated = nvgpu_semaphore_reset(c->hw_sema);
 
-	if (updated)
+	if (updated) {
 		nvgpu_cond_broadcast_interruptible(&c->semaphore_wq);
+	}
 }
 
 static void gk20a_channel_semaphore_set_safe_state(struct gk20a_channel_sync *s)
@@ -568,8 +575,9 @@ static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
 	struct gk20a *g = c->g;
 
 	if (c->has_os_fence_framework_support &&
-		g->os_channel.os_fence_framework_inst_exists(c))
+		g->os_channel.os_fence_framework_inst_exists(c)) {
 		g->os_channel.destroy_os_fence_framework(c);
+	}
 
 	/* The sema pool is cleaned up by the VM destroy. */
 	sema->pool = NULL;
@@ -586,19 +594,22 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
 	int asid = -1;
 	int err;
 
-	if (WARN_ON(!c->vm))
+	if (WARN_ON(!c->vm)) {
 		return NULL;
+	}
 
 	sema = nvgpu_kzalloc(c->g, sizeof(*sema));
-	if (!sema)
+	if (!sema) {
 		return NULL;
+	}
 	sema->c = c;
 
 	sprintf(pool_name, "semaphore_pool-%d", c->chid);
 	sema->pool = c->vm->sema_pool;
 
-	if (c->vm->as_share)
+	if (c->vm->as_share) {
 		asid = c->vm->as_share->id;
+	}
 
 	if (c->has_os_fence_framework_support) {
 		/*Init the sync_timeline for this channel */
@@ -628,8 +639,9 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
 void gk20a_channel_sync_destroy(struct gk20a_channel_sync *sync,
 	bool set_safe_state)
 {
-	if (set_safe_state)
+	if (set_safe_state) {
 		sync->set_safe_state(sync);
+	}
 	sync->destroy(sync);
 }
 