From 3fbaee7099039eee84343027dd1ce20679c0c113 Mon Sep 17 00:00:00 2001
From: Srirangan
Date: Mon, 20 Aug 2018 14:43:41 +0530
Subject: gpu: nvgpu: common: Fix MISRA 15.6 violations

MISRA Rule 15.6 requires that all if-else blocks be enclosed in
braces, including single-statement blocks. Fix errors due to
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I4d9933c51a297a725f48cbb15520a70494d74aeb
Signed-off-by: Srirangan
Reviewed-on: https://git-master.nvidia.com/r/1800833
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/fifo/submit.c         | 106 ++++++++++++++++---------
 drivers/gpu/nvgpu/common/ltc/ltc.c             |   6 +-
 drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c       |  38 ++++++---
 drivers/gpu/nvgpu/common/pramin.c              |   8 +-
 drivers/gpu/nvgpu/common/ptimer/ptimer.c       |   5 +-
 drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c |   6 +-
 drivers/gpu/nvgpu/common/rbtree.c              |  70 ++++++++++------
 drivers/gpu/nvgpu/common/semaphore.c           |  45 +++++++----
 drivers/gpu/nvgpu/common/therm/therm_gv11b.c   |   3 +-
 drivers/gpu/nvgpu/common/vbios/bios.c          |  55 ++++++++-----
 10 files changed, 223 insertions(+), 119 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index 7f2f677d..47b086f7 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -69,8 +69,9 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 
 	if (g->ops.fifo.resetup_ramfc && new_sync_created) {
 		err = g->ops.fifo.resetup_ramfc(c);
-		if (err)
+		if (err) {
 			goto fail;
+		}
 	}
 
 	/*
@@ -80,9 +81,10 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) {
 		int max_wait_cmds = c->deterministic ? 1 : 0;
 
-		if (!pre_alloc_enabled)
+		if (!pre_alloc_enabled) {
 			job->wait_cmd = nvgpu_kzalloc(g,
 				sizeof(struct priv_cmd_entry));
+		}
 
 		if (!job->wait_cmd) {
 			err = -ENOMEM;
@@ -99,16 +101,19 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 					job->wait_cmd);
 		}
 
-		if (err)
+		if (err) {
 			goto clean_up_wait_cmd;
+		}
 
-		if (job->wait_cmd->valid)
+		if (job->wait_cmd->valid) {
 			*wait_cmd = job->wait_cmd;
+		}
 	}
 
 	if ((flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) &&
-	    (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE))
+	    (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE)) {
 		need_sync_fence = true;
+	}
 
 	/*
	 * Always generate an increment at the end of a GPFIFO submission. This
@@ -120,42 +125,48 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 		err = -ENOMEM;
 		goto clean_up_wait_cmd;
 	}
-	if (!pre_alloc_enabled)
+	if (!pre_alloc_enabled) {
 		job->incr_cmd = nvgpu_kzalloc(g, sizeof(struct priv_cmd_entry));
+	}
 
 	if (!job->incr_cmd) {
 		err = -ENOMEM;
 		goto clean_up_post_fence;
 	}
 
-	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET)
+	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) {
 		err = c->sync->incr_user(c->sync, wait_fence_fd, job->incr_cmd,
 				job->post_fence, need_wfi, need_sync_fence,
 				register_irq);
-	else
+	} else {
 		err = c->sync->incr(c->sync, job->incr_cmd,
 				job->post_fence, need_sync_fence,
 				register_irq);
+	}
 	if (!err) {
 		*incr_cmd = job->incr_cmd;
 		*post_fence = job->post_fence;
-	} else
+	} else {
 		goto clean_up_incr_cmd;
+	}
 
 	return 0;
 
 clean_up_incr_cmd:
 	free_priv_cmdbuf(c, job->incr_cmd);
-	if (!pre_alloc_enabled)
+	if (!pre_alloc_enabled) {
 		job->incr_cmd = NULL;
+	}
 clean_up_post_fence:
 	gk20a_fence_put(job->post_fence);
 	job->post_fence = NULL;
 clean_up_wait_cmd:
-	if (job->wait_cmd)
+	if (job->wait_cmd) {
 		free_priv_cmdbuf(c, job->wait_cmd);
-	if (!pre_alloc_enabled)
+	}
+	if (!pre_alloc_enabled) {
 		job->wait_cmd = NULL;
+	}
 fail:
 	*wait_cmd = NULL;
 	return err;
@@ -175,9 +186,10 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
 	nvgpu_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
 			&x, sizeof(x));
 
-	if (cmd->mem->aperture == APERTURE_SYSMEM)
+	if (cmd->mem->aperture == APERTURE_SYSMEM) {
 		trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0,
 				(u32 *)cmd->mem->cpu_va + cmd->off);
+	}
 
 	c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U);
 }
@@ -202,20 +214,23 @@ static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,
 		err = g->os_channel.copy_user_gpfifo(
 				gpfifo_cpu + start, userdata,
 				0, length0);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 		err = g->os_channel.copy_user_gpfifo(
 				gpfifo_cpu, userdata,
 				length0, length1);
-		if (err)
+		if (err) {
 			return err;
+		}
 	} else {
 		err = g->os_channel.copy_user_gpfifo(
 				gpfifo_cpu + start, userdata,
 				0, len);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
 	return 0;
@@ -266,14 +281,16 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
 		 */
 		err = nvgpu_submit_append_gpfifo_user_direct(c, userdata,
 				num_entries);
-		if (err)
+		if (err) {
 			return err;
+		}
 	} else if (!kern_gpfifo) {
 		/* from userspace to vidmem, use the common path */
 		err = g->os_channel.copy_user_gpfifo(c->gpfifo.pipe, userdata,
 				0, num_entries);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 		nvgpu_submit_append_gpfifo_common(c, c->gpfifo.pipe,
 				num_entries);
@@ -314,17 +331,21 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	bool need_job_tracking;
 	bool need_deferred_cleanup = false;
 
-	if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
+	if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
 		return -ENODEV;
+	}
 
-	if (c->has_timedout)
+	if (c->has_timedout) {
 		return -ETIMEDOUT;
+	}
 
-	if (!nvgpu_mem_is_valid(&c->gpfifo.mem))
+	if (!nvgpu_mem_is_valid(&c->gpfifo.mem)) {
 		return -ENOMEM;
+	}
 
-	if (c->usermode_submit_enabled)
+	if (c->usermode_submit_enabled) {
 		return -EINVAL;
+	}
 
 	/* fifo not large enough for request. Return error immediately.
 	 * Kernel can insert gpfifo entries before and after user gpfifos.
@@ -337,8 +358,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	if ((flags & (NVGPU_SUBMIT_FLAGS_FENCE_WAIT |
 		      NVGPU_SUBMIT_FLAGS_FENCE_GET)) &&
-	    !fence)
+	    !fence) {
 		return -EINVAL;
+	}
 
 	/* an address space needs to have been bound at this point.
 	 */
 	if (!gk20a_channel_as_bound(c)) {
@@ -381,8 +403,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * job tracking is required, the channel must have
 	 * pre-allocated resources. Otherwise, we fail the submit here
 	 */
-	if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c))
+	if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c)) {
 		return -EINVAL;
+	}
 
 	need_sync_framework =
 		gk20a_channel_sync_needs_sync_framework(g) ||
@@ -415,8 +438,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * For deterministic channels, we don't allow deferred clean_up
 	 * processing to occur. In cases we hit this, we fail the submit
 	 */
-	if (c->deterministic && need_deferred_cleanup)
+	if (c->deterministic && need_deferred_cleanup) {
 		return -EINVAL;
+	}
 
 	if (!c->deterministic) {
 		/*
@@ -442,8 +466,9 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	/* Grab access to HW to deal with do_idle */
-	if (c->deterministic)
+	if (c->deterministic) {
 		nvgpu_rwsem_down_read(&g->deterministic_busy);
+	}
 
 	if (c->deterministic && c->deterministic_railgate_allowed) {
 		/*
@@ -485,48 +510,56 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	if (need_job_tracking) {
 		err = channel_gk20a_alloc_job(c, &job);
-		if (err)
+		if (err) {
 			goto clean_up;
+		}
 
 		err = nvgpu_submit_prepare_syncs(c, fence, job,
 						 &wait_cmd, &incr_cmd,
 						 &post_fence,
 						 need_deferred_cleanup,
 						 flags);
-		if (err)
+		if (err) {
 			goto clean_up_job;
+		}
 	}
 
 	gk20a_fifo_profile_snapshot(profile, PROFILE_JOB_TRACKING);
 
-	if (wait_cmd)
+	if (wait_cmd) {
 		nvgpu_submit_append_priv_cmdbuf(c, wait_cmd);
+	}
 
 	err = nvgpu_submit_append_gpfifo(c, gpfifo, userdata,
 			num_entries);
-	if (err)
+	if (err) {
 		goto clean_up_job;
+	}
 
 	/*
 	 * And here's where we add the incr_cmd we generated earlier. It should
 	 * always run!
 	 */
-	if (incr_cmd)
+	if (incr_cmd) {
 		nvgpu_submit_append_priv_cmdbuf(c, incr_cmd);
+	}
 
-	if (fence_out)
+	if (fence_out) {
 		*fence_out = gk20a_fence_get(post_fence);
+	}
 
-	if (need_job_tracking)
+	if (need_job_tracking) {
 		/* TODO! Check for errors...
 		 */
 		gk20a_channel_add_job(c, job, skip_buffer_refcounting);
+	}
 
 	gk20a_fifo_profile_snapshot(profile, PROFILE_APPEND);
 
 	g->ops.fifo.userd_gp_put(g, c);
 
 	/* No hw access beyond this point */
-	if (c->deterministic)
+	if (c->deterministic) {
 		nvgpu_rwsem_up_read(&g->deterministic_busy);
+	}
 
 	trace_gk20a_channel_submitted_gpfifo(g->name,
 					     c->chid,
@@ -548,10 +581,11 @@ clean_up_job:
 clean_up:
 	nvgpu_log_fn(g, "fail");
 	gk20a_fence_put(post_fence);
-	if (c->deterministic)
+	if (c->deterministic) {
 		nvgpu_rwsem_up_read(&g->deterministic_busy);
-	else if (need_deferred_cleanup)
+	} else if (need_deferred_cleanup) {
 		gk20a_idle(g);
+	}
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc.c b/drivers/gpu/nvgpu/common/ltc/ltc.c
index 1beb1974..3d85db3f 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc.c
@@ -34,16 +34,18 @@ int nvgpu_init_ltc_support(struct gk20a *g)
 	g->mm.ltc_enabled_current = true;
 	g->mm.ltc_enabled_target = true;
 
-	if (g->ops.ltc.init_fs_state)
+	if (g->ops.ltc.init_fs_state) {
 		g->ops.ltc.init_fs_state(g);
+	}
 
 	return 0;
 }
 
 void nvgpu_ltc_sync_enabled(struct gk20a *g)
 {
-	if (!g->ops.ltc.set_enabled)
+	if (!g->ops.ltc.set_enabled) {
 		return;
+	}
 
 	nvgpu_spinlock_acquire(&g->ltc_enabled_lock);
 	if (g->mm.ltc_enabled_current != g->mm.ltc_enabled_target) {
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
index 98306079..c5bf40c1 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
@@ -75,9 +75,10 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
 	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
 	nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), reg);
 
-	if (g->ops.ltc.intr_en_illegal_compstat)
+	if (g->ops.ltc.intr_en_illegal_compstat) {
 		g->ops.ltc.intr_en_illegal_compstat(g,
 					g->ltc_intr_en_illegal_compstat);
+	}
 
 	/* Enable ECC interrupts */
 	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
@@ -93,14 +94,15 @@ void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable)
 
 	/* disble/enble illegal_compstat interrupt */
 	val = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	if (enable)
+	if (enable) {
 		val = set_field(val,
 			ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
 			ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f());
-	else
+	} else {
 		val = set_field(val,
 			ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
 			ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f());
+	}
 	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), val);
 }
@@ -117,8 +119,9 @@ void gv11b_ltc_isr(struct gk20a *g)
 
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0) {
 			continue;
+		}
 
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			u32 offset = ltc_stride * ltc + lts_stride * slice;
@@ -167,31 +170,40 @@ void gv11b_ltc_isr(struct gk20a *g)
 				ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
 
 			/* update counters per slice */
-			if (corrected_overflow)
+			if (corrected_overflow) {
 				corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
-			if (uncorrected_overflow)
+			}
+			if (uncorrected_overflow) {
 				uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
+			}
 
 			g->ecc.ltc.ecc_sec_count[ltc][slice].counter += corrected_delta;
 			g->ecc.ltc.ecc_ded_count[ltc][slice].counter += uncorrected_delta;
 			nvgpu_log(g, gpu_dbg_intr,
 				"ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);
 
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
+			}
 
-			if (corrected_overflow || uncorrected_overflow)
+			if (corrected_overflow || uncorrected_overflow) {
 				nvgpu_info(g, "ecc counter overflow!");
+			}
 
 			nvgpu_log(g, gpu_dbg_intr,
 				"ecc error address: 0x%x", ecc_addr);
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index ba6a92ba..1448fed1 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -53,17 +53,19 @@ static void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 	 * driver should be refactored to prevent this from happening, but for
 	 * now it is ok just to ignore the writes
 	 */
-	if (!gk20a_io_exists(g) && nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
+	if (!gk20a_io_exists(g) && nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
 		return;
+	}
 
 	alloc = mem->vidmem_alloc;
 	sgt = &alloc->sgt;
 
 	nvgpu_sgt_for_each_sgl(sgl, sgt) {
-		if (offset >= nvgpu_sgt_get_length(sgt, sgl))
+		if (offset >= nvgpu_sgt_get_length(sgt, sgl)) {
 			offset -= nvgpu_sgt_get_length(sgt, sgl);
-		else
+		} else {
 			break;
+		}
 	}
 
 	while (size) {
diff --git a/drivers/gpu/nvgpu/common/ptimer/ptimer.c b/drivers/gpu/nvgpu/common/ptimer/ptimer.c
index d5f9470d..3f3a5f9b 100644
--- a/drivers/gpu/nvgpu/common/ptimer/ptimer.c
+++ b/drivers/gpu/nvgpu/common/ptimer/ptimer.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,8 +39,9 @@ int nvgpu_get_timestamps_zipper(struct gk20a *g,
 
 	for (i = 0; i < count; i++) {
 		err = g->ops.ptimer.read_ptimer(g, &samples[i].gpu_timestamp);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 		samples[i].cpu_timestamp = nvgpu_hr_timestamp();
 	}
diff --git a/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c b/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
index 0b3188ee..a9c971df 100644
--- a/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
+++ b/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
@@ -54,9 +54,10 @@ void gk20a_ptimer_isr(struct gk20a *g)
 
 	if (fecs_errcode) {
 		nvgpu_err(g, "FECS_ERRCODE 0x%08x", fecs_errcode);
-		if (g->ops.priv_ring.decode_error_code)
+		if (g->ops.priv_ring.decode_error_code) {
 			g->ops.priv_ring.decode_error_code(g,
 						fecs_errcode);
+		}
 	}
 }
 
@@ -66,8 +67,9 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value)
 	unsigned int i = 0;
 	u32 gpu_timestamp_hi_prev = 0;
 
-	if (!value)
+	if (!value) {
 		return -EINVAL;
+	}
 
 	/* Note. The GPU nanosecond timer consists of two 32-bit
 	 * registers (high & low). To detect a possible low register
diff --git a/drivers/gpu/nvgpu/common/rbtree.c b/drivers/gpu/nvgpu/common/rbtree.c
index 86bab688..a0e97ee9 100644
--- a/drivers/gpu/nvgpu/common/rbtree.c
+++ b/drivers/gpu/nvgpu/common/rbtree.c
@@ -32,16 +32,18 @@ static void rotate_left(struct nvgpu_rbtree_node **root,
 
 	/* establish x->right link */
 	x->right = y->left;
-	if (y->left)
+	if (y->left) {
 		y->left->parent = x;
+	}
 
 	/* establish y->parent link */
 	y->parent = x->parent;
 	if (x->parent) {
-		if (x == x->parent->left)
+		if (x == x->parent->left) {
 			x->parent->left = y;
-		else
+		} else {
 			x->parent->right = y;
+		}
 	} else {
 		*root = y;
 	}
@@ -61,16 +63,18 @@ static void rotate_right(struct nvgpu_rbtree_node **root,
 
 	/* establish x->left link */
 	x->left = y->right;
-	if (y->right)
+	if (y->right) {
 		y->right->parent = x;
+	}
 
 	/* establish y->parent link */
 	y->parent = x->parent;
 	if (x->parent) {
-		if (x == x->parent->right)
+		if (x == x->parent->right) {
 			x->parent->right = y;
-		else
+		} else {
 			x->parent->left = y;
+		}
 	} else {
 		*root = y;
 	}
@@ -149,12 +153,13 @@ void nvgpu_rbtree_insert(struct nvgpu_rbtree_node *new_node,
 
 	while (curr) {
 		parent = curr;
-		if (new_node->key_start < curr->key_start)
+		if (new_node->key_start < curr->key_start) {
 			curr = curr->left;
-		else if (new_node->key_start > curr->key_start)
+		} else if (new_node->key_start > curr->key_start) {
 			curr = curr->right;
-		else
+		} else {
 			return; /* duplicate entry */
+		}
 	}
 
 	/* the caller allocated the node already, just fix the links */
@@ -165,10 +170,11 @@ void nvgpu_rbtree_insert(struct nvgpu_rbtree_node *new_node,
 
 	/* insert node in tree */
 	if (parent) {
-		if (new_node->key_start < parent->key_start)
+		if (new_node->key_start < parent->key_start) {
 			parent->left = new_node;
-		else
+		} else {
 			parent->right = new_node;
+		}
 	} else {
 		*root = new_node;
 	}
@@ -203,8 +209,9 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 			if (!w || ((!w->left || !w->left->is_red) &&
 				   (!w->right || !w->right->is_red))) {
-				if (w)
+				if (w) {
 					w->is_red = true;
+				}
 				x = parent_of_x;
 			} else {
 				if (!w->right || !w->right->is_red) {
@@ -231,8 +238,9 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 			if (!w || ((!w->right || !w->right->is_red) &&
 				   (!w->left || !w->left->is_red))) {
-				if (w)
+				if (w) {
 					w->is_red = true;
+				}
 				x = parent_of_x;
 			} else {
 				if (!w->left || !w->left->is_red) {
@@ -251,8 +259,9 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 		parent_of_x = x->parent;
 	}
 
-	if (x)
+	if (x) {
 		x->is_red = false;
+	}
 }
 
 void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
@@ -279,21 +288,24 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
 	}
 
 	/* x is y's only child */
-	if (y->left)
+	if (y->left) {
 		x = y->left;
-	else
+	} else {
 		x = y->right;
+	}
 
 	/* remove y from the parent chain */
 	parent_of_x = y->parent;
-	if (x)
+	if (x) {
 		x->parent = parent_of_x;
+	}
 
 	if (y->parent) {
-		if (y == y->parent->left)
+		if (y == y->parent->left) {
 			y->parent->left = x;
-		else
+		} else {
 			y->parent->right = x;
+		}
 	} else {
 		*root = x;
 	}
@@ -305,10 +317,11 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
 	 */
 	y->parent = z->parent;
 	if (z->parent) {
-		if (z == z->parent->left)
+		if (z == z->parent->left) {
 			z->parent->left = y;
-		else
+		} else {
 			z->parent->right = y;
+		}
 	} else {
 		*root = y;
 	}
@@ -316,19 +329,23 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
 	y->is_red = z->is_red;
 
 	y->left = z->left;
-	if (z->left)
+	if (z->left) {
 		z->left->parent = y;
+	}
 
 	y->right = z->right;
-	if (z->right)
+	if (z->right) {
 		z->right->parent = y;
+	}
 
-	if (parent_of_x == z)
+	if (parent_of_x == z) {
 		parent_of_x = y;
+	}
 	}
 
-	if (y_was_black)
+	if (y_was_black) {
 		_delete_fixup(root, parent_of_x, x);
+	}
 }
@@ -427,8 +444,9 @@ void nvgpu_rbtree_enum_next(struct nvgpu_rbtree_node **node,
 	} else {
 		/* go up until we find the right inorder node */
 		for (curr = curr->parent; curr; curr = curr->parent) {
-			if (curr->key_start > (*node)->key_start)
+			if (curr->key_start > (*node)->key_start) {
 				break;
+			}
 		}
 	}
 }
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 65aeb9eb..25bd3be3 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -66,8 +66,9 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 	ret = nvgpu_dma_alloc_sys(gk20a,
 				  PAGE_SIZE * SEMAPHORE_POOL_COUNT,
 				  &sea->sea_mem);
-	if (ret)
+	if (ret) {
 		goto out;
+	}
 
 	sea->size = SEMAPHORE_POOL_COUNT;
 	sea->map_size = SEMAPHORE_POOL_COUNT * PAGE_SIZE;
@@ -88,8 +89,9 @@ out:
 
 void nvgpu_semaphore_sea_destroy(struct gk20a *g)
 {
-	if (!g->sema_sea)
+	if (!g->sema_sea) {
 		return;
+	}
 
 	nvgpu_dma_free(g, &g->sema_sea->sea_mem);
 	nvgpu_mutex_destroy(&g->sema_sea->sea_lock);
@@ -103,22 +105,26 @@ void nvgpu_semaphore_sea_destroy(struct gk20a *g)
 */
 struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
 {
-	if (g->sema_sea)
+	if (g->sema_sea) {
 		return g->sema_sea;
+	}
 
 	g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea));
-	if (!g->sema_sea)
+	if (!g->sema_sea) {
 		return NULL;
+	}
 
 	g->sema_sea->size = 0;
 	g->sema_sea->page_count = 0;
 	g->sema_sea->gk20a = g;
 	nvgpu_init_list_node(&g->sema_sea->pool_list);
-	if (nvgpu_mutex_init(&g->sema_sea->sea_lock))
+	if (nvgpu_mutex_init(&g->sema_sea->sea_lock)) {
 		goto cleanup_free;
+	}
 
-	if (__nvgpu_semaphore_sea_grow(g->sema_sea))
+	if (__nvgpu_semaphore_sea_grow(g->sema_sea)) {
 		goto cleanup_destroy;
+	}
 
 	gpu_sema_dbg(g, "Created semaphore sea!");
 	return g->sema_sea;
@@ -136,8 +142,9 @@ static int __semaphore_bitmap_alloc(unsigned long *bitmap, unsigned long len)
 {
 	unsigned long idx = find_first_zero_bit(bitmap, len);
 
-	if (idx == len)
+	if (idx == len) {
 		return -ENOSPC;
+	}
 
 	set_bit(idx, bitmap);
 
@@ -155,19 +162,22 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
 	int ret;
 
 	p = nvgpu_kzalloc(sea->gk20a, sizeof(*p));
-	if (!p)
+	if (!p) {
 		return -ENOMEM;
+	}
 
 	__lock_sema_sea(sea);
 
 	ret = nvgpu_mutex_init(&p->pool_lock);
-	if (ret)
+	if (ret) {
 		goto fail;
+	}
 
 	ret = __semaphore_bitmap_alloc(sea->pools_alloced,
 				       SEMAPHORE_POOL_COUNT);
-	if (ret < 0)
+	if (ret < 0) {
 		goto fail_alloc;
+	}
 
 	page_idx = (unsigned long)ret;
 
@@ -205,8 +215,9 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	int err = 0;
 	u64 addr;
 
-	if (p->mapped)
+	if (p->mapped) {
 		return -EBUSY;
+	}
 
 	gpu_sema_dbg(pool_to_gk20a(p),
 		     "Mapping semaphore pool! (idx=%d)", p->page_idx);
@@ -242,8 +253,9 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	err = nvgpu_mem_create_from_mem(vm->mm->g,
 					&p->rw_mem, &p->sema_sea->sea_mem,
 					p->page_idx, 1);
-	if (err)
+	if (err) {
 		goto fail_unmap;
+	}
 
 	addr = nvgpu_gmmu_map(vm, &p->rw_mem, SZ_4K, 0,
 			      gk20a_mem_flag_none, 0,
@@ -342,8 +354,9 @@ void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p)
 */
 u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global)
 {
-	if (!global)
+	if (!global) {
 		return p->gpu_va;
+	}
 
 	return p->gpu_va_ro + (PAGE_SIZE * p->page_idx);
 }
@@ -427,13 +440,15 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 
 	if (!ch->hw_sema) {
 		ret = __nvgpu_init_hw_sema(ch);
-		if (ret)
+		if (ret) {
 			return NULL;
+		}
 	}
 
 	s = nvgpu_kzalloc(ch->g, sizeof(*s));
-	if (!s)
+	if (!s) {
 		return NULL;
+	}
 
 	nvgpu_ref_init(&s->ref);
 	s->g = ch->g;
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gv11b.c b/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
index 77edd7e1..419dd75e 100644
--- a/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
+++ b/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
@@ -143,8 +143,9 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g)
 	u32 active_engine_id = 0;
 	struct fifo_gk20a *f = &g->fifo;
 
-	if (nvgpu_platform_is_simulation(g))
+	if (nvgpu_platform_is_simulation(g)) {
 		return 0;
+	}
 
 	nvgpu_log_info(g, "init clock/power gate reg");
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
index 12c0eded..0760a6cd 100644
--- a/drivers/gpu/nvgpu/common/vbios/bios.c
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -352,10 +352,11 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 		}
 	}
 
-	if (!found)
+	if (!found) {
 		return -EINVAL;
-	else
+	} else {
 		return 0;
+	}
 }
 
 static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
@@ -393,8 +394,9 @@ u32 nvgpu_bios_get_nvlink_config_data(struct gk20a *g)
 {
 	struct nvlink_config_data_hdr_v1 config;
 
-	if (g->bios.nvlink_config_data_offset == 0)
+	if (g->bios.nvlink_config_data_offset == 0) {
 		return -EINVAL;
+	}
 
 	memcpy(&config, &g->bios.data[g->bios.nvlink_config_data_offset],
 		sizeof(config));
@@ -458,8 +460,9 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
 		interface.script_phys_base,
 		interface.script_size);
 
-	if (interface.version != 1)
+	if (interface.version != 1) {
 		return;
+	}
 
 	g->bios.devinit_tables_phys_base = interface.tables_phys_base;
 	g->bios.devinit_script_phys_base = interface.script_phys_base;
 }
@@ -475,8 +478,9 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
 		hdr.version, hdr.header_size,
 		hdr.entry_size, hdr.entry_count);
 
-	if (hdr.version != 1)
+	if (hdr.version != 1) {
 		return 0;
+	}
 
 	offset += sizeof(hdr);
 	for (i = 0; i < hdr.entry_count; i++) {
@@ -487,8 +491,9 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
 		nvgpu_log_fn(g, "appInfo id %d dmem_offset %d",
 			entry.id, entry.dmem_offset);
 
-		if (entry.id == APPINFO_ID_DEVINIT)
+		if (entry.id == APPINFO_ID_DEVINIT) {
 			nvgpu_bios_parse_devinit_appinfo(g, entry.dmem_offset);
+		}
 
 		offset += hdr.entry_size;
 	}
@@ -583,8 +588,9 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
 		hdr.entry_size, hdr.entry_count,
 		hdr.desc_version, hdr.desc_size);
 
-	if (hdr.version != 1)
+	if (hdr.version != 1) {
 		return -EINVAL;
+	}
 
 	offset += hdr.header_size;
 
@@ -603,30 +609,34 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
 			err = nvgpu_bios_parse_falcon_ucode_desc(g,
 				&g->bios.devinit,
 				entry.desc_ptr);
-			if (err)
+			if (err) {
 				err = nvgpu_bios_parse_falcon_ucode_desc(g,
 					&g->bios.devinit,
 					entry.desc_ptr +
 					g->bios.expansion_rom_offset);
+			}
 
-			if (err)
+			if (err) {
 				nvgpu_err(g,
 					"could not parse devinit ucode desc");
+			}
 		} else if (entry.target_id == TARGET_ID_PMU &&
 			entry.application_id == APPLICATION_ID_PRE_OS) {
 			int err;
 
 			err = nvgpu_bios_parse_falcon_ucode_desc(g,
 				&g->bios.preos,
 				entry.desc_ptr);
-			if (err)
+			if (err) {
 				err = nvgpu_bios_parse_falcon_ucode_desc(g,
 					&g->bios.preos,
 					entry.desc_ptr +
 					g->bios.expansion_rom_offset);
+			}
 
-			if (err)
+			if (err) {
 				nvgpu_err(g,
 					"could not parse preos ucode desc");
+			}
 		}
 
 		offset += hdr.entry_size;
@@ -645,13 +655,15 @@ static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
 		falcon_data.falcon_ucode_table_ptr);
 
 	err = nvgpu_bios_parse_falcon_ucode_table(g,
 		falcon_data.falcon_ucode_table_ptr);
-	if (err)
+	if (err) {
 		err = nvgpu_bios_parse_falcon_ucode_table(g,
 			falcon_data.falcon_ucode_table_ptr +
 			g->bios.expansion_rom_offset);
+	}
 
-	if (err)
+	if (err) {
 		nvgpu_err(g, "could not parse falcon ucode table");
+	}
 }
 
 void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
@@ -674,8 +686,9 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
 				(table_id * PERF_PTRS_WIDTH)]);
 			data_size = PERF_PTRS_WIDTH;
 		}
-	} else
+	} else {
 		return (void *)perf_table_ptr;
+	}
 
 	if (table_id < (ptoken->data_size/data_size)) {
 
@@ -686,18 +699,21 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
 		if (perf_table_id_offset != 0) {
 			/* check is perf_table_id_offset is > 64k */
-			if (perf_table_id_offset & ~0xFFFF)
+			if (perf_table_id_offset & ~0xFFFF) {
 				perf_table_ptr =
 					&g->bios.data[g->bios.expansion_rom_offset +
 						perf_table_id_offset];
-			else
+			} else {
 				perf_table_ptr =
 					&g->bios.data[perf_table_id_offset];
-		} else
+			}
+		} else {
 			nvgpu_warn(g, "PERF TABLE ID %d is NULL",
 					table_id);
-	} else
+		}
+	} else {
 		nvgpu_warn(g, "INVALID PERF TABLE ID - %d ",
 				table_id);
+	}
 
 	return (void *)perf_table_ptr;
 }
@@ -731,9 +747,10 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
 			nvgpu_bios_parse_nvinit_ptrs(g, bit_token.data_ptr);
 			break;
 		case TOKEN_ID_FALCON_DATA:
-			if (bit_token.data_version == 2)
+			if (bit_token.data_version == 2) {
 				nvgpu_bios_parse_falcon_data_v2(g,
 					bit_token.data_ptr);
+			}
 			break;
 		case TOKEN_ID_PERF_PTRS:
 			g->bios.perf_token =
-- 
cgit v1.2.2
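
Note for reviewers unfamiliar with the rule: below is a minimal, self-contained
C sketch of the MISRA C:2012 Rule 15.6 transformation applied in every hunk of
this change. The function and variable names here are hypothetical and are not
taken from the nvgpu tree; both variants behave identically, only the brace
style differs.

	#include <stdio.h>

	/* Non-compliant with MISRA Rule 15.6: the if/else bodies are
	 * single statements without braces (hypothetical example).
	 */
	static int clamp_nonneg_noncompliant(int v)
	{
		if (v < 0)
			return 0;
		else
			return v;
	}

	/* Compliant: every if/else body is a compound statement, which is
	 * exactly the change made throughout the patch above.
	 */
	static int clamp_nonneg_compliant(int v)
	{
		if (v < 0) {
			return 0;
		} else {
			return v;
		}
	}

	int main(void)
	{
		printf("%d %d\n", clamp_nonneg_noncompliant(-3),
		       clamp_nonneg_compliant(-3));
		return 0;
	}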