From 385d3473e72c653906c82062eb2545abd7eeefe4 Mon Sep 17 00:00:00 2001
From: Srirangan
Date: Thu, 23 Aug 2018 12:19:48 +0530
Subject: gpu: nvgpu: common: Fix MISRA 15.6 violations

MISRA Rule-15.6 requires that all if-else blocks be enclosed in
braces, including single statement blocks. Fix errors due to single
statement if blocks without braces, introducing the braces.

JIRA NVGPU-671

Change-Id: I18cf871f2d1296b931d6ab43f1ac668551171dcc
Signed-off-by: Srirangan
Reviewed-on: https://git-master.nvidia.com/r/1805076
Reviewed-by: svc-misra-checker
Reviewed-by: Konsta Holtta
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c | 45 ++++++++++++++++++++------------
 drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c | 34 +++++++++++++++---------
 2 files changed, 51 insertions(+), 28 deletions(-)

(limited to 'drivers/gpu/nvgpu/common/ltc')

diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
index 28d63e82..623b0935 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
@@ -61,11 +61,13 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 	nvgpu_log_fn(g, " ");
 
-	if (max_comptag_lines == 0U)
+	if (max_comptag_lines == 0U) {
 		return 0;
+	}
 
-	if (max_comptag_lines > hw_max_comptag_lines)
+	if (max_comptag_lines > hw_max_comptag_lines) {
 		max_comptag_lines = hw_max_comptag_lines;
+	}
 
 	compbit_backing_size =
 	    DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) *
@@ -82,8 +84,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 		(compbit_backing_size * comptags_per_cacheline) /
 		(gr->cacheline_size * gr->slices_per_ltc * g->ltc_count);
 
-	if (max_comptag_lines > hw_max_comptag_lines)
+	if (max_comptag_lines > hw_max_comptag_lines) {
 		max_comptag_lines = hw_max_comptag_lines;
+	}
 
 	nvgpu_log_info(g, "compbit backing store size : %d",
 		compbit_backing_size);
@@ -91,12 +94,14 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 		max_comptag_lines);
 
 	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	gr->max_comptag_lines = max_comptag_lines;
 	gr->comptags_per_cacheline = comptags_per_cacheline;
@@ -121,8 +126,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 
 	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
 
-	if (gr->compbit_store.mem.size == 0)
+	if (gr->compbit_store.mem.size == 0) {
 		return 0;
+	}
 
 	while (1) {
 		const u32 iter_max = min(min + max_lines - 1, max);
@@ -168,8 +174,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 				  NVGPU_TIMER_RETRY_TIMER);
 		do {
 			val = gk20a_readl(g, ctrl1);
-			if (!(val & hw_op))
+			if (!(val & hw_op)) {
 				break;
+			}
 			nvgpu_udelay(5);
 		} while (!nvgpu_timeout_expired(&timeout));
 
@@ -182,8 +189,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 		}
 
 		/* are we done? */
-		if (full_cache_op || iter_max == max)
+		if (full_cache_op || iter_max == max) {
 			break;
+		}
 
 		/* note: iter_max is inclusive upper bound */
 		min = iter_max + 1;
@@ -240,8 +248,9 @@ void gm20b_ltc_isr(struct gk20a *g)
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0) {
 			continue;
+		}
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() +
 					   ltc_stride * ltc +
@@ -433,12 +442,13 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	u64 compbit_store_iova;
 	u64 compbit_base_post_divide64;
 
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
+	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		compbit_store_iova = nvgpu_mem_get_phys_addr(g,
 				&gr->compbit_store.mem);
-	else
+	} else {
 		compbit_store_iova = nvgpu_mem_get_addr(g,
 				&gr->compbit_store.mem);
+	}
 
 	compbit_base_post_divide64 = compbit_store_iova >>
 		ltc_ltcs_ltss_cbc_base_alignment_shift_v();
@@ -449,13 +459,15 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
 		g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
 
-	if (compbit_base_post_multiply64 < compbit_store_iova)
+	if (compbit_base_post_multiply64 < compbit_store_iova) {
 		compbit_base_post_divide++;
+	}
 
 	/* Bug 1477079 indicates sw adjustment on the posted divided base. */
-	if (g->ops.ltc.cbc_fix_config)
+	if (g->ops.ltc.cbc_fix_config) {
 		compbit_base_post_divide =
 			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
+	}
 
 	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
 		     compbit_base_post_divide);
@@ -478,12 +490,13 @@ void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled)
 	u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
 	u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
 
-	if (enabled)
-		/* bypass disabled (normal caching ops)*/
+	if (enabled) {
+		/* bypass disabled (normal caching ops) */
 		reg &= ~reg_f;
-	else
+	} else {
 		/* bypass enabled (no caching) */
 		reg |= reg_f;
+	}
 
 	gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
 }
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
index 5d0a8850..b72346ee 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
@@ -83,15 +83,18 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 	nvgpu_log_fn(g, " ");
 
-	if (max_comptag_lines == 0U)
+	if (max_comptag_lines == 0U) {
 		return 0;
+	}
 
 	/* Already initialized */
-	if (gr->max_comptag_lines)
+	if (gr->max_comptag_lines) {
 		return 0;
+	}
 
-	if (max_comptag_lines > hw_max_comptag_lines)
+	if (max_comptag_lines > hw_max_comptag_lines) {
 		max_comptag_lines = hw_max_comptag_lines;
+	}
 
 	compbit_backing_size =
 		roundup(max_comptag_lines * gobs_per_comptagline_per_slice,
@@ -115,12 +118,14 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 		gobs_per_comptagline_per_slice);
 
 	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	gr->max_comptag_lines = max_comptag_lines;
 	gr->comptags_per_cacheline = comptags_per_cacheline;
@@ -146,8 +151,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 
 	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
 
-	if (gr->compbit_store.mem.size == 0U)
+	if (gr->compbit_store.mem.size == 0U) {
 		return 0;
+	}
 
 	while (1) {
 		const u32 iter_max = min(min + max_lines - 1, max);
@@ -195,8 +201,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 				  NVGPU_TIMER_RETRY_TIMER);
 		do {
 			val = gk20a_readl(g, ctrl1);
-			if (!(val & hw_op))
+			if (!(val & hw_op)) {
 				break;
+			}
 			nvgpu_udelay(5);
 		} while (!nvgpu_timeout_expired(&timeout));
 
@@ -209,8 +216,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 		}
 
 		/* are we done? */
-		if (full_cache_op || iter_max == max)
+		if (full_cache_op || iter_max == max) {
 			break;
+		}
 
 		/* note: iter_max is inclusive upper bound */
 		min = iter_max + 1;
@@ -234,8 +242,9 @@ void gp10b_ltc_isr(struct gk20a *g)
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0) {
 			continue;
+		}
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			u32 offset = ltc_stride * ltc + lts_stride * slice;
 			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset);
@@ -310,12 +319,13 @@ void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled)
 	u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
 	u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
 
-	if (enabled)
-		/* bypass disabled (normal caching ops)*/
+	if (enabled) {
+		/* bypass disabled (normal caching ops) */
 		reg &= ~reg_f;
-	else
+	} else {
 		/* bypass enabled (no caching) */
 		reg |= reg_f;
+	}
 
 	nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
 }
-- 
cgit v1.2.2
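
For readers unfamiliar with the rule, below is a minimal standalone sketch of the before/after shape that MISRA C:2012 Rule 15.6 enforces in every hunk above. It is not taken from the patch; the function and macro names (clamp_noncompliant, clamp_compliant, HW_MAX_LINES) are invented for illustration.

/*
 * Sketch of the MISRA Rule 15.6 transformation this patch applies.
 * Names are hypothetical, not from the nvgpu sources.
 */
#include <stdio.h>

#define HW_MAX_LINES 1024U

/* Before: single-statement if bodies without braces (non-compliant). */
static unsigned int clamp_noncompliant(unsigned int lines)
{
	if (lines == 0U)
		return 0U;
	if (lines > HW_MAX_LINES)
		lines = HW_MAX_LINES;
	return lines;
}

/*
 * After: every controlled statement is brace-enclosed (compliant),
 * the same shape the hunks above introduce.
 */
static unsigned int clamp_compliant(unsigned int lines)
{
	if (lines == 0U) {
		return 0U;
	}
	if (lines > HW_MAX_LINES) {
		lines = HW_MAX_LINES;
	}
	return lines;
}

int main(void)
{
	/* Both variants behave identically; only the bracing differs. */
	printf("%u %u\n", clamp_noncompliant(4096U), clamp_compliant(4096U));
	return 0;
}

The braces cost nothing at runtime; they guard against the classic defect where a later edit adds a second indented statement that silently falls outside the conditional, which is why the rule applies even to single-statement blocks.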