diff options
author | Srirangan <smadhavan@nvidia.com> | 2018-08-23 02:49:48 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-08-27 10:52:22 -0400 |
commit | 385d3473e72c653906c82062eb2545abd7eeefe4 (patch) | |
tree | 82b51998c166623074f7f4c2e4415d128559cb86 /drivers/gpu/nvgpu/common/ltc | |
parent | 5c9bedf6f6e3213cd830d045d70f61de49f6e42b (diff) |
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that the body of every if/else statement be
enclosed in braces, including single-statement bodies. Fix the violations
caused by single-statement if blocks without braces by adding the braces.
JIRA NVGPU-671
Change-Id: I18cf871f2d1296b931d6ab43f1ac668551171dcc
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805076
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/ltc')
-rw-r--r-- | drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c | 45 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c | 34 |
2 files changed, 51 insertions, 28 deletions
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c index 28d63e82..623b0935 100644 --- a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c | |||
@@ -61,11 +61,13 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) | |||
61 | 61 | ||
62 | nvgpu_log_fn(g, " "); | 62 | nvgpu_log_fn(g, " "); |
63 | 63 | ||
64 | if (max_comptag_lines == 0U) | 64 | if (max_comptag_lines == 0U) { |
65 | return 0; | 65 | return 0; |
66 | } | ||
66 | 67 | ||
67 | if (max_comptag_lines > hw_max_comptag_lines) | 68 | if (max_comptag_lines > hw_max_comptag_lines) { |
68 | max_comptag_lines = hw_max_comptag_lines; | 69 | max_comptag_lines = hw_max_comptag_lines; |
70 | } | ||
69 | 71 | ||
70 | compbit_backing_size = | 72 | compbit_backing_size = |
71 | DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) * | 73 | DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) * |
@@ -82,8 +84,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) | |||
82 | (compbit_backing_size * comptags_per_cacheline) / | 84 | (compbit_backing_size * comptags_per_cacheline) / |
83 | (gr->cacheline_size * gr->slices_per_ltc * g->ltc_count); | 85 | (gr->cacheline_size * gr->slices_per_ltc * g->ltc_count); |
84 | 86 | ||
85 | if (max_comptag_lines > hw_max_comptag_lines) | 87 | if (max_comptag_lines > hw_max_comptag_lines) { |
86 | max_comptag_lines = hw_max_comptag_lines; | 88 | max_comptag_lines = hw_max_comptag_lines; |
89 | } | ||
87 | 90 | ||
88 | nvgpu_log_info(g, "compbit backing store size : %d", | 91 | nvgpu_log_info(g, "compbit backing store size : %d", |
89 | compbit_backing_size); | 92 | compbit_backing_size); |
@@ -91,12 +94,14 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) | |||
91 | max_comptag_lines); | 94 | max_comptag_lines); |
92 | 95 | ||
93 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); | 96 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); |
94 | if (err) | 97 | if (err) { |
95 | return err; | 98 | return err; |
99 | } | ||
96 | 100 | ||
97 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); | 101 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); |
98 | if (err) | 102 | if (err) { |
99 | return err; | 103 | return err; |
104 | } | ||
100 | 105 | ||
101 | gr->max_comptag_lines = max_comptag_lines; | 106 | gr->max_comptag_lines = max_comptag_lines; |
102 | gr->comptags_per_cacheline = comptags_per_cacheline; | 107 | gr->comptags_per_cacheline = comptags_per_cacheline; |
@@ -121,8 +126,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
121 | 126 | ||
122 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); | 127 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); |
123 | 128 | ||
124 | if (gr->compbit_store.mem.size == 0) | 129 | if (gr->compbit_store.mem.size == 0) { |
125 | return 0; | 130 | return 0; |
131 | } | ||
126 | 132 | ||
127 | while (1) { | 133 | while (1) { |
128 | const u32 iter_max = min(min + max_lines - 1, max); | 134 | const u32 iter_max = min(min + max_lines - 1, max); |
@@ -168,8 +174,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
168 | NVGPU_TIMER_RETRY_TIMER); | 174 | NVGPU_TIMER_RETRY_TIMER); |
169 | do { | 175 | do { |
170 | val = gk20a_readl(g, ctrl1); | 176 | val = gk20a_readl(g, ctrl1); |
171 | if (!(val & hw_op)) | 177 | if (!(val & hw_op)) { |
172 | break; | 178 | break; |
179 | } | ||
173 | nvgpu_udelay(5); | 180 | nvgpu_udelay(5); |
174 | } while (!nvgpu_timeout_expired(&timeout)); | 181 | } while (!nvgpu_timeout_expired(&timeout)); |
175 | 182 | ||
@@ -182,8 +189,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
182 | } | 189 | } |
183 | 190 | ||
184 | /* are we done? */ | 191 | /* are we done? */ |
185 | if (full_cache_op || iter_max == max) | 192 | if (full_cache_op || iter_max == max) { |
186 | break; | 193 | break; |
194 | } | ||
187 | 195 | ||
188 | /* note: iter_max is inclusive upper bound */ | 196 | /* note: iter_max is inclusive upper bound */ |
189 | min = iter_max + 1; | 197 | min = iter_max + 1; |
@@ -240,8 +248,9 @@ void gm20b_ltc_isr(struct gk20a *g) | |||
240 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); | 248 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); |
241 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); | 249 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); |
242 | for (ltc = 0; ltc < g->ltc_count; ltc++) { | 250 | for (ltc = 0; ltc < g->ltc_count; ltc++) { |
243 | if ((mc_intr & 1U << ltc) == 0) | 251 | if ((mc_intr & 1U << ltc) == 0) { |
244 | continue; | 252 | continue; |
253 | } | ||
245 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { | 254 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { |
246 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + | 255 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + |
247 | ltc_stride * ltc + | 256 | ltc_stride * ltc + |
@@ -433,12 +442,13 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr) | |||
433 | u64 compbit_store_iova; | 442 | u64 compbit_store_iova; |
434 | u64 compbit_base_post_divide64; | 443 | u64 compbit_base_post_divide64; |
435 | 444 | ||
436 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) | 445 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { |
437 | compbit_store_iova = nvgpu_mem_get_phys_addr(g, | 446 | compbit_store_iova = nvgpu_mem_get_phys_addr(g, |
438 | &gr->compbit_store.mem); | 447 | &gr->compbit_store.mem); |
439 | else | 448 | } else { |
440 | compbit_store_iova = nvgpu_mem_get_addr(g, | 449 | compbit_store_iova = nvgpu_mem_get_addr(g, |
441 | &gr->compbit_store.mem); | 450 | &gr->compbit_store.mem); |
451 | } | ||
442 | 452 | ||
443 | compbit_base_post_divide64 = compbit_store_iova >> | 453 | compbit_base_post_divide64 = compbit_store_iova >> |
444 | ltc_ltcs_ltss_cbc_base_alignment_shift_v(); | 454 | ltc_ltcs_ltss_cbc_base_alignment_shift_v(); |
@@ -449,13 +459,15 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr) | |||
449 | compbit_base_post_multiply64 = ((u64)compbit_base_post_divide * | 459 | compbit_base_post_multiply64 = ((u64)compbit_base_post_divide * |
450 | g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v(); | 460 | g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v(); |
451 | 461 | ||
452 | if (compbit_base_post_multiply64 < compbit_store_iova) | 462 | if (compbit_base_post_multiply64 < compbit_store_iova) { |
453 | compbit_base_post_divide++; | 463 | compbit_base_post_divide++; |
464 | } | ||
454 | 465 | ||
455 | /* Bug 1477079 indicates sw adjustment on the posted divided base. */ | 466 | /* Bug 1477079 indicates sw adjustment on the posted divided base. */ |
456 | if (g->ops.ltc.cbc_fix_config) | 467 | if (g->ops.ltc.cbc_fix_config) { |
457 | compbit_base_post_divide = | 468 | compbit_base_post_divide = |
458 | g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide); | 469 | g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide); |
470 | } | ||
459 | 471 | ||
460 | gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), | 472 | gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), |
461 | compbit_base_post_divide); | 473 | compbit_base_post_divide); |
@@ -478,12 +490,13 @@ void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled) | |||
478 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); | 490 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); |
479 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); | 491 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); |
480 | 492 | ||
481 | if (enabled) | 493 | if (enabled) { |
482 | /* bypass disabled (normal caching ops)*/ | 494 | /* bypass disabled (normal caching ops) */ |
483 | reg &= ~reg_f; | 495 | reg &= ~reg_f; |
484 | else | 496 | } else { |
485 | /* bypass enabled (no caching) */ | 497 | /* bypass enabled (no caching) */ |
486 | reg |= reg_f; | 498 | reg |= reg_f; |
499 | } | ||
487 | 500 | ||
488 | gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); | 501 | gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); |
489 | } | 502 | } |
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c index 5d0a8850..b72346ee 100644 --- a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c | |||
@@ -83,15 +83,18 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) | |||
83 | 83 | ||
84 | nvgpu_log_fn(g, " "); | 84 | nvgpu_log_fn(g, " "); |
85 | 85 | ||
86 | if (max_comptag_lines == 0U) | 86 | if (max_comptag_lines == 0U) { |
87 | return 0; | 87 | return 0; |
88 | } | ||
88 | 89 | ||
89 | /* Already initialized */ | 90 | /* Already initialized */ |
90 | if (gr->max_comptag_lines) | 91 | if (gr->max_comptag_lines) { |
91 | return 0; | 92 | return 0; |
93 | } | ||
92 | 94 | ||
93 | if (max_comptag_lines > hw_max_comptag_lines) | 95 | if (max_comptag_lines > hw_max_comptag_lines) { |
94 | max_comptag_lines = hw_max_comptag_lines; | 96 | max_comptag_lines = hw_max_comptag_lines; |
97 | } | ||
95 | 98 | ||
96 | compbit_backing_size = | 99 | compbit_backing_size = |
97 | roundup(max_comptag_lines * gobs_per_comptagline_per_slice, | 100 | roundup(max_comptag_lines * gobs_per_comptagline_per_slice, |
@@ -115,12 +118,14 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) | |||
115 | gobs_per_comptagline_per_slice); | 118 | gobs_per_comptagline_per_slice); |
116 | 119 | ||
117 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); | 120 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); |
118 | if (err) | 121 | if (err) { |
119 | return err; | 122 | return err; |
123 | } | ||
120 | 124 | ||
121 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); | 125 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); |
122 | if (err) | 126 | if (err) { |
123 | return err; | 127 | return err; |
128 | } | ||
124 | 129 | ||
125 | gr->max_comptag_lines = max_comptag_lines; | 130 | gr->max_comptag_lines = max_comptag_lines; |
126 | gr->comptags_per_cacheline = comptags_per_cacheline; | 131 | gr->comptags_per_cacheline = comptags_per_cacheline; |
@@ -146,8 +151,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
146 | 151 | ||
147 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); | 152 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); |
148 | 153 | ||
149 | if (gr->compbit_store.mem.size == 0U) | 154 | if (gr->compbit_store.mem.size == 0U) { |
150 | return 0; | 155 | return 0; |
156 | } | ||
151 | 157 | ||
152 | while (1) { | 158 | while (1) { |
153 | const u32 iter_max = min(min + max_lines - 1, max); | 159 | const u32 iter_max = min(min + max_lines - 1, max); |
@@ -195,8 +201,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
195 | NVGPU_TIMER_RETRY_TIMER); | 201 | NVGPU_TIMER_RETRY_TIMER); |
196 | do { | 202 | do { |
197 | val = gk20a_readl(g, ctrl1); | 203 | val = gk20a_readl(g, ctrl1); |
198 | if (!(val & hw_op)) | 204 | if (!(val & hw_op)) { |
199 | break; | 205 | break; |
206 | } | ||
200 | nvgpu_udelay(5); | 207 | nvgpu_udelay(5); |
201 | } while (!nvgpu_timeout_expired(&timeout)); | 208 | } while (!nvgpu_timeout_expired(&timeout)); |
202 | 209 | ||
@@ -209,8 +216,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
209 | } | 216 | } |
210 | 217 | ||
211 | /* are we done? */ | 218 | /* are we done? */ |
212 | if (full_cache_op || iter_max == max) | 219 | if (full_cache_op || iter_max == max) { |
213 | break; | 220 | break; |
221 | } | ||
214 | 222 | ||
215 | /* note: iter_max is inclusive upper bound */ | 223 | /* note: iter_max is inclusive upper bound */ |
216 | min = iter_max + 1; | 224 | min = iter_max + 1; |
@@ -234,8 +242,9 @@ void gp10b_ltc_isr(struct gk20a *g) | |||
234 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); | 242 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); |
235 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); | 243 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); |
236 | for (ltc = 0; ltc < g->ltc_count; ltc++) { | 244 | for (ltc = 0; ltc < g->ltc_count; ltc++) { |
237 | if ((mc_intr & 1U << ltc) == 0) | 245 | if ((mc_intr & 1U << ltc) == 0) { |
238 | continue; | 246 | continue; |
247 | } | ||
239 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { | 248 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { |
240 | u32 offset = ltc_stride * ltc + lts_stride * slice; | 249 | u32 offset = ltc_stride * ltc + lts_stride * slice; |
241 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset); | 250 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset); |
@@ -310,12 +319,13 @@ void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled) | |||
310 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); | 319 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); |
311 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); | 320 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); |
312 | 321 | ||
313 | if (enabled) | 322 | if (enabled) { |
314 | /* bypass disabled (normal caching ops)*/ | 323 | /* bypass disabled (normal caching ops) */ |
315 | reg &= ~reg_f; | 324 | reg &= ~reg_f; |
316 | else | 325 | } else { |
317 | /* bypass enabled (no caching) */ | 326 | /* bypass enabled (no caching) */ |
318 | reg |= reg_f; | 327 | reg |= reg_f; |
328 | } | ||
319 | 329 | ||
320 | nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); | 330 | nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); |
321 | } | 331 | } |