author	Srirangan <smadhavan@nvidia.com>	2018-08-20 05:13:41 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-23 00:55:49 -0400
commit	3fbaee7099039eee84343027dd1ce20679c0c113 (patch)
tree	0de4934723f58cad9cdcdb642927ffce0cfac6d8 /drivers/gpu/nvgpu/common/ltc
parent	52305f0514d29e7fb2cb5e2154188e09faa3fe94 (diff)
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in
braces, including single-statement blocks. Fix the violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I4d9933c51a297a725f48cbb15520a70494d74aeb
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1800833
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
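For context, MISRA C:2012 Rule 15.6 is the rule being enforced here: the body of every if/else must be a compound statement, i.e. enclosed in braces, even when it is a single statement. A minimal, hypothetical C sketch of the before/after pattern follows; the function and variable names are illustrative only and are not taken from the nvgpu sources.

/* Hypothetical example of a MISRA 15.6 fix; not part of this patch. */
#include <stdio.h>

static int clamp_to_zero(int value)
{
	/*
	 * Non-compliant form (single statement, no braces):
	 *
	 *     if (value < 0)
	 *             return 0;
	 */

	/* Compliant form: the single statement is enclosed in braces. */
	if (value < 0) {
		return 0;
	}

	return value;
}

int main(void)
{
	printf("%d\n", clamp_to_zero(-5));	/* prints 0 */
	return 0;
}

Besides satisfying the rule, the braces make the intended grouping explicit, so a statement added later cannot silently fall outside the conditional.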
Diffstat (limited to 'drivers/gpu/nvgpu/common/ltc')
-rw-r--r--	drivers/gpu/nvgpu/common/ltc/ltc.c	6
-rw-r--r--	drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c	38
2 files changed, 29 insertions, 15 deletions
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc.c b/drivers/gpu/nvgpu/common/ltc/ltc.c
index 1beb1974..3d85db3f 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc.c
@@ -34,16 +34,18 @@ int nvgpu_init_ltc_support(struct gk20a *g)
 	g->mm.ltc_enabled_current = true;
 	g->mm.ltc_enabled_target = true;
 
-	if (g->ops.ltc.init_fs_state)
+	if (g->ops.ltc.init_fs_state) {
 		g->ops.ltc.init_fs_state(g);
+	}
 
 	return 0;
 }
 
 void nvgpu_ltc_sync_enabled(struct gk20a *g)
 {
-	if (!g->ops.ltc.set_enabled)
+	if (!g->ops.ltc.set_enabled) {
 		return;
+	}
 
 	nvgpu_spinlock_acquire(&g->ltc_enabled_lock);
 	if (g->mm.ltc_enabled_current != g->mm.ltc_enabled_target) {
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
index 98306079..c5bf40c1 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
@@ -75,9 +75,10 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
 	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
 	nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), reg);
 
-	if (g->ops.ltc.intr_en_illegal_compstat)
+	if (g->ops.ltc.intr_en_illegal_compstat) {
 		g->ops.ltc.intr_en_illegal_compstat(g,
 				g->ltc_intr_en_illegal_compstat);
+	}
 
 	/* Enable ECC interrupts */
 	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
@@ -93,14 +94,15 @@ void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable)
 
 	/* disble/enble illegal_compstat interrupt */
 	val = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	if (enable)
+	if (enable) {
 		val = set_field(val,
 			ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
 			ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f());
-	else
+	} else {
 		val = set_field(val,
 			ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
 			ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f());
+	}
 	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), val);
 }
 
@@ -117,8 +119,9 @@ void gv11b_ltc_isr(struct gk20a *g)
 
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0) {
 			continue;
+		}
 
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			u32 offset = ltc_stride * ltc + lts_stride * slice;
@@ -167,31 +170,40 @@ void gv11b_ltc_isr(struct gk20a *g)
 				ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
 
 			/* update counters per slice */
-			if (corrected_overflow)
+			if (corrected_overflow) {
 				corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
-			if (uncorrected_overflow)
+			}
+			if (uncorrected_overflow) {
 				uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
+			}
 
 			g->ecc.ltc.ecc_sec_count[ltc][slice].counter += corrected_delta;
 			g->ecc.ltc.ecc_ded_count[ltc][slice].counter += uncorrected_delta;
 			nvgpu_log(g, gpu_dbg_intr,
 				"ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);
 
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
-			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
+			}
+			if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) {
 				nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
+			}
 
-			if (corrected_overflow || uncorrected_overflow)
+			if (corrected_overflow || uncorrected_overflow) {
 				nvgpu_info(g, "ecc counter overflow!");
+			}
 
 			nvgpu_log(g, gpu_dbg_intr,
 				"ecc error address: 0x%x", ecc_addr);