diff options
author | Srirangan <smadhavan@nvidia.com> | 2018-08-16 02:03:55 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-08-20 08:46:25 -0400 |
commit | 9e69e0cf978b53706f55ffb873e3966b4bb3a7a8 (patch) | |
tree | 2437cda373f2c37419e14b89772fb3c5f6d234e4 /drivers/gpu/nvgpu/common/falcon/falcon.c | |
parent | de10cedf8caca9fd01f1b85031e538843da23252 (diff) |
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the violations caused by
single-statement if blocks lacking braces by adding the braces.
JIRA NVGPU-671
Change-Id: I599cce2af1d6cdc24efefba4ec42abfe998aec47
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795845
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/falcon/falcon.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/falcon/falcon.c | 83 |
1 file changed, 52 insertions, 31 deletions
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon.c b/drivers/gpu/nvgpu/common/falcon/falcon.c index 41dca0cd..81ba5e81 100644 --- a/drivers/gpu/nvgpu/common/falcon/falcon.c +++ b/drivers/gpu/nvgpu/common/falcon/falcon.c | |||
@@ -50,12 +50,14 @@ int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn) | |||
50 | do { | 50 | do { |
51 | idle_stat = flcn_ops->is_falcon_idle(flcn); | 51 | idle_stat = flcn_ops->is_falcon_idle(flcn); |
52 | 52 | ||
53 | if (idle_stat) | 53 | if (idle_stat) { |
54 | break; | 54 | break; |
55 | } | ||
55 | 56 | ||
56 | if (nvgpu_timeout_expired_msg(&timeout, | 57 | if (nvgpu_timeout_expired_msg(&timeout, |
57 | "waiting for falcon idle: 0x%08x", idle_stat)) | 58 | "waiting for falcon idle: 0x%08x", idle_stat)) { |
58 | return -EBUSY; | 59 | return -EBUSY; |
60 | } | ||
59 | 61 | ||
60 | nvgpu_usleep_range(100, 200); | 62 | nvgpu_usleep_range(100, 200); |
61 | } while (1); | 63 | } while (1); |
@@ -74,13 +76,15 @@ int nvgpu_flcn_mem_scrub_wait(struct nvgpu_falcon *flcn) | |||
74 | MEM_SCRUBBING_TIMEOUT_DEFAULT, | 76 | MEM_SCRUBBING_TIMEOUT_DEFAULT, |
75 | NVGPU_TIMER_RETRY_TIMER); | 77 | NVGPU_TIMER_RETRY_TIMER); |
76 | do { | 78 | do { |
77 | if (nvgpu_flcn_get_mem_scrubbing_status(flcn)) | 79 | if (nvgpu_flcn_get_mem_scrubbing_status(flcn)) { |
78 | goto exit; | 80 | goto exit; |
81 | } | ||
79 | nvgpu_udelay(MEM_SCRUBBING_TIMEOUT_DEFAULT); | 82 | nvgpu_udelay(MEM_SCRUBBING_TIMEOUT_DEFAULT); |
80 | } while (!nvgpu_timeout_expired(&timeout)); | 83 | } while (!nvgpu_timeout_expired(&timeout)); |
81 | 84 | ||
82 | if (nvgpu_timeout_peek_expired(&timeout)) | 85 | if (nvgpu_timeout_peek_expired(&timeout)) { |
83 | status = -ETIMEDOUT; | 86 | status = -ETIMEDOUT; |
87 | } | ||
84 | 88 | ||
85 | exit: | 89 | exit: |
86 | return status; | 90 | return status; |
@@ -92,8 +96,9 @@ int nvgpu_flcn_reset(struct nvgpu_falcon *flcn) | |||
92 | 96 | ||
93 | if (flcn->flcn_ops.reset) { | 97 | if (flcn->flcn_ops.reset) { |
94 | status = flcn->flcn_ops.reset(flcn); | 98 | status = flcn->flcn_ops.reset(flcn); |
95 | if (!status) | 99 | if (!status) { |
96 | status = nvgpu_flcn_mem_scrub_wait(flcn); | 100 | status = nvgpu_flcn_mem_scrub_wait(flcn); |
101 | } | ||
97 | } else { | 102 | } else { |
98 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 103 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
99 | flcn->flcn_id); | 104 | flcn->flcn_id); |
@@ -112,9 +117,10 @@ void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable, | |||
112 | flcn->intr_mask = intr_mask; | 117 | flcn->intr_mask = intr_mask; |
113 | flcn->intr_dest = intr_dest; | 118 | flcn->intr_dest = intr_dest; |
114 | flcn_ops->set_irq(flcn, enable); | 119 | flcn_ops->set_irq(flcn, enable); |
115 | } else | 120 | } else { |
116 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 121 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
117 | flcn->flcn_id); | 122 | flcn->flcn_id); |
123 | } | ||
118 | } | 124 | } |
119 | 125 | ||
120 | bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn) | 126 | bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn) |
@@ -122,11 +128,12 @@ bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn) | |||
122 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 128 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
123 | bool status = false; | 129 | bool status = false; |
124 | 130 | ||
125 | if (flcn_ops->is_falcon_scrubbing_done) | 131 | if (flcn_ops->is_falcon_scrubbing_done) { |
126 | status = flcn_ops->is_falcon_scrubbing_done(flcn); | 132 | status = flcn_ops->is_falcon_scrubbing_done(flcn); |
127 | else | 133 | } else { |
128 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 134 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
129 | flcn->flcn_id); | 135 | flcn->flcn_id); |
136 | } | ||
130 | 137 | ||
131 | return status; | 138 | return status; |
132 | } | 139 | } |
@@ -136,11 +143,12 @@ bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn) | |||
136 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 143 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
137 | bool status = false; | 144 | bool status = false; |
138 | 145 | ||
139 | if (flcn_ops->is_falcon_cpu_halted) | 146 | if (flcn_ops->is_falcon_cpu_halted) { |
140 | status = flcn_ops->is_falcon_cpu_halted(flcn); | 147 | status = flcn_ops->is_falcon_cpu_halted(flcn); |
141 | else | 148 | } else { |
142 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 149 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
143 | flcn->flcn_id); | 150 | flcn->flcn_id); |
151 | } | ||
144 | 152 | ||
145 | return status; | 153 | return status; |
146 | } | 154 | } |
@@ -153,14 +161,16 @@ int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout) | |||
153 | 161 | ||
154 | nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER); | 162 | nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER); |
155 | do { | 163 | do { |
156 | if (nvgpu_flcn_get_cpu_halted_status(flcn)) | 164 | if (nvgpu_flcn_get_cpu_halted_status(flcn)) { |
157 | break; | 165 | break; |
166 | } | ||
158 | 167 | ||
159 | nvgpu_udelay(10); | 168 | nvgpu_udelay(10); |
160 | } while (!nvgpu_timeout_expired(&to)); | 169 | } while (!nvgpu_timeout_expired(&to)); |
161 | 170 | ||
162 | if (nvgpu_timeout_peek_expired(&to)) | 171 | if (nvgpu_timeout_peek_expired(&to)) { |
163 | status = -EBUSY; | 172 | status = -EBUSY; |
173 | } | ||
164 | 174 | ||
165 | return status; | 175 | return status; |
166 | } | 176 | } |
@@ -181,14 +191,16 @@ int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn, | |||
181 | 191 | ||
182 | nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER); | 192 | nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER); |
183 | do { | 193 | do { |
184 | if (flcn_ops->clear_halt_interrupt_status(flcn)) | 194 | if (flcn_ops->clear_halt_interrupt_status(flcn)) { |
185 | break; | 195 | break; |
196 | } | ||
186 | 197 | ||
187 | nvgpu_udelay(1); | 198 | nvgpu_udelay(1); |
188 | } while (!nvgpu_timeout_expired(&to)); | 199 | } while (!nvgpu_timeout_expired(&to)); |
189 | 200 | ||
190 | if (nvgpu_timeout_peek_expired(&to)) | 201 | if (nvgpu_timeout_peek_expired(&to)) { |
191 | status = -EBUSY; | 202 | status = -EBUSY; |
203 | } | ||
192 | 204 | ||
193 | return status; | 205 | return status; |
194 | } | 206 | } |
@@ -198,11 +210,12 @@ bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn) | |||
198 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 210 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
199 | bool status = false; | 211 | bool status = false; |
200 | 212 | ||
201 | if (flcn_ops->is_falcon_idle) | 213 | if (flcn_ops->is_falcon_idle) { |
202 | status = flcn_ops->is_falcon_idle(flcn); | 214 | status = flcn_ops->is_falcon_idle(flcn); |
203 | else | 215 | } else { |
204 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 216 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
205 | flcn->flcn_id); | 217 | flcn->flcn_id); |
218 | } | ||
206 | 219 | ||
207 | return status; | 220 | return status; |
208 | } | 221 | } |
@@ -229,11 +242,12 @@ int nvgpu_flcn_copy_from_imem(struct nvgpu_falcon *flcn, | |||
229 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 242 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
230 | int status = -EINVAL; | 243 | int status = -EINVAL; |
231 | 244 | ||
232 | if (flcn_ops->copy_from_imem) | 245 | if (flcn_ops->copy_from_imem) { |
233 | status = flcn_ops->copy_from_imem(flcn, src, dst, size, port); | 246 | status = flcn_ops->copy_from_imem(flcn, src, dst, size, port); |
234 | else | 247 | } else { |
235 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 248 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
236 | flcn->flcn_id); | 249 | flcn->flcn_id); |
250 | } | ||
237 | 251 | ||
238 | return status; | 252 | return status; |
239 | } | 253 | } |
@@ -244,12 +258,13 @@ int nvgpu_flcn_copy_to_imem(struct nvgpu_falcon *flcn, | |||
244 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 258 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
245 | int status = -EINVAL; | 259 | int status = -EINVAL; |
246 | 260 | ||
247 | if (flcn_ops->copy_to_imem) | 261 | if (flcn_ops->copy_to_imem) { |
248 | status = flcn_ops->copy_to_imem(flcn, dst, src, size, port, | 262 | status = flcn_ops->copy_to_imem(flcn, dst, src, size, port, |
249 | sec, tag); | 263 | sec, tag); |
250 | else | 264 | } else { |
251 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 265 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
252 | flcn->flcn_id); | 266 | flcn->flcn_id); |
267 | } | ||
253 | 268 | ||
254 | return status; | 269 | return status; |
255 | } | 270 | } |
@@ -269,15 +284,17 @@ static void nvgpu_flcn_print_mem(struct nvgpu_falcon *flcn, u32 src, | |||
269 | do { | 284 | do { |
270 | byte_read_count = total_block_read ? sizeof(buff) : size; | 285 | byte_read_count = total_block_read ? sizeof(buff) : size; |
271 | 286 | ||
272 | if (!byte_read_count) | 287 | if (!byte_read_count) { |
273 | break; | 288 | break; |
289 | } | ||
274 | 290 | ||
275 | if (mem_type == MEM_DMEM) | 291 | if (mem_type == MEM_DMEM) { |
276 | status = nvgpu_flcn_copy_from_dmem(flcn, src, | 292 | status = nvgpu_flcn_copy_from_dmem(flcn, src, |
277 | (u8 *)buff, byte_read_count, 0); | 293 | (u8 *)buff, byte_read_count, 0); |
278 | else | 294 | } else { |
279 | status = nvgpu_flcn_copy_from_imem(flcn, src, | 295 | status = nvgpu_flcn_copy_from_imem(flcn, src, |
280 | (u8 *)buff, byte_read_count, 0); | 296 | (u8 *)buff, byte_read_count, 0); |
297 | } | ||
281 | 298 | ||
282 | if (status) { | 299 | if (status) { |
283 | nvgpu_err(flcn->g, "MEM print failed"); | 300 | nvgpu_err(flcn->g, "MEM print failed"); |
@@ -312,11 +329,12 @@ int nvgpu_flcn_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector) | |||
312 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 329 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
313 | int status = -EINVAL; | 330 | int status = -EINVAL; |
314 | 331 | ||
315 | if (flcn_ops->bootstrap) | 332 | if (flcn_ops->bootstrap) { |
316 | status = flcn_ops->bootstrap(flcn, boot_vector); | 333 | status = flcn_ops->bootstrap(flcn, boot_vector); |
317 | else | 334 | } else { |
318 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 335 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
319 | flcn->flcn_id); | 336 | flcn->flcn_id); |
337 | } | ||
320 | 338 | ||
321 | return status; | 339 | return status; |
322 | } | 340 | } |
@@ -326,11 +344,12 @@ u32 nvgpu_flcn_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index) | |||
326 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 344 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
327 | u32 data = 0; | 345 | u32 data = 0; |
328 | 346 | ||
329 | if (flcn_ops->mailbox_read) | 347 | if (flcn_ops->mailbox_read) { |
330 | data = flcn_ops->mailbox_read(flcn, mailbox_index); | 348 | data = flcn_ops->mailbox_read(flcn, mailbox_index); |
331 | else | 349 | } else { |
332 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 350 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
333 | flcn->flcn_id); | 351 | flcn->flcn_id); |
352 | } | ||
334 | 353 | ||
335 | return data; | 354 | return data; |
336 | } | 355 | } |
@@ -340,22 +359,24 @@ void nvgpu_flcn_mailbox_write(struct nvgpu_falcon *flcn, u32 mailbox_index, | |||
340 | { | 359 | { |
341 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 360 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
342 | 361 | ||
343 | if (flcn_ops->mailbox_write) | 362 | if (flcn_ops->mailbox_write) { |
344 | flcn_ops->mailbox_write(flcn, mailbox_index, data); | 363 | flcn_ops->mailbox_write(flcn, mailbox_index, data); |
345 | else | 364 | } else { |
346 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 365 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
347 | flcn->flcn_id); | 366 | flcn->flcn_id); |
367 | } | ||
348 | } | 368 | } |
349 | 369 | ||
350 | void nvgpu_flcn_dump_stats(struct nvgpu_falcon *flcn) | 370 | void nvgpu_flcn_dump_stats(struct nvgpu_falcon *flcn) |
351 | { | 371 | { |
352 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; | 372 | struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops; |
353 | 373 | ||
354 | if (flcn_ops->dump_falcon_stats) | 374 | if (flcn_ops->dump_falcon_stats) { |
355 | flcn_ops->dump_falcon_stats(flcn); | 375 | flcn_ops->dump_falcon_stats(flcn); |
356 | else | 376 | } else { |
357 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", | 377 | nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ", |
358 | flcn->flcn_id); | 378 | flcn->flcn_id); |
379 | } | ||
359 | } | 380 | } |
360 | 381 | ||
361 | int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn, | 382 | int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn, |