summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
diff options
context:
space:
mode:
author	Srirangan <smadhavan@nvidia.com>	2018-08-31 03:50:52 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-05 07:51:32 -0400
commit	43851d41b187c92f5ea9c2f503a882277f661d7e (patch)
tree	964a76c136c8c0dc14ec95358d27f930532b7dcb /drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
parent	0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces, including single statement blocks. Fix errors due to single statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Iedac7d50aa2ebd409434eea5fda902b16d9c6fea
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797695
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ce2_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ce2_gk20a.c	72
1 files changed, 45 insertions, 27 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index f905243e..4cc6c8ca 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -79,11 +79,13 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
79 nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr); 79 nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);
80 80
81 /* clear blocking interrupts: they exibit broken behavior */ 81 /* clear blocking interrupts: they exibit broken behavior */
82 if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) 82 if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) {
83 clear_intr |= ce2_blockpipe_isr(g, ce2_intr); 83 clear_intr |= ce2_blockpipe_isr(g, ce2_intr);
84 }
84 85
85 if (ce2_intr & ce2_intr_status_launcherr_pending_f()) 86 if (ce2_intr & ce2_intr_status_launcherr_pending_f()) {
86 clear_intr |= ce2_launcherr_isr(g, ce2_intr); 87 clear_intr |= ce2_launcherr_isr(g, ce2_intr);
88 }
87 89
88 gk20a_writel(g, ce2_intr_status_r(), clear_intr); 90 gk20a_writel(g, ce2_intr_status_r(), clear_intr);
89 return; 91 return;
@@ -112,8 +114,9 @@ static void gk20a_ce_put_fences(struct gk20a_gpu_ctx *ce_ctx)
112 114
113 for (i = 0; i < NVGPU_CE_MAX_INFLIGHT_JOBS; i++) { 115 for (i = 0; i < NVGPU_CE_MAX_INFLIGHT_JOBS; i++) {
114 struct gk20a_fence **fence = &ce_ctx->postfences[i]; 116 struct gk20a_fence **fence = &ce_ctx->postfences[i];
115 if (*fence) 117 if (*fence) {
116 gk20a_fence_put(*fence); 118 gk20a_fence_put(*fence);
119 }
117 *fence = NULL; 120 *fence = NULL;
118 } 121 }
119} 122}
@@ -140,8 +143,9 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
140 nvgpu_ref_put(&ce_ctx->tsg->refcount, gk20a_tsg_release); 143 nvgpu_ref_put(&ce_ctx->tsg->refcount, gk20a_tsg_release);
141 144
142 /* housekeeping on app */ 145 /* housekeeping on app */
143 if (list->prev && list->next) 146 if (list->prev && list->next) {
144 nvgpu_list_del(list); 147 nvgpu_list_del(list);
148 }
145 149
146 nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex); 150 nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex);
147 nvgpu_mutex_destroy(&ce_ctx->gpu_ctx_mutex); 151 nvgpu_mutex_destroy(&ce_ctx->gpu_ctx_mutex);
@@ -171,10 +175,11 @@ static inline unsigned int gk20a_ce_get_method_size(int request_operation,
171 chunk -= (u64) height * width; 175 chunk -= (u64) height * width;
172 } 176 }
173 177
174 if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER) 178 if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER) {
175 methodsize = (2 + (16 * iterations)) * sizeof(u32); 179 methodsize = (2 + (16 * iterations)) * sizeof(u32);
176 else if (request_operation & NVGPU_CE_MEMSET) 180 } else if (request_operation & NVGPU_CE_MEMSET) {
177 methodsize = (2 + (15 * iterations)) * sizeof(u32); 181 methodsize = (2 + (15 * iterations)) * sizeof(u32);
182 }
178 183
179 return methodsize; 184 return methodsize;
180} 185}
@@ -198,8 +203,9 @@ int gk20a_ce_prepare_submit(u64 src_buf,
198 /* failure case handling */ 203 /* failure case handling */
199 if ((gk20a_ce_get_method_size(request_operation, size) > 204 if ((gk20a_ce_get_method_size(request_operation, size) >
200 max_cmd_buf_size) || (!size) || 205 max_cmd_buf_size) || (!size) ||
201 (request_operation > NVGPU_CE_MEMSET)) 206 (request_operation > NVGPU_CE_MEMSET)) {
202 return 0; 207 return 0;
208 }
203 209
204 /* set the channel object */ 210 /* set the channel object */
205 cmd_buf_cpu_va[methodSize++] = 0x20018000; 211 cmd_buf_cpu_va[methodSize++] = 0x20018000;
@@ -252,13 +258,14 @@ int gk20a_ce_prepare_submit(u64 src_buf,
252 offset) & NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK); 258 offset) & NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK);
253 259
254 cmd_buf_cpu_va[methodSize++] = 0x20018098; 260 cmd_buf_cpu_va[methodSize++] = 0x20018098;
255 if (launch_flags & NVGPU_CE_SRC_LOCATION_LOCAL_FB) 261 if (launch_flags & NVGPU_CE_SRC_LOCATION_LOCAL_FB) {
256 cmd_buf_cpu_va[methodSize++] = 0x00000000; 262 cmd_buf_cpu_va[methodSize++] = 0x00000000;
257 else if (launch_flags & 263 } else if (launch_flags &
258 NVGPU_CE_SRC_LOCATION_NONCOHERENT_SYSMEM) 264 NVGPU_CE_SRC_LOCATION_NONCOHERENT_SYSMEM) {
259 cmd_buf_cpu_va[methodSize++] = 0x00000002; 265 cmd_buf_cpu_va[methodSize++] = 0x00000002;
260 else 266 } else {
261 cmd_buf_cpu_va[methodSize++] = 0x00000001; 267 cmd_buf_cpu_va[methodSize++] = 0x00000001;
268 }
262 269
263 launch |= 0x00001000; 270 launch |= 0x00001000;
264 } else if (request_operation & NVGPU_CE_MEMSET) { 271 } else if (request_operation & NVGPU_CE_MEMSET) {
@@ -289,25 +296,28 @@ int gk20a_ce_prepare_submit(u64 src_buf,
289 cmd_buf_cpu_va[methodSize++] = height; 296 cmd_buf_cpu_va[methodSize++] = height;
290 297
291 cmd_buf_cpu_va[methodSize++] = 0x20018099; 298 cmd_buf_cpu_va[methodSize++] = 0x20018099;
292 if (launch_flags & NVGPU_CE_DST_LOCATION_LOCAL_FB) 299 if (launch_flags & NVGPU_CE_DST_LOCATION_LOCAL_FB) {
293 cmd_buf_cpu_va[methodSize++] = 0x00000000; 300 cmd_buf_cpu_va[methodSize++] = 0x00000000;
294 else if (launch_flags & 301 } else if (launch_flags &
295 NVGPU_CE_DST_LOCATION_NONCOHERENT_SYSMEM) 302 NVGPU_CE_DST_LOCATION_NONCOHERENT_SYSMEM) {
296 cmd_buf_cpu_va[methodSize++] = 0x00000002; 303 cmd_buf_cpu_va[methodSize++] = 0x00000002;
297 else 304 } else {
298 cmd_buf_cpu_va[methodSize++] = 0x00000001; 305 cmd_buf_cpu_va[methodSize++] = 0x00000001;
306 }
299 307
300 launch |= 0x00002005; 308 launch |= 0x00002005;
301 309
302 if (launch_flags & NVGPU_CE_SRC_MEMORY_LAYOUT_BLOCKLINEAR) 310 if (launch_flags & NVGPU_CE_SRC_MEMORY_LAYOUT_BLOCKLINEAR) {
303 launch |= 0x00000000; 311 launch |= 0x00000000;
304 else 312 } else {
305 launch |= 0x00000080; 313 launch |= 0x00000080;
314 }
306 315
307 if (launch_flags & NVGPU_CE_DST_MEMORY_LAYOUT_BLOCKLINEAR) 316 if (launch_flags & NVGPU_CE_DST_MEMORY_LAYOUT_BLOCKLINEAR) {
308 launch |= 0x00000000; 317 launch |= 0x00000000;
309 else 318 } else {
310 launch |= 0x00000100; 319 launch |= 0x00000100;
320 }
311 321
312 cmd_buf_cpu_va[methodSize++] = 0x200180c0; 322 cmd_buf_cpu_va[methodSize++] = 0x200180c0;
313 cmd_buf_cpu_va[methodSize++] = launch; 323 cmd_buf_cpu_va[methodSize++] = launch;
@@ -329,12 +339,14 @@ int gk20a_init_ce_support(struct gk20a *g)
329 339
330 g->ops.mc.reset(g, ce_reset_mask); 340 g->ops.mc.reset(g, ce_reset_mask);
331 341
332 if (g->ops.clock_gating.slcg_ce2_load_gating_prod) 342 if (g->ops.clock_gating.slcg_ce2_load_gating_prod) {
333 g->ops.clock_gating.slcg_ce2_load_gating_prod(g, 343 g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
334 g->slcg_enabled); 344 g->slcg_enabled);
335 if (g->ops.clock_gating.blcg_ce_load_gating_prod) 345 }
346 if (g->ops.clock_gating.blcg_ce_load_gating_prod) {
336 g->ops.clock_gating.blcg_ce_load_gating_prod(g, 347 g->ops.clock_gating.blcg_ce_load_gating_prod(g,
337 g->blcg_enabled); 348 g->blcg_enabled);
349 }
338 350
339 if (ce_app->initialised) { 351 if (ce_app->initialised) {
340 /* assume this happen during poweron/poweroff GPU sequence */ 352 /* assume this happen during poweron/poweroff GPU sequence */
@@ -345,8 +357,9 @@ int gk20a_init_ce_support(struct gk20a *g)
345 nvgpu_log(g, gpu_dbg_fn, "ce: init"); 357 nvgpu_log(g, gpu_dbg_fn, "ce: init");
346 358
347 err = nvgpu_mutex_init(&ce_app->app_mutex); 359 err = nvgpu_mutex_init(&ce_app->app_mutex);
348 if (err) 360 if (err) {
349 return err; 361 return err;
362 }
350 363
351 nvgpu_mutex_acquire(&ce_app->app_mutex); 364 nvgpu_mutex_acquire(&ce_app->app_mutex);
352 365
@@ -367,8 +380,9 @@ void gk20a_ce_destroy(struct gk20a *g)
367 struct gk20a_ce_app *ce_app = &g->ce_app; 380 struct gk20a_ce_app *ce_app = &g->ce_app;
368 struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save; 381 struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save;
369 382
370 if (!ce_app->initialised) 383 if (!ce_app->initialised) {
371 return; 384 return;
385 }
372 386
373 ce_app->app_state = NVGPU_CE_SUSPEND; 387 ce_app->app_state = NVGPU_CE_SUSPEND;
374 ce_app->initialised = false; 388 ce_app->initialised = false;
@@ -393,8 +407,9 @@ void gk20a_ce_suspend(struct gk20a *g)
393{ 407{
394 struct gk20a_ce_app *ce_app = &g->ce_app; 408 struct gk20a_ce_app *ce_app = &g->ce_app;
395 409
396 if (!ce_app->initialised) 410 if (!ce_app->initialised) {
397 return; 411 return;
412 }
398 413
399 ce_app->app_state = NVGPU_CE_SUSPEND; 414 ce_app->app_state = NVGPU_CE_SUSPEND;
400 415
@@ -413,12 +428,14 @@ u32 gk20a_ce_create_context(struct gk20a *g,
413 u32 ctx_id = ~0; 428 u32 ctx_id = ~0;
414 int err = 0; 429 int err = 0;
415 430
416 if (!ce_app->initialised || ce_app->app_state != NVGPU_CE_ACTIVE) 431 if (!ce_app->initialised || ce_app->app_state != NVGPU_CE_ACTIVE) {
417 return ctx_id; 432 return ctx_id;
433 }
418 434
419 ce_ctx = nvgpu_kzalloc(g, sizeof(*ce_ctx)); 435 ce_ctx = nvgpu_kzalloc(g, sizeof(*ce_ctx));
420 if (!ce_ctx) 436 if (!ce_ctx) {
421 return ctx_id; 437 return ctx_id;
438 }
422 439
423 err = nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex); 440 err = nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);
424 if (err) { 441 if (err) {
@@ -538,8 +555,9 @@ void gk20a_ce_delete_context_priv(struct gk20a *g,
538 struct gk20a_ce_app *ce_app = &g->ce_app; 555 struct gk20a_ce_app *ce_app = &g->ce_app;
539 struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save; 556 struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save;
540 557
541 if (!ce_app->initialised ||ce_app->app_state != NVGPU_CE_ACTIVE) 558 if (!ce_app->initialised || ce_app->app_state != NVGPU_CE_ACTIVE) {
542 return; 559 return;
560 }
543 561
544 nvgpu_mutex_acquire(&ce_app->app_mutex); 562 nvgpu_mutex_acquire(&ce_app->app_mutex);
545 563