author     Srirangan <smadhavan@nvidia.com>                     2018-08-31 03:50:52 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-05 07:51:32 -0400
commit     43851d41b187c92f5ea9c2f503a882277f661d7e (patch)
tree       964a76c136c8c0dc14ec95358d27f930532b7dcb /drivers/gpu/nvgpu/gk20a/gk20a.c
parent     0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Iedac7d50aa2ebd409434eea5fda902b16d9c6fea
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797695
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
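For illustration, the pattern this change applies throughout gk20a.c is sketched below in plain C. The check_state_before()/check_state_after() helpers are hypothetical and not part of nvgpu; they only show the before/after shape of a Rule 15.6 fix.

/* Before: a single-statement if body without braces violates MISRA Rule 15.6. */
static int check_state_before(int state)
{
        if (state != 0)
                return -1;
        return 0;
}

/* After: the same logic with compound-statement braces around every if body. */
static int check_state_after(int state)
{
        if (state != 0) {
                return -1;
        }
        return 0;
}

The behaviour is identical; only braces are added, which is why the diff below consists almost entirely of inserted `{` and `}` lines.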
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c  82
1 file changed, 54 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 859a7b6a..f5e35927 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -74,16 +74,18 @@ int gk20a_detect_chip(struct gk20a *g)
 {
         struct nvgpu_gpu_params *p = &g->params;
 
-        if (p->gpu_arch)
+        if (p->gpu_arch) {
                 return 0;
+        }
 
         gk20a_mc_boot_0(g, &p->gpu_arch, &p->gpu_impl, &p->gpu_rev);
 
         if ((p->gpu_arch + p->gpu_impl) == NVGPU_GPUID_GV11B) {
 
                 /* overwrite gpu revison for A02 */
-                if (!nvgpu_is_soc_t194_a01(g))
+                if (!nvgpu_is_soc_t194_a01(g)) {
                         p->gpu_rev = 0xa2;
+                }
 
         }
 
@@ -114,13 +116,15 @@ int gk20a_prepare_poweroff(struct gk20a *g)
 
         if (g->ops.fifo.channel_suspend) {
                 ret = g->ops.fifo.channel_suspend(g);
-                if (ret)
+                if (ret) {
                         return ret;
+                }
         }
 
         /* disable elpg before gr or fifo suspend */
-        if (g->ops.pmu.is_pmu_supported(g))
+        if (g->ops.pmu.is_pmu_supported(g)) {
                 ret |= nvgpu_pmu_destroy(g);
+        }
 
         ret |= gk20a_gr_suspend(g);
         ret |= nvgpu_mm_suspend(g);
@@ -129,11 +133,13 @@ int gk20a_prepare_poweroff(struct gk20a *g)
         gk20a_ce_suspend(g);
 
         /* Disable GPCPLL */
-        if (g->ops.clk.suspend_clk_support)
+        if (g->ops.clk.suspend_clk_support) {
                 ret |= g->ops.clk.suspend_clk_support(g);
+        }
 
-        if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE))
+        if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) {
                 gk20a_deinit_pstate_support(g);
+        }
 
         gk20a_mask_interrupts(g);
 
@@ -151,8 +157,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
 
         nvgpu_log_fn(g, " ");
 
-        if (g->power_on)
+        if (g->power_on) {
                 return 0;
+        }
 
         g->power_on = true;
 
@@ -170,23 +177,27 @@ int gk20a_finalize_poweron(struct gk20a *g)
          * buffers.
          */
         err = nvgpu_pd_cache_init(g);
-        if (err)
+        if (err) {
                 return err;
+        }
 
         /* init interface layer support for PMU falcon */
         nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
         nvgpu_flcn_sw_init(g, FALCON_ID_SEC2);
         nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC);
 
-        if (g->ops.bios.init)
+        if (g->ops.bios.init) {
                 err = g->ops.bios.init(g);
-        if (err)
+        }
+        if (err) {
                 goto done;
+        }
 
         g->ops.bus.init_hw(g);
 
-        if (g->ops.clk.disable_slowboot)
+        if (g->ops.clk.disable_slowboot) {
                 g->ops.clk.disable_slowboot(g);
+        }
 
         g->ops.priv_ring.enable_priv_ring(g);
 
@@ -253,8 +264,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
                 goto done;
         }
 
-        if (g->ops.therm.elcg_init_idle_filters)
+        if (g->ops.therm.elcg_init_idle_filters) {
                 g->ops.therm.elcg_init_idle_filters(g);
+        }
 
         g->ops.mc.intr_enable(g);
 
@@ -265,8 +277,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
         }
 
         if (g->ops.pmu.is_pmu_supported(g)) {
-                if (g->ops.pmu.prepare_ucode)
+                if (g->ops.pmu.prepare_ucode) {
                         err = g->ops.pmu.prepare_ucode(g);
+                }
                 if (err) {
                         nvgpu_err(g, "failed to init pmu ucode");
                         goto done;
@@ -314,9 +327,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
                 }
         }
 
-        if (g->ops.pmu_ver.clk.clk_set_boot_clk && nvgpu_is_enabled(g, NVGPU_PMU_PSTATE))
+        if (g->ops.pmu_ver.clk.clk_set_boot_clk && nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) {
                 g->ops.pmu_ver.clk.clk_set_boot_clk(g);
-        else {
+        } else {
                 err = nvgpu_clk_arb_init_arbiter(g);
                 if (err) {
                         nvgpu_err(g, "failed to init clk arb");
@@ -350,8 +363,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
         if (g->ops.xve.available_speeds) {
                 u32 speed;
 
-                if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_ASPM) && g->ops.xve.disable_aspm)
+                if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_ASPM) && g->ops.xve.disable_aspm) {
                         g->ops.xve.disable_aspm(g);
+                }
 
                 g->ops.xve.available_speeds(g, &speed);
 
@@ -374,12 +388,14 @@ int gk20a_finalize_poweron(struct gk20a *g)
         }
 #endif
 
-        if (g->ops.fifo.channel_resume)
+        if (g->ops.fifo.channel_resume) {
                 g->ops.fifo.channel_resume(g);
+        }
 
 done:
-        if (err)
+        if (err) {
                 g->power_on = false;
+        }
 
         return err;
 }
@@ -390,8 +406,9 @@ done:
  */
 int gk20a_can_busy(struct gk20a *g)
 {
-        if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
+        if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
                 return 0;
+        }
         return 1;
 }
 
@@ -400,8 +417,9 @@ int gk20a_wait_for_idle(struct gk20a *g)
         int wait_length = 150; /* 3 second overall max wait. */
         int target_usage_count = 0;
 
-        if (!g)
+        if (!g) {
                 return -ENODEV;
+        }
 
         while ((nvgpu_atomic_read(&g->usage_count) != target_usage_count)
                         && (wait_length-- >= 0)) {
@@ -423,14 +441,17 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true);
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, true);
 
-        if (IS_ENABLED(CONFIG_SYNC))
+        if (IS_ENABLED(CONFIG_SYNC)) {
                 __nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true);
+        }
 
-        if (g->ops.mm.support_sparse && g->ops.mm.support_sparse(g))
+        if (g->ops.mm.support_sparse && g->ops.mm.support_sparse(g)) {
                 __nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
+        }
 
-        if (gk20a_platform_has_syncpoints(g))
+        if (gk20a_platform_has_syncpoints(g)) {
                 __nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, true);
+        }
 
         /*
          * Fast submits are supported as long as the user doesn't request
@@ -447,23 +468,26 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
          * supported otherwise, provided that the user doesn't request anything
          * that depends on deferred cleanup.
          */
-        if (!gk20a_channel_sync_needs_sync_framework(g))
+        if (!gk20a_channel_sync_needs_sync_framework(g)) {
                 __nvgpu_set_enabled(g,
                                 NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_FULL,
                                 true);
+        }
 
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_DETERMINISTIC_OPTS, true);
 
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_USERSPACE_MANAGED_AS, true);
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG, true);
 
-        if (g->ops.clk_arb.get_arbiter_clk_domains)
+        if (g->ops.clk_arb.get_arbiter_clk_domains) {
                 __nvgpu_set_enabled(g, NVGPU_SUPPORT_CLOCK_CONTROLS, true);
+        }
 
         g->ops.gr.detect_sm_arch(g);
 
-        if (g->ops.gr.init_cyclestats)
+        if (g->ops.gr.init_cyclestats) {
                 g->ops.gr.init_cyclestats(g);
+        }
 
         g->ops.gr.get_rop_l2_en_mask(g);
 
@@ -482,11 +506,13 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)
 
         gk20a_ce_destroy(g);
 
-        if (g->remove_support)
+        if (g->remove_support) {
                 g->remove_support(g);
+        }
 
-        if (g->free)
+        if (g->free) {
                 g->free(g);
+        }
 }
 
 /**