summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common/pmu/pmu.c
diff options
context:
space:
mode:
author	Srirangan <smadhavan@nvidia.com>	2018-08-14 05:29:27 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-21 18:44:28 -0400
commit	e988951ccab1031022ac354bbe8f53e1dc849b7a (patch)
tree	7fe8d7fa8b46f501c2e1a873b84873a5173478d5 /drivers/gpu/nvgpu/common/pmu/pmu.c
parent	652da8116966af2a8438a9a9f135a11b4e5c6c7b (diff)
gpu: nvgpu: common: pmu: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces, including single statement blocks. Fix errors due to single statement if blocks without braces, introducing the braces.

JIRA NVGPU-671

Change-Id: I497fbdb07bb2ec5a404046f06db3c713b3859e8e
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1799525
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu.c')
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu.c	| 57
1 file changed, 38 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index c71928c3..d72629b5 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -45,13 +45,15 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
45 /* bring PMU falcon/engine out of reset */ 45 /* bring PMU falcon/engine out of reset */
46 g->ops.pmu.reset_engine(g, true); 46 g->ops.pmu.reset_engine(g, true);
47 47
48 if (g->ops.clock_gating.slcg_pmu_load_gating_prod) 48 if (g->ops.clock_gating.slcg_pmu_load_gating_prod) {
49 g->ops.clock_gating.slcg_pmu_load_gating_prod(g, 49 g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
50 g->slcg_enabled); 50 g->slcg_enabled);
51 }
51 52
52 if (g->ops.clock_gating.blcg_pmu_load_gating_prod) 53 if (g->ops.clock_gating.blcg_pmu_load_gating_prod) {
53 g->ops.clock_gating.blcg_pmu_load_gating_prod(g, 54 g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
54 g->blcg_enabled); 55 g->blcg_enabled);
56 }
55 57
56 if (nvgpu_flcn_mem_scrub_wait(pmu->flcn)) { 58 if (nvgpu_flcn_mem_scrub_wait(pmu->flcn)) {
57 /* keep PMU falcon/engine in reset 59 /* keep PMU falcon/engine in reset
@@ -84,12 +86,14 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
84 } 86 }
85 } else { 87 } else {
86 err = pmu_enable_hw(pmu, true); 88 err = pmu_enable_hw(pmu, true);
87 if (err) 89 if (err) {
88 goto exit; 90 goto exit;
91 }
89 92
90 err = nvgpu_flcn_wait_idle(pmu->flcn); 93 err = nvgpu_flcn_wait_idle(pmu->flcn);
91 if (err) 94 if (err) {
92 goto exit; 95 goto exit;
96 }
93 97
94 pmu_enable_irq(pmu, true); 98 pmu_enable_irq(pmu, true);
95 } 99 }
@@ -107,12 +111,14 @@ int nvgpu_pmu_reset(struct gk20a *g)
107 nvgpu_log_fn(g, " %s ", g->name); 111 nvgpu_log_fn(g, " %s ", g->name);
108 112
109 err = nvgpu_flcn_wait_idle(pmu->flcn); 113 err = nvgpu_flcn_wait_idle(pmu->flcn);
110 if (err) 114 if (err) {
111 goto exit; 115 goto exit;
116 }
112 117
113 err = pmu_enable(pmu, false); 118 err = pmu_enable(pmu, false);
114 if (err) 119 if (err) {
115 goto exit; 120 goto exit;
121 }
116 122
117 err = pmu_enable(pmu, true); 123 err = pmu_enable(pmu, true);
118 124
@@ -136,8 +142,9 @@ static int nvgpu_init_task_pg_init(struct gk20a *g)
136 142
137 err = nvgpu_thread_create(&pmu->pg_init.state_task, g, 143 err = nvgpu_thread_create(&pmu->pg_init.state_task, g,
138 nvgpu_pg_init_task, thread_name); 144 nvgpu_pg_init_task, thread_name);
139 if (err) 145 if (err) {
140 nvgpu_err(g, "failed to start nvgpu_pg_init thread"); 146 nvgpu_err(g, "failed to start nvgpu_pg_init thread");
147 }
141 148
142 return err; 149 return err;
143} 150}
@@ -159,8 +166,9 @@ void nvgpu_kill_task_pg_init(struct gk20a *g)
159 /* wait to confirm thread stopped */ 166 /* wait to confirm thread stopped */
160 nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER); 167 nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
161 do { 168 do {
162 if (!nvgpu_thread_is_running(&pmu->pg_init.state_task)) 169 if (!nvgpu_thread_is_running(&pmu->pg_init.state_task)) {
163 break; 170 break;
171 }
164 nvgpu_udelay(2); 172 nvgpu_udelay(2);
165 } while (!nvgpu_timeout_expired_msg(&timeout, 173 } while (!nvgpu_timeout_expired_msg(&timeout,
166 "timeout - waiting PMU state machine thread stop")); 174 "timeout - waiting PMU state machine thread stop"));
@@ -199,8 +207,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
199 207
200 /* TBD: sysmon subtask */ 208 /* TBD: sysmon subtask */
201 209
202 if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON)) 210 if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON)) {
203 pmu->perfmon_sampling_enabled = true; 211 pmu->perfmon_sampling_enabled = true;
212 }
204 213
205 pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size(); 214 pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size();
206 pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt * 215 pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt *
@@ -246,8 +255,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
246 err = g->ops.pmu.alloc_super_surface(g, 255 err = g->ops.pmu.alloc_super_surface(g,
247 &pmu->super_surface_buf, 256 &pmu->super_surface_buf,
248 sizeof(struct nv_pmu_super_surface)); 257 sizeof(struct nv_pmu_super_surface));
249 if (err) 258 if (err) {
250 goto err_free_seq_buf; 259 goto err_free_seq_buf;
260 }
251 } 261 }
252 262
253 err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, 263 err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
@@ -263,8 +273,9 @@ skip_init:
263 nvgpu_log_fn(g, "done"); 273 nvgpu_log_fn(g, "done");
264 return 0; 274 return 0;
265 err_free_super_surface: 275 err_free_super_surface:
266 if (g->ops.pmu.alloc_super_surface) 276 if (g->ops.pmu.alloc_super_surface) {
267 nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf); 277 nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
278 }
268 err_free_seq_buf: 279 err_free_seq_buf:
269 nvgpu_dma_unmap_free(vm, &pmu->seq_buf); 280 nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
270 err_free_seq: 281 err_free_seq:
@@ -283,20 +294,24 @@ int nvgpu_init_pmu_support(struct gk20a *g)
283 294
284 nvgpu_log_fn(g, " "); 295 nvgpu_log_fn(g, " ");
285 296
286 if (pmu->initialized) 297 if (pmu->initialized) {
287 return 0; 298 return 0;
299 }
288 300
289 err = pmu_enable_hw(pmu, true); 301 err = pmu_enable_hw(pmu, true);
290 if (err) 302 if (err) {
291 return err; 303 return err;
304 }
292 305
293 if (g->support_pmu) { 306 if (g->support_pmu) {
294 err = nvgpu_init_pmu_setup_sw(g); 307 err = nvgpu_init_pmu_setup_sw(g);
295 if (err) 308 if (err) {
296 return err; 309 return err;
310 }
297 err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g); 311 err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
298 if (err) 312 if (err) {
299 return err; 313 return err;
314 }
300 315
301 nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false); 316 nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false);
302 } 317 }
@@ -402,8 +417,9 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
402 417
403 if (g->elpg_enabled) { 418 if (g->elpg_enabled) {
404 /* Init reg with prod values*/ 419 /* Init reg with prod values*/
405 if (g->ops.pmu.pmu_setup_elpg) 420 if (g->ops.pmu.pmu_setup_elpg) {
406 g->ops.pmu.pmu_setup_elpg(g); 421 g->ops.pmu.pmu_setup_elpg(g);
422 }
407 nvgpu_pmu_enable_elpg(g); 423 nvgpu_pmu_enable_elpg(g);
408 } 424 }
409 425
@@ -459,8 +475,9 @@ static int nvgpu_pg_init_task(void *arg)
459 switch (pmu_state) { 475 switch (pmu_state) {
460 case PMU_STATE_INIT_RECEIVED: 476 case PMU_STATE_INIT_RECEIVED:
461 nvgpu_pmu_dbg(g, "pmu starting"); 477 nvgpu_pmu_dbg(g, "pmu starting");
462 if (g->can_elpg) 478 if (g->can_elpg) {
463 nvgpu_pmu_init_powergating(g); 479 nvgpu_pmu_init_powergating(g);
480 }
464 break; 481 break;
465 case PMU_STATE_ELPG_BOOTED: 482 case PMU_STATE_ELPG_BOOTED:
466 nvgpu_pmu_dbg(g, "elpg booted"); 483 nvgpu_pmu_dbg(g, "elpg booted");
@@ -499,16 +516,18 @@ int nvgpu_pmu_destroy(struct gk20a *g)
499 516
500 nvgpu_log_fn(g, " "); 517 nvgpu_log_fn(g, " ");
501 518
502 if (!g->support_pmu) 519 if (!g->support_pmu) {
503 return 0; 520 return 0;
521 }
504 522
505 nvgpu_kill_task_pg_init(g); 523 nvgpu_kill_task_pg_init(g);
506 524
507 nvgpu_pmu_get_pg_stats(g, 525 nvgpu_pmu_get_pg_stats(g,
508 PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data); 526 PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
509 527
510 if (nvgpu_pmu_disable_elpg(g)) 528 if (nvgpu_pmu_disable_elpg(g)) {
511 nvgpu_err(g, "failed to set disable elpg"); 529 nvgpu_err(g, "failed to set disable elpg");
530 }
512 pmu->initialized = false; 531 pmu->initialized = false;
513 532
514 /* update the s/w ELPG residency counters */ 533 /* update the s/w ELPG residency counters */