author     Srirangan <smadhavan@nvidia.com>    2018-08-14 05:29:27 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2018-08-21 18:44:28 -0400
commit     e988951ccab1031022ac354bbe8f53e1dc849b7a (patch)
tree       7fe8d7fa8b46f501c2e1a873b84873a5173478d5 /drivers/gpu/nvgpu/common/pmu/pmu_pg.c
parent     652da8116966af2a8438a9a9f135a11b4e5c6c7b (diff)
gpu: nvgpu: common: pmu: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I497fbdb07bb2ec5a404046f06db3c713b3859e8e
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1799525
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
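For reference, a minimal standalone sketch of the rule this patch enforces; the function and variable names below are illustrative, not taken from nvgpu:

/* MISRA C:2012 Rule 15.6: the body of every if/else must be a compound
 * statement. The names here are hypothetical examples.
 */
#include <stdbool.h>

static int elpg_state;

/* Non-compliant: single-statement branches without braces. */
static void set_state_noncompliant(bool on)
{
    if (on)
        elpg_state = 1;
    else
        elpg_state = 0;
}

/* Compliant: the same logic with each branch enclosed in braces,
 * which is the transformation applied throughout this patch.
 */
static void set_state_compliant(bool on)
{
    if (on) {
        elpg_state = 1;
    } else {
        elpg_state = 0;
    }
}

The braces change nothing in the generated code; the rule guards against a statement added later silently falling outside the intended branch.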
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_pg.c')
-rw-r--r--    drivers/gpu/nvgpu/common/pmu/pmu_pg.c    116
1 file changed, 74 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index c8559fdb..4978708c 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -72,19 +72,21 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
     case PMU_PG_ELPG_MSG_ALLOW_ACK:
         nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d",
             elpg_msg->engine_id);
-        if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+        if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
             pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
-        else
+        } else {
             pmu->elpg_stat = PMU_ELPG_STAT_ON;
+        }
         break;
     case PMU_PG_ELPG_MSG_DISALLOW_ACK:
         nvgpu_pmu_dbg(g, "DISALLOW is ack from PMU, eng - %d",
             elpg_msg->engine_id);
 
-        if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+        if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
             pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
-        else
+        } else {
             pmu->elpg_stat = PMU_ELPG_STAT_OFF;
+        }
 
         if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
             if (g->ops.pmu.pmu_pg_engines_feature_list &&
@@ -97,9 +99,10 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
                 WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
                 /* make status visible */
                 nvgpu_smp_mb();
-            } else
+            } else {
                 nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
                     true);
+            }
         }
         break;
     default:
@@ -118,21 +121,25 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
             g->ops.pmu.pmu_pg_engines_feature_list(g,
                 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
             NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
-            if (g->ops.pmu.pmu_lpwr_enable_pg)
+            if (g->ops.pmu.pmu_lpwr_enable_pg) {
                 status = g->ops.pmu.pmu_lpwr_enable_pg(g,
                     true);
-        } else if (g->support_pmu && g->can_elpg)
+            }
+        } else if (g->support_pmu && g->can_elpg) {
             status = nvgpu_pmu_enable_elpg(g);
+        }
     } else if (enable_pg == false) {
         if (g->ops.pmu.pmu_pg_engines_feature_list &&
             g->ops.pmu.pmu_pg_engines_feature_list(g,
                 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
             NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
-            if (g->ops.pmu.pmu_lpwr_disable_pg)
+            if (g->ops.pmu.pmu_lpwr_disable_pg) {
                 status = g->ops.pmu.pmu_lpwr_disable_pg(g,
                     true);
-        } else if (g->support_pmu && g->can_elpg)
+            }
+        } else if (g->support_pmu && g->can_elpg) {
             status = nvgpu_pmu_disable_elpg(g);
+        }
     }
 
     return status;
@@ -157,10 +164,11 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
     /* no need to wait ack for ELPG enable but set
      * pending to sync with follow up ELPG disable
      */
-    if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+    if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
         pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
-    else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+    } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
         pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
+    }
 
     nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
     status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
@@ -183,14 +191,16 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 
     nvgpu_log_fn(g, " ");
 
-    if (!g->support_pmu)
+    if (!g->support_pmu) {
         return ret;
+    }
 
     nvgpu_mutex_acquire(&pmu->elpg_mutex);
 
     pmu->elpg_refcnt++;
-    if (pmu->elpg_refcnt <= 0)
+    if (pmu->elpg_refcnt <= 0) {
         goto exit_unlock;
+    }
 
     /* something is not right if we end up in following code path */
     if (unlikely(pmu->elpg_refcnt > 1)) {
@@ -203,26 +213,31 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
     /* do NOT enable elpg until golden ctx is created,
      * which is related with the ctx that ELPG save and restore.
      */
-    if (unlikely(!gr->ctx_vars.golden_image_initialized))
+    if (unlikely(!gr->ctx_vars.golden_image_initialized)) {
         goto exit_unlock;
+    }
 
     /* return if ELPG is already on or on_pending or off_on_pending */
-    if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
+    if (pmu->elpg_stat != PMU_ELPG_STAT_OFF) {
         goto exit_unlock;
+    }
 
-    if (g->ops.pmu.pmu_pg_supported_engines_list)
+    if (g->ops.pmu.pmu_pg_supported_engines_list) {
         pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+    }
 
     for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
         pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
         pg_engine_id++) {
 
         if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-            pmu->mscg_stat == PMU_MSCG_DISABLED)
+            pmu->mscg_stat == PMU_MSCG_DISABLED) {
             continue;
+        }
 
-        if (BIT(pg_engine_id) & pg_engine_id_list)
+        if (BIT(pg_engine_id) & pg_engine_id_list) {
             ret = pmu_enable_elpg_locked(g, pg_engine_id);
+        }
     }
 
 exit_unlock:
@@ -243,11 +258,13 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 
     nvgpu_log_fn(g, " ");
 
-    if (g->ops.pmu.pmu_pg_supported_engines_list)
+    if (g->ops.pmu.pmu_pg_supported_engines_list) {
         pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+    }
 
-    if (!g->support_pmu)
+    if (!g->support_pmu) {
         return ret;
+    }
 
     nvgpu_mutex_acquire(&pmu->elpg_mutex);
 
@@ -293,8 +310,9 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
         pg_engine_id++) {
 
         if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-            pmu->mscg_stat == PMU_MSCG_DISABLED)
+            pmu->mscg_stat == PMU_MSCG_DISABLED) {
             continue;
+        }
 
         if (BIT(pg_engine_id) & pg_engine_id_list) {
             memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -305,16 +323,17 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
             cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
             cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
 
-            if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+            if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
                 pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
-            else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+            } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
                 pmu->mscg_transition_state =
                     PMU_ELPG_STAT_OFF_PENDING;
-
-            if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+            }
+            if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
                 ptr = &pmu->elpg_stat;
-            else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+            } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
                 ptr = &pmu->mscg_transition_state;
+            }
 
             nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
             nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
@@ -377,8 +396,9 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 
     gk20a_pmu_pg_idle_counter_config(g, pg_engine_id);
 
-    if (g->ops.pmu.pmu_pg_init_param)
+    if (g->ops.pmu.pmu_pg_init_param) {
         g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);
+    }
 
     /* init ELPG */
     memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -391,8 +411,9 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
     nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
     err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
         pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
-    if (err)
+    if (err) {
         nvgpu_err(g, "PMU_PG_ELPG_CMD_INIT cmd failed\n");
+    }
 
     /* alloc dmem for powergating state log */
     pmu->stat_dmem_offset[pg_engine_id] = 0;
@@ -407,17 +428,19 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
     nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
     err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
         pmu_handle_pg_stat_msg, pmu, &seq, ~0);
-    if (err)
+    if (err) {
         nvgpu_err(g, "PMU_PG_STAT_CMD_ALLOC_DMEM cmd failed\n");
+    }
 
     /* disallow ELPG initially
      * PMU ucode requires a disallow cmd before allow cmd
      */
     /* set for wait_event PMU_ELPG_STAT_OFF */
-    if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+    if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
         pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-    else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+    } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
         pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
+    }
     memset(&cmd, 0, sizeof(struct pmu_cmd));
     cmd.hdr.unit_id = PMU_UNIT_PG;
     cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
@@ -428,11 +451,13 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
     nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
     err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
         pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
-    if (err)
+    if (err) {
         nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW cmd failed\n");
+    }
 
-    if (g->ops.pmu.pmu_pg_set_sub_feature_mask)
+    if (g->ops.pmu.pmu_pg_set_sub_feature_mask) {
         g->ops.pmu.pmu_pg_set_sub_feature_mask(g, pg_engine_id);
+    }
 
     return 0;
 }
@@ -445,8 +470,9 @@ int nvgpu_pmu_init_powergating(struct gk20a *g)
 
     nvgpu_log_fn(g, " ");
 
-    if (g->ops.pmu.pmu_pg_supported_engines_list)
+    if (g->ops.pmu.pmu_pg_supported_engines_list) {
         pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+    }
 
     gk20a_gr_wait_initialized(g);
 
@@ -455,15 +481,17 @@ int nvgpu_pmu_init_powergating(struct gk20a *g)
         pg_engine_id++) {
 
         if (BIT(pg_engine_id) & pg_engine_id_list) {
-            if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
+            if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED) {
                 nvgpu_pmu_state_change(g,
                     PMU_STATE_ELPG_BOOTING, false);
+            }
             pmu_pg_init_send(g, pg_engine_id);
         }
     }
 
-    if (g->ops.pmu.pmu_pg_param_post_init)
+    if (g->ops.pmu.pmu_pg_param_post_init) {
         g->ops.pmu.pmu_pg_param_post_init(g);
+    }
 
     return 0;
 }
@@ -487,9 +515,9 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 
     pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
     if ((!pmu->buf_loaded) &&
-        (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
+        (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF)) {
         nvgpu_err(g, "failed to load PGENG buffer");
-    else {
+    } else {
         nvgpu_pmu_state_change(g, pmu->pmu_state, true);
     }
 }
@@ -530,8 +558,9 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
     nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
     err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
         pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-    if (err)
+    if (err) {
         nvgpu_err(g, "cmd LOAD PMU_PGENG_GR_BUFFER_IDX_FECS failed\n");
+    }
 
     return err;
 }
@@ -570,8 +599,9 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
     nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
     err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
         pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-    if (err)
+    if (err) {
         nvgpu_err(g, "CMD LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC failed\n");
+    }
 }
 
 /* stats */
@@ -588,12 +618,14 @@ int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
         return 0;
     }
 
-    if (g->ops.pmu.pmu_pg_supported_engines_list)
+    if (g->ops.pmu.pmu_pg_supported_engines_list) {
         pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+    }
 
-    if (BIT(pg_engine_id) & pg_engine_id_list)
+    if (BIT(pg_engine_id) & pg_engine_id_list) {
         g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
             pg_stat_data);
+    }
 
     return 0;
 }