Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_pg.c')
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_pg.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index 47ac8b64..06dab8ea 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -151,7 +151,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
 	pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
 
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
-	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
 			PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
 			pmu, &seq, ~0);
 	WARN_ON(status != 0);
@@ -305,7 +305,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 	ptr = &pmu->mscg_transition_state;
 
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
+	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
 			PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
 			pmu, &seq, ~0);
 
@@ -376,7 +376,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;
 
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
 
 	/* alloc dmem for powergating state log */
@@ -390,7 +390,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	cmd.cmd.pg.stat.data = 0;
 
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
+	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 			pmu_handle_pg_stat_msg, pmu, &seq, ~0);
 
 	/* disallow ELPG initially
@@ -409,7 +409,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
 	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
 
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
 
 	if (g->ops.pmu.pmu_pg_set_sub_feature_mask)
@@ -508,7 +508,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
 
 	pmu->buf_loaded = false;
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
+	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 			pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
 	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
 	return err;
@@ -544,7 +544,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
 
 	pmu->buf_loaded = false;
 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
-	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
+	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 			pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
 	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
 }
@@ -662,7 +662,7 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g,
 		return 0x2f;
 	}
 
-	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			p_callback, pmu, &seq, ~0);
 
 	if (status) {
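
All eight hunks make the same mechanical change: each call site now goes through the renamed nvgpu_pmu_cmd_post() helper instead of gk20a_pmu_cmd_post(), with the argument list untouched. As a hedged illustration of the pattern these call sites share, the sketch below rebuilds one ALLOW-style post from the context lines above; the wrapper function name and the simplified command setup are assumptions for illustration, not code from this file, and it presumes the usual nvgpu driver headers.

/*
 * Illustrative sketch only: mirrors the command-post pattern shown in
 * the hunks above. example_post_elpg_allow() is hypothetical; field
 * names, queue id, callback, and the WARN_ON come from the diff context.
 */
static int example_post_elpg_allow(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;
	int status;

	/* build the PG command; header setup (unit id, size) elided */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
	pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
	/* renamed helper: was gk20a_pmu_cmd_post() before this change */
	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
			PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
			pmu, &seq, ~0);
	WARN_ON(status != 0);

	return status;
}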