From e988951ccab1031022ac354bbe8f53e1dc849b7a Mon Sep 17 00:00:00 2001
From: Srirangan
Date: Tue, 14 Aug 2018 14:59:27 +0530
Subject: gpu: nvgpu: common: pmu: Fix MISRA 15.6 violations

MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I497fbdb07bb2ec5a404046f06db3c713b3859e8e
Signed-off-by: Srirangan
Reviewed-on: https://git-master.nvidia.com/r/1799525
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/pmu/pmu.c         |  57 ++++++++-----
 drivers/gpu/nvgpu/common/pmu/pmu_fw.c      |  69 +++++++++------
 drivers/gpu/nvgpu/common/pmu/pmu_ipc.c     | 132 +++++++++++++++++++----------
 drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c |  39 ++++++---
 drivers/gpu/nvgpu/common/pmu/pmu_pg.c      | 116 ++++++++++++++++---------
 5 files changed, 266 insertions(+), 147 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index c71928c3..d72629b5 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -45,13 +45,15 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 		/* bring PMU falcon/engine out of reset */
 		g->ops.pmu.reset_engine(g, true);
 
-		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
+		if (g->ops.clock_gating.slcg_pmu_load_gating_prod) {
 			g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
 				g->slcg_enabled);
+		}
 
-		if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
+		if (g->ops.clock_gating.blcg_pmu_load_gating_prod) {
 			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
 				g->blcg_enabled);
+		}
 
 		if (nvgpu_flcn_mem_scrub_wait(pmu->flcn)) {
 			/* keep PMU falcon/engine in reset
@@ -84,12 +86,14 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 		}
 	} else {
 		err = pmu_enable_hw(pmu, true);
-		if (err)
+		if (err) {
 			goto exit;
+		}
 
 		err = nvgpu_flcn_wait_idle(pmu->flcn);
-		if (err)
+		if (err) {
 			goto exit;
+		}
 
 		pmu_enable_irq(pmu, true);
 	}
@@ -107,12 +111,14 @@ int nvgpu_pmu_reset(struct gk20a *g)
 	nvgpu_log_fn(g, " %s ", g->name);
 
 	err = nvgpu_flcn_wait_idle(pmu->flcn);
-	if (err)
+	if (err) {
 		goto exit;
+	}
 
 	err = pmu_enable(pmu, false);
-	if (err)
+	if (err) {
 		goto exit;
+	}
 
 	err = pmu_enable(pmu, true);
 
@@ -136,8 +142,9 @@ static int nvgpu_init_task_pg_init(struct gk20a *g)
 
 	err = nvgpu_thread_create(&pmu->pg_init.state_task, g,
 			nvgpu_pg_init_task, thread_name);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "failed to start nvgpu_pg_init thread");
+	}
 
 	return err;
 }
@@ -159,8 +166,9 @@ void nvgpu_kill_task_pg_init(struct gk20a *g)
 		/* wait to confirm thread stopped */
 		nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
 		do {
-			if (!nvgpu_thread_is_running(&pmu->pg_init.state_task))
+			if (!nvgpu_thread_is_running(&pmu->pg_init.state_task)) {
 				break;
+			}
 			nvgpu_udelay(2);
 		} while (!nvgpu_timeout_expired_msg(&timeout,
 			"timeout - waiting PMU state machine thread stop"));
@@ -199,8 +207,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 
 	/* TBD: sysmon subtask */
 
-	if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON))
+	if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON)) {
 		pmu->perfmon_sampling_enabled = true;
+	}
 
 	pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size();
 	pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt *
@@ -246,8 +255,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 		err = g->ops.pmu.alloc_super_surface(g,
 				&pmu->super_surface_buf,
 				sizeof(struct nv_pmu_super_surface));
-		if (err)
+		if (err) {
 			goto err_free_seq_buf;
+		}
 	}
 
 	err = nvgpu_dma_alloc_map(vm,
GK20A_PMU_TRACE_BUFSIZE, @@ -263,8 +273,9 @@ skip_init: nvgpu_log_fn(g, "done"); return 0; err_free_super_surface: - if (g->ops.pmu.alloc_super_surface) + if (g->ops.pmu.alloc_super_surface) { nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf); + } err_free_seq_buf: nvgpu_dma_unmap_free(vm, &pmu->seq_buf); err_free_seq: @@ -283,20 +294,24 @@ int nvgpu_init_pmu_support(struct gk20a *g) nvgpu_log_fn(g, " "); - if (pmu->initialized) + if (pmu->initialized) { return 0; + } err = pmu_enable_hw(pmu, true); - if (err) + if (err) { return err; + } if (g->support_pmu) { err = nvgpu_init_pmu_setup_sw(g); - if (err) + if (err) { return err; + } err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g); - if (err) + if (err) { return err; + } nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false); } @@ -402,8 +417,9 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g) if (g->elpg_enabled) { /* Init reg with prod values*/ - if (g->ops.pmu.pmu_setup_elpg) + if (g->ops.pmu.pmu_setup_elpg) { g->ops.pmu.pmu_setup_elpg(g); + } nvgpu_pmu_enable_elpg(g); } @@ -459,8 +475,9 @@ static int nvgpu_pg_init_task(void *arg) switch (pmu_state) { case PMU_STATE_INIT_RECEIVED: nvgpu_pmu_dbg(g, "pmu starting"); - if (g->can_elpg) + if (g->can_elpg) { nvgpu_pmu_init_powergating(g); + } break; case PMU_STATE_ELPG_BOOTED: nvgpu_pmu_dbg(g, "elpg booted"); @@ -499,16 +516,18 @@ int nvgpu_pmu_destroy(struct gk20a *g) nvgpu_log_fn(g, " "); - if (!g->support_pmu) + if (!g->support_pmu) { return 0; + } nvgpu_kill_task_pg_init(g); nvgpu_pmu_get_pg_stats(g, PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data); - if (nvgpu_pmu_disable_elpg(g)) + if (nvgpu_pmu_disable_elpg(g)) { nvgpu_err(g, "failed to set disable elpg"); + } pmu->initialized = false; /* update the s/w ELPG residency counters */ diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index 8a071e32..87fd2f2a 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c @@ -870,14 +870,15 @@ static void get_pmu_init_msg_pmu_queue_params_v4( u8 i; u8 tmp_id = id; - if (tmp_id == PMU_COMMAND_QUEUE_HPQ) + if (tmp_id == PMU_COMMAND_QUEUE_HPQ) { tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; - else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) + } else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) { tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; - else if (tmp_id == PMU_MESSAGE_QUEUE) + } else if (tmp_id == PMU_MESSAGE_QUEUE) { tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3; - else + } else { return; + } queue->index = init->queue_index[tmp_id]; queue->size = init->queue_size[tmp_id]; @@ -898,14 +899,15 @@ static void get_pmu_init_msg_pmu_queue_params_v5( u8 i; u8 tmp_id = id; - if (tmp_id == PMU_COMMAND_QUEUE_HPQ) + if (tmp_id == PMU_COMMAND_QUEUE_HPQ) { tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; - else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) + } else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) { tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; - else if (tmp_id == PMU_MESSAGE_QUEUE) + } else if (tmp_id == PMU_MESSAGE_QUEUE) { tmp_id = PMU_QUEUE_MSG_IDX_FOR_V5; - else + } else { return; + } queue->index = init->queue_index[tmp_id]; queue->size = init->queue_size[tmp_id]; @@ -927,14 +929,15 @@ static void get_pmu_init_msg_pmu_queue_params_v3( u8 i; u8 tmp_id = id; - if (tmp_id == PMU_COMMAND_QUEUE_HPQ) + if (tmp_id == PMU_COMMAND_QUEUE_HPQ) { tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; - else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) + } else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) { tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; - else if (tmp_id == PMU_MESSAGE_QUEUE) + } else if (tmp_id == PMU_MESSAGE_QUEUE) { tmp_id = 
PMU_QUEUE_MSG_IDX_FOR_V3; - else + } else { return; + } queue->index = init->queue_index[tmp_id]; queue->size = init->queue_size[tmp_id]; if (tmp_id != 0) { @@ -1623,8 +1626,9 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu) nvgpu_log_fn(g, " "); - if (nvgpu_alloc_initialized(&pmu->dmem)) + if (nvgpu_alloc_initialized(&pmu->dmem)) { nvgpu_alloc_destroy(&pmu->dmem); + } nvgpu_list_for_each_entry_safe(pboardobjgrp, pboardobjgrp_tmp, &g->boardobjgrp_head, boardobjgrp, node) { @@ -1636,20 +1640,25 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu) pboardobj->destruct(pboardobj); } - if (pmu->fw) + if (pmu->fw) { nvgpu_release_firmware(g, pmu->fw); + } - if (g->acr.pmu_fw) + if (g->acr.pmu_fw) { nvgpu_release_firmware(g, g->acr.pmu_fw); + } - if (g->acr.pmu_desc) + if (g->acr.pmu_desc) { nvgpu_release_firmware(g, g->acr.pmu_desc); + } - if (g->acr.acr_fw) + if (g->acr.acr_fw) { nvgpu_release_firmware(g, g->acr.acr_fw); + } - if (g->acr.hsbl_fw) + if (g->acr.hsbl_fw) { nvgpu_release_firmware(g, g->acr.hsbl_fw); + } nvgpu_dma_unmap_free(vm, &g->acr.acr_ucode); nvgpu_dma_unmap_free(vm, &g->acr.hsbl_ucode); @@ -1673,30 +1682,36 @@ int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu) nvgpu_log_fn(g, " "); err = nvgpu_mutex_init(&pmu->elpg_mutex); - if (err) + if (err) { return err; + } err = nvgpu_mutex_init(&pmu->pg_mutex); - if (err) + if (err) { goto fail_elpg; + } err = nvgpu_mutex_init(&pmu->isr_mutex); - if (err) + if (err) { goto fail_pg; + } err = nvgpu_mutex_init(&pmu->pmu_copy_lock); - if (err) + if (err) { goto fail_isr; + } err = nvgpu_mutex_init(&pmu->pmu_seq_lock); - if (err) + if (err) { goto fail_pmu_copy; + } pmu->remove_support = nvgpu_remove_pmu_support; err = nvgpu_init_pmu_fw_ver_ops(pmu); - if (err) + if (err) { goto fail_pmu_seq; + } goto exit; @@ -1723,8 +1738,9 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g) nvgpu_log_fn(g, " "); - if (pmu->fw) + if (pmu->fw) { return nvgpu_init_pmu_fw_support(pmu); + } pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0); if (!pmu->fw) { @@ -1740,8 +1756,9 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g) err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX, &pmu->ucode); - if (err) + if (err) { goto err_release_fw; + } nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image, pmu->desc->app_start_offset + pmu->desc->app_size); diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c index 37abb34c..39be07cc 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c @@ -154,33 +154,41 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, struct nvgpu_falcon_queue *queue; u32 in_size, out_size; - if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) + if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) { goto invalid_cmd; + } queue = &pmu->queue[queue_id]; - if (cmd->hdr.size < PMU_CMD_HDR_SIZE) + if (cmd->hdr.size < PMU_CMD_HDR_SIZE) { goto invalid_cmd; + } - if (cmd->hdr.size > (queue->size >> 1)) + if (cmd->hdr.size > (queue->size >> 1)) { goto invalid_cmd; + } - if (msg != NULL && msg->hdr.size < PMU_MSG_HDR_SIZE) + if (msg != NULL && msg->hdr.size < PMU_MSG_HDR_SIZE) { goto invalid_cmd; + } - if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id)) + if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id)) { goto invalid_cmd; + } - if (payload == NULL) + if (payload == NULL) { return true; + } if (payload->in.buf == NULL && payload->out.buf == NULL && - payload->rpc.prpc == NULL) + payload->rpc.prpc == NULL) { goto invalid_cmd; + } if 
((payload->in.buf != NULL && payload->in.size == 0) || (payload->out.buf != NULL && payload->out.size == 0) || - (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) + (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) { goto invalid_cmd; + } in_size = PMU_CMD_HDR_SIZE; if (payload->in.buf) { @@ -194,13 +202,15 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, out_size += g->ops.pmu_ver.get_pmu_allocation_struct_size(pmu); } - if (in_size > cmd->hdr.size || out_size > cmd->hdr.size) + if (in_size > cmd->hdr.size || out_size > cmd->hdr.size) { goto invalid_cmd; + } if ((payload->in.offset != 0 && payload->in.buf == NULL) || - (payload->out.offset != 0 && payload->out.buf == NULL)) + (payload->out.offset != 0 && payload->out.buf == NULL)) { goto invalid_cmd; + } return true; @@ -233,16 +243,18 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, do { err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size); - if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) + if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) { nvgpu_usleep_range(1000, 2000); - else + } else { break; + } } while (1); - if (err) + if (err) { nvgpu_err(g, "fail to write cmd to queue %d", queue_id); - else + } else { nvgpu_log_fn(g, "done"); + } return err; } @@ -281,10 +293,11 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd, dmem_alloc_offset); clean_up: - if (err) + if (err) { nvgpu_log_fn(g, "fail"); - else + } else { nvgpu_log_fn(g, "done"); + } return err; } @@ -299,25 +312,28 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, nvgpu_log_fn(g, " "); - if (payload) + if (payload) { seq->out_payload = payload->out.buf; + } if (payload && payload->in.offset != 0) { pv->set_pmu_allocation_ptr(pmu, &in, ((u8 *)&cmd->cmd + payload->in.offset)); - if (payload->in.buf != payload->out.buf) + if (payload->in.buf != payload->out.buf) { pv->pmu_allocation_set_dmem_size(pmu, in, (u16)payload->in.size); - else + } else { pv->pmu_allocation_set_dmem_size(pmu, in, (u16)max(payload->in.size, payload->out.size)); + } *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = nvgpu_alloc(&pmu->dmem, pv->pmu_allocation_get_dmem_size(pmu, in)); - if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) + if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) { goto clean_up; + } if (payload->in.fb_size != 0x0) { seq->in_mem = nvgpu_kzalloc(g, @@ -361,8 +377,9 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, pv->pmu_allocation_get_dmem_size(pmu, out)); if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, - out))) + out))) { goto clean_up; + } if (payload->out.fb_size != 0x0) { seq->out_mem = nvgpu_kzalloc(g, @@ -396,14 +413,17 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, clean_up: if (err) { nvgpu_log_fn(g, "fail"); - if (in) + if (in) { nvgpu_free(&pmu->dmem, pv->pmu_allocation_get_dmem_offset(pmu, in)); - if (out) + } + if (out) { nvgpu_free(&pmu->dmem, pv->pmu_allocation_get_dmem_offset(pmu, out)); - } else + } + } else { nvgpu_log_fn(g, "done"); + } return err; } @@ -420,23 +440,26 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, nvgpu_log_fn(g, " "); if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) { - if (!cmd) + if (!cmd) { nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__); - else if (!seq_desc) + } else if (!seq_desc) { nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__); - else + } else { nvgpu_warn(g, "%s(): PMU is not 
ready", __func__); + } WARN_ON(1); return -EINVAL; } - if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id)) + if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id)) { return -EINVAL; + } err = pmu_seq_acquire(pmu, &seq); - if (err) + if (err) { return err; + } cmd->hdr.seq_id = seq->id; @@ -452,19 +475,22 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, *seq_desc = seq->desc; - if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) + if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) { err = pmu_cmd_payload_extract_rpc(g, cmd, payload, seq); - else + } else { err = pmu_cmd_payload_extract(g, cmd, payload, seq); + } - if (err) + if (err) { goto clean_up; + } seq->state = PMU_SEQ_STATE_USED; err = pmu_write_cmd(pmu, cmd, queue_id, timeout); - if (err) + if (err) { seq->state = PMU_SEQ_STATE_PENDING; + } nvgpu_log_fn(g, "done"); @@ -516,18 +542,21 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu, pv->pmu_allocation_get_dmem_size(pmu, pv->get_pmu_seq_out_a_ptr(seq)), 0); } - } else + } else { seq->callback = NULL; + } if (pv->pmu_allocation_get_dmem_size(pmu, - pv->get_pmu_seq_in_a_ptr(seq)) != 0) + pv->get_pmu_seq_in_a_ptr(seq)) != 0) { nvgpu_free(&pmu->dmem, pv->pmu_allocation_get_dmem_offset(pmu, pv->get_pmu_seq_in_a_ptr(seq))); + } if (pv->pmu_allocation_get_dmem_size(pmu, - pv->get_pmu_seq_out_a_ptr(seq)) != 0) + pv->get_pmu_seq_out_a_ptr(seq)) != 0) { nvgpu_free(&pmu->dmem, pv->pmu_allocation_get_dmem_offset(pmu, pv->get_pmu_seq_out_a_ptr(seq))); + } if (seq->out_mem != NULL) { memset(pv->pmu_allocation_get_fb_addr(pmu, @@ -536,10 +565,11 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu, pv->get_pmu_seq_out_a_ptr(seq))); nvgpu_pmu_surface_free(g, seq->out_mem); - if (seq->out_mem != seq->in_mem) + if (seq->out_mem != seq->in_mem) { nvgpu_kfree(g, seq->out_mem); - else + } else { seq->out_mem = NULL; + } } if (seq->in_mem != NULL) { @@ -553,8 +583,9 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu, seq->in_mem = NULL; } - if (seq->callback) + if (seq->callback) { seq->callback(g, msg, seq->cb_params, seq->desc, ret); + } pmu_seq_release(pmu, seq); @@ -667,11 +698,13 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu) if (unlikely(!pmu->pmu_ready)) { nvgpu_pmu_process_init_msg(pmu, &msg); - if (g->ops.pmu.init_wpr_region != NULL) + if (g->ops.pmu.init_wpr_region != NULL) { g->ops.pmu.init_wpr_region(g); + } - if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { g->ops.pmu.pmu_init_perfmon(pmu); + } return 0; } @@ -687,10 +720,11 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu) msg.hdr.ctrl_flags &= ~PMU_CMD_FLAGS_PMU_MASK; - if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) + if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) { pmu_handle_event(pmu, &msg); - else + } else { pmu_response_handle(pmu, &msg); + } } return 0; @@ -706,11 +740,13 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); do { - if (*(u8 *)var == val) + if (*(u8 *)var == val) { return 0; + } - if (gk20a_pmu_is_interrupted(pmu)) + if (gk20a_pmu_is_interrupted(pmu)) { gk20a_pmu_isr(g); + } nvgpu_usleep_range(delay, delay * 2); delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); @@ -816,8 +852,9 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg, exit: /* free allocated memory */ - if (rpc_payload->is_mem_free_set) + if (rpc_payload->is_mem_free_set) { nvgpu_kfree(g, rpc_payload); + } } int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct 
nv_pmu_rpc_header *rpc, @@ -914,8 +951,9 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, exit: if (status) { - if (rpc_payload) + if (rpc_payload) { nvgpu_kfree(g, rpc_payload); + } } return status; diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c index 964b1488..73893f2c 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c @@ -65,8 +65,9 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu) struct pmu_payload payload; u32 seq; - if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { return 0; + } nvgpu_log_fn(g, " "); @@ -74,9 +75,10 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu) gk20a_pmu_init_perfmon_counter(g); - if (!pmu->sample_buffer) + if (!pmu->sample_buffer) { pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, 2 * sizeof(u16)); + } if (!pmu->sample_buffer) { nvgpu_err(g, "failed to allocate perfmon sample buffer"); return -ENOMEM; @@ -134,8 +136,9 @@ int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu) struct pmu_payload payload; u32 seq; - if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { return 0; + } /* PERFMON Start */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -183,8 +186,9 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu) struct pmu_cmd cmd; u32 seq; - if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { return 0; + } /* PERFMON Stop */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -250,8 +254,9 @@ void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, void nvgpu_pmu_reset_load_counters(struct gk20a *g) { - if (!g->power_on || gk20a_busy(g)) + if (!g->power_on || gk20a_busy(g)) { return; + } gk20a_pmu_reset_idle_counter(g, 2); gk20a_pmu_reset_idle_counter(g, 1); @@ -288,8 +293,9 @@ int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu, } /* restart sampling */ - if (pmu->perfmon_sampling_enabled) + if (pmu->perfmon_sampling_enabled) { return g->ops.pmu.pmu_perfmon_start_sampling(&(g->pmu)); + } return 0; } @@ -301,8 +307,9 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu) struct nv_pmu_rpc_struct_perfmon_init rpc; int status = 0; - if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { return 0; + } nvgpu_log_fn(g, " "); @@ -348,8 +355,9 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu) struct nv_pmu_rpc_struct_perfmon_start rpc; int status = 0; - if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { return 0; + } nvgpu_log_fn(g, " "); @@ -365,8 +373,9 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu) nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_START\n"); PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, START, &rpc, 0); - if (status) + if (status) { nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); + } return status; } @@ -377,8 +386,9 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu) struct nv_pmu_rpc_struct_perfmon_stop rpc; int status = 0; - if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { return 0; + } nvgpu_log_fn(g, " "); @@ -386,8 +396,9 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu) /* PERFMON Stop */ nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_STOP\n"); PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, STOP, &rpc, 0); - if (status) + if (status) { nvgpu_err(g, "Failed to 
execute RPC, status=0x%x", status); + } return status; } @@ -398,8 +409,9 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu) struct nv_pmu_rpc_struct_perfmon_query rpc; int status = 0; - if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) + if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { return 0; + } nvgpu_log_fn(g, " "); pmu->perfmon_query = 0; @@ -407,8 +419,9 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu) /* PERFMON QUERY */ nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_QUERY\n"); PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, QUERY, &rpc, 0); - if (status) + if (status) { nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); + } pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), &pmu->perfmon_query, 1); diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c index c8559fdb..4978708c 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c @@ -72,19 +72,21 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, case PMU_PG_ELPG_MSG_ALLOW_ACK: nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d", elpg_msg->engine_id); - if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) + if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { pmu->mscg_transition_state = PMU_ELPG_STAT_ON; - else + } else { pmu->elpg_stat = PMU_ELPG_STAT_ON; + } break; case PMU_PG_ELPG_MSG_DISALLOW_ACK: nvgpu_pmu_dbg(g, "DISALLOW is ack from PMU, eng - %d", elpg_msg->engine_id); - if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) + if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { pmu->mscg_transition_state = PMU_ELPG_STAT_OFF; - else + } else { pmu->elpg_stat = PMU_ELPG_STAT_OFF; + } if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) { if (g->ops.pmu.pmu_pg_engines_feature_list && @@ -97,9 +99,10 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED); /* make status visible */ nvgpu_smp_mb(); - } else + } else { nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED, true); + } } break; default: @@ -118,21 +121,25 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) g->ops.pmu.pmu_pg_engines_feature_list(g, PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { - if (g->ops.pmu.pmu_lpwr_enable_pg) + if (g->ops.pmu.pmu_lpwr_enable_pg) { status = g->ops.pmu.pmu_lpwr_enable_pg(g, true); - } else if (g->support_pmu && g->can_elpg) + } + } else if (g->support_pmu && g->can_elpg) { status = nvgpu_pmu_enable_elpg(g); + } } else if (enable_pg == false) { if (g->ops.pmu.pmu_pg_engines_feature_list && g->ops.pmu.pmu_pg_engines_feature_list(g, PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { - if (g->ops.pmu.pmu_lpwr_disable_pg) + if (g->ops.pmu.pmu_lpwr_disable_pg) { status = g->ops.pmu.pmu_lpwr_disable_pg(g, true); - } else if (g->support_pmu && g->can_elpg) + } + } else if (g->support_pmu && g->can_elpg) { status = nvgpu_pmu_disable_elpg(g); + } } return status; @@ -157,10 +164,11 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) /* no need to wait ack for ELPG enable but set * pending to sync with follow up ELPG disable */ - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING; - else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) + } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING; + } nvgpu_pmu_dbg(g, 
"cmd post PMU_PG_ELPG_CMD_ALLOW"); status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, @@ -183,14 +191,16 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g) nvgpu_log_fn(g, " "); - if (!g->support_pmu) + if (!g->support_pmu) { return ret; + } nvgpu_mutex_acquire(&pmu->elpg_mutex); pmu->elpg_refcnt++; - if (pmu->elpg_refcnt <= 0) + if (pmu->elpg_refcnt <= 0) { goto exit_unlock; + } /* something is not right if we end up in following code path */ if (unlikely(pmu->elpg_refcnt > 1)) { @@ -203,26 +213,31 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g) /* do NOT enable elpg until golden ctx is created, * which is related with the ctx that ELPG save and restore. */ - if (unlikely(!gr->ctx_vars.golden_image_initialized)) + if (unlikely(!gr->ctx_vars.golden_image_initialized)) { goto exit_unlock; + } /* return if ELPG is already on or on_pending or off_on_pending */ - if (pmu->elpg_stat != PMU_ELPG_STAT_OFF) + if (pmu->elpg_stat != PMU_ELPG_STAT_OFF) { goto exit_unlock; + } - if (g->ops.pmu.pmu_pg_supported_engines_list) + if (g->ops.pmu.pmu_pg_supported_engines_list) { pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g); + } for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS; pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE; pg_engine_id++) { if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS && - pmu->mscg_stat == PMU_MSCG_DISABLED) + pmu->mscg_stat == PMU_MSCG_DISABLED) { continue; + } - if (BIT(pg_engine_id) & pg_engine_id_list) + if (BIT(pg_engine_id) & pg_engine_id_list) { ret = pmu_enable_elpg_locked(g, pg_engine_id); + } } exit_unlock: @@ -243,11 +258,13 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g) nvgpu_log_fn(g, " "); - if (g->ops.pmu.pmu_pg_supported_engines_list) + if (g->ops.pmu.pmu_pg_supported_engines_list) { pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g); + } - if (!g->support_pmu) + if (!g->support_pmu) { return ret; + } nvgpu_mutex_acquire(&pmu->elpg_mutex); @@ -293,8 +310,9 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g) pg_engine_id++) { if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS && - pmu->mscg_stat == PMU_MSCG_DISABLED) + pmu->mscg_stat == PMU_MSCG_DISABLED) { continue; + } if (BIT(pg_engine_id) & pg_engine_id_list) { memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -305,16 +323,17 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g) cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id; cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW; - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING; - else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) + } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { pmu->mscg_transition_state = PMU_ELPG_STAT_OFF_PENDING; - - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) + } + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { ptr = &pmu->elpg_stat; - else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) + } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { ptr = &pmu->mscg_transition_state; + } nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW"); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, @@ -377,8 +396,9 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) gk20a_pmu_pg_idle_counter_config(g, pg_engine_id); - if (g->ops.pmu.pmu_pg_init_param) + if (g->ops.pmu.pmu_pg_init_param) { g->ops.pmu.pmu_pg_init_param(g, pg_engine_id); + } /* init ELPG */ memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -391,8 +411,9 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT"); err = 
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg, pmu, &seq, ~0); - if (err) + if (err) { nvgpu_err(g, "PMU_PG_ELPG_CMD_INIT cmd failed\n"); + } /* alloc dmem for powergating state log */ pmu->stat_dmem_offset[pg_engine_id] = 0; @@ -407,17 +428,19 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM"); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, pmu_handle_pg_stat_msg, pmu, &seq, ~0); - if (err) + if (err) { nvgpu_err(g, "PMU_PG_STAT_CMD_ALLOC_DMEM cmd failed\n"); + } /* disallow ELPG initially * PMU ucode requires a disallow cmd before allow cmd */ /* set for wait_event PMU_ELPG_STAT_OFF */ - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { pmu->elpg_stat = PMU_ELPG_STAT_OFF; - else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) + } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { pmu->mscg_transition_state = PMU_ELPG_STAT_OFF; + } memset(&cmd, 0, sizeof(struct pmu_cmd)); cmd.hdr.unit_id = PMU_UNIT_PG; cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd); @@ -428,11 +451,13 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW"); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg, pmu, &seq, ~0); - if (err) + if (err) { nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW cmd failed\n"); + } - if (g->ops.pmu.pmu_pg_set_sub_feature_mask) + if (g->ops.pmu.pmu_pg_set_sub_feature_mask) { g->ops.pmu.pmu_pg_set_sub_feature_mask(g, pg_engine_id); + } return 0; } @@ -445,8 +470,9 @@ int nvgpu_pmu_init_powergating(struct gk20a *g) nvgpu_log_fn(g, " "); - if (g->ops.pmu.pmu_pg_supported_engines_list) + if (g->ops.pmu.pmu_pg_supported_engines_list) { pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g); + } gk20a_gr_wait_initialized(g); @@ -455,15 +481,17 @@ int nvgpu_pmu_init_powergating(struct gk20a *g) pg_engine_id++) { if (BIT(pg_engine_id) & pg_engine_id_list) { - if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED) + if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED) { nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTING, false); + } pmu_pg_init_send(g, pg_engine_id); } } - if (g->ops.pmu.pmu_pg_param_post_init) + if (g->ops.pmu.pmu_pg_param_post_init) { g->ops.pmu.pmu_pg_param_post_init(g); + } return 0; } @@ -487,9 +515,9 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED); if ((!pmu->buf_loaded) && - (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF)) + (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF)) { nvgpu_err(g, "failed to load PGENG buffer"); - else { + } else { nvgpu_pmu_state_change(g, pmu->pmu_state, true); } } @@ -530,8 +558,9 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g) nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, pmu_handle_pg_buf_config_msg, pmu, &desc, ~0); - if (err) + if (err) { nvgpu_err(g, "cmd LOAD PMU_PGENG_GR_BUFFER_IDX_FECS failed\n"); + } return err; } @@ -570,8 +599,9 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g) nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, pmu_handle_pg_buf_config_msg, pmu, &desc, ~0); - if (err) + if (err) { nvgpu_err(g, "CMD LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC failed\n"); + } } /* stats 
*/ @@ -588,12 +618,14 @@ int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id, return 0; } - if (g->ops.pmu.pmu_pg_supported_engines_list) + if (g->ops.pmu.pmu_pg_supported_engines_list) { pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g); + } - if (BIT(pg_engine_id) & pg_engine_id_list) + if (BIT(pg_engine_id) & pg_engine_id_list) { g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id, pg_stat_data); + } return 0; } -- cgit v1.2.2
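For reference, MISRA C:2012 Rule 15.6 requires the body of every if/else (and of loop and switch statements) to be a compound statement, i.e. enclosed in braces, even when it is a single statement. Below is a minimal, self-contained sketch of the pattern this patch applies; the names (engine_reset, engine_enable, log_failure) are hypothetical stand-ins for illustration, not symbols from the nvgpu sources.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helpers standing in for driver callbacks. */
static bool engine_reset(void)
{
	return true;
}

static void log_failure(void)
{
	puts("engine enable failed");
}

/*
 * Non-compliant with Rule 15.6 (single-statement body, no braces):
 *
 *	if (!engine_reset())
 *		log_failure();
 *
 * Compliant form, matching the style introduced throughout this patch:
 */
static int engine_enable(bool *enabled)
{
	if (!engine_reset()) {
		log_failure();
		return -1;
	}

	*enabled = true;
	return 0;
}

int main(void)
{
	bool enabled = false;

	return engine_enable(&enabled);
}

Wrapping even one-line bodies in braces keeps a statement added later from silently falling outside the conditional, which is the maintenance hazard the rule targets.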