From d28a401e6d872f7ea6abb0c5cfc8f63e0235fe21 Mon Sep 17 00:00:00 2001
From: Sai Nikhil
Date: Wed, 22 Aug 2018 10:42:37 +0530
Subject: gpu: nvgpu: common: fix MISRA 10.4 violations

MISRA Rule 10.4 only allows arithmetic operations on operands of the
same essential type category. Add a "U" suffix to integer literals so
that both operands have the same essential type when an arithmetic
operation is performed.

This fixes violations where an arithmetic operation is performed on
signed and unsigned int types.

Jira NVGPU-992

Change-Id: Iab512139a025e035ec82a9dd74245bcf1f3869fb
Signed-off-by: Sai Nikhil
Reviewed-on: https://git-master.nvidia.com/r/1789425
Reviewed-by: svc-misra-checker
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza
Reviewed-by: Alex Waterman
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/fifo/submit.c     |  8 ++++----
 drivers/gpu/nvgpu/common/pmu/pmu.c         |  6 +++---
 drivers/gpu/nvgpu/common/pmu/pmu_fw.c      | 22 +++++++++++-----------
 drivers/gpu/nvgpu/common/pmu/pmu_ipc.c     | 26 +++++++++++++-------------
 drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c |  8 ++++----
 drivers/gpu/nvgpu/common/pmu/pmu_pg.c      | 22 +++++++++++-----------
 6 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index d034f2d3..7f2f677d 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -179,7 +179,7 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
         trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0,
                 (u32 *)cmd->mem->cpu_va + cmd->off);
 
-        c->gpfifo.put = (c->gpfifo.put + 1) & (c->gpfifo.entry_num - 1);
+        c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U);
 }
 
 static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,
@@ -286,7 +286,7 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
         trace_write_pushbuffers(c, num_entries);
 
         c->gpfifo.put = (c->gpfifo.put + num_entries) &
-                (c->gpfifo.entry_num - 1);
+                (c->gpfifo.entry_num - 1U);
 
         return 0;
 }
@@ -307,7 +307,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
         struct channel_gk20a_job *job = NULL;
         /* we might need two extra gpfifo entries - one for pre fence
          * and one for post fence. */
-        const int extra_entries = 2;
+        const u32 extra_entries = 2U;
         bool skip_buffer_refcounting = (flags &
                         NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING);
         int err = 0;
@@ -330,7 +330,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
          * Kernel can insert gpfifo entries before and after user gpfifos.
          * So, add extra_entries in user request. Also, HW with fifo size N
          * can accept only N-1 entries and so the below condition */
-        if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) {
+        if (c->gpfifo.entry_num - 1U < num_entries + extra_entries) {
                 nvgpu_err(g, "not enough gpfifo space allocated");
                 return -ENOMEM;
         }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index d72629b5..86e56d9e 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -512,7 +512,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 {
         struct nvgpu_pmu *pmu = &g->pmu;
         struct pmu_pg_stats_data pg_stat_data = { 0 };
-        int i;
+        u32 i;
 
         nvgpu_log_fn(g, " ");
 
@@ -539,7 +539,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
         pmu->isr_enabled = false;
         nvgpu_mutex_release(&pmu->isr_mutex);
 
-        for (i = 0; i < PMU_QUEUE_COUNT; i++) {
+        for (i = 0U; i < PMU_QUEUE_COUNT; i++) {
                 nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]);
         }
 
@@ -559,7 +559,7 @@ void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
 {
         fb->address.lo = u64_lo32(mem->gpu_va);
         fb->address.hi = u64_hi32(mem->gpu_va);
-        fb->params = ((u32)mem->size & 0xFFFFFF);
+        fb->params = ((u32)mem->size & 0xFFFFFFU);
         fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24);
 }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index 87fd2f2a..bf54e0d6 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -37,12 +37,12 @@
 #define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin"
 
 /* PMU F/W version */
-#define APP_VERSION_GPU_NEXT 24313845
-#define APP_VERSION_GV11B 24379482
-#define APP_VERSION_GV10X 23647491
-#define APP_VERSION_GP10X 24076634
-#define APP_VERSION_GP10B 23782727
-#define APP_VERSION_GM20B 20490253
+#define APP_VERSION_GPU_NEXT 24313845U
+#define APP_VERSION_GV11B 24379482U
+#define APP_VERSION_GV10X 23647491U
+#define APP_VERSION_GP10X 24076634U
+#define APP_VERSION_GP10B 23782727U
+#define APP_VERSION_GM20B 20490253U
 
 /* PMU version specific functions */
 static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu)
@@ -82,7 +82,7 @@ static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid)
 
 static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu)
 {
-        pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100;
+        pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U;
         pmu->args_v4.dma_addr.dma_base1 = 0;
         pmu->args_v4.dma_addr.dma_offset = 0;
 }
@@ -182,7 +182,7 @@ static void set_pmu_cmdline_args_falctracesize_v3(
 
 static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu)
 {
-        pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100;
+        pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U;
 }
 
 static void set_pmu_cmdline_args_falctracedmaidx_v3(
@@ -882,7 +882,7 @@ static void get_pmu_init_msg_pmu_queue_params_v4(
         queue->index = init->queue_index[tmp_id];
         queue->size = init->queue_size[tmp_id];
 
-        if (tmp_id != 0) {
+        if (tmp_id != 0U) {
                 for (i = 0 ; i < tmp_id; i++) {
                         current_ptr += init->queue_size[i];
                 }
@@ -911,7 +911,7 @@ static void get_pmu_init_msg_pmu_queue_params_v5(
         queue->index = init->queue_index[tmp_id];
         queue->size = init->queue_size[tmp_id];
 
-        if (tmp_id != 0) {
+        if (tmp_id != 0U) {
                 for (i = 0 ; i < tmp_id; i++) {
                         current_ptr += init->queue_size[i];
                 }
@@ -940,7 +940,7 @@ static void get_pmu_init_msg_pmu_queue_params_v3(
         }
         queue->index = init->queue_index[tmp_id];
         queue->size = init->queue_size[tmp_id];
-        if (tmp_id != 0) {
+        if (tmp_id != 0U) {
                 for (i = 0 ; i < tmp_id; i++) {
                         current_ptr += init->queue_size[i];
                 }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 39be07cc..68654a70 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -184,9 +184,9 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
                 goto invalid_cmd;
         }
 
-        if ((payload->in.buf != NULL && payload->in.size == 0) ||
-                (payload->out.buf != NULL && payload->out.size == 0) ||
-                (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) {
+        if ((payload->in.buf != NULL && payload->in.size == 0U) ||
+                (payload->out.buf != NULL && payload->out.size == 0U) ||
+                (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0U)) {
                 goto invalid_cmd;
         }
 
@@ -207,8 +207,8 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 
         }
 
-        if ((payload->in.offset != 0 && payload->in.buf == NULL) ||
-                (payload->out.offset != 0 && payload->out.buf == NULL)) {
+        if ((payload->in.offset != 0U && payload->in.buf == NULL) ||
+                (payload->out.offset != 0U && payload->out.buf == NULL)) {
                 goto invalid_cmd;
         }
 
@@ -316,7 +316,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
                 seq->out_payload = payload->out.buf;
         }
 
-        if (payload && payload->in.offset != 0) {
+        if (payload && payload->in.offset != 0U) {
                 pv->set_pmu_allocation_ptr(pmu, &in,
                         ((u8 *)&cmd->cmd + payload->in.offset));
 
@@ -335,7 +335,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
                         goto clean_up;
                 }
 
-                if (payload->in.fb_size != 0x0) {
+                if (payload->in.fb_size != 0x0U) {
                         seq->in_mem = nvgpu_kzalloc(g,
                                 sizeof(struct nvgpu_mem));
                         if (!seq->in_mem) {
@@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
                         pv->pmu_allocation_get_dmem_offset(pmu, in));
         }
 
-        if (payload && payload->out.offset != 0) {
+        if (payload && payload->out.offset != 0U) {
                 pv->set_pmu_allocation_ptr(pmu, &out,
                         ((u8 *)&cmd->cmd + payload->out.offset));
                 pv->pmu_allocation_set_dmem_size(pmu, out,
@@ -381,7 +381,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
                         goto clean_up;
                 }
 
-                if (payload->out.fb_size != 0x0) {
+                if (payload->out.fb_size != 0x0U) {
                         seq->out_mem = nvgpu_kzalloc(g,
                                 sizeof(struct nvgpu_mem));
                         if (!seq->out_mem) {
@@ -534,7 +534,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
                         }
                 }
                 if (pv->pmu_allocation_get_dmem_size(pmu,
-                        pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
+                        pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
                         nvgpu_flcn_copy_from_dmem(pmu->flcn,
                                 pv->pmu_allocation_get_dmem_offset(pmu,
                                         pv->get_pmu_seq_out_a_ptr(seq)),
@@ -546,13 +546,13 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
                 seq->callback = NULL;
         }
         if (pv->pmu_allocation_get_dmem_size(pmu,
-                pv->get_pmu_seq_in_a_ptr(seq)) != 0) {
+                pv->get_pmu_seq_in_a_ptr(seq)) != 0U) {
                 nvgpu_free(&pmu->dmem,
                         pv->pmu_allocation_get_dmem_offset(pmu,
                                 pv->get_pmu_seq_in_a_ptr(seq)));
         }
         if (pv->pmu_allocation_get_dmem_size(pmu,
-                pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
+                pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
                 nvgpu_free(&pmu->dmem,
                         pv->pmu_allocation_get_dmem_offset(pmu,
                                 pv->get_pmu_seq_out_a_ptr(seq)));
@@ -748,7 +748,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
                         gk20a_pmu_isr(g);
                 }
 
-                nvgpu_usleep_range(delay, delay * 2);
+                nvgpu_usleep_range(delay, delay * 2U);
                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
         } while (!nvgpu_timeout_expired(&timeout));
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
index 73893f2c..5d736591 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
@@ -77,7 +77,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 
         if (!pmu->sample_buffer) {
                 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
-                        2 * sizeof(u16));
+                        2U * sizeof(u16));
         }
         if (!pmu->sample_buffer) {
                 nvgpu_err(g, "failed to allocate perfmon sample buffer");
@@ -215,7 +215,7 @@ int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load)
 int nvgpu_pmu_load_update(struct gk20a *g)
 {
         struct nvgpu_pmu *pmu = &g->pmu;
-        u16 load = 0;
+        u32 load = 0;
 
         if (!pmu->perfmon_ready) {
                 pmu->load_shadow = 0;
@@ -231,8 +231,8 @@ int nvgpu_pmu_load_update(struct gk20a *g)
                         (u8 *)&load, 2 * 1, 0);
         }
 
-        pmu->load_shadow = load / 10;
-        pmu->load_avg = (((9*pmu->load_avg) + pmu->load_shadow) / 10);
+        pmu->load_shadow = load / 10U;
+        pmu->load_avg = (((9U*pmu->load_avg) + pmu->load_shadow) / 10U);
 
         return 0;
 }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index 4978708c..76ed0621 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -34,17 +34,17 @@
  * ON => OFF is always synchronized
  */
 /* elpg is off */
-#define PMU_ELPG_STAT_OFF 0
+#define PMU_ELPG_STAT_OFF 0U
 /* elpg is on */
-#define PMU_ELPG_STAT_ON 1
+#define PMU_ELPG_STAT_ON 1U
 /* elpg is off, ALLOW cmd has been sent, wait for ack */
-#define PMU_ELPG_STAT_ON_PENDING 2
+#define PMU_ELPG_STAT_ON_PENDING 2U
 /* elpg is on, DISALLOW cmd has been sent, wait for ack */
-#define PMU_ELPG_STAT_OFF_PENDING 3
+#define PMU_ELPG_STAT_OFF_PENDING 3U
 /* elpg is off, caller has requested on, but ALLOW
  * cmd hasn't been sent due to ENABLE_ALLOW delay
  */
-#define PMU_ELPG_STAT_OFF_ON_PENDING 4
+#define PMU_ELPG_STAT_OFF_ON_PENDING 4U
 
 #define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
 #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
@@ -58,7 +58,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 
         nvgpu_log_fn(g, " ");
 
-        if (status != 0) {
+        if (status != 0U) {
                 nvgpu_err(g, "ELPG cmd aborted");
                 /* TBD: disable ELPG */
                 return;
@@ -174,7 +174,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
         status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                         pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
 
-        WARN_ON(status != 0);
+        WARN_ON(status != 0U);
 
         nvgpu_log_fn(g, "done");
         return 0;
@@ -368,7 +368,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
 
         nvgpu_log_fn(g, " ");
 
-        if (status != 0) {
+        if (status != 0U) {
                 nvgpu_err(g, "ELPG cmd aborted");
                 /* TBD: disable ELPG */
                 return;
@@ -507,7 +507,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
         nvgpu_pmu_dbg(g,
                 "reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
 
-        if (status != 0) {
+        if (status != 0U) {
                 nvgpu_err(g, "PGENG cmd aborted");
                 /* TBD: disable ELPG */
                 return;
@@ -549,7 +549,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
         g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
                 u64_lo32(pmu->pg_buf.gpu_va));
         g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-                (u8)(pmu->pg_buf.gpu_va & 0xFF));
+                (u8)(pmu->pg_buf.gpu_va & 0xFFU));
         g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
                 PMU_DMAIDX_VIRT);
 
@@ -590,7 +590,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
         g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
                 u64_lo32(pmu->seq_buf.gpu_va));
         g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-                (u8)(pmu->seq_buf.gpu_va & 0xFF));
+                (u8)(pmu->seq_buf.gpu_va & 0xFFU));
         g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
                 PMU_DMAIDX_VIRT);
--
cgit v1.2.2
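
For readers who want the rule itself rather than its 46 instances, here is a
minimal, self-contained C sketch of the pattern this patch applies. The struct
and function names below are hypothetical stand-ins, not nvgpu code. MISRA
C:2012 Rule 10.4 requires both operands of an arithmetic or bitwise operator
to share an essential type category; combining an unsigned field with a plain
literal such as 1 (which is essentially signed) violates the rule, and the "U"
suffix is the fix.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the u32 ring-buffer fields in submit.c. */
struct gpfifo {
        uint32_t put;        /* producer index */
        uint32_t entry_num;  /* ring size; assumed to be a power of two */
};

/*
 * Non-compliant (MISRA 10.4): 'put' and 'entry_num' are unsigned, but
 * a plain 1 is essentially signed:
 *
 *         fifo->put = (fifo->put + 1) & (fifo->entry_num - 1);
 *
 * Compliant: suffix the literals with U so both operands of every
 * operator are essentially unsigned.
 */
static void gpfifo_advance(struct gpfifo *fifo)
{
        fifo->put = (fifo->put + 1U) & (fifo->entry_num - 1U);
}

int main(void)
{
        struct gpfifo fifo = { .put = 7U, .entry_num = 8U };

        gpfifo_advance(&fifo);          /* 7 + 1 wraps to 0 via the mask */
        printf("put = %u\n", fifo.put); /* prints "put = 0" */
        return 0;
}

The mask form of the wrap-around only works because entry_num is a power of
two. The related capacity check in submit.c compares against entry_num - 1U
because, as the hunk's comment says, hardware with a fifo of size N accepts at
most N-1 entries; this matches the usual ring-buffer convention of keeping one
slot free so that put == get can only mean "empty".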
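
One hunk does slightly more than literal-suffix cleanup: in pmu_perfmon.c the
local "load" is widened from u16 to u32, and the update
pmu->load_avg = (((9U*pmu->load_avg) + pmu->load_shadow) / 10U) is an integer
exponential moving average with a smoothing weight of 1/10. A standalone
sketch, with hypothetical names, of how that recurrence behaves in unsigned
arithmetic:

#include <stdint.h>
#include <stdio.h>

/*
 * Integer exponential moving average in the style of
 * nvgpu_pmu_load_update(): new = (9*old + sample) / 10, all unsigned.
 */
static uint32_t load_ema(uint32_t avg, uint32_t sample)
{
        return ((9U * avg) + sample) / 10U;
}

int main(void)
{
        uint32_t avg = 0U;
        int i;

        /*
         * Feed a constant sample of 50: truncation in the unsigned
         * division makes every value from 41 to 50 a fixed point of
         * the recurrence, so rising from 0 the average settles at 41
         * rather than reaching 50 exactly.
         */
        for (i = 0; i < 30; i++) {
                avg = load_ema(avg, 50U);
        }
        printf("avg = %u\n", avg); /* prints "avg = 41" */
        return 0;
}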