author     Sai Nikhil <snikhil@nvidia.com>    2018-08-22 01:12:37 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2018-08-22 20:30:19 -0400
commit     d28a401e6d872f7ea6abb0c5cfc8f63e0235fe21 (patch)
tree       66b5c4ad42135dbd9f5535fa3c86f8ecdd1a067c /drivers/gpu/nvgpu/common
parent     650171566bff59e9eb372f213fdce4dfbb6da5bd (diff)
gpu: nvgpu: common: fix MISRA 10.4 violations
MISRA Rule 10.4 only allows arithmetic operations on operands of the same essential type category. Add "U" at the end of integer literals so that both operands have the same essential type when an arithmetic operation is performed.

This fixes violations where an arithmetic operation is performed on signed and unsigned int types.

Jira NVGPU-992

Change-Id: Iab512139a025e035ec82a9dd74245bcf1f3869fb
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1789425
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
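As a minimal standalone sketch of the pattern this change applies (not taken from the nvgpu sources; the function and variable names below are hypothetical), a plain decimal literal is essentially signed, so mixing it with an unsigned operand trips Rule 10.4, and the "U" suffix keeps every operand in the unsigned essential type category:

#include <stdint.h>

/* Hypothetical helper, for illustration only. */
static uint32_t advance_put(uint32_t put, uint32_t entry_num)
{
	/* Non-compliant: the literals 1 and 1 are essentially signed,
	 * while put and entry_num are essentially unsigned:
	 *     put = (put + 1) & (entry_num - 1);
	 */

	/* Compliant: the U suffix makes both operands of each operation
	 * essentially unsigned. */
	return (put + 1U) & (entry_num - 1U);
}

The same reasoning drives the hunks below: loop counters start at 0U, masks gain a U suffix (0xFFU, 0xFFFFFFU), and comparisons against unsigned values test != 0U.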
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/submit.c        8
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu.c            6
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_fw.c        22
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_ipc.c       26
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c    8
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_pg.c        22
6 files changed, 46 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index d034f2d3..7f2f677d 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -179,7 +179,7 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
 	trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0,
 			(u32 *)cmd->mem->cpu_va + cmd->off);
 
-	c->gpfifo.put = (c->gpfifo.put + 1) & (c->gpfifo.entry_num - 1);
+	c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U);
 }
 
 static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,
@@ -286,7 +286,7 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
 	trace_write_pushbuffers(c, num_entries);
 
 	c->gpfifo.put = (c->gpfifo.put + num_entries) &
-		(c->gpfifo.entry_num - 1);
+		(c->gpfifo.entry_num - 1U);
 
 	return 0;
 }
@@ -307,7 +307,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	struct channel_gk20a_job *job = NULL;
 	/* we might need two extra gpfifo entries - one for pre fence
 	 * and one for post fence. */
-	const int extra_entries = 2;
+	const u32 extra_entries = 2U;
 	bool skip_buffer_refcounting = (flags &
 			NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING);
 	int err = 0;
@@ -330,7 +330,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * Kernel can insert gpfifo entries before and after user gpfifos.
 	 * So, add extra_entries in user request. Also, HW with fifo size N
 	 * can accept only N-1 entreis and so the below condition */
-	if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) {
+	if (c->gpfifo.entry_num - 1U < num_entries + extra_entries) {
 		nvgpu_err(g, "not enough gpfifo space allocated");
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index d72629b5..86e56d9e 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -512,7 +512,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_pg_stats_data pg_stat_data = { 0 };
-	int i;
+	u32 i;
 
 	nvgpu_log_fn(g, " ");
 
@@ -539,7 +539,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 	pmu->isr_enabled = false;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
-	for (i = 0; i < PMU_QUEUE_COUNT; i++) {
+	for (i = 0U; i < PMU_QUEUE_COUNT; i++) {
 		nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]);
 	}
 
@@ -559,7 +559,7 @@ void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
 {
 	fb->address.lo = u64_lo32(mem->gpu_va);
 	fb->address.hi = u64_hi32(mem->gpu_va);
-	fb->params = ((u32)mem->size & 0xFFFFFF);
+	fb->params = ((u32)mem->size & 0xFFFFFFU);
 	fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24);
 }
 
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index 87fd2f2a..bf54e0d6 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -37,12 +37,12 @@
 #define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin"
 
 /* PMU F/W version */
-#define APP_VERSION_GPU_NEXT 24313845
-#define APP_VERSION_GV11B 24379482
-#define APP_VERSION_GV10X 23647491
-#define APP_VERSION_GP10X 24076634
-#define APP_VERSION_GP10B 23782727
-#define APP_VERSION_GM20B 20490253
+#define APP_VERSION_GPU_NEXT 24313845U
+#define APP_VERSION_GV11B 24379482U
+#define APP_VERSION_GV10X 23647491U
+#define APP_VERSION_GP10X 24076634U
+#define APP_VERSION_GP10B 23782727U
+#define APP_VERSION_GM20B 20490253U
 
 /* PMU version specific functions */
 static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu)
@@ -82,7 +82,7 @@ static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid)
 
 static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu)
 {
-	pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100;
+	pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U;
 	pmu->args_v4.dma_addr.dma_base1 = 0;
 	pmu->args_v4.dma_addr.dma_offset = 0;
 }
@@ -182,7 +182,7 @@ static void set_pmu_cmdline_args_falctracesize_v3(
 
 static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu)
 {
-	pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100;
+	pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U;
 }
 
 static void set_pmu_cmdline_args_falctracedmaidx_v3(
@@ -882,7 +882,7 @@ static void get_pmu_init_msg_pmu_queue_params_v4(
 
 	queue->index = init->queue_index[tmp_id];
 	queue->size = init->queue_size[tmp_id];
-	if (tmp_id != 0) {
+	if (tmp_id != 0U) {
 		for (i = 0 ; i < tmp_id; i++) {
 			current_ptr += init->queue_size[i];
 		}
@@ -911,7 +911,7 @@ static void get_pmu_init_msg_pmu_queue_params_v5(
 
 	queue->index = init->queue_index[tmp_id];
 	queue->size = init->queue_size[tmp_id];
-	if (tmp_id != 0) {
+	if (tmp_id != 0U) {
 		for (i = 0 ; i < tmp_id; i++) {
 			current_ptr += init->queue_size[i];
 		}
@@ -940,7 +940,7 @@ static void get_pmu_init_msg_pmu_queue_params_v3(
 	}
 	queue->index = init->queue_index[tmp_id];
 	queue->size = init->queue_size[tmp_id];
-	if (tmp_id != 0) {
+	if (tmp_id != 0U) {
 		for (i = 0 ; i < tmp_id; i++) {
 			current_ptr += init->queue_size[i];
 		}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 39be07cc..68654a70 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -184,9 +184,9 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 		goto invalid_cmd;
 	}
 
-	if ((payload->in.buf != NULL && payload->in.size == 0) ||
-	    (payload->out.buf != NULL && payload->out.size == 0) ||
-	    (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) {
+	if ((payload->in.buf != NULL && payload->in.size == 0U) ||
+	    (payload->out.buf != NULL && payload->out.size == 0U) ||
+	    (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0U)) {
 		goto invalid_cmd;
 	}
 
@@ -207,8 +207,8 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	}
 
 
-	if ((payload->in.offset != 0 && payload->in.buf == NULL) ||
-	    (payload->out.offset != 0 && payload->out.buf == NULL)) {
+	if ((payload->in.offset != 0U && payload->in.buf == NULL) ||
+	    (payload->out.offset != 0U && payload->out.buf == NULL)) {
 		goto invalid_cmd;
 	}
 
@@ -316,7 +316,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 		seq->out_payload = payload->out.buf;
 	}
 
-	if (payload && payload->in.offset != 0) {
+	if (payload && payload->in.offset != 0U) {
 		pv->set_pmu_allocation_ptr(pmu, &in,
 			((u8 *)&cmd->cmd + payload->in.offset));
 
@@ -335,7 +335,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 			goto clean_up;
 		}
 
-		if (payload->in.fb_size != 0x0) {
+		if (payload->in.fb_size != 0x0U) {
 			seq->in_mem = nvgpu_kzalloc(g,
 				sizeof(struct nvgpu_mem));
 			if (!seq->in_mem) {
@@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 			pv->pmu_allocation_get_dmem_offset(pmu, in));
 	}
 
-	if (payload && payload->out.offset != 0) {
+	if (payload && payload->out.offset != 0U) {
 		pv->set_pmu_allocation_ptr(pmu, &out,
 			((u8 *)&cmd->cmd + payload->out.offset));
 		pv->pmu_allocation_set_dmem_size(pmu, out,
@@ -381,7 +381,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 			goto clean_up;
 		}
 
-		if (payload->out.fb_size != 0x0) {
+		if (payload->out.fb_size != 0x0U) {
 			seq->out_mem = nvgpu_kzalloc(g,
 				sizeof(struct nvgpu_mem));
 			if (!seq->out_mem) {
@@ -534,7 +534,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 		}
 	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
+		pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
 		nvgpu_flcn_copy_from_dmem(pmu->flcn,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)),
@@ -546,13 +546,13 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 		seq->callback = NULL;
 	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_in_a_ptr(seq)) != 0) {
+		pv->get_pmu_seq_in_a_ptr(seq)) != 0U) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 				pv->get_pmu_seq_in_a_ptr(seq)));
 	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
+		pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)));
@@ -748,7 +748,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 			gk20a_pmu_isr(g);
 		}
 
-		nvgpu_usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2U);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired(&timeout));
 
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
index 73893f2c..5d736591 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
@@ -77,7 +77,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 
 	if (!pmu->sample_buffer) {
 		pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
-						2 * sizeof(u16));
+						2U * sizeof(u16));
 	}
 	if (!pmu->sample_buffer) {
 		nvgpu_err(g, "failed to allocate perfmon sample buffer");
@@ -215,7 +215,7 @@ int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load)
 int nvgpu_pmu_load_update(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
-	u16 load = 0;
+	u32 load = 0;
 
 	if (!pmu->perfmon_ready) {
 		pmu->load_shadow = 0;
@@ -231,8 +231,8 @@ int nvgpu_pmu_load_update(struct gk20a *g)
 			(u8 *)&load, 2 * 1, 0);
 	}
 
-	pmu->load_shadow = load / 10;
-	pmu->load_avg = (((9*pmu->load_avg) + pmu->load_shadow) / 10);
+	pmu->load_shadow = load / 10U;
+	pmu->load_avg = (((9U*pmu->load_avg) + pmu->load_shadow) / 10U);
 
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index 4978708c..76ed0621 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -34,17 +34,17 @@
  * ON => OFF is always synchronized
  */
 /* elpg is off */
-#define PMU_ELPG_STAT_OFF 0
+#define PMU_ELPG_STAT_OFF 0U
 /* elpg is on */
-#define PMU_ELPG_STAT_ON 1
+#define PMU_ELPG_STAT_ON 1U
 /* elpg is off, ALLOW cmd has been sent, wait for ack */
-#define PMU_ELPG_STAT_ON_PENDING 2
+#define PMU_ELPG_STAT_ON_PENDING 2U
 /* elpg is on, DISALLOW cmd has been sent, wait for ack */
-#define PMU_ELPG_STAT_OFF_PENDING 3
+#define PMU_ELPG_STAT_OFF_PENDING 3U
 /* elpg is off, caller has requested on, but ALLOW
  * cmd hasn't been sent due to ENABLE_ALLOW delay
  */
-#define PMU_ELPG_STAT_OFF_ON_PENDING 4
+#define PMU_ELPG_STAT_OFF_ON_PENDING 4U
 
 #define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
 #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
@@ -58,7 +58,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 
 	nvgpu_log_fn(g, " ");
 
-	if (status != 0) {
+	if (status != 0U) {
 		nvgpu_err(g, "ELPG cmd aborted");
 		/* TBD: disable ELPG */
 		return;
@@ -174,7 +174,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
 	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
 		PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
 		pmu, &seq, ~0);
-	WARN_ON(status != 0);
+	WARN_ON(status != 0U);
 
 	nvgpu_log_fn(g, "done");
 	return 0;
@@ -368,7 +368,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
 
 	nvgpu_log_fn(g, " ");
 
-	if (status != 0) {
+	if (status != 0U) {
 		nvgpu_err(g, "ELPG cmd aborted");
 		/* TBD: disable ELPG */
 		return;
@@ -507,7 +507,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 
 	nvgpu_pmu_dbg(g,
 		"reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
-	if (status != 0) {
+	if (status != 0U) {
 		nvgpu_err(g, "PGENG cmd aborted");
 		/* TBD: disable ELPG */
 		return;
@@ -549,7 +549,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
 	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
 		u64_lo32(pmu->pg_buf.gpu_va));
 	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-		(u8)(pmu->pg_buf.gpu_va & 0xFF));
+		(u8)(pmu->pg_buf.gpu_va & 0xFFU));
 	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
 		PMU_DMAIDX_VIRT);
 
@@ -590,7 +590,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
 	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
 		u64_lo32(pmu->seq_buf.gpu_va));
 	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-		(u8)(pmu->seq_buf.gpu_va & 0xFF));
+		(u8)(pmu->seq_buf.gpu_va & 0xFFU));
 	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
 		PMU_DMAIDX_VIRT);
 